Merge "msm: camera: utils: Fix OOB read due to wrong kmd cmd buffer index"
diff --git a/AndroidKernel.mk b/AndroidKernel.mk
index 09e44e0..46588de 100644
--- a/AndroidKernel.mk
+++ b/AndroidKernel.mk
@@ -1,10 +1,6 @@
 #Android makefile to build kernel as a part of Android Build
 PERL		= perl
 
-ifneq ($(wildcard prebuilts/build-tools/$(HOST_PREBUILT_TAG)/bin/make),)
-  MAKE := prebuilts/build-tools/$(HOST_PREBUILT_TAG)/bin/make
-endif
-
 KERNEL_TARGET := $(strip $(INSTALLED_KERNEL_TARGET))
 ifeq ($(KERNEL_TARGET),)
 INSTALLED_KERNEL_TARGET := $(PRODUCT_OUT)/kernel
diff --git a/Documentation/ABI/testing/procfs-concurrent_time b/Documentation/ABI/testing/procfs-concurrent_time
new file mode 100644
index 0000000..55b4142
--- /dev/null
+++ b/Documentation/ABI/testing/procfs-concurrent_time
@@ -0,0 +1,16 @@
+What:		/proc/uid_concurrent_active_time
+Date:		December 2018
+Contact:	Connor O'Brien <connoro@google.com>
+Description:
+	The /proc/uid_concurrent_active_time file displays aggregated cputime
+	numbers for each uid, broken down by the total number of cores that were
+	active while the uid's task was running.
+
+What:		/proc/uid_concurrent_policy_time
+Date:		December 2018
+Contact:	Connor O'Brien <connoro@google.com>
+Description:
+	The /proc/uid_concurrent_policy_time file displays aggregated cputime
+	numbers for each uid, broken down based on the cpufreq policy
+	of the core used by the uid's task and the number of cores associated
+	with that policy that were active while the uid's task was running.
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index c1513c7..14b2bf2 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -98,3 +98,42 @@
 		The backing_dev file is read-write and set up backing
 		device for zram to write incompressible pages.
 		For using, user should enable CONFIG_ZRAM_WRITEBACK.
+
+What:		/sys/block/zram<id>/idle
+Date:		November 2018
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The idle file is write-only and marks zram slots as idle.
+		If the system has debugfs mounted, the user can see which
+		slots are idle via /sys/kernel/debug/zram/zram<id>/block_state
+
+What:		/sys/block/zram<id>/writeback
+Date:		November 2018
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The writeback file is write-only and triggers idle and/or
+		huge page writeback to the backing device.
+
+What:		/sys/block/zram<id>/bd_stat
+Date:		November 2018
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The bd_stat file is read-only and represents the backing device's
+		statistics (bd_count, bd_reads, bd_writes) in a format
+		similar to block layer statistics file format.
+
+What:		/sys/block/zram<id>/writeback_limit_enable
+Date:		November 2018
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The writeback_limit_enable file is read-write and specifies
+		whether the writeback_limit feature is enabled. "1" means
+		enable the feature. The initial state is "0" (no limit).
+
+What:		/sys/block/zram<id>/writeback_limit
+Date:		November 2018
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The writeback_limit file is read-write and specifies the maximum
+		amount of writeback ZRAM can do. The limit can be changed
+		at runtime.
diff --git a/Documentation/ABI/testing/sysfs-bus-i3c b/Documentation/ABI/testing/sysfs-bus-i3c
new file mode 100644
index 0000000..d7c7485
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i3c
@@ -0,0 +1,146 @@
+What:		/sys/bus/i3c/devices/i3c-<bus-id>
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		An I3C bus. This directory will contain one sub-directory per
+		I3C device present on the bus.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/current_master
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		Expose the master that owns the bus (<bus-id>-<master-pid>) at
+		the time this file is read. Note that bus ownership can change
+		over time, so there's no guarantee that when the read() call
+		returns, the value returned is still valid.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/mode
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		I3C bus mode. Can be "pure", "mixed-fast" or "mixed-slow". See
+		the I3C specification for a detailed description of what each
+		of these modes implies.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/i3c_scl_frequency
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		The frequency (expressed in Hz) of the SCL signal when
+		operating in I3C SDR mode.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/i2c_scl_frequency
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		The frequency (expressed in Hz) of the SCL signal when
+		operating in I2C mode.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/dynamic_address
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		Dynamic address assigned to the master controller. This
+		address may change if the bus is re-initialized.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/bcr
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		BCR stands for Bus Characteristics Register and expresses the
+		device capabilities in terms of speed, maximum read/write
+		length, etc. See the I3C specification for more details.
+		This entry describes the BCR of the master controller driving
+		the bus.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/dcr
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		DCR stands for Device Characteristics Register and expresses
+		the device capabilities in terms of exposed features. See the I3C
+		specification for more details.
+		This entry describes the DCR of the master controller driving
+		the bus.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/pid
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		PID stands for Provisional ID and is used to uniquely identify
+		a device on a bus. This PID contains information about the
+		vendor, the part and an instance ID so that several devices of
+		the same type can be connected on the same bus.
+		See the I3C specification for more details.
+		This entry describes the PID of the master controller driving
+		the bus.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/hdrcap
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		Expose the HDR (High Data Rate) capabilities of a device.
+		Returns a list of supported HDR modes, each element separated
+		by a space. Modes can be "hdr-ddr", "hdr-tsp" and "hdr-tsl".
+		See the I3C specification for more details about these HDR
+		modes.
+		This entry describes the HDRCAP of the master controller
+		driving the bus.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		An I3C device present on I3C bus identified by <bus-id>. Note
+		that all devices are represented including the master driving
+		the bus.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/dynamic_address
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		Dynamic address assigned to device <bus-id>-<device-pid>. This
+		address may change if the bus is re-initialized.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/bcr
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		BCR stands for Bus Characteristics Register and expresses the
+		device capabilities in terms of speed, maximum read/write
+		length, etc. See the I3C specification for more details.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/dcr
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		DCR stands for Device Characteristics Register and expresses
+		the device capabilities in terms of exposed features. See the I3C
+		specification for more details.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/pid
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		PID stands for Provisional ID and is used to uniquely identify
+		a device on a bus. This PID contains information about the
+		vendor, the part and an instance ID so that several devices of
+		the same type can be connected on the same bus.
+		See the I3C specification for more details.
+
+What:		/sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/hdrcap
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		Expose the HDR (High Data Rate) capabilities of a device.
+		Returns a list of supported HDR modes, each element separated
+		by a space. Modes can be "hdr-ddr", "hdr-tsp" and "hdr-tsl".
+		See the I3C specification for more details about these HDR
+		modes.
+
+What:		/sys/bus/i3c/devices/<bus-id>-<device-pid>
+KernelVersion:  4.20
+Contact:	linux-i3c@vger.kernel.org
+Description:
+		These directories are just symbolic links to
+		/sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 3ac4177..a7ce331 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -92,6 +92,15 @@
 Description:
 		 Controls the number of trials to find a victim segment.
 
+What:		/sys/fs/f2fs/<disk>/migration_granularity
+Date:		October 2018
+Contact:	"Chao Yu" <yuchao0@huawei.com>
+Description:
+		 Controls the migration granularity of garbage collection on a
+		 large section: it lets GC move partial segment(s) of one
+		 section in one GC cycle, dispersing one heavyweight GC pass
+		 into multiple lightweight ones.
+
 What:		/sys/fs/f2fs/<disk>/dir_level
 Date:		March 2014
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt
new file mode 100644
index 0000000..4fb40fe
--- /dev/null
+++ b/Documentation/accounting/psi.txt
@@ -0,0 +1,180 @@
+================================
+PSI - Pressure Stall Information
+================================
+
+:Date: April, 2018
+:Author: Johannes Weiner <hannes@cmpxchg.org>
+
+When CPU, memory or IO devices are contended, workloads experience
+latency spikes, throughput losses, and run the risk of OOM kills.
+
+Without an accurate measure of such contention, users are forced to
+either play it safe and under-utilize their hardware resources, or
+roll the dice and frequently suffer the disruptions resulting from
+excessive overcommit.
+
+The psi feature identifies and quantifies the disruptions caused by
+such resource crunches and the time impact it has on complex workloads
+or even entire systems.
+
+Having an accurate measure of productivity losses caused by resource
+scarcity aids users in sizing workloads to hardware--or provisioning
+hardware according to workload demand.
+
+As psi aggregates this information in realtime, systems can be managed
+dynamically using techniques such as load shedding, migrating jobs to
+other systems or data centers, or strategically pausing or killing low
+priority or restartable batch jobs.
+
+This allows maximizing hardware utilization without sacrificing
+workload health or risking major disruptions such as OOM kills.
+
+Pressure interface
+==================
+
+Pressure information for each resource is exported through the
+respective file in /proc/pressure/ -- cpu, memory, and io.
+
+The format for CPU is as follows:
+
+some avg10=0.00 avg60=0.00 avg300=0.00 total=0
+
+and for memory and IO:
+
+some avg10=0.00 avg60=0.00 avg300=0.00 total=0
+full avg10=0.00 avg60=0.00 avg300=0.00 total=0
+
+The "some" line indicates the share of time in which at least some
+tasks are stalled on a given resource.
+
+The "full" line indicates the share of time in which all non-idle
+tasks are stalled on a given resource simultaneously. In this state
+actual CPU cycles are going to waste, and a workload that spends
+extended time in this state is considered to be thrashing. This has
+severe impact on performance, and it's useful to distinguish this
+situation from a state where some tasks are stalled but the CPU is
+still doing productive work. As such, time spent in this subset of the
+stall state is tracked separately and exported in the "full" averages.
+
+The ratios are tracked as recent trends over ten, sixty, and three
+hundred second windows, which gives insight into short term events as
+well as medium and long term trends. The total absolute stall time is
+tracked and exported as well, to allow detection of latency spikes
+which wouldn't necessarily make a dent in the time averages, or to
+average trends over custom time frames.
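+
+For illustration, the current pressure state can be inspected directly (a
+sketch; the output values shown are hypothetical):
+
+  $ cat /proc/pressure/memory
+  some avg10=2.04 avg60=0.75 avg300=0.40 total=157656722
+  full avg10=0.20 avg60=0.08 avg300=0.06 total=10056966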
+
+Monitoring for pressure thresholds
+==================================
+
+Users can register triggers and use poll() to be woken up when resource
+pressure exceeds certain thresholds.
+
+A trigger describes the maximum cumulative stall time over a specific
+time window, e.g. 100ms of total stall time within any 500ms window to
+generate a wakeup event.
+
+To register a trigger, the user has to open the psi interface file under
+/proc/pressure/ representing the resource to be monitored and write the
+desired threshold and time window. The open file descriptor should be
+used to wait for trigger events using select(), poll() or epoll().
+The following format is used:
+
+<some|full> <stall amount in us> <time window in us>
+
+For example, writing "some 150000 1000000" into /proc/pressure/memory
+would add a 150ms threshold for partial memory stalls measured within a
+1sec time window. Writing "full 50000 1000000" into /proc/pressure/io
+would add a 50ms threshold for full io stalls measured within a 1sec time
+window.
+
+Triggers can be set on more than one psi metric, and more than one trigger
+for the same psi metric can be specified. However, each trigger needs its
+own file descriptor in order to be polled separately from the others, so a
+separate open() syscall should be made for each trigger, even when opening
+the same psi interface file.
+
+A monitor activates only when the system enters a stall state for the
+monitored psi metric, and deactivates upon exit from the stall state. While
+the system is in the stall state, psi signal growth is monitored at a rate
+of 10 times per tracking window.
+
+The kernel accepts window sizes ranging from 500ms to 10s, therefore the
+min monitoring update interval is 50ms and the max is 1s. The min limit is
+set to prevent overly frequent polling. The max limit is chosen as a number
+high enough that beyond it monitors are most likely not needed and psi
+averages can be used instead.
+
+When activated, a psi monitor stays active for at least the duration of one
+tracking window to avoid repeated activations/deactivations when the system
+is bouncing in and out of the stall state.
+
+Notifications to the userspace are rate-limited to one per tracking window.
+
+The trigger will de-register when the file descriptor used to define the
+trigger is closed.
+
+Userspace monitor usage example
+===============================
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <poll.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * Monitor memory partial stall with 1s tracking window size
+ * and 150ms threshold.
+ */
+int main() {
+	const char trig[] = "some 150000 1000000";
+	struct pollfd fds;
+	int n;
+
+	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
+	if (fds.fd < 0) {
+		printf("/proc/pressure/memory open error: %s\n",
+			strerror(errno));
+		return 1;
+	}
+	fds.events = POLLPRI;
+
+	if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
+		printf("/proc/pressure/memory write error: %s\n",
+			strerror(errno));
+		return 1;
+	}
+
+	printf("waiting for events...\n");
+	while (1) {
+		n = poll(&fds, 1, -1);
+		if (n < 0) {
+			printf("poll error: %s\n", strerror(errno));
+			return 1;
+		}
+		if (fds.revents & POLLERR) {
+			printf("got POLLERR, event source is gone\n");
+			return 0;
+		}
+		if (fds.revents & POLLPRI) {
+			printf("event triggered!\n");
+		} else {
+			printf("unknown event received: 0x%x\n", fds.revents);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+Cgroup2 interface
+=================
+
+In a system with a CONFIG_CGROUPS=y kernel and the cgroup2 filesystem
+mounted, pressure stall information is also tracked for tasks grouped
+into cgroups. Each subdirectory in the cgroupfs mountpoint contains
+cpu.pressure, memory.pressure, and io.pressure files; the format is
+the same as the /proc/pressure/ files.
+
+Per-cgroup psi monitors can be specified and used the same way as
+system-wide ones.
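+
+For example, assuming a cgroup named "mygroup" (hypothetical; the values
+shown are the initial zero state):
+
+  $ cat /sys/fs/cgroup/mygroup/memory.pressure
+  some avg10=0.00 avg60=0.00 avg300=0.00 total=0
+  full avg10=0.00 avg60=0.00 avg300=0.00 total=0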
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 184193b..52d093b 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -966,6 +966,12 @@
 	$PERIOD duration.  "max" for $MAX indicates no limit.  If only
 	one number is written, $MAX is updated.
 
+  cpu.pressure
+	A read-only nested-key file which exists on non-root cgroups.
+
+	Shows pressure stall information for CPU. See
+	Documentation/accounting/psi.txt for details.
+
 
 Memory
 ------
@@ -1271,6 +1277,12 @@
 	higher than the limit for an extended period of time.  This
 	reduces the impact on the workload and memory management.
 
+  memory.pressure
+	A read-only nested-key file which exists on non-root cgroups.
+
+	Shows pressure stall information for memory. See
+	Documentation/accounting/psi.txt for details.
+
 
 Usage Guidelines
 ~~~~~~~~~~~~~~~~
@@ -1408,6 +1420,12 @@
 
 	  8:16 rbps=2097152 wbps=max riops=max wiops=max
 
+  io.pressure
+	A read-only nested-key file which exists on non-root cgroups.
+
+	Shows pressure stall information for IO. See
+	Documentation/accounting/psi.txt for details.
+
 
 Writeback
 ~~~~~~~~~
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1e3f86d..0a55b3e 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2080,6 +2080,9 @@
 			off
 				Disables hypervisor mitigations and doesn't
 				emit any warnings.
+				It also drops the swap size and available
+				RAM limit restriction on both hypervisor and
+				bare metal.
 
 			Default is 'flush'.
 
@@ -3487,6 +3490,10 @@
 			before loading.
 			See Documentation/blockdev/ramdisk.txt.
 
+	psi=		[KNL] Enable or disable pressure stall information
+			tracking.
+			Format: <bool>
+
 	psmouse.proto=	[HW,MOUSE] Highest PS2 mouse protocol extension to
 			probe for; one of (bare|imps|exps|lifebook|any).
 	psmouse.rate=	[HW,MOUSE] Set desired mouse report rate, in reports
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
index bae52b84..9f5924f 100644
--- a/Documentation/admin-guide/l1tf.rst
+++ b/Documentation/admin-guide/l1tf.rst
@@ -405,6 +405,9 @@
 
   off		Disables hypervisor mitigations and doesn't emit any
 		warnings.
+		It also drops the swap size and available RAM limit restrictions
+		on both hypervisor and bare metal.
+
   ============  =============================================================
 
 The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
@@ -576,7 +579,8 @@
   The kernel default mitigations for vulnerable processors are:
 
   - PTE inversion to protect against malicious user space. This is done
-    unconditionally and cannot be controlled.
+    unconditionally and cannot be controlled. The swap storage is limited
+    to ~16TB.
 
   - L1D conditional flushing on VMENTER when EPT is enabled for
     a guest.
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 875b2b5..6e5c2bb 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -156,19 +156,23 @@
 A brief description of exported device attributes. For more details please
 read Documentation/ABI/testing/sysfs-block-zram.
 
-Name            access            description
-----            ------            -----------
-disksize          RW    show and set the device's disk size
-initstate         RO    shows the initialization state of the device
-reset             WO    trigger device reset
-mem_used_max      WO    reset the `mem_used_max' counter (see later)
-mem_limit         WO    specifies the maximum amount of memory ZRAM can use
-                        to store the compressed data
-max_comp_streams  RW    the number of possible concurrent compress operations
-comp_algorithm    RW    show and change the compression algorithm
-compact           WO    trigger memory compaction
-debug_stat        RO    this file is used for zram debugging purposes
-backing_dev	  RW	set up backend storage for zram to write out
+Name            	access            description
+----            	------            -----------
+disksize          	RW	show and set the device's disk size
+initstate         	RO	shows the initialization state of the device
+reset             	WO	trigger device reset
+mem_used_max      	WO	reset the `mem_used_max' counter (see later)
+mem_limit         	WO	specifies the maximum amount of memory ZRAM can use
+				to store the compressed data
+writeback_limit   	RW	specifies the maximum amount of write IO zram can
+				write out to backing device in 4KB units
+writeback_limit_enable  RW	show and set writeback_limit feature
+max_comp_streams  	RW	the number of possible concurrent compress operations
+comp_algorithm    	RW	show and change the compression algorithm
+compact           	WO	trigger memory compaction
+debug_stat        	RO	this file is used for zram debugging purposes
+backing_dev	  	RW	set up backend storage for zram to write out
+idle		  	WO	mark allocated slot as idle
 
 
 User space is advised to use the following files to read the device statistics.
@@ -220,6 +224,17 @@
  pages_compacted  the number of pages freed during compaction
  huge_pages	  the number of incompressible pages
 
+File /sys/block/zram<id>/bd_stat
+
+The stat file represents the device's backing device statistics. It consists of
+a single line of text and contains the following stats separated by whitespace:
+ bd_count	size of data written in backing device.
+		Unit: 4K bytes
+ bd_reads	the number of reads from backing device
+		Unit: 4K bytes
+ bd_writes	the number of writes to backing device
+		Unit: 4K bytes
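+
+For illustration, a sketch of reading the file (the counter values shown
+are hypothetical):
+
+ $ cat /sys/block/zram0/bd_stat
+ 25600            0        25600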
+
 9) Deactivate:
 	swapoff /dev/zram0
 	umount /dev/zram1
@@ -237,11 +252,79 @@
 
 = writeback
 
-With incompressible pages, there is no memory saving with zram.
-Instead, with CONFIG_ZRAM_WRITEBACK, zram can write incompressible page
+With CONFIG_ZRAM_WRITEBACK, zram can write an idle/incompressible page
 to backing storage rather than keeping it in memory.
-User should set up backing device via /sys/block/zramX/backing_dev
-before disksize setting.
+To use the feature, the admin should set up the backing device via
+
+	"echo /dev/sda5 > /sys/block/zramX/backing_dev"
+
+before the disksize setting. Only a partition is supported at the moment.
+If the admin wants to use incompressible page writeback, they can do so via
+
+	"echo huge > /sys/block/zramX/writeback"
+
+To use idle page writeback, the user first needs to declare zram pages
+as idle.
+
+	"echo all > /sys/block/zramX/idle"
+
+From now on, all pages on zram are idle pages. The idle mark is
+removed once someone requests access to the block.
+IOW, unless there is an access request, those pages remain idle.
+
+The admin can request writeback of those idle pages at the right time via
+
+	"echo idle > /sys/block/zramX/writeback"
+
+With the command, zram writes back idle pages from memory to the storage.
+
+If there is lots of write IO to a flash device, it can potentially cause
+a flash wearout problem, so the admin needs to design a write limitation
+to guarantee storage health for the entire product life.
+
+To address the concern, zram supports the "writeback_limit" feature.
+The default value of "writeback_limit_enable" is 0, so it doesn't limit
+any writeback. IOW, if the admin wants to apply a writeback budget, they
+should enable writeback_limit_enable via
+
+	$ echo 1 > /sys/block/zramX/writeback_limit_enable
+
+Once writeback_limit_enable is set, zram doesn't allow any writeback
+until the admin sets the budget via /sys/block/zramX/writeback_limit.
+
+(If the admin doesn't enable writeback_limit_enable, the writeback_limit
+value assigned via /sys/block/zramX/writeback_limit is meaningless.)
+
+If the admin wants to limit writeback to 400MB per day, they can do it
+like below.
+
+	$ MB_SHIFT=20
+	$ PAGE4K_SHIFT=12
+	$ echo $((400<<MB_SHIFT>>PAGE4K_SHIFT)) > \
+		/sys/block/zram0/writeback_limit
+	$ echo 1 > /sys/block/zram0/writeback_limit_enable
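+
+For reference, this arithmetic yields 400 << 20 >> 12 = 102400, i.e. a
+budget of 102400 4K-unit writes; a quick check:
+
+	$ echo $((400<<20>>12))
+	102400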
+
+If the admin wants to allow further writes again once the budget is
+exhausted, they can do it like below
+
+	$ echo $((400<<MB_SHIFT>>PAGE4K_SHIFT)) > \
+		/sys/block/zram0/writeback_limit
+
+To see the remaining writeback budget since it was set,
+
+	$ cat /sys/block/zramX/writeback_limit
+
+To disable the writeback limit, the admin can do
+
+	$ echo 0 > /sys/block/zramX/writeback_limit_enable
+
+The writeback_limit count will reset whenever you reset zram (e.g.,
+system reboot, echo 1 > /sys/block/zramX/reset), so keeping track of how
+much writeback happened until you reset the zram, in order to allocate
+extra writeback budget in the next setting, is the user's job.
+
+If the admin wants to measure the writeback count in a certain period,
+they can read it via /sys/block/zram0/bd_stat's 3rd column.
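+
+For instance, a sketch reading just that 3rd column:
+
+	$ awk '{ print $3 }' /sys/block/zram0/bd_stat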
 
 = memory tracking
 
@@ -251,16 +334,17 @@
 If you enable the feature, you could see block state via
 /sys/kernel/debug/zram/zram0/block_state". The output is as follows,
 
-	  300    75.033841 .wh
-	  301    63.806904 s..
-	  302    63.806919 ..h
+	  300    75.033841 .wh.
+	  301    63.806904 s...
+	  302    63.806919 ..hi
 
 First column is zram's block index.
 Second column is access time since the system was booted
 Third column is state of the block.
 (s: same page
 w: written page to backing store
-h: huge page)
+h: huge page
+i: idle page)
 
 First line of above example says 300th block is accessed at 75.033841sec
 and the block's state is huge so it is written back to the backing
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index d34a63c..2577aee 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -102,6 +102,10 @@
 * Optional properties for all components:
 	* reg-names: names corresponding to each reg property value.
 
+	* qcom,proxy-regs: List of regulators required.
+
+	* qcom,proxy-clks: List of additional clocks required.
+
 * Optional properties for ETM/PTMs:
 
 	* arm,cp14: must be present if the system accesses ETM/PTM management
@@ -165,9 +169,8 @@
 	* qcom,msr-fix-req: boolean, indicating if MSRs need to be programmed
 	  after enabling the subunit.
 
-	* qcom,tpdm-clks: List of additional clocks required.
-
-	* qcom,tpdm-regs: List of regulators required.
+	* qcom,hw-enable-check: Check if the TPDM needs to be probed, as some
+	  TPDMs are not enabled on secure devices.
 
 * Optional properties for CSRs:
 
diff --git a/Documentation/devicetree/bindings/arm/msm/android.txt b/Documentation/devicetree/bindings/arm/msm/android.txt
index 7b8b790..32e418f 100644
--- a/Documentation/devicetree/bindings/arm/msm/android.txt
+++ b/Documentation/devicetree/bindings/arm/msm/android.txt
@@ -53,6 +53,38 @@
 		};
 	};
 
+odm:
+-----------------
+
+odm partition specification.
+
+Required properties:
+
+-compatible: "android,odm"
+-dev: block device corresponding to odm partition
+-type: file system type of odm partition
+-mnt_flags: mount flags
+-fsmgr_flags: fsmgr flags
+
+Example:
+
+       firmware: firmware {
+               android {
+                       compatible = "android,firmware";
+                       fstab {
+                               compatible = "android,fstab";
+                               odm {
+                                       compatible = "android,odm";
+                                       dev = "/dev/block/platform/soc/1da4000.ufshc/by-name/odm";
+                                       type = "ext4";
+                                       mnt_flags = "ro,barrier=1,discard";
+                                       fsmgr_flags = "wait,slotselect";
+                                       status = "ok";
+                               };
+                       };
+               };
+       };
+
 system:
 -----------------
 
diff --git a/Documentation/devicetree/bindings/arm/msm/proxy-client.txt b/Documentation/devicetree/bindings/arm/msm/proxy-client.txt
new file mode 100644
index 0000000..29cfaf9
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/proxy-client.txt
@@ -0,0 +1,34 @@
+Bus Proxy Client Bindings
+
+The bus proxy client provides a means to cast proxy bandwidth votes during
+bootup which are removed at the end of boot. This feature can be used in
+situations where a shared resource can be scaled between several possible
+performance levels and hardware requires that it be at a high level at the
+beginning of boot, before the client has probed and voted for the required
+bandwidth.
+
+Required properties:
+- compatible:			Must be "qcom,bus-proxy-client".
+
+Optional properties:
+- qcom,msm-bus,name:		String representing the client-name.
+- qcom,msm-bus,num-cases:	Total number of usecases.
+- qcom,msm-bus,active-only:	Boolean context flag for requests in active or
+				dual (active & sleep) context.
+- qcom,msm-bus,num-paths:	Total number of master-slave pairs.
+- qcom,msm-bus,vectors-KBps:	Arrays of unsigned integers representing:
+				master-id, slave-id, arbitrated bandwidth
+				in KBps, instantaneous bandwidth in KBps.
+
+Example:
+
+	qcom,proxy-client {
+		compatible = "qcom,bus-proxy-client";
+		qcom,msm-bus,name = "proxy_client";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>, <23 512 0 0>,
+			<22 512 0 6400000>, <23 512 0 6400000>,
+			<22 512 0 6400000>, <23 512 0 6400000>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
index 1248478..940c12b 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
@@ -11,7 +11,7 @@
 - compatible:
 	Usage: required
 	Value type: <string>
-	Definition: must be "qcom,kona-llcc" or "qcom,lito-llcc"
+	Definition: must be "qcom,llcc-v1" or "qcom,llcc-v2"
 
 - reg:
 	Usage: required
@@ -41,7 +41,7 @@
 Example:
 
 	cache-controller@9200000 {
-		compatible = "qcom,kona-llcc";
+		compatible = "qcom,llcc-v2";
 		reg = <0x9200000 0x200000> <0x9600000 0x50000>;
 		reg-names = "llcc_base", "llcc_broadcast_base";
 		interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt
new file mode 100644
index 0000000..d82d521
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt
@@ -0,0 +1,19 @@
+Qualcomm Technologies, Inc. SMSM Point-to-Point (SMP2P) Sleepstate driver
+
+Required properties:
+- compatible : should be one of the following:
+	- "qcom,smp2p-sleepstate"
+- qcom,smem-states : the relevant outgoing smp2p entry
+- interrupt-parent: specifies the phandle to the parent interrupt controller
+  this one is cascaded from
+- interrupts: specifies the interrupt number, the irq line to be used
+- interrupt-names: Interrupt name string, must be "smp2p-sleepstate-in"
+
+Example:
+qcom,smp2p_sleepstate {
+	compatible = "qcom,smp2p-sleepstate";
+	qcom,smem-states = <&sleepstate_smp2p_out 0>;
+	interrupt-parent = <&sleepstate_smp2p_in>;
+	interrupts = <0 0>;
+	interrupt-names = "smp2p-sleepstate-in";
+};
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index 23e4bd5..acc1915 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -9,11 +9,6 @@
 Required properties:
 
 - compatible: "qcom,wil6210"
-- qcom,smmu-support: Boolean flag indicating whether PCIe has SMMU support
-- qcom,smmu-s1-en: Boolean flag indicating whether SMMU stage1 should be enabled
-- qcom,smmu-fast-map: Boolean flag indicating whether SMMU fast mapping should be enabled
-- qcom,smmu-coherent: Boolean flag indicating SMMU dma and page table coherency
-- qcom,smmu-mapping: specifies the base address and size of SMMU space
 - qcom,pcie-parent: phandle for the PCIe root complex to which 11ad card is connected
 - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
   the below optional properties:
@@ -29,6 +24,7 @@
 - qcom,use-ext-supply: Boolean flag to indicate if 11ad SIP uses external power supply
 - vdd-supply: phandle to 11ad VDD regulator node
 - vddio-supply: phandle to 11ad VDDIO regulator node
+- vdd-ldo-supply: phandle to 11ad VDD LDO regulator node
 - qcom,use-ext-clocks: Boolean flag to indicate if 11ad SIP uses external clocks
 - clocks	    : List of phandle and clock specifier pairs
 - clock-names       : List of clock input name strings sorted in the same
@@ -39,11 +35,6 @@
 Example:
 	wil6210: qcom,wil6210 {
 		compatible = "qcom,wil6210";
-		qcom,smmu-support;
-		qcom,smmu-s1-en;
-		qcom,smmu-fast-map;
-		qcom,smmu-coherent;
-		qcom,smmu-mapping = <0x20000000 0xe0000000>;
 		qcom,pcie-parent = <&pcie1>;
 		qcom,wigig-en = <&tlmm 94 0>;
 		qcom,wigig-dc = <&tlmm 81 0>;
@@ -56,6 +47,7 @@
 		qcom,use-ext-supply;
 		vdd-supply= <&pm8998_s7>;
 		vddio-supply= <&pm8998_s5>;
+		vdd-ldo-supply = <&pm8150_l15>;
 		qcom,use-ext-clocks;
 		clocks = <&clock_gcc clk_rf_clk3>,
 			 <&clock_gcc clk_rf_clk3_pin>;
@@ -63,3 +55,32 @@
 		qcom,keep-radio-on-during-sleep;
 	};
 
+A wil6210 client node under the PCIe RP node is needed for SMMU
+initialization by the PCI framework when devices are discovered.
+
+Required properties:
+
+- qcom,iommu-dma-addr-pool: specifies the base address and size of SMMU space
+- qcom,iommu-dma: defines the SMMU mode - bypass/fastmap/disabled
+- qcom,iommu-pagetable: indicates SMMU DMA and page table coherency
+
+Example:
+&pcie1_rp {
+	#address-cells = <5>;
+	#size-cells = <0>;
+
+	wil6210_pci: wil6210_pci {
+		reg = <0 0 0 0 0>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		qcom,iommu-group = <&wil6210_pci_iommu_group>;
+
+		wil6210_pci_iommu_group: wil6210_pci_iommu_group {
+				qcom,iommu-dma-addr-pool = <0x20000000 0xe0000000>;
+				qcom,iommu-dma = "fastmap";
+				qcom,iommu-pagetable = "coherent";
+		};
+       };
+};
diff --git a/Documentation/devicetree/bindings/batterydata/batterydata.txt b/Documentation/devicetree/bindings/batterydata/batterydata.txt
index 3bd1122..b3d1adc 100644
--- a/Documentation/devicetree/bindings/batterydata/batterydata.txt
+++ b/Documentation/devicetree/bindings/batterydata/batterydata.txt
@@ -120,13 +120,28 @@
 			The threshold values in range should be in ascending
 			and shouldn't overlap. It support 8 ranges at max.
 - qcom,jeita-soft-thresholds: A tuple entry to specify ADC code for battery's soft JEITA
-				threshold.
-				<SOFT_COLD_ADC_CODE, SOFT_HOT_ADC_CODE>.
+			threshold. <SOFT_COLD_ADC_CODE, SOFT_HOT_ADC_CODE>.
 - qcom,jeita-hard-thresholds: A tuple entry to specify ADC code for battery's hard JEITA
-				threshold.
-				<HARD_COLD_ADC_CODE, HARD_HOT_ADC_CODE>.
+			threshold. <HARD_COLD_ADC_CODE, HARD_HOT_ADC_CODE>.
+- qcom,jeita-soft-hys-thresholds: A tuple entry to specify ADC code for battery's soft JEITA
+			threshold with hysteresis adjustment.
+			<SOFT_COLD_ADC_CODE, SOFT_HOT_ADC_CODE>.
+			These "hysteresis" values should be specified if
+			"qcom,jeita-soft-thresholds" are specified; without
+			them, SW JEITA compensation won't function properly.
+- qcom,jeita-soft-fcc-ua: A tuple entry to specify the values of Fast
+			charging current (in uA) that needs to be applied during
+			soft JEITA conditions (cool/warm).
+			Element 0 - FCC value for soft cool.
+			Element 1 - FCC value for soft warm.
+- qcom,jeita-soft-fv-uv: A tuple entry to specify the values of Float
+			voltage (in uV) that needs to be applied during soft
+			JEITA conditions (cool/warm).
+			Element 0 - FV value for soft cool.
+			Element 1 - FV value for soft warm.
 - qcom,batt-age-level:  Battery age level. This is used only when multiple
 			profile loading is supported.
+
 Profile data node required subnodes:
 - qcom,fcc-temp-lut : An 1-dimensional lookup table node that encodes
 			temperature to fcc lookup. The units for this lookup
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
index 51c13d1..7fb8e3c 100644
--- a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
@@ -6,6 +6,7 @@
 - compatible : Shall contain one of the following:
 		"qcom,kona-dispcc",
 		"qcom,sdm845-dispcc"
+		"qcom,lito-dispcc"
 - reg : shall contain base register location and length.
 - vdd_mm-supply: phandle to the MM_CX rail that needs to be voted on behalf
 of the clocks.
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
index f58755c..58c4e29 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
@@ -3,7 +3,8 @@
 
 Required properties :
 - compatible: shall contain one of the following:
-		"qcom,gpucc-kona".
+		"qcom,gpucc-kona"
+		"qcom,gpucc-lito".
 - reg: shall contain base register offset and size.
 - reg-names: names of registers listed in the same order as in the reg property.
 		Must contain "cc_base".
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
index 4f91bba..3283ff0 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
@@ -9,6 +9,7 @@
 - compatible : Shall contain one of the following:
 		"qcom,kona-rpmh-clk",
 		"qcom,sdm845-rpmh-clk"
+		"qcom,lito-rpmh-clk"
 
 - #clock-cells : must contain 1
 
diff --git a/Documentation/devicetree/bindings/display/msm/mdss-pll.txt b/Documentation/devicetree/bindings/display/msm/mdss-pll.txt
index c3f0ca7..5f62830 100644
--- a/Documentation/devicetree/bindings/display/msm/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdss-pll.txt
@@ -20,7 +20,7 @@
                         "qcom,mdss_dsi_pll_7nm",   "qcom,mdss_dp_pll_7nm",
 			"qcom,mdss_dsi_pll_28lpm", "qcom,mdss_dsi_pll_14nm",
 			"qcom,mdss_dp_pll_14nm", "qcom,mdss_dsi_pll_7nm_v2",
-			"qcom,mdss_hdmi_pll_28lpm"
+			"qcom,mdss_hdmi_pll_28lpm", "qcom,mdss_dsi_pll_7nm_v4_1"
 - cell-index:		Specifies the controller used
 - reg:			offset and length of the register set for the device.
 - reg-names :		names to refer to register sets related to this device
diff --git a/Documentation/devicetree/bindings/display/msm/sde-dp.txt b/Documentation/devicetree/bindings/display/msm/sde-dp.txt
index a17b738..7881230 100644
--- a/Documentation/devicetree/bindings/display/msm/sde-dp.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde-dp.txt
@@ -100,11 +100,14 @@
 - compatible:			Must be "qcom,msm-ext-disp"
 - qcom,dp-low-power-hw-hpd:	Low power hardware HPD feature enable control node
 - qcom,phy-version:		Phy version
+- qcom,pn-swap-lane-map:	P/N swap configuration of each lane
 - pinctrl-names:		List of names to assign mdss pin states defined in pinctrl device node
 				Refer to pinctrl-bindings.txt
 - pinctrl-<0..n>:		Lists phandles each pointing to the pin configuration node within a pin
 				controller. These pin configurations are installed in the pinctrl
 				device node. Refer to pinctrl-bindings.txt
+- qcom,max-lclk-frequency-khz:	An integer specifying the maximum link clock in KHz supported by Display Port.
+- qcom,mst-fixed-topology-ports: u32 values of which MST output port to reserve, starting from one
 
 [Optional child nodes]: These nodes are for devices which are
 dependent on msm_ext_disp. If msm_ext_disp is disabled then
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 99de140..a3f21d0 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -312,6 +312,13 @@
 				defined in reg property.
 - qcom,sde-reg-dma-broadcast-disabled: Boolean property to indicate if broadcast
 				functionality in the register dma hardware block should be used.
+- qcom,sde-reg-dma-xin-id:	VBIF client id (xin) corresponding
+				to the LUTDMA block.
+- qcom,sde-reg-dma-clk-ctrl:	Array of 2 cells describing clk control
+				offsets for dynamic clock gating. The 1st value
+				in the array represents the offset of the control
+				register. The 2nd value represents the bit offset
+				within the control register.
 - qcom,sde-dram-channels:	This represents the number of channels in the
 				Bus memory controller.
 - qcom,sde-num-nrt-paths:	Integer property represents the number of non-realtime
@@ -371,6 +378,10 @@
 				priority for realtime clients.
 - qcom,sde-vbif-qos-nrt-remap:	This array is used to program vbif qos remapper register
 				priority for non-realtime clients.
+- qcom,sde-vbif-qos-cwb-remap:	This array is used to program vbif qos remapper register
+				priority for concurrent writeback clients.
+- qcom,sde-vbif-qos-lutdma-remap:	This array is used to program vbif qos remapper register
+				priority for lutdma client.
 - qcom,sde-danger-lut:		Array of 5 cell property, with a format of
 				<linear, tile, nrt, cwb, tile-qseed>,
 				indicating the danger luts on sspp.
@@ -733,6 +744,15 @@
 
     qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
     qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+    qcom,sde-vbif-qos-cwb-remap = <3 3 4 4 5 5 6 3>;
+    qcom,sde-vbif-qos-lutdma-remap = <3 3 3 3 4 4 4 4>;
+
+    qcom,sde-reg-dma-off = <0>;
+    qcom,sde-reg-dma-version = <0x00010002>;
+    qcom,sde-reg-dma-trigger-off = <0x119c>;
+    qcom,sde-reg-dma-broadcast-disabled = <0>;
+    qcom,sde-reg-dma-xin-id = <7>;
+    qcom,sde-reg-dma-clk-ctrl = <0x2bc 20>;
 
     qcom,sde-sspp-vig-blocks {
         qcom,sde-vig-csc-off = <0x320>;
diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt
index aededdb..f9a7c98 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.txt
+++ b/Documentation/devicetree/bindings/eeprom/at24.txt
@@ -27,6 +27,7 @@
                 "atmel,24c256",
                 "atmel,24c512",
                 "atmel,24c1024",
+                "atmel,24c2048",
 
                 If <manufacturer> is not "atmel", then a fallback must be used
                 with the same <model> and "atmel" as manufacturer.
diff --git a/Documentation/devicetree/bindings/i3c/i3c.txt b/Documentation/devicetree/bindings/i3c/i3c.txt
new file mode 100644
index 0000000..ab729a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/i3c.txt
@@ -0,0 +1,138 @@
+Generic device tree bindings for I3C busses
+===========================================
+
+This document describes generic bindings that should be used to describe I3C
+busses in a device tree.
+
+Required properties
+-------------------
+
+- #address-cells  - should be <3>. Read more about addresses below.
+- #size-cells     - should be <0>.
+- compatible      - name of the I3C master controller driving the I3C bus
+
+For other required properties e.g. to describe register sets,
+clocks, etc. check the binding documentation of the specific driver.
+The node describing an I3C bus should be named i3c-master.
+
+Optional properties
+-------------------
+
+These properties may not be supported by all I3C master drivers. Each I3C
+master binding should specify which of them are supported.
+
+- i3c-scl-hz: frequency of the SCL signal used for I3C transfers.
+	      When undefined the core sets it to 12.5MHz.
+
+- i2c-scl-hz: frequency of the SCL signal used for I2C transfers.
+	      When undefined, the core looks at LVR (Legacy Virtual Register)
+	      values of I2C devices described in the device tree to determine
+	      the maximum I2C frequency.
+
+I2C devices
+===========
+
+Each I2C device connected to the bus should be described in a subnode. All
+properties described in Documentation/devicetree/bindings/i2c/i2c.txt are
+valid here, but several new properties have been added.
+
+New constraint on existing properties:
+--------------------------------------
+- reg: contains 3 cells
+  + first cell : still encoding the I2C address
+
+  + second cell: shall be 0
+
+  + third cell: shall encode the I3C LVR (Legacy Virtual Register)
+	bit[31:8]: unused/ignored
+	bit[7:5]: I2C device index. Possible values
+	* 0: I2C device has a 50 ns spike filter
+	* 1: I2C device does not have a 50 ns spike filter but supports high
+	     frequency on SCL
+	* 2: I2C device does not have a 50 ns spike filter and is not tolerant
+	     to high frequencies
+	* 3-7: reserved
+
+	bit[4]: tell whether the device operates in FM (Fast Mode) or FM+ mode
+	* 0: FM+ mode
+	* 1: FM mode
+
+	bit[3:0]: device type
+	* 0-15: reserved
+
+The I2C node unit-address should always match the first cell of the reg
+property: <device-type>@<i2c-address>.
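+
+For illustration, the nunchuk LVR in the example below (third reg cell,
+0x10) decodes as index 0 in bit[7:5] (50 ns spike filter), 1 in bit[4]
+(FM mode) and device type 0 in bit[3:0]; a quick shell check of this
+assumed decoding:
+
+	$ printf '0x%x\n' $(((0 << 5) | (1 << 4) | 0))
+	0x10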
+
+I3C devices
+===========
+
+All I3C devices are supposed to support DAA (Dynamic Address Assignment), and
+are thus discoverable. So, by default, I3C devices do not have to be described
+in the device tree.
+This being said, one might want to attach extra resources to these devices,
+and those resources may have to be described in the device tree, which in turn
+means we have to describe I3C devices.
+
+Another use case for describing an I3C device in the device tree is when this
+I3C device has a static I2C address and we want to assign it a specific I3C
+dynamic address before the DAA takes place (so that other devices on the bus
+can't take this dynamic address).
+
+The I3C device should be named <device-type>@<static-i2c-address>,<i3c-pid>,
+where device-type describes the type of device connected on the bus
+(gpio-controller, sensor, ...).
+
+Required properties
+-------------------
+- reg: contains 3 cells
+  + first cell : encodes the static I2C address. Should be 0 if the device does
+		 not have one (0 is not a valid I2C address).
+
+  + second and third cells: should encode the ProvisionalID. The second cell
+			    contains the manufacturer ID left-shifted by 1.
+			    The third cell contains the OR of the part ID
+			    left-shifted by 16, the instance ID left-shifted
+			    by 12 and the extra information. This encoding
+			    follows the PID definition provided by the I3C
+			    specification; see the sketch below.
+
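+For illustration, the two PID cells of the thermal sensor in the example
+below (assuming manufacturer ID 0x1c9, part ID 0x14, instance ID 0x4 and
+extra information 0x004) can be computed with shell arithmetic:
+
+	$ printf '0x%x 0x%x\n' $((0x1c9 << 1)) \
+		$(((0x14 << 16) | (0x4 << 12) | 0x004))
+	0x392 0x144004
+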
+Optional properties
+-------------------
+- assigned-address: dynamic address to be assigned to this device. This
+		    property is only valid if the I3C device has a static
+		    address (first cell of the reg property != 0).
+
+
+Example:
+
+	i3c-master@d040000 {
+		compatible = "cdns,i3c-master";
+		clocks = <&coreclock>, <&i3csysclock>;
+		clock-names = "pclk", "sysclk";
+		interrupts = <3 0>;
+		reg = <0x0d040000 0x1000>;
+		#address-cells = <3>;
+		#size-cells = <0>;
+		i2c-scl-hz = <100000>;
+
+		/* I2C device. */
+		nunchuk: nunchuk@52 {
+			compatible = "nintendo,nunchuk";
+			reg = <0x52 0x0 0x10>;
+		};
+
+		/* I3C device with a static I2C address. */
+		thermal_sensor: sensor@68,39200144004 {
+			reg = <0x68 0x392 0x144004>;
+			assigned-address = <0xa>;
+		};
+
+		/*
+		 * I3C device without a static I2C address but requiring
+		 * resources described in the DT.
+		 */
+		sensor@0,39200154004 {
+			reg = <0x0 0x392 0x154004>;
+			clocks = <&clock_provider 0>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/i3c/qcom,geni-i3c.txt b/Documentation/devicetree/bindings/i3c/qcom,geni-i3c.txt
new file mode 100644
index 0000000..59425ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/qcom,geni-i3c.txt
@@ -0,0 +1,48 @@
+Qualcomm Technologies, Inc. GENI I3C master block
+
+Generic bindings document for GENI I3C master controller driver.
+
+Required properties:
+- compatible: shall be "qcom,geni-i3c".
+- clocks: shall reference the se clock.
+- clock-names: shall contain clock name corresponding to the serial engine.
+- interrupts: the interrupt line connected to this I3C master.
+- reg: I3C master registers.
+- qcom,wrapper-core: Wrapper QUPv3 core containing this I3C controller.
+
+Optional properties:
+- se-clock-frequency: Source serial clock frequency to use.
+- dfs-index: Dynamic frequency scaling table index to use.
+
+Mandatory properties defined by the generic binding (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details):
+
+- #address-cells: shall be set to 3.
+- #size-cells: shall be set to 0.
+
+Optional properties defined by the generic binding (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details):
+
+- i2c-scl-hz: frequency for i2c transfers.
+- i3c-scl-hz: frequency for i3c transfers.
+
+I3C devices connected on the bus follow the generic description (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details).
+
+Example:
+        i3c0: i3c@980000 {
+		compatible = "qcom,geni-i3c";
+		reg = <0x980000 0x4000>,
+			<0xec30000 0x10000>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se0_i3c_active>;
+		pinctrl-1 = <&qupv3_se0_i3c_sleep>;
+		interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <3>;
+		#size-cells = <0>;
+		qcom,wrapper-core = <&qupv3_0>;
+	};
diff --git a/Documentation/devicetree/bindings/input/qti-haptics.txt b/Documentation/devicetree/bindings/input/qti-haptics.txt
index 78c674a..b86bae9 100644
--- a/Documentation/devicetree/bindings/input/qti-haptics.txt
+++ b/Documentation/devicetree/bindings/input/qti-haptics.txt
@@ -59,19 +59,6 @@
 		specified in the LRA actuator datasheet. Allowed values are:
 		0 to 20475. If this is not specified, 5715us play rate is used.
 
-- qcom,external-waveform-source
-  Usage:      optional
-  Value type: <string>
-  Definition: The haptics module supports to play with internal constant
-		Vmax strength or play with patterns specified in its internal
-		8-bytes waveform buffer. It can also play with the audio
-		LINE-IN signal or PWM waveform coming from LINE-IN/PWM pin.
-		This property specify the kind of the waveform resources
-		on the LINE-IN/PWM pins. Allowed values are: "audio", "pwm".
-		If this is not specified, internal signals (Vmax or buffer)
-		will be selected according to the requriement of the playing
-		waveforms.
-
 - vdd-supply
   Usage:      optional
   Value type: <phandle>
@@ -114,7 +101,7 @@
 		notification event.
 
 - qcom,wf-pattern
-  Usage:      required
+  Usage:      optional
   Value type: <prop-encoded-array>
   Definition: Specifies the waveform pattern in a byte array that will be
 		played for the effect-id. The bit fields of each byte are:
@@ -172,6 +159,19 @@
   Definition: If specified, the hardware feature of LRA auto resonance detection
 		is disabled.
 
+- qcom,wf-line-in-audio
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag to indicate if the effect is playing the audio signal
+		coming into the LINE-IN pin. If this is specified, the pattern
+		specified in "qcom,wf-pattern" will be ignored.
+
+- qcom,wf-line-in-pwm
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag to indicate if the effect is playing the PWM signal
+		coming into the LINE-IN pin. If this is specified, the pattern
+		specified in "qcom,wf-pattern" will be ignored.
+
 Example:
   qcom,haptics@c000 {
 	compatible = "qcom,haptics";
@@ -200,4 +200,10 @@
 		qcom,wf-play-rate-us = <6250>;
 		qcom,wf-pattern = [7e 7e 7e];
 	};
+
+	wf_6 {
+		/* RINGTONE_x effect */
+		qcom,effect-id = <6>;
+		qcom,wf-line-in-audio;
+	};
   };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 9c7181b..c14f9ba 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -138,6 +138,15 @@
                   clients who do not detach, it's not possible to keep regulator
                   vote while smmu is attached. Type is <u32>.
 
+- qcom,no-dynamic-asid:
+		  Clients that use dynamic domains will have a unique ASID
+		  per domain, and all domains can share the same context bank.
+		  When ASID based invalidation is used, on some hardware revisions,
+		  as a result of multiple ASIDs associated with the same context
+		  bank, TLB entries are not invalidated properly. On such systems,
+		  we can choose to have a single ASID associated with all domains
+		  for a context bank.
+
 - clocks        : List of clocks to be used during SMMU register access. See
                   Documentation/devicetree/bindings/clock/clock-bindings.txt
                   for information about the format. For each clock specified
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index f31ced7..2f948e8 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -120,17 +120,15 @@
   boot/dts/include/dt-bindings/msm/msm-bus-ids.h for list of acceptable slaves
 
 Optional properties:
-- qcom,bus-governor : governor to use when scaling bus, generally any commonly
-  found devfreq governor might be used.  In addition to those governors, the
-  custom Venus governors, "msm-vidc-ddr" or "msm-vidc-llcc" are also
-  acceptable values.
-  In the absence of this property the "performance" governor is used.
-- qcom,bus-rage-kbps : an array of two items (<min max>) that indicate the
+- qcom,bus-range-kbps : an array of two items (<min max>) that indicate the
   minimum and maximum acceptable votes for the bus.
   In the absence of this property <0 INT_MAX> is used.
 - qcom,ubwc-10bit : UBWC 10 bit content has different bus requirements,
   this tag will be used to pick the appropriate bus as per the session profile
   as shown below in example.
+- qcom,mode : Type of BW calculations to use.
+		"performance" - Use highest valid BW vote.
+		"venus-ddr", "venus-llcc" - Calculate for DDR, LLCC path.
 
 Memory Heaps
 ============
diff --git a/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt b/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt
new file mode 100644
index 0000000..69debce
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies, Inc. IPC Router FIFO Transport
+
+Required properties:
+- compatible:	should be "qcom,ipcr-fifo-xprt"
+- reg:		the irq register to raise an interrupt
+- interrupts:	the receiving interrupt line
+- qcom,ipc-shm:	reference to shared memory phandle
+
+Example:
+
+	fifo_vipc_irq@176 {
+		compatible = "qcom,ipcr-fifo-xprt";
+		reg = <0x176>;
+		interrupts = <0x0 0x142 0x1>;
+		qcom,ipc-shm = <&ipc-shm>;
+	};
+
+	ipc-shm: shared-buffer@85af7000 {
+		compatible = "qcom,hypervisor-shared-memory";
+		phandle = <0x1e4>;
+		reg = <0x0 0x85af7000 0x0 0x9000>;
+		label = "ipc_shm";
+		qcom,tx-is-first;
+	};
+
diff --git a/Documentation/devicetree/bindings/pci/pci-msm.txt b/Documentation/devicetree/bindings/pci/pci-msm.txt
index e9d411a..362b19e 100644
--- a/Documentation/devicetree/bindings/pci/pci-msm.txt
+++ b/Documentation/devicetree/bindings/pci/pci-msm.txt
@@ -310,6 +310,11 @@
 	Value type: <u32>
 	Definition: Offset from PCIe PHY base to check if PCIe PHY status
 
+- qcom,phy-status-bit:
+	Usage: required
+	Value type: <u32>
+	Definition: Bit position used to check the PCIe PHY status
+
 - qcom,phy-power-down-offset:
 	Usage: required
 	Value type: <u32>
@@ -468,6 +473,7 @@
 
 		qcom,pcie-phy-ver = <0x2101>; /* v2 version 1.01 */
 		qcom,phy-status-offset = <0x814>;
+		qcom,phy-status-bit = <6>;
 		qcom,phy-power-down-offset = <0x840>;
 		qcom,phy-sequence = <0x0840 0x03 0x0
 				0x0094 0x08 0x0
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,kona-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,kona-pinctrl.txt
index 03f1fd9..78d6e0d 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,kona-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,kona-pinctrl.txt
@@ -40,6 +40,11 @@
 	Definition: must be 2. Specifying the pin number and flags, as defined
 		    in <dt-bindings/gpio/gpio.h>
 
+- wakeup-parent:
+	Usage: optional
+	Value type: <phandle>
+	Definition: A phandle to the wakeup interrupt controller for the SoC.
+
 Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
 a general description of GPIO and interrupt bindings.
 
@@ -183,4 +188,5 @@
 		#gpio-cells = <2>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
+		wakeup-parent = <&pdc>;
 	};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,lito-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,lito-pinctrl.txt
index 3fcfc54..c7acea3 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,lito-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,lito-pinctrl.txt
@@ -40,6 +40,11 @@
 	Definition: must be 2. Specifying the pin number and flags, as defined
 		    in <dt-bindings/gpio/gpio.h>
 
+- wakeup-parent:
+	Usage: optional
+	Value type: <phandle>
+	Definition: A phandle to the wakeup interrupt controller for the SoC.
+
 Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
 a general description of GPIO and interrupt bindings.
 
@@ -183,4 +188,5 @@
 		#gpio-cells = <2>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
+		wakeup-parent = <&pdc>;
 	};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt
index 94500c5..465b39e 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt
@@ -37,6 +37,18 @@
 		    The first cell will be used to define gpio number and the
 		    second denotes the flags for this gpio.
 
+- qcom,slew-reg:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: Register base of the slew register and length.
+
+- qcom,lpi-slew-offset-tbl:
+	Usage: optional
+	Value type: <u32-array>
+	Definition: Offset table that points to each pin's shift value
+		    position in bits in the slew register base for slew
+		    settings.
+
 Please refer to ../gpio/gpio.txt for general description of GPIO bindings.
 
 Please refer to pinctrl-bindings.txt in this directory for details of the
@@ -123,12 +135,18 @@
 	Value type: <u32>
 	Definition: Selects the drive strength for the specified pins.
 
+- slew-rate:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects the slew rate for the specified pins.
+
 Example:
 
 	lpi_tlmm: lpi_pinctrl@152c000 {
 		compatible = "qcom,lpi-pinctrl";
 		qcom,num-gpios = <32>;
 		reg = <0x152c000 0>;
+		qcom,slew-reg = <0x355a000 0x0>;
 		gpio-controller;
 		#gpio-cells = <2>;
 		qcom,lpi-offset-tbl = <0x00000010>, <0x00000020>,
@@ -142,6 +160,13 @@
 				<0x00000170>, <0x00000180>,
 				<0x00000190>, <0x00000200>,
 				<0x00000210>;
+		qcom,lpi-slew-offset-tbl = <0x00000000>, <0x00000002>,
+				<0x00000004>, <0x00000008>,
+				<0x0000000A>, <0x0000000C>,
+				<0x00000000>, <0x00000000>,
+				<0x00000000>, <0x00000000>,
+				<0x00000010>, <0x00000012>,
+				<0x00000000>, <0x00000000>;
 
 		hph_comp_active: hph_comp_active {
 			mux {
@@ -165,6 +190,7 @@
 			config {
 				pins = "gpio22";
 				qcom,drive-strength = <2>;
+				slew-rate = <1>;
 			};
 		};
 	};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index b439928..625e5d8 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -59,6 +59,8 @@
                                 a pipe reset via the IPA uC is required
 - qcom,ipa-wdi2:		Boolean context flag to indicate whether
 				using wdi-2.0 or not
+- qcom,ipa-wdi3-over-gsi:       Boolean context flag to indicate whether
+                                using wdi-3.0 or not
 - qcom,bandwidth-vote-for-ipa:	Boolean context flag to indicate whether
 				ipa clock voting is done by bandwidth
 				voting via msm-bus-scale driver or not
@@ -92,6 +94,20 @@
 				over pcie bus or not.
 - qcom,ipa-wdi2_over_gsi: Boolean context flag to indicate WDI2 offload over GSI
 				supported or not.
+- qcom,register-collection-on-crash: Boolean that controls IPA/GSI register
+				collection upon system crash (i.e. SSR).
+- qcom,testbus-collection-on-crash: Boolean that controls testbus register
+				collection upon system crash.
+- qcom,non-tn-collection-on-crash: Boolean that controls collection of a
+				certain subset of IPA/GSI registers upon
+				system crash (SSR).  Accessing these
+				registers can cause stalling, hence this
+				control.
+- qcom,entire-ipa-block-size: Complete size of the ipa block in which all
+				registers, collected upon crash, reside.
+- qcom,secure-debug-check-action: Drives secure memory debug check. Three values allowed:
+				0 (use scm call),
+				1 (override scm call as though it returned true), and
+				2 (override scm call as though it returned false)
 
 Optional properties:
 -qcom,ipa-pipe-mem: Specifies the base physical address and the
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa_mpm.txt b/Documentation/devicetree/bindings/platform/msm/ipa_mpm.txt
new file mode 100644
index 0000000..a32b320
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ipa_mpm.txt
@@ -0,0 +1,23 @@
+* Qualcomm Technologies, Inc. IPA MHI Prime Manager driver module
+
+This module enables IPA Modem to IPA APQ communication using
+MHI Prime.
+
+Required properties:
+- compatible:		Must be "qcom,ipa-mpm".
+- qcom,mhi-chdb-base:	MHI channel doorbell base address in MMIO space.
+- qcom,mhi-erdb-base:	MHI event doorbell base address in MMIO space.
+
+Optional:
+- qcom,iova-mapping:	Start address and size of the carved IOVA space
+				dedicated for MHI control structures
+				(such as transfer rings, event rings, doorbells).
+				If not present, SMMU S1 is considered to be in bypass mode.
+
+Example:
+	ipa_mpm: qcom,ipa-mpm {
+		compatible = "qcom,ipa-mpm";
+		qcom,mhi-chdb-base = <0x40300300>;
+		qcom,mhi-erdb-base = <0x40300700>;
+		qcom,iova-mapping = <0x10000000 0x1FFFFFFF>;
+	};
diff --git a/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
index 7da95f8..a42f491 100644
--- a/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
+++ b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
@@ -13,6 +13,9 @@
 
 Optional properties:
 - qcom,iommu-s1-bypass:	Boolean flag to bypass IOMMU stage 1 translation.
+- qcom,msm-bus,num-paths: Number of paths to put vote for.
+- qcom,msm-bus,vectors-bus-ids: Master and slave Endpoint IDs for DDR
+				and Corex/2x paths.
 
 Optional subnodes:
 qcom,iommu_qupv3_geni_se_cb:	Child node representing the QUPV3 context
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
index 342787f..e37bbb7 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
@@ -52,6 +52,15 @@
 		    is not specified, then the default value used will be
 		    2812 mV.
 
+- qcom,fg-sys-min-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: The voltage threshold (in mV) which describes the system
+		    minimum voltage as per the hardware recommendation. This
+		    is not used for any configuration but only for calculating
+		    the available power. If this property is not specified,
+		    then the default value used is 2800 mV.
+
 - qcom,fg-sys-term-current
 	Usage:      optional
 	Value type: <u32>
@@ -419,6 +428,37 @@
 		    multiple battery profiles to be specified for a battery for
 		    proper functionality.
 
+- qcom,soc-hi-res
+	Usage:      optional
+	Value type: <empty>
+	Definition: A boolean property that when specified exposes a high
+		    resolution monotonic SOC through the CAPACITY_RAW property
+		    during charging, on a scale of 0-10000.
+
+- qcom,soc-scale-mode-en
+	Usage:      optional
+	Value type: <boolean>
+	Definition: A boolean property that when specified will enable scaling
+		    of the SOC linearly, based on the filtered battery voltage
+		    after crossing below a Vbatt threshold.
+
+- qcom,soc-scale-vbatt-mv
+	Usage:      optional
+	Value type: <u32>
+	Definition: Threshold voltage to decide when SOC should
+		    be scaled based on filtered voltage when
+		    qcom,soc-scale-mode-en is specified. If this
+		    is not specified, then the default value is 3400.
+		    Unit is in mV.
+
+- qcom,soc-scale-time-ms
+	Usage:      optional
+	Value type: <u32>
+	Definition: Timer value for doing SOC calculation based on
+		    filtered voltage when qcom,soc-scale-mode-en is
+		    specified. If this is not specified, then the
+		    default value is 10000. Unit is in ms.
+
 ==========================================================
 Second Level Nodes - Peripherals managed by FG Gen4 driver
 ==========================================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt
index d997edd..fb27728 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt
@@ -30,6 +30,18 @@
   Definition: Specifies the interrupt name for Qnovo5. There is only one
 		interrupt named as "ptrain-done".
 
+- pinctrl-N:
+  Usage:      optional
+  Value type: <phandle>
+  Definition: Specifies the pinctrl configuration that needs to be applied
+		when the charger is removed for controlling external FET.
+
+- pinctrl-names:
+  Usage:      optional
+  Value type: <string>
+  Definition: Specifies the names for pinctrl configurations defined above.
+		Allowed names are "q_state1" and "q_state2".
+
 Example:
 
 qcom,qpnp-qnovo@b000 {
@@ -37,4 +49,7 @@
 	reg = <0xb000 0x100>;
 	interrupts = <0x2 0xb0 0x1 IRQ_TYPE_NONE>;
 	interrupt-names = "ptrain-done";
+	pinctrl-names = "q_state1", "q_state2";
+	pinctrl-0 = <&qnovo_fet_ctrl_state1>;
+	pinctrl-1 = <&qnovo_fet_ctrl_state2>;
 };
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
index 2515f05..16f8750 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
@@ -201,6 +201,12 @@
   Value type: bool
   Definition: Boolean flag which when present enables step-charging.
 
+- qcom,typec-legacy-rp-icl
+  Usage:       optional
+  Value type:  bool
+  Definition:  Boolean property to enable setting ICL based on Rp for
+		Type-C non-compliant legacy cables.
+
 - qcom,wd-bark-time-secs
   Usage:      optional
   Value type: <u32>
@@ -251,6 +257,60 @@
   Definition: Boolean flag which when present enables stepwise change in FCC.
 		The default stepping rate is 100mA/sec.
 
+- qcom,disable-suspend-on-collapse
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present disables suspend on collapse
+		feature of charger hardware.
+
+- qcom,uusb-moisture-protection-enable
+	Usage:      optional
+	Value type: bool
+	Definition: Boolean flag which when present enables moisture protection
+		    feature for uUSB connector type.
+
+- qcom,hvdcp-autonomous-enable
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present enables hardware-controlled
+		operation of HVDCP.
+
+- qcom,usb-pd-disable
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present disables USB-PD operation.
+
+- qcom,lpd-disable
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present disables liquid presence
+		detection.
+
+- qcom,hw-die-temp-mitigation
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present enables h/w based thermal
+		mitigation.
+
+- qcom,hw-connector-mitigation
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present enables h/w based
+		connector temperature mitigation.
+
+- qcom,hw-skin-temp-mitigation
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present enables h/w based skin
+		temperature mitigation.
+
+- qcom,connector-internal-pull-kohm
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies internal pull-up configuration to be applied to
+		connector THERM. The only valid values are (0/30/100/400).
+		If not specified, 100K is used as the default pull-up.
+
 =============================================
 Second Level Nodes - SMB5 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/prng/msm-rng.txt b/Documentation/devicetree/bindings/prng/msm-rng.txt
new file mode 100644
index 0000000..917c2fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/prng/msm-rng.txt
@@ -0,0 +1,18 @@
+* RNG (Random Number Generator)
+
+Required properties:
+- compatible : Should be "qcom,msm-rng"
+- reg        : Offset and length of the register set for the device
+
+Optional property:
+- qcom,msm-rng-iface-clk : If the device uses iface-clk.
+- qcom,no-qrng-config    : Flag to decide whether the driver does the hardware configuration or not.
+
+Example:
+
+	qcom,msm-rng@f9bff000 {
+		compatible = "qcom,msm-rng";
+		reg = <0xf9bff000 0x200>;
+		qcom,msm-rng-iface-clk;
+		qcom,no-qrng-config;
+	};
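+
+A hedged sketch of how a driver might read the optional flag (only the
+property name comes from this binding; the surrounding code is illustrative):
+
+	bool no_qrng_config;
+
+	no_qrng_config = of_property_read_bool(pdev->dev.of_node,
+					       "qcom,no-qrng-config");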
diff --git a/Documentation/devicetree/bindings/qbt_handler/qbt_handler.txt b/Documentation/devicetree/bindings/qbt_handler/qbt_handler.txt
new file mode 100644
index 0000000..168aa24
--- /dev/null
+++ b/Documentation/devicetree/bindings/qbt_handler/qbt_handler.txt
@@ -0,0 +1,35 @@
+Qualcomm Technologies, Inc. QBT_HANDLER Specific Bindings
+
+QBT is a fingerprint sensor ASIC capable of performing fingerprint image scans
+and detecting finger presence on the sensor using programmable firmware.
+
+=======================
+Required Node Structure
+=======================
+
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: "qcom,qbt-handler".
+
+- qcom,ipc-gpio
+  Usage:      required
+  Value type: <phandle>
+  Definition: phandle for GPIO to be used for IPC.
+
+- qcom,finger-detect-gpio
+  Usage:      required
+  Value type: <phandle>
+  Definition: phandle for GPIO to be used for finger detect.
+
+=======
+Example
+=======
+
+qcom,qbt_handler {
+                compatible = "qcom,qbt-handler";
+                qcom,ipc-gpio = <&tlmm 23 0>;
+                pinctrl-names = "default";
+                pinctrl-0 = <&key_home_default>;
+                qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+};
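+
+A hedged sketch of how the GPIO properties might be consumed by the driver
+(only the property names come from this binding; the surrounding code is
+illustrative):
+
+	int ipc_gpio;
+
+	ipc_gpio = of_get_named_gpio(pdev->dev.of_node, "qcom,ipc-gpio", 0);
+	if (!gpio_is_valid(ipc_gpio))
+		return -EINVAL;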
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
index 19a9d359..ddbcf45 100644
--- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -18,6 +18,9 @@
  - qcom,retain-periph: Presence denotes a hardware requirement to leave the
 		     forced periph memory retention signal in the core's clock
 		     branch control registers asserted.
+ - qcom,retain-regs: Presence denotes a hardware requirement to enable the
+		     usage of retention registers which maintain their state
+		     after the GDSC is disabled and re-enabled.
  - qcom,skip-logic-collapse: Presence denotes a requirement to leave power to
                              the core's logic enabled.
  - qcom,support-hw-trigger: Presence denotes a hardware feature to switch
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-amoled-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-amoled-regulator.txt
index dca7de3..eaf2ded 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-amoled-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-amoled-regulator.txt
@@ -61,6 +61,12 @@
 		    controlled by SWIRE signal. When this is specified, output
 		    voltage of the regulator is not controlled by SW.
 
+- qcom,aod-pd-control:
+	Usage:      optional
+	Value type: <empty>
+	Definition: A boolean property to specify that the pull down control
+		    for AB/IBB needs to be configured during AOD mode.
+
 Example:
 
 pm8150a_amoled: oledb@e000 {
@@ -82,6 +88,7 @@
 		regulator-min-microvolt = <4600000>;
 		regulator-max-microvolt = <6100000>;
 		qcom,swire-control;
+		qcom,aod-pd-control;
 	};
 
 	ibb_vreg: ibb@dc00 {
@@ -91,5 +98,6 @@
 		regulator-min-microvolt = <800000>;
 		regulator-max-microvolt = <5400000>;
 		qcom,swire-control;
+		qcom,aod-pd-control;
 	};
 };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
index 800508a..08330eb 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
@@ -11,6 +11,10 @@
  - reg: Should be address and size of EUD register space
  - reg-names: Should be "eud_base"
 
+Optional properties:
+ - reg-names: Can be "eud_mode_mgr2" for secure EUD
+ - qcom,secure-eud-en: To enable secure EUD
+
 Driver notifies clients via extcon for VBUS spoof attach/detach
 and charger enable/disable events. Clients registered for these
 notifications should have extcon property set to eud.
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index f566b5e..80ac39a 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -220,6 +220,13 @@
                             inCall Music Delivery port ID is 32773.
                             incall Music 2 Delivery port ID is 32770.
 
+Optional properties:
+
+ - qcom,msm-dai-q6-slim-dev-id : The Slimbus HW device (instance) ID associated
+                                 with Slimbus ports.
+                                 0 - Slimbus HW device ID 0 (first instance)
+                                 1 - Slimbus HW device ID 1 (second instance)
+
 * msm_dai_cdc_dma
 
 [First Level Nodes]
@@ -553,6 +560,7 @@
 		qcom,msm-dai-q6-sb-0-rx {
 			compatible = "qcom,msm-dai-q6-dev";
 			qcom,msm-dai-q6-dev-id = <16384>;
+			qcom,msm-dai-q6-slim-dev-id = <0>;
 		};
 
 		qcom,msm-dai-q6-sb-0-tx {
@@ -1952,4 +1960,4 @@
 		voice_mhi_voting;
 	};
 
-};
\ No newline at end of file
+};
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index bb5f74f..db50c09 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -8,6 +8,7 @@
 	soundwire core registers.
  - clock-names : clock names defined for WSA macro
  - clocks : clock handles defined for WSA  macro
+ - qcom,default-clk-id: Default clk ID used for WSA macro
  - qcom,wsa-swr-gpios: phandle for SWR data and clock GPIOs of WSA macro
  - qcom,wsa-bcl-pmic-params: u8 array of PMIC ID, SID and PPID in same order
 			 required to be configured to receive interrupts
@@ -24,6 +25,7 @@
 		<&clock_audio_wsa_2 0>;
 		qcom,wsa-swr-gpios = &wsa_swr_gpios;
 		qcom,wsa-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 		swr_0: wsa_swr_master {
 			compatible = "qcom,swr-mstr";
 			wsa881x_1: wsa881x@20170212 {
@@ -43,6 +45,7 @@
 	soundwire core registers.
  - clock-names : clock names defined for VA macro
  - clocks : clock handles defined for VA macro
+ - qcom,default-clk-id: Default clk ID used for VA macro
  - va-vdd-micb-supply: phandle of mic bias supply's regulator device tree node
  - qcom,va-vdd-micb-voltage: mic bias supply's voltage level min and max in mV
  - qcom,va-vdd-micb-current: mic bias supply's max current in mA
@@ -61,6 +64,7 @@
 		reg = <0x0C490000 0x0>;
 		clock-names = "va_core_clk";
 		clocks = <&clock_audio_va 0>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 		va-vdd-micb-supply = <&S4A>;
 		qcom,va-vdd-micb-voltage = <1800000 1800000>;
 		qcom,va-vdd-micb-current = <11200>;
@@ -78,6 +82,7 @@
 	soundwire core registers.
  - clock-names : clock names defined for RX macro
  - clocks : clock handles defined for RX macro
+ - qcom,default-clk-id: Default clk ID used for RX macro
  - qcom,rx-swr-gpios: phandle for SWR data and clock GPIOs of RX macro
  - qcom,rx_mclk_mode_muxsel: register address for RX macro MCLK mode mux select
  - qcom,rx-bcl-pmic-params: u8 array of PMIC ID, SID and PPID in same order
@@ -96,6 +101,7 @@
 		qcom,rx-swr-gpios = <&rx_swr_gpios>;
 		qcom,rx_mclk_mode_muxsel = <0x62C25020>;
 		qcom,rx-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 		swr_1: rx_swr_master {
 			compatible = "qcom,swr-mstr";
 			wcd938x_rx_slave: wcd938x-rx-slave {
@@ -220,3 +226,35 @@
 	qcom,cdc-on-demand-supplies = "cdc-vdd-buck",
 				      "cdc-vdd-mic-bias";
 };
+
+Bolero Clock Resource Manager
+
+Required Properties:
+ - compatible = "qcom,bolero-clk-rsc-mngr";
+ - qcom,fs-gen-sequence: Register sequence for fs clock generation
+ - clock-names : clock names defined for WSA macro
+ - clocks : clock handles defined for WSA macro
+
+Optional Properties:
+ - qcom,rx_mclk_mode_muxsel: register address for RX macro MCLK mode mux select
+ - qcom,wsa_mclk_mode_muxsel: register address for WSA macro MCLK mux select
+ - qcom,va_mclk_mode_muxsel: register address for VA macro MCLK mode mux select
+
+Example:
+&bolero {
+	bolero-clock-rsc-manager {
+		compatible = "qcom,bolero-clk-rsc-mngr";
+		qcom,fs-gen-sequence = <0x3000 0x1>,
+				<0x3004 0x1>, <0x3080 0x2>;
+		qcom,rx_mclk_mode_muxsel = <0x033240D8>;
+		qcom,wsa_mclk_mode_muxsel = <0x033220D8>;
+		qcom,va_mclk_mode_muxsel = <0x033A0000>;
+		clock-names = "tx_core_clk", "tx_npl_clk", "rx_core_clk",
+			"rx_npl_clk", "wsa_core_clk", "wsa_npl_clk",
+			"va_core_clk", "va_npl_clk";
+		clocks = <&clock_audio_tx_1 0>, <&clock_audio_tx_2 0>,
+			<&clock_audio_rx_1 0>, <&clock_audio_rx_2 0>,
+			<&clock_audio_wsa_1 0>, <&clock_audio_wsa_2 0>,
+			<&clock_audio_va_1 0>, <&clock_audio_va_2 0>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt b/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt
index adb382b..12cf027 100644
--- a/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt
+++ b/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt
@@ -33,16 +33,17 @@
 		are the only acceptable sensor names,
 		1. pa
 		2. pa1
-		3. qfe_pa0
-		4. qfe_wtr0
-		5. modem_tsens
-		6. qfe_mmw0
-		7. qfe_mmw1
-		8. qfe_mmw2
-		9. qfe_mmw3
-		10. xo_therm
-		11. qfe_pa_mdm
-		12. qfe_pa_wtr
+		3. pa2
+		4. qfe_pa0
+		5. qfe_wtr0
+		6. modem_tsens
+		7. qfe_mmw0
+		8. qfe_mmw1
+		9. qfe_mmw2
+		10. qfe_mmw3
+		11. xo_therm
+		12. qfe_pa_mdm
+		13. qfe_pa_wtr
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index 842292b..c1508e4 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -29,9 +29,9 @@
 - vdda-pll-supply   : phandle to PHY PLL and Power-Gen block power supply
 - clocks	    : List of phandle and clock specifier pairs
 - clock-names       : List of clock input name strings sorted in the same
-		      order as the clocks property. "ref_clk_src", "ref_clk",
+		      order as the clocks property. "ref_clk_src",
 		      "tx_iface_clk" & "rx_iface_clk" are mandatory but
-		      "ref_clk_parent" is optional
+		      "ref_clk_parent" and "ref_clk" are optional
 
 Optional properties:
 - vdda-phy-max-microamp : specifies max. load that can be drawn from phy supply
diff --git a/Documentation/driver-api/i3c/device-driver-api.rst b/Documentation/driver-api/i3c/device-driver-api.rst
new file mode 100644
index 0000000..85bc3381
--- /dev/null
+++ b/Documentation/driver-api/i3c/device-driver-api.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+I3C device driver API
+=====================
+
+.. kernel-doc:: include/linux/i3c/device.h
+
+.. kernel-doc:: drivers/i3c/device.c
diff --git a/Documentation/driver-api/i3c/index.rst b/Documentation/driver-api/i3c/index.rst
new file mode 100644
index 0000000..783d6da
--- /dev/null
+++ b/Documentation/driver-api/i3c/index.rst
@@ -0,0 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+I3C subsystem
+=============
+
+.. toctree::
+
+   protocol
+   device-driver-api
+   master-driver-api
diff --git a/Documentation/driver-api/i3c/master-driver-api.rst b/Documentation/driver-api/i3c/master-driver-api.rst
new file mode 100644
index 0000000..332552b
--- /dev/null
+++ b/Documentation/driver-api/i3c/master-driver-api.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
+I3C master controller driver API
+================================
+
+.. kernel-doc:: drivers/i3c/master.c
+
+.. kernel-doc:: include/linux/i3c/master.h
diff --git a/Documentation/driver-api/i3c/protocol.rst b/Documentation/driver-api/i3c/protocol.rst
new file mode 100644
index 0000000..dae3b6d
--- /dev/null
+++ b/Documentation/driver-api/i3c/protocol.rst
@@ -0,0 +1,203 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+I3C protocol
+============
+
+Disclaimer
+==========
+
+This chapter will focus on aspects that matter to software developers. For
+everything hardware related (like how things are transmitted on the bus, how
+collisions are prevented, ...) please have a look at the I3C specification.
+
+This document is just a brief introduction to the I3C protocol and the concepts
+it brings to the table. If you need more information, please refer to the MIPI
+I3C specification (can be downloaded here
+http://resources.mipi.org/mipi-i3c-v1-download).
+
+Introduction
+============
+
+The I3C (pronounced 'eye-three-see') is a MIPI standardized protocol designed
+to overcome I2C limitations (limited speed, external signals needed for
+interrupts, no automatic detection of the devices connected to the bus, ...)
+while remaining power-efficient.
+
+I3C Bus
+=======
+
+An I3C bus is made of several I3C devices and possibly some I2C devices as
+well, but let's focus on I3C devices for now.
+
+An I3C device on the I3C bus can have one of the following roles:
+
+* Master: the device is driving the bus. It's the one in charge of initiating
+  transactions or deciding who is allowed to talk on the bus (slave generated
+  events are possible in I3C, see below).
+* Slave: the device acts as a slave, and is not able to send frames to another
+  slave on the bus. The device can still send events to the master on
+  its own initiative if the master allowed it.
+
+I3C is a multi-master protocol, so there might be several masters on a bus,
+though only one device can act as a master at a given time. In order to gain
+bus ownership, a master has to follow a specific procedure.
+
+Each device on the I3C bus has to be assigned a dynamic address to be able to
+communicate. Until this is done, the device should only respond to a limited
+set of commands. If it has a static address (also called legacy I2C address),
+the device can reply to I2C transfers.
+
+In addition to these per-device addresses, the protocol defines a broadcast
+address in order to address all devices on the bus.
+
+Once a dynamic address has been assigned to a device, this address will be used
+for any direct communication with the device. Note that even after being
+assigned a dynamic address, the device should still process broadcast messages.
+
+I3C Device discovery
+====================
+
+The I3C protocol defines a mechanism to automatically discover devices present
+on the bus, their capabilities and the functionalities they provide. In this
+regard I3C is closer to a discoverable bus like USB than it is to I2C or SPI.
+
+The discovery mechanism is called DAA (Dynamic Address Assignment), because it
+not only discovers devices but also assigns them a dynamic address.
+
+During DAA, each I3C device reports 3 important things:
+
+* BCR: Bus Characteristic Register. This 8-bit register describes the device's
+  bus-related capabilities
+* DCR: Device Characteristic Register. This 8-bit register describes the
+  functionalities provided by the device
+* Provisional ID: A 48-bit unique identifier. On a given bus there should be no
+  Provisional ID collision, otherwise the discovery mechanism may fail.
+
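+A minimal sketch of how this DAA information might be captured in software
+(illustrative structure only; field widths follow the list above)::
+
+    #include <stdint.h>
+
+    struct i3c_daa_info {
+            uint8_t  bcr;   /* Bus Characteristic Register */
+            uint8_t  dcr;   /* Device Characteristic Register */
+            uint64_t pid;   /* 48-bit Provisional ID, stored in 64 bits */
+    };
+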
+I3C slave events
+================
+
+The I3C protocol allows slaves to generate events on their own, and thus allows
+them to take temporary control of the bus.
+
+This mechanism is called IBI for In Band Interrupts, and as stated in the name,
+it allows devices to generate interrupts without requiring an external signal.
+
+During DAA, each device on the bus has been assigned an address, and this
+address will serve as a priority identifier to determine who wins if 2 different
+devices are generating an interrupt at the same moment on the bus (the lower the
+dynamic address the higher the priority).
+
+Masters are allowed to inhibit interrupts if they want to. This inhibition
+request can be broadcast (applies to all devices) or sent to a specific
+device.
+
+I3C Hot-Join
+============
+
+The Hot-Join mechanism is similar to USB hotplug. This mechanism allows
+slaves to join the bus after it has been initialized by the master.
+
+This covers the following use cases:
+
+* the device is not powered when the bus is probed
+* the device is hotplugged on the bus through an extension board
+
+This mechanism relies on slave events to inform the master that a new
+device joined the bus and is waiting for a dynamic address.
+
+The master is then free to address the request as it wishes: ignore it or
+assign a dynamic address to the slave.
+
+I3C transfer types
+==================
+
+If you omit SMBus (which is just a standardization on how to access registers
+exposed by I2C devices), I2C has only one transfer type.
+
+I3C defines 3 different classes of transfer in addition to I2C transfers which
+are here for backward compatibility with I2C devices.
+
+I3C CCC commands
+----------------
+
+CCC (Common Command Code) commands are meant to be used for anything that is
+related to bus management and all features that are common to a set of devices.
+
+CCC commands contain an 8-bit CCC ID describing the command that is executed.
+The MSB of this ID specifies whether this is a broadcast command (bit7 = 0) or a
+unicast one (bit7 = 1).
+
+The command ID can be followed by a payload. Depending on the command, this
+payload is either sent by the master sending the command (write CCC command),
+or sent by the slave receiving the command (read CCC command). Of course, read
+accesses only apply to unicast commands.
+Note that, when sending a CCC command to a specific device, the device address
+is passed in the first byte of the payload.
+
+The payload length is not explicitly passed on the bus, and should be extracted
+from the CCC ID.
+
+Note that vendors can use a dedicated range of CCC IDs for their own commands
+(0x61-0x7f and 0xe0-0xef).
+
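+A short, hedged sketch of classifying a CCC ID according to the bit-7 rule
+above (illustrative helper, not a kernel API)::
+
+    #include <stdbool.h>
+    #include <stdint.h>
+
+    static bool i3c_ccc_is_broadcast(uint8_t ccc_id)
+    {
+            /* bit7 = 0: broadcast; bit7 = 1: unicast (direct) */
+            return !(ccc_id & 0x80);
+    }
+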
+I3C Private SDR transfers
+-------------------------
+
+Private SDR (Single Data Rate) transfers should be used for anything that is
+device specific and does not require high transfer speed.
+
+It is the equivalent of I2C transfers but in the I3C world. Each transfer is
+passed the device address (dynamic address assigned during DAA), a payload
+and a direction.
+
+The only difference with I2C is that the transfer is much faster (typical clock
+frequency is 12.5MHz).
+
+I3C HDR commands
+----------------
+
+HDR commands should be used for anything that is device specific and requires
+high transfer speed.
+
+The first thing attached to an HDR command is the HDR mode. There are currently
+3 different modes defined by the I3C specification (refer to the specification
+for more details):
+
+* HDR-DDR: Double Data Rate mode
+* HDR-TSP: Ternary Symbol Pure. Only usable on busses with no I2C devices
+* HDR-TSL: Ternary Symbol Legacy. Usable on busses with I2C devices
+
+When sending an HDR command, the whole bus has to enter HDR mode, which is done
+using a broadcast CCC command.
+Once the bus has entered a specific HDR mode, the master sends the HDR command.
+An HDR command is made of:
+
+* one 16-bit command word in big endian
+* N 16-bit data words in big endian
+
+Those words may be wrapped with specific preambles/post-ambles which depend on
+the chosen HDR mode and are not detailed here (see the specification for more
+details).
+
+The 16-bit command word is made of the following fields (a minimal packing
+sketch follows the list):
+
+* bit[15]: direction bit, read is 1, write is 0
+* bit[14:8]: command code. Identifies the command being executed, the amount of
+  data words and their meaning
+* bit[7:1]: I3C address of the device this command is addressed to
+* bit[0]: reserved/parity-bit
+
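+A minimal packing sketch following the field layout above (illustrative
+helper, not a kernel API)::
+
+    #include <stdint.h>
+
+    static uint16_t i3c_hdr_cmd_word(int read, uint8_t code, uint8_t addr)
+    {
+            uint16_t word = 0;
+
+            word |= (uint16_t)(read ? 1 : 0) << 15; /* bit[15]: direction */
+            word |= (uint16_t)(code & 0x7f) << 8;   /* bit[14:8]: command code */
+            word |= (uint16_t)(addr & 0x7f) << 1;   /* bit[7:1]: device address */
+            /* bit[0] left as 0: reserved/parity bit */
+            return word;
+    }
+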
+Backward compatibility with I2C devices
+=======================================
+
+The I3C protocol has been designed to be backward compatible with I2C devices.
+This backward compatibility allows one to connect a mix of I2C and I3C devices
+on the same bus, though, in order to be really efficient, I2C devices should
+be equipped with 50 ns spike filters.
+
+I2C devices can't be discovered like I3C ones and have to be statically
+declared. In order to let the master know what these devices are capable of
+(both in terms of bus related limitations and functionalities), the software
+has to provide some information, which is done through the LVR (Legacy I2C
+Virtual Register).
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 6d9f2f9..cc6a33f 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -32,6 +32,7 @@
    pci
    spi
    i2c
+   i3c/index
    hsi
    edac
    scsi
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index eef7d9d..f205898 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -102,6 +102,29 @@
 such as metadata and extended attributes are reported for the upper
 directory only.  These attributes of the lower directory are hidden.
 
+credentials
+-----------
+
+By default, all access to the upper, lower and work directories is
+performed using the mounter's recorded MAC and DAC credentials.  The
+incoming accesses are checked against the caller's credentials.
+
+In the case where the caller's MAC or DAC credentials do not overlap
+with the mounter's, a use case available in older versions of the
+driver, the override_creds mount flag can be turned off.  This helps
+when the use pattern has a caller with legitimate credentials where
+the mounter does not.  Several unintended side effects will occur,
+though.  A caller without certain key capabilities, or with lower
+privilege, will not always be able to delete files or directories,
+create nodes, or search some restricted directories.  The ability to
+search and read a directory entry is spotty as a result of the cache
+mechanism not retesting the credentials, on the assumption that a
+privileged caller can fill the cache and a lower-privileged caller can
+then read the directory cache.  The uneven security model, where the
+cache, upperdir and workdir are opened with privilege but accessed
+without creating a form of privilege escalation, should only be used
+with a strict understanding of the side effects and of the security
+policies.
+
 whiteouts and opaque directories
 --------------------------------
 
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 02ba213..a9ce10b 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -499,7 +499,9 @@
 
 Note that there is no guarantee that every flag and associated mnemonic will
 be present in all further kernel releases. Things get changed, the flags may
-be vanished or the reverse -- new added.
+be vanished or the reverse -- new added. Interpretation of their meaning
+might change in the future as well, so each consumer of these flags has
+to follow each specific kernel version for the exact semantics.
 
 The "Name" field will only be present on a mapping that has been named by
 userspace, and will show the name passed in by userspace.
diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt
index 381e5b2..46c8d8b 100644
--- a/Documentation/networking/regulatory.txt
+++ b/Documentation/networking/regulatory.txt
@@ -200,5 +200,23 @@
 Statically compiled regulatory database
 ---------------------------------------
 
-When a database should be fixed into the kernel, it can be provided as a
-firmware file at build time that is then linked into the kernel.
+In most situations the userland solution using CRDA as described
+above is the preferred solution.  However in some cases a set of
+rules built into the kernel itself may be desirable.  To account
+for this situation, a configuration option has been provided
+(i.e. CONFIG_CFG80211_INTERNAL_REGDB).  With this option enabled,
+the wireless database information contained in net/wireless/db.txt is
+used to generate a data structure encoded in net/wireless/regdb.c.
+That option also enables code in net/wireless/reg.c which queries
+the data in regdb.c as an alternative to using CRDA.
+
+The file net/wireless/db.txt should be kept up-to-date with the db.txt
+file available in the git repository here:
+
+    git://git.kernel.org/pub/scm/linux/kernel/git/sforshee/wireless-regdb.git
+
+Again, most users in most situations should be using the CRDA package
+provided with their distribution, and in most other situations users
+should be building and using CRDA on their own rather than using
+this option.  If you are not absolutely sure that you should be using
+CONFIG_CFG80211_INTERNAL_REGDB then _DO_NOT_USE_IT_.
diff --git a/Documentation/power/energy-model.txt b/Documentation/power/energy-model.txt
new file mode 100644
index 0000000..5a23c6f
--- /dev/null
+++ b/Documentation/power/energy-model.txt
@@ -0,0 +1,169 @@
+                           ====================
+                           Energy Model of CPUs
+                           ====================
+
+1. Overview
+-----------
+
+The Energy Model (EM) framework serves as an interface between drivers knowing
+the power consumed by CPUs at various performance levels, and the kernel
+subsystems willing to use that information to make energy-aware decisions.
+
+The source of the information about the power consumed by CPUs can vary greatly
+from one platform to another. These power costs can be estimated using
+devicetree data in some cases. In others, the firmware will know better.
+Alternatively, userspace might be best positioned. And so on. In order to avoid
+each and every client subsystem re-implementing support for each and every
+possible source of information on its own, the EM framework intervenes as an
+abstraction layer which standardizes the format of power cost tables in the
+kernel, hence avoiding redundant work.
+
+The figure below depicts an example of drivers (Arm-specific here, but the
+approach is applicable to any architecture) providing power costs to the EM
+framework, and interested clients reading the data from it.
+
+       +---------------+  +-----------------+  +---------------+
+       | Thermal (IPA) |  | Scheduler (EAS) |  |     Other     |
+       +---------------+  +-----------------+  +---------------+
+               |                   | em_pd_energy()    |
+               |                   | em_cpu_get()      |
+               +---------+         |         +---------+
+                         |         |         |
+                         v         v         v
+                        +---------------------+
+                        |    Energy Model     |
+                        |     Framework       |
+                        +---------------------+
+                           ^       ^       ^
+                           |       |       | em_register_perf_domain()
+                +----------+       |       +---------+
+                |                  |                 |
+        +---------------+  +---------------+  +--------------+
+        |  cpufreq-dt   |  |   arm_scmi    |  |    Other     |
+        +---------------+  +---------------+  +--------------+
+                ^                  ^                 ^
+                |                  |                 |
+        +--------------+   +---------------+  +--------------+
+        | Device Tree  |   |   Firmware    |  |      ?       |
+        +--------------+   +---------------+  +--------------+
+
+The EM framework manages power cost tables per 'performance domain' in the
+system. A performance domain is a group of CPUs whose performance is scaled
+together. Performance domains generally have a 1-to-1 mapping with CPUFreq
+policies. All CPUs in a performance domain are required to have the same
+micro-architecture. CPUs in different performance domains can have different
+micro-architectures.
+
+
+2. Core APIs
+------------
+
+  2.1 Config options
+
+CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
+
+
+  2.2 Registration of performance domains
+
+Drivers are expected to register performance domains into the EM framework by
+calling the following API:
+
+  int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
+			      struct em_data_callback *cb);
+
+Drivers must specify the CPUs of the performance domains using the cpumask
+argument, and provide a callback function returning <frequency, power> tuples
+for each capacity state. The callback function provided by the driver is free
+to fetch data from any relevant location (DT, firmware, ...), and by any means
+deemed necessary. See Section 3. for an example of a driver implementing this
+callback, and kernel/power/energy_model.c for further documentation on this
+API.
+
+
+  2.3 Accessing performance domains
+
+Subsystems interested in the energy model of a CPU can retrieve it using the
+em_cpu_get() API. The energy model tables are allocated once upon creation of
+the performance domains, and kept in memory untouched.
+
+The energy consumed by a performance domain can be estimated using the
+em_pd_energy() API. The estimation is performed assuming that the schedutil
+CPUfreq governor is in use.
+
+More details about the above APIs can be found in include/linux/energy_model.h.
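+
+A brief, hedged usage sketch (error handling elided; 'cpu', 'max_util' and
+'sum_util' are assumed to be provided by the caller):
+
+	struct em_perf_domain *pd;
+	unsigned long energy;
+
+	pd = em_cpu_get(cpu);
+	if (pd)
+		energy = em_pd_energy(pd, max_util, sum_util);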
+
+
+3. Example driver
+-----------------
+
+This section provides a simple example of a CPUFreq driver registering a
+performance domain in the Energy Model framework using the (fake) 'foo'
+protocol. The driver implements an est_power() function to be provided to the
+EM framework.
+
+ -> drivers/cpufreq/foo_cpufreq.c
+
+01	static int est_power(unsigned long *mW, unsigned long *KHz, int cpu)
+02	{
+03		long freq, power;
+04
+05		/* Use the 'foo' protocol to ceil the frequency */
+06		freq = foo_get_freq_ceil(cpu, *KHz);
+07		if (freq < 0)
+08			return freq;
+09
+10		/* Estimate the power cost for the CPU at the relevant freq. */
+11		power = foo_estimate_power(cpu, freq);
+12		if (power < 0)
+13			return power;
+14
+15		/* Return the values to the EM framework */
+16		*mW = power;
+17		*KHz = freq;
+18
+19		return 0;
+20	}
+21
+22	static int foo_cpufreq_init(struct cpufreq_policy *policy)
+23	{
+24		struct em_data_callback em_cb = EM_DATA_CB(est_power);
+25		int nr_opp, ret;
+26
+27		/* Do the actual CPUFreq init work ... */
+28		ret = do_foo_cpufreq_init(policy);
+29		if (ret)
+30			return ret;
+31
+32		/* Find the number of OPPs for this policy */
+33		nr_opp = foo_get_nr_opp(policy);
+34
+35		/* And register the new performance domain */
+36		em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+37
+38		return 0;
+39	}
+
+
+4. Support for legacy Energy Models (DEPRECATED)
+------------------------------------------------
+
+The Android kernel version 4.14 and before used a different type of EM for EAS,
+referred to as the 'legacy' EM. The legacy EM relies on the out-of-tree
+'sched-energy-costs' devicetree bindings to provide the kernel with power costs.
+The usage of such bindings in Android has now been DEPRECATED in favour of the
+mainline equivalents.
+
+The currently supported alternatives to populate the EM include:
+ - using a firmware-based solution such as Arm SCMI (supported in
+   drivers/cpufreq/scmi-cpufreq.c);
+ - using the 'dynamic-power-coefficient' devicetree binding together with
+   PM_OPP. See the of_dev_pm_opp_get_cpu_power() helper in PM_OPP, and the
+   reference implementation in drivers/cpufreq/cpufreq-dt.c.
+
+In order to ease the transition to the new EM format, Android 4.19 also provides
+a compatibility driver able to load a legacy EM from DT into the EM framework.
+*** Please note that THIS FEATURE WILL NOT BE AVAILABLE in future Android
+kernels, and as such it must be considered only as a temporary workaround. ***
+
+If you know what you're doing and still want to use this driver, you need to set
+CONFIG_LEGACY_ENERGY_MODEL_DT=y in your kernel configuration to enable it.
diff --git a/Documentation/scheduler/sched-energy.txt b/Documentation/scheduler/sched-energy.txt
new file mode 100644
index 0000000..197d81f
--- /dev/null
+++ b/Documentation/scheduler/sched-energy.txt
@@ -0,0 +1,425 @@
+			   =======================
+			   Energy Aware Scheduling
+			   =======================
+
+1. Introduction
+---------------
+
+Energy Aware Scheduling (or EAS) gives the scheduler the ability to predict
+the impact of its decisions on the energy consumed by CPUs. EAS relies on an
+Energy Model (EM) of the CPUs to select an energy efficient CPU for each task,
+with a minimal impact on throughput. This document aims at providing an
+introduction to how EAS works, what the main design decisions behind it
+are, and what is needed to get it to run.
+
+Before going any further, please note that at the time of writing:
+
+   /!\ EAS does not support platforms with symmetric CPU topologies /!\
+
+EAS operates only on heterogeneous CPU topologies (such as Arm big.LITTLE)
+because this is where the potential for saving energy through scheduling is
+the highest.
+
+The actual EM used by EAS is _not_ maintained by the scheduler, but by a
+dedicated framework. For details about this framework and what it provides,
+please refer to its documentation (see Documentation/power/energy-model.txt).
+
+
+2. Background and Terminology
+-----------------------------
+
+To make it clear from the start:
+ - energy = [joule] (resource like a battery on powered devices)
+ - power = energy/time = [joule/second] = [watt]
+
+The goal of EAS is to minimize energy, while still getting the job done. That
+is, we want to maximize:
+
+	performance [inst/s]
+	--------------------
+	    power [W]
+
+which is equivalent to minimizing:
+
+	energy [J]
+	-----------
+	instruction
+
+while still getting 'good' performance. It is essentially an alternative
+optimization objective to the current performance-only objective for the
+scheduler. This alternative considers two objectives: energy-efficiency and
+performance.
+
+The idea behind introducing an EM is to allow the scheduler to evaluate the
+implications of its decisions rather than blindly applying energy-saving
+techniques that may have positive effects only on some platforms. At the same
+time, the EM must be as simple as possible to minimize the scheduler latency
+impact.
+
+In short, EAS changes the way CFS tasks are assigned to CPUs. When it is time
+for the scheduler to decide where a task should run (during wake-up), the EM
+is used to break the tie between several good CPU candidates and pick the one
+that is predicted to yield the best energy consumption without harming the
+system's throughput. The predictions made by EAS rely on specific elements of
+knowledge about the platform's topology, which include the 'capacity' of CPUs,
+and their respective energy costs.
+
+
+3. Topology information
+-----------------------
+
+EAS (as well as the rest of the scheduler) uses the notion of 'capacity' to
+differentiate CPUs with different computing throughput. The 'capacity' of a CPU
+represents the amount of work it can absorb when running at its highest
+frequency compared to the most capable CPU of the system. Capacity values are
+normalized in a 1024 range, and are comparable with the utilization signals of
+tasks and CPUs computed by the Per-Entity Load Tracking (PELT) mechanism. Thanks
+to capacity and utilization values, EAS is able to estimate how big/busy a
+task/CPU is, and to take this into consideration when evaluating performance vs
+energy trade-offs. The capacity of CPUs is provided via arch-specific code
+through the arch_scale_cpu_capacity() callback.
+
+The rest of platform knowledge used by EAS is directly read from the Energy
+Model (EM) framework. The EM of a platform is composed of a power cost table
+per 'performance domain' in the system (see Documentation/power/energy-model.txt
+for further details about performance domains).
+
+The scheduler manages references to the EM objects in the topology code when the
+scheduling domains are built, or re-built. For each root domain (rd), the
+scheduler maintains a singly linked list of all performance domains intersecting
+the current rd->span. Each node in the list contains a pointer to a struct
+em_perf_domain as provided by the EM framework.
+
+The lists are attached to the root domains in order to cope with exclusive
+cpuset configurations. Since the boundaries of exclusive cpusets do not
+necessarily match those of performance domains, the lists of different root
+domains can contain duplicate elements.
+
+Example 1.
+    Let us consider a platform with 12 CPUs, split in 3 performance domains
+    (pd0, pd4 and pd8), organized as follows:
+
+	          CPUs:   0 1 2 3 4 5 6 7 8 9 10 11
+	          PDs:   |--pd0--|--pd4--|---pd8---|
+	          RDs:   |----rd1----|-----rd2-----|
+
+    Now, consider that userspace decided to split the system with two
+    exclusive cpusets, hence creating two independent root domains, each
+    containing 6 CPUs. The two root domains are denoted rd1 and rd2 in the
+    above figure. Since pd4 intersects with both rd1 and rd2, it will be
+    present in the linked list '->pd' attached to each of them:
+       * rd1->pd: pd0 -> pd4
+       * rd2->pd: pd4 -> pd8
+
+    Please note that the scheduler will create two duplicate list nodes for
+    pd4 (one for each list). However, both just hold a pointer to the same
+    shared data structure of the EM framework.
+
+Since the access to these lists can happen concurrently with hotplug and other
+things, they are protected by RCU, like the rest of topology structures
+manipulated by the scheduler.
+
+EAS also maintains a static key (sched_energy_present) which is enabled when at
+least one root domain meets all conditions for EAS to start. Those conditions
+are summarized in Section 6.
+
+
+4. Energy-Aware task placement
+------------------------------
+
+EAS overrides the CFS task wake-up balancing code. It uses the EM of the
+platform and the PELT signals to choose an energy-efficient target CPU during
+wake-up balance. When EAS is enabled, select_task_rq_fair() calls
+find_energy_efficient_cpu() to do the placement decision. This function looks
+for the CPU with the highest spare capacity (CPU capacity - CPU utilization) in
+each performance domain since it is the one which will allow us to keep the
+frequency the lowest. Then, the function checks if placing the task there could
+save energy compared to leaving it on prev_cpu, i.e. the CPU where the task ran
+in its previous activation.
+
+find_energy_efficient_cpu() uses compute_energy() to estimate what will be the
+energy consumed by the system if the waking task was migrated. compute_energy()
+looks at the current utilization landscape of the CPUs and adjusts it to
+'simulate' the task migration. The EM framework provides the em_pd_energy() API
+which computes the expected energy consumption of each performance domain for
+the given utilization landscape.
+
+An example of energy-optimized task placement decision is detailed below.
+
+Example 2.
+    Let us consider a (fake) platform with 2 independent performance domains
+    composed of two CPUs each. CPU0 and CPU1 are little CPUs; CPU2 and CPU3
+    are big.
+
+    The scheduler must decide where to place a task P whose util_avg = 200
+    and prev_cpu = 0.
+
+    The current utilization landscape of the CPUs is depicted on the graph
+    below. CPUs 0-3 have a util_avg of 400, 100, 600 and 500 respectively.
+    Each performance domain has three Operating Performance Points (OPPs).
+    The CPU capacity and power cost associated with each OPP is listed in
+    the Energy Model table. The util_avg of P is shown on the figures
+    below as 'PP'.
+
+    CPU util.
+      1024                 - - - - - - -              Energy Model
+                                               +-----------+-------------+
+                                               |  Little   |     Big     |
+       768                 =============       +-----+-----+------+------+
+                                               | Cap | Pwr | Cap  | Pwr  |
+                                               +-----+-----+------+------+
+       512  ===========    - ##- - - - -       | 170 | 50  | 512  | 400  |
+                             ##     ##         | 341 | 150 | 768  | 800  |
+       341  -PP - - - -      ##     ##         | 512 | 300 | 1024 | 1700 |
+             PP              ##     ##         +-----+-----+------+------+
+       170  -## - - - -      ##     ##
+             ##     ##       ##     ##
+           ------------    -------------
+            CPU0   CPU1     CPU2   CPU3
+
+      Current OPP: =====       Other OPP: - - -     util_avg (100 each): ##
+
+
+    find_energy_efficient_cpu() will first look for the CPUs with the
+    maximum spare capacity in the two performance domains. In this example,
+    CPU1 and CPU3. Then it will estimate the energy of the system if P was
+    placed on either of them, and check if that would save some energy
+    compared to leaving P on CPU0. EAS assumes that OPPs follow utilization
+    (which is coherent with the behaviour of the schedutil CPUFreq
+    governor, see Section 6. for more details on this topic).
+
+    Case 1. P is migrated to CPU1
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+      1024                 - - - - - - -
+
+                                            Energy calculation:
+       768                 =============     * CPU0: 200 / 341 * 150 = 88
+                                             * CPU1: 300 / 341 * 150 = 131
+                                             * CPU2: 600 / 768 * 800 = 625
+       512  - - - - - -    - ##- - - - -     * CPU3: 500 / 768 * 800 = 520
+                             ##     ##          => total_energy = 1364
+       341  ===========      ##     ##
+                    PP       ##     ##
+       170  -## - - PP-      ##     ##
+             ##     ##       ##     ##
+           ------------    -------------
+            CPU0   CPU1     CPU2   CPU3
+
+
+    Case 2. P is migrated to CPU3
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+      1024                 - - - - - - -
+
+                                            Energy calculation:
+       768                 =============     * CPU0: 200 / 341 * 150 = 88
+                                             * CPU1: 100 / 341 * 150 = 43
+                                    PP       * CPU2: 600 / 768 * 800 = 625
+       512  - - - - - -    - ##- - -PP -     * CPU3: 700 / 768 * 800 = 729
+                             ##     ##          => total_energy = 1485
+       341  ===========      ##     ##
+                             ##     ##
+       170  -## - - - -      ##     ##
+             ##     ##       ##     ##
+           ------------    -------------
+            CPU0   CPU1     CPU2   CPU3
+
+
+    Case 3. P stays on prev_cpu / CPU 0
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+      1024                 - - - - - - -
+
+                                            Energy calculation:
+       768                 =============     * CPU0: 400 / 512 * 300 = 234
+                                             * CPU1: 100 / 512 * 300 = 58
+                                             * CPU2: 600 / 768 * 800 = 625
+       512  ===========    - ##- - - - -     * CPU3: 500 / 768 * 800 = 520
+                             ##     ##          => total_energy = 1437
+       341  -PP - - - -      ##     ##
+             PP              ##     ##
+       170  -## - - - -      ##     ##
+             ##     ##       ##     ##
+           ------------    -------------
+            CPU0   CPU1     CPU2   CPU3
+
+
+    From these calculations, Case 1 has the lowest total energy. So CPU 1
+    is the best candidate from an energy-efficiency standpoint.
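+
+    For illustration, each per-CPU term in the calculations above is just
+    the CPU's busy time ratio at the estimated OPP multiplied by the power
+    cost of that OPP. A minimal sketch of this arithmetic (illustrative
+    names, not the kernel's actual helpers) could look like this:
+
+	struct opp_est { unsigned long cap, power; };
+
+	/* energy ~= util / cap * power; e.g. 200/341 * 150 ~= 88 (Case 1) */
+	static unsigned long cpu_energy(unsigned long util,
+					const struct opp_est *opp)
+	{
+		return util * opp->power / opp->cap;
+	}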
+
+Big CPUs are generally more power hungry than the little ones and are thus used
+mainly when a task doesn't fit the littles. However, little CPUs aren't always
+more energy-efficient than big CPUs. For some systems, the high OPPs
+of the little CPUs can be less energy-efficient than the lowest OPPs of the
+bigs, for example. So, if the little CPUs happen to have enough utilization at
+a specific point in time, a small task waking up at that moment could be better
+off executing on the big side in order to save energy, even though it would fit
+on the little side.
+
+And even in the case where all OPPs of the big CPUs are less energy-efficient
+than those of the littles, using the big CPUs for a small task might still, under
+specific conditions, save energy. Indeed, placing a task on a little CPU can
+result in raising the OPP of the entire performance domain, and that will
+increase the cost of the tasks already running there. If the waking task is
+placed on a big CPU, its own execution cost might be higher than if it was
+running on a little, but it won't impact the other tasks of the little CPUs
+which will keep running at a lower OPP. So, when considering the total energy
+consumed by CPUs, the extra cost of running that one task on a big core can be
+smaller than the cost of raising the OPP on the little CPUs for all the other
+tasks.
+
+The examples above would be nearly impossible to get right in a generic way, and
+for all platforms, without knowing the cost of running at different OPPs on all
+CPUs of the system. Thanks to its EM-based design, EAS should cope with them
+correctly without too much trouble. However, in order to ensure a minimal
+impact on throughput for high-utilization scenarios, EAS also implements another
+mechanism called 'over-utilization'.
+
+
+5. Over-utilization
+-------------------
+
+From a general standpoint, the use-cases where EAS can help the most are those
+involving a light/medium CPU utilization. Whenever long CPU-bound tasks are
+being run, they will require all of the available CPU capacity, and there isn't
+much that can be done by the scheduler to save energy without severely harming
+throughput. In order to avoid hurting performance with EAS, CPUs are flagged as
+'over-utilized' as soon as they are used at more than 80% of their compute
+capacity. As long as no CPUs are over-utilized in a root domain, load balancing
+is disabled and EAS overrides the wake-up balancing code. EAS is likely to load
+the most energy efficient CPUs of the system more than the others if that can be
+done without harming throughput. So, the load-balancer is disabled to prevent
+it from breaking the energy-efficient task placement found by EAS. It is safe to
+do so when the system isn't overutilized since being below the 80% tipping point
+implies that:
+
+    a. there is some idle time on all CPUs, so the utilization signals used by
+       EAS are likely to accurately represent the 'size' of the various tasks
+       in the system;
+    b. all tasks should already be provided with enough CPU capacity,
+       regardless of their nice values;
+    c. since there is spare capacity, all tasks must be blocking/sleeping
+       regularly and balancing at wake-up is sufficient.
+
+As soon as one CPU goes above the 80% tipping point, at least one of the three
+assumptions above becomes incorrect. In this scenario, the 'overutilized' flag
+is raised for the entire root domain, EAS is disabled, and the load-balancer is
+re-enabled. By doing so, the scheduler falls back onto load-based algorithms for
+wake-up and load balance under CPU-bound conditions, which better respects the
+nice values of tasks.
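+
+The 80% tipping point itself boils down to a cheap per-CPU capacity check. A
+minimal sketch (the helper name is illustrative; the actual kernel code
+applies a fixed-point margin on its 1024-based capacity scale):
+
+	/* util > ~80% of capacity  <=>  util * 1.25 > capacity */
+	static bool cpu_is_overutilized(unsigned long util,
+					unsigned long capacity)
+	{
+		return util * 1280 > capacity * 1024;
+	}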
+
+Since the notion of overutilization largely relies on detecting whether or not
+there is some idle time in the system, the CPU capacity 'stolen' by higher
+(than CFS) scheduling classes (as well as IRQ) must be taken into account. As
+such, the detection of overutilization accounts for the capacity used not only
+by CFS tasks, but also by the other scheduling classes and IRQ.
+
+
+6. Dependencies and requirements for EAS
+----------------------------------------
+
+Energy Aware Scheduling depends on the CPUs of the system having specific
+hardware properties and on other features of the kernel being enabled. This
+section lists these dependencies and provides hints as to how they can be met.
+
+
+  6.1 - Asymmetric CPU topology
+
+As mentioned in the introduction, EAS is only supported on platforms with
+asymmetric CPU topologies for now. This requirement is checked at run-time by
+looking for the presence of the SD_ASYM_CPUCAPACITY flag when the scheduling
+domains are built.
+
+The flag is set/cleared automatically by the scheduler topology code whenever
+there are CPUs with different capacities in a root domain. The capacities of
+CPUs are provided by arch-specific code through the arch_scale_cpu_capacity()
+callback. As an example, arm and arm64 share an implementation of this callback
+which uses a combination of CPUFreq data and device-tree bindings to compute the
+capacity of CPUs (see drivers/base/arch_topology.c for more details).
+
+So, in order to use EAS on your platform your architecture must implement the
+arch_scale_cpu_capacity() callback, and some of the CPUs must have a lower
+capacity than others.
+
+Please note that EAS is not fundamentally incompatible with SMP, but no
+significant savings on SMP platforms have been observed yet. This restriction
+could be amended in the future if proven otherwise.
+
+
+  6.2 - Energy Model presence
+
+EAS uses the EM of a platform to estimate the impact of scheduling decisions on
+energy. So, your platform must provide power cost tables to the EM framework in
+order to make EAS start. To do so, please refer to the documentation of the
+independent EM framework in Documentation/power/energy-model.txt.
+
+Please also note that the scheduling domains need to be re-built after the
+EM has been registered in order to start EAS.
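+
+As a rough sketch of what such a registration could look like (hedged against
+the EM framework API at the time of writing; see energy-model.txt for the
+authoritative callback semantics):
+
+	static int plat_active_power(unsigned long *mW, unsigned long *KHz,
+				     int cpu)
+	{
+		/*
+		 * Illustrative constant cost; a real driver returns the
+		 * power and frequency of the lowest OPP at or above *KHz.
+		 */
+		*KHz = 1000000;
+		*mW = 600;
+		return 0;
+	}
+
+	static struct em_data_callback em_cb = EM_DATA_CB(plat_active_power);
+
+	/* from the driver's init path: one perf. domain covering @span */
+	em_register_perf_domain(span, nr_opp, &em_cb);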
+
+
+  6.3 - Energy Model complexity
+
+The task wake-up path is very latency-sensitive. When the EM of a platform is
+too complex (too many CPUs, too many performance domains, too many performance
+states, ...), the cost of using it in the wake-up path can become prohibitive.
+The energy-aware wake-up algorithm has a complexity of:
+
+	C = Nd * (Nc + Ns)
+
+with: Nd the number of performance domains; Nc the number of CPUs; and Ns the
+total number of OPPs (e.g. for two perf. domains with 4 OPPs each, Ns = 8).
+
+A complexity check is performed at the root domain level, when scheduling
+domains are built. EAS will not start on a root domain if its C happens to be
+higher than the completely arbitrary EM_MAX_COMPLEXITY threshold (2048 at the
+time of writing).
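+
+As a worked example, a system with 4 performance domains of 4 CPUs and 4 OPPs
+each has Nd = 4, Nc = 16 and Ns = 16, hence C = 4 * (16 + 16) = 128, which is
+comfortably below the threshold.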
+
+If you really want to use EAS but the complexity of your platform's Energy
+Model is too high to be used with a single root domain, you're left with only
+two possible options:
+
+    1. split your system into separate, smaller, root domains using exclusive
+       cpusets and enable EAS locally on each of them. This option has the
+       benefit of working out of the box but the drawback of preventing load
+       balancing between root domains, which can result in an unbalanced system
+       overall;
+    2. submit patches to reduce the complexity of the EAS wake-up algorithm,
+       hence enabling it to cope with larger EMs in reasonable time.
+
+
+  6.4 - Schedutil governor
+
+EAS tries to predict which OPPs the CPUs will be running at in the near future
+in order to estimate their energy consumption. To do so, it is assumed that OPPs
+of CPUs follow their utilization.
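+
+To illustrate this assumption, schedutil's frequency selection can be
+sketched as below (a simplification of its actual get_next_freq() helper,
+ignoring rate limiting and the non-frequency-invariant case):
+
+	/* next_freq ~= 1.25 * max_freq * util / max_capacity */
+	static unsigned long next_freq(unsigned long util, unsigned long max,
+				       unsigned long max_freq)
+	{
+		return (max_freq + (max_freq >> 2)) * util / max;
+	}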
+
+Although it is very difficult to provide hard guarantees regarding the accuracy
+of this assumption in practice (because the hardware might not do what it is
+told to do, for example), schedutil, as opposed to other CPUFreq governors, at
+least _requests_ frequencies calculated using the utilization signals.
+Consequently, the only sane governor to use together with EAS is schedutil,
+because it is the only one providing some degree of consistency between
+frequency requests and energy predictions.
+
+Using EAS with any other governor than schedutil is not supported.
+
+
+  6.5 - Scale-invariant utilization signals
+
+In order to make accurate predictions across CPUs and for all performance
+states, EAS needs frequency-invariant and CPU-invariant PELT signals. These can
+be obtained using the architecture-defined arch_scale{cpu,freq}_capacity()
+callbacks.
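+
+Conceptually, frequency invariance scales the raw utilization by the current
+frequency, e.g. (a simplified model; real arch_scale_freq_capacity()
+implementations are hardware-specific):
+
+	/* scale-invariant util ~= raw_util * curr_freq / max_freq */
+	static unsigned long freq_inv_util(unsigned long raw_util,
+					   unsigned long curr_freq,
+					   unsigned long max_freq)
+	{
+		return raw_util * curr_freq / max_freq;
+	}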
+
+Using EAS on a platform that doesn't implement these two callbacks is not
+supported.
+
+
+  6.6 - Multithreading (SMT)
+
+EAS in its current form is SMT unaware and is not able to leverage
+multithreaded hardware to save energy. EAS considers threads as independent
+CPUs, which can actually be counter-productive for both performance and energy.
+
+EAS on SMT is not supported.
diff --git a/Makefile b/Makefile
index dc0d097..277bce1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 12
+SUBLEVEL = 30
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -487,21 +487,21 @@
 ifeq ($(cc-name),clang)
 ifneq ($(CROSS_COMPILE),)
 CLANG_TRIPLE	?= $(CROSS_COMPILE)
-CLANG_TARGET	:= --target=$(notdir $(CLANG_TRIPLE:%-=%))
-ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_TARGET)), y)
+CLANG_FLAGS	:= --target=$(notdir $(CLANG_TRIPLE:%-=%))
+ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y)
 $(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
 endif
 GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
-CLANG_PREFIX	:= --prefix=$(GCC_TOOLCHAIN_DIR)
+CLANG_FLAGS	+= --prefix=$(GCC_TOOLCHAIN_DIR)
 GCC_TOOLCHAIN	:= $(realpath $(GCC_TOOLCHAIN_DIR)/..)
 endif
 ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC	:= --gcc-toolchain=$(GCC_TOOLCHAIN)
+CLANG_FLAGS	+= --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+CLANG_FLAGS	+= -no-integrated-as
+KBUILD_CFLAGS	+= $(CLANG_FLAGS)
+KBUILD_AFLAGS	+= $(CLANG_FLAGS)
+export CLANG_FLAGS
 endif
 
 RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
@@ -1005,11 +1005,6 @@
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    ifdef CONFIG_UNWINDER_ORC
-      $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
-    else
-      $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
-    endif
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
@@ -1166,6 +1161,14 @@
 
 PHONY += prepare-objtool
 prepare-objtool: $(objtool_target)
+ifeq ($(SKIP_STACK_VALIDATION),1)
+ifdef CONFIG_UNWINDER_ORC
+	@echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+	@false
+else
+	@echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+endif
+endif
 
 ifdef cfi-flags
   ifeq ($(call cc-option, $(cfi-flags)),)
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 4d17cac..432402c 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -56,15 +56,15 @@
 
 #elif defined(CONFIG_ALPHA_DP264) || \
       defined(CONFIG_ALPHA_LYNX)  || \
-      defined(CONFIG_ALPHA_SHARK) || \
-      defined(CONFIG_ALPHA_EIGER)
+      defined(CONFIG_ALPHA_SHARK)
 # define NR_IRQS	64
 
 #elif defined(CONFIG_ALPHA_TITAN)
 #define NR_IRQS		80
 
 #elif defined(CONFIG_ALPHA_RAWHIDE) || \
-	defined(CONFIG_ALPHA_TAKARA)
+      defined(CONFIG_ALPHA_TAKARA) || \
+      defined(CONFIG_ALPHA_EIGER)
 # define NR_IRQS	128
 
 #elif defined(CONFIG_ALPHA_WILDFIRE)
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index d73dc47..188fc92 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -78,7 +78,7 @@
 /* Macro for exception fixup code to access integer registers.  */
 #define dpf_reg(r)							\
 	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
-				 (r) <= 18 ? (r)+8 : (r)-10])
+				 (r) <= 18 ? (r)+10 : (r)-10])
 
 asmlinkage void
 do_page_fault(unsigned long address, unsigned long mmcsr,
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index a90c4f1..ac69f30 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -26,6 +26,7 @@
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_PENDING_IRQ if SMP
+	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 8da87fe..99e6d89 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -340,7 +340,7 @@
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
 {
 	if (!word)
 		return word;
@@ -400,9 +400,9 @@
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
 {
-	int n;
+	unsigned long n;
 
 	asm volatile(
 	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index ff7d323..db681cf 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -52,6 +52,17 @@
 #define cache_line_size()	SMP_CACHE_BYTES
 #define ARCH_DMA_MINALIGN	SMP_CACHE_BYTES
 
+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
+ * alignment for any atomic64_t embedded in the buffer.
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
+ * value of 4 (and not 8) in ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN	8
+#endif
+
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 9185541..6958545 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@
 
 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+	/* All jump instructions that are taken */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
 	[PERF_COUNT_ARC_BPOK]         = "bpok",	  /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25..1f945d0 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
 #include <asm/entry.h>
 #include <asm/arcregs.h>
 #include <asm/cache.h>
+#include <asm/irqflags.h>
 
 .macro CPU_EARLY_SETUP
 
@@ -47,6 +48,15 @@
 	sr	r5, [ARC_REG_DC_CTRL]
 
 1:
+
+#ifdef CONFIG_ISA_ARCV2
+	; Unaligned access is disabled at reset, so re-enable early as
+	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+	; by default
+	lr	r5, [status32]
+	bset	r5, r5, STATUS_AD_BIT
+	kflag	r5
+#endif
 .endm
 
 	.section .init.text, "ax",@progbits
@@ -93,9 +103,9 @@
 #ifdef CONFIG_ARC_UBOOT_SUPPORT
 	; Uboot - kernel ABI
 	;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
-	;    r1 = magic number (board identity, unused as of now
+	;    r1 = magic number (always zero as of now)
 	;    r2 = pointer to uboot provided cmdline or external DTB in mem
-	; These are handled later in setup_arch()
+	; These are handled later in handle_uboot_args()
 	st	r0, [@uboot_tag]
 	st	r2, [@uboot_arg]
 #endif
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index b2cae79..62a30e5 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -449,43 +449,80 @@
 	arc_chk_core_config();
 }
 
-static inline int is_kernel(unsigned long addr)
+static inline bool uboot_arg_invalid(unsigned long addr)
 {
-	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
-		return 1;
-	return 0;
+	/*
+	 * Check that it is an untranslated address (although MMU is not enabled
+	 * yet, it being a high address ensures this is not by fluke)
+	 */
+	if (addr < PAGE_OFFSET)
+		return true;
+
+	/* Check that address doesn't clobber resident kernel image */
+	return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
+}
+
+#define IGNORE_ARGS		"Ignore U-boot args: "
+
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
+#define UBOOT_TAG_NONE		0
+#define UBOOT_TAG_CMDLINE	1
+#define UBOOT_TAG_DTB		2
+
+void __init handle_uboot_args(void)
+{
+	bool use_embedded_dtb = true;
+	bool append_cmdline = false;
+
+#ifdef CONFIG_ARC_UBOOT_SUPPORT
+	/* check that we know this tag */
+	if (uboot_tag != UBOOT_TAG_NONE &&
+	    uboot_tag != UBOOT_TAG_CMDLINE &&
+	    uboot_tag != UBOOT_TAG_DTB) {
+		pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
+		goto ignore_uboot_args;
+	}
+
+	if (uboot_tag != UBOOT_TAG_NONE &&
+            uboot_arg_invalid((unsigned long)uboot_arg)) {
+		pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+		goto ignore_uboot_args;
+	}
+
+	/* see if U-boot passed an external Device Tree blob */
+	if (uboot_tag == UBOOT_TAG_DTB) {
+		machine_desc = setup_machine_fdt((void *)uboot_arg);
+
+		/* external Device Tree blob is invalid - use embedded one */
+		use_embedded_dtb = !machine_desc;
+	}
+
+	if (uboot_tag == UBOOT_TAG_CMDLINE)
+		append_cmdline = true;
+
+ignore_uboot_args:
+#endif
+
+	if (use_embedded_dtb) {
+		machine_desc = setup_machine_fdt(__dtb_start);
+		if (!machine_desc)
+			panic("Embedded DT invalid\n");
+	}
+
+	/*
+	 * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
+	 * append processing can only happen after.
+	 */
+	if (append_cmdline) {
+		/* Ensure a whitespace between the 2 cmdlines */
+		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+		strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
+	}
 }
 
 void __init setup_arch(char **cmdline_p)
 {
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
-	/* make sure that uboot passed pointer to cmdline/dtb is valid */
-	if (uboot_tag && is_kernel((unsigned long)uboot_arg))
-		panic("Invalid uboot arg\n");
-
-	/* See if u-boot passed an external Device Tree blob */
-	machine_desc = setup_machine_fdt(uboot_arg);	/* uboot_tag == 2 */
-	if (!machine_desc)
-#endif
-	{
-		/* No, so try the embedded one */
-		machine_desc = setup_machine_fdt(__dtb_start);
-		if (!machine_desc)
-			panic("Embedded DT invalid\n");
-
-		/*
-		 * If we are here, it is established that @uboot_arg didn't
-		 * point to DT blob. Instead if u-boot says it is cmdline,
-		 * append to embedded DT cmdline.
-		 * setup_machine_fdt() would have populated @boot_command_line
-		 */
-		if (uboot_tag == 1) {
-			/* Ensure a whitespace between the 2 cmdlines */
-			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
-			strlcat(boot_command_line, uboot_arg,
-				COMMAND_LINE_SIZE);
-		}
-	}
+	handle_uboot_args();
 
 	/* Save unparsed command line copy for /proc/cmdline */
 	*cmdline_p = boot_command_line;
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index e8d9fb4..5c66633 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,6 +18,8 @@
 #include <asm/arcregs.h>
 #include <asm/irqflags.h>
 
+#define ARC_PATH_MAX	256
+
 /*
  * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
  *   -Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@
 	print_reg_file(&(cregs->r13), 13);
 }
 
-static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk)
 {
 	char *path_nm = NULL;
 	struct mm_struct *mm;
 	struct file *exe_file;
+	char buf[ARC_PATH_MAX];
 
 	mm = get_task_mm(tsk);
 	if (!mm)
@@ -72,7 +75,7 @@
 	mmput(mm);
 
 	if (exe_file) {
-		path_nm = file_path(exe_file, buf, 255);
+		path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
 		fput(exe_file);
 	}
 
@@ -80,10 +83,9 @@
 	pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
 }
 
-static void show_faulting_vma(unsigned long address, char *buf)
+static void show_faulting_vma(unsigned long address)
 {
 	struct vm_area_struct *vma;
-	char *nm = buf;
 	struct mm_struct *active_mm = current->active_mm;
 
 	/* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@
 	 * if the container VMA is not found
 	 */
 	if (vma && (vma->vm_start <= address)) {
+		char buf[ARC_PATH_MAX];
+		char *nm = "?";
+
 		if (vma->vm_file) {
-			nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+			nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
 			if (IS_ERR(nm))
 				nm = "?";
 		}
@@ -173,13 +178,8 @@
 {
 	struct task_struct *tsk = current;
 	struct callee_regs *cregs;
-	char *buf;
 
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return;
-
-	print_task_path_n_nm(tsk, buf);
+	print_task_path_n_nm(tsk);
 	show_regs_print_info(KERN_INFO);
 
 	show_ecr_verbose(regs);
@@ -189,7 +189,7 @@
 		(void *)regs->blink, (void *)regs->ret);
 
 	if (user_mode(regs))
-		show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+		show_faulting_vma(regs->ret); /* faulting code, not data */
 
 	pr_info("[STAT32]: 0x%08lx", regs->status32);
 
@@ -221,8 +221,6 @@
 	cregs = (struct callee_regs *)current->thread.callee_reg;
 	if (cregs)
 		show_callee_regs(cregs);
-
-	free_page((unsigned long)buf);
 }
 
 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 62ad4bc..f230bb7 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -7,11 +7,39 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/cache.h>
 
-#undef PREALLOC_NOT_AVAIL
+/*
+ * The memset implementation below is optimized to use prefetchw and prealloc
+ * instructions in case of a CPU with a 64B L1 data cache line
+ * (L1_CACHE_SHIFT == 6). If you want to implement an optimized memset for
+ * other possible L1 data cache line lengths (32B and 128B) you should
+ * rewrite the code, carefully checking that we don't call any
+ * prefetchw/prealloc instruction for L1 cache lines which don't belong to
+ * the memset area.
+ */
+
+#if L1_CACHE_SHIFT == 6
+
+.macro PREALLOC_INSTR	reg, off
+	prealloc	[\reg, \off]
+.endm
+
+.macro PREFETCHW_INSTR	reg, off
+	prefetchw	[\reg, \off]
+.endm
+
+#else
+
+.macro PREALLOC_INSTR
+.endm
+
+.macro PREFETCHW_INSTR
+.endm
+
+#endif
 
 ENTRY_CFI(memset)
-	prefetchw [r0]		; Prefetch the write location
+	PREFETCHW_INSTR	r0, 0	; Prefetch the first write location
 	mov.f	0, r2
 ;;; if size is zero
 	jz.d	[blink]
@@ -48,11 +76,8 @@
 
 	lpnz	@.Lset64bytes
 	;; LOOP START
-#ifdef PREALLOC_NOT_AVAIL
-	prefetchw [r3, 64]	;Prefetch the next write location
-#else
-	prealloc  [r3, 64]
-#endif
+	PREALLOC_INSTR	r3, 64	; alloc next line w/o fetching
+
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -85,7 +110,6 @@
 	lsr.f	lp_count, r2, 5 ;Last remaining  max 124 bytes
 	lpnz	.Lset32bytes
 	;; LOOP START
-	prefetchw   [r3, 32]	;Prefetch the next write location
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index ba14506..f890b2f 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -138,7 +138,8 @@
 	 */
 
 	memblock_add_node(low_mem_start, low_mem_sz, 0);
-	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
+	memblock_reserve(CONFIG_LINUX_LINK_BASE,
+			 __pa(_end) - CONFIG_LINUX_LINK_BASE);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start)
diff --git a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
index df12276..c2ece0b 100644
--- a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
@@ -13,7 +13,7 @@
 		bootargs = "console=ttyS4,115200 earlyprintk";
 	};
 
-	memory {
+	memory@80000000 {
 		reg = <0x80000000 0x40000000>;
 	};
 
diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
index 7a291de..22dade6 100644
--- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
@@ -13,7 +13,7 @@
 		bootargs = "earlyprintk";
 	};
 
-	memory {
+	memory@80000000 {
 		reg = <0x80000000 0x20000000>;
 	};
 
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
index d598b63..024e52a 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
@@ -14,7 +14,7 @@
 		bootargs = "console=ttyS4,115200 earlyprintk";
 	};
 
-	memory {
+	memory@80000000 {
 		reg = <0x80000000 0x40000000>;
 	};
 
@@ -322,4 +322,3 @@
 &adc {
 	status = "okay";
 };
-
diff --git a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
index 43ed139..33d7045 100644
--- a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
@@ -17,7 +17,7 @@
 		bootargs = "console=ttyS4,115200 earlyprintk";
 	};
 
-	memory {
+	memory@80000000 {
 		reg = <0x80000000 0x20000000>;
 	};
 
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index f9b7579..016616c 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -94,6 +94,28 @@
 		regulator-boot-on;
 	};
 
+	baseboard_3v3: fixedregulator-3v3 {
+		/* TPS73701DCQ */
+		compatible = "regulator-fixed";
+		regulator-name = "baseboard_3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&vbat>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	baseboard_1v8: fixedregulator-1v8 {
+		/* TPS73701DCQ */
+		compatible = "regulator-fixed";
+		regulator-name = "baseboard_1v8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vbat>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
 	backlight_lcd: backlight-regulator {
 		compatible = "regulator-fixed";
 		regulator-name = "lcd_backlight_pwr";
@@ -105,7 +127,7 @@
 
 	sound {
 		compatible = "simple-audio-card";
-		simple-audio-card,name = "DA850/OMAP-L138 EVM";
+		simple-audio-card,name = "DA850-OMAPL138 EVM";
 		simple-audio-card,widgets =
 			"Line", "Line In",
 			"Line", "Line Out";
@@ -210,10 +232,9 @@
 
 		/* Regulators */
 		IOVDD-supply = <&vdcdc2_reg>;
-		/* Derived from VBAT: Baseboard 3.3V / 1.8V */
-		AVDD-supply = <&vbat>;
-		DRVDD-supply = <&vbat>;
-		DVDD-supply = <&vbat>;
+		AVDD-supply = <&baseboard_3v3>;
+		DRVDD-supply = <&baseboard_3v3>;
+		DVDD-supply = <&baseboard_1v8>;
 	};
 	tca6416: gpio@20 {
 		compatible = "ti,tca6416";
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index 0177e3e..3a2fa6e 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -39,9 +39,39 @@
 		};
 	};
 
+	vcc_5vd: fixedregulator-vcc_5vd {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_5vd";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+	};
+
+	vcc_3v3d: fixedregulator-vcc_3v3d {
+		/* TPS650250 - VDCDC1 */
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_3v3d";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&vcc_5vd>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	vcc_1v8d: fixedregulator-vcc_1v8d {
+		/* TPS650250 - VDCDC2 */
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_1v8d";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vcc_5vd>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
 	sound {
 		compatible = "simple-audio-card";
-		simple-audio-card,name = "DA850/OMAP-L138 LCDK";
+		simple-audio-card,name = "DA850-OMAPL138 LCDK";
 		simple-audio-card,widgets =
 			"Line", "Line In",
 			"Line", "Line Out";
@@ -221,6 +251,12 @@
 		compatible = "ti,tlv320aic3106";
 		reg = <0x18>;
 		status = "okay";
+
+		/* Regulators */
+		IOVDD-supply = <&vcc_3v3d>;
+		AVDD-supply = <&vcc_3v3d>;
+		DRVDD-supply = <&vcc_3v3d>;
+		DVDD-supply = <&vcc_1v8d>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index 47aa53b..559659b 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -476,7 +476,7 @@
 		clocksource: timer@20000 {
 			compatible = "ti,da830-timer";
 			reg = <0x20000 0x1000>;
-			interrupts = <12>, <13>;
+			interrupts = <21>, <22>;
 			interrupt-names = "tint12", "tint34";
 			clocks = <&pll0_auxclk>;
 		};
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 27a1ee2..94efca7 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -168,6 +168,9 @@
 			interrupt-controller;
 			#interrupt-cells = <3>;
 			interrupt-parent = <&gic>;
+			clock-names = "clkout8";
+			clocks = <&cmu CLK_FIN_PLL>;
+			#clock-cells = <1>;
 		};
 
 		mipi_phy: video-phy {
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index a09e46c..00820d2 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -49,7 +49,7 @@
 	};
 
 	emmc_pwrseq: pwrseq {
-		pinctrl-0 = <&sd1_cd>;
+		pinctrl-0 = <&emmc_rstn>;
 		pinctrl-names = "default";
 		compatible = "mmc-pwrseq-emmc";
 		reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
@@ -161,12 +161,6 @@
 	cpu0-supply = <&buck2_reg>;
 };
 
-/* RSTN signal for eMMC */
-&sd1_cd {
-	samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-	samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-};
-
 &pinctrl_1 {
 	gpio_power_key: power_key {
 		samsung,pins = "gpx1-3";
@@ -184,6 +178,11 @@
 		samsung,pins = "gpx3-7";
 		samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
 	};
+
+	emmc_rstn: emmc-rstn {
+		samsung,pins = "gpk1-2";
+		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+	};
 };
 
 &ehci {
diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
index 2f4f408..27214e6 100644
--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
@@ -334,7 +334,7 @@
 			buck8_reg: BUCK8 {
 				regulator-name = "vdd_1.8v_ldo";
 				regulator-min-microvolt = <800000>;
-				regulator-max-microvolt = <1500000>;
+				regulator-max-microvolt = <2000000>;
 				regulator-always-on;
 				regulator-boot-on;
 			};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
index 03611d50..e84544b 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
@@ -26,8 +26,7 @@
 			"Speakers", "SPKL",
 			"Speakers", "SPKR";
 
-		assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
-				<&clock CLK_MOUT_EPLL>,
+		assigned-clocks = <&clock CLK_MOUT_EPLL>,
 				<&clock CLK_MOUT_MAU_EPLL>,
 				<&clock CLK_MOUT_USER_MAU_EPLL>,
 				<&clock_audss EXYNOS_MOUT_AUDSS>,
@@ -36,8 +35,7 @@
 				<&clock_audss EXYNOS_DOUT_AUD_BUS>,
 				<&clock_audss EXYNOS_DOUT_I2S>;
 
-		assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
-				<&clock CLK_FOUT_EPLL>,
+		assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
 				<&clock CLK_MOUT_EPLL>,
 				<&clock CLK_MOUT_MAU_EPLL>,
 				<&clock CLK_MAU_EPLL>,
@@ -48,7 +46,6 @@
 				<0>,
 				<0>,
 				<0>,
-				<0>,
 				<196608001>,
 				<(196608002 / 2)>,
 				<196608000>;
@@ -84,4 +81,6 @@
 
 &i2s0 {
 	status = "okay";
+	assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
+	assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
 };
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
index 4a30cc8..122174e 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
@@ -33,8 +33,7 @@
 		compatible = "samsung,odroid-xu3-audio";
 		model = "Odroid-XU4";
 
-		assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
-				<&clock CLK_MOUT_EPLL>,
+		assigned-clocks = <&clock CLK_MOUT_EPLL>,
 				<&clock CLK_MOUT_MAU_EPLL>,
 				<&clock CLK_MOUT_USER_MAU_EPLL>,
 				<&clock_audss EXYNOS_MOUT_AUDSS>,
@@ -43,8 +42,7 @@
 				<&clock_audss EXYNOS_DOUT_AUD_BUS>,
 				<&clock_audss EXYNOS_DOUT_I2S>;
 
-		assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
-				<&clock CLK_FOUT_EPLL>,
+		assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
 				<&clock CLK_MOUT_EPLL>,
 				<&clock CLK_MOUT_MAU_EPLL>,
 				<&clock CLK_MAU_EPLL>,
@@ -55,7 +53,6 @@
 				<0>,
 				<0>,
 				<0>,
-				<0>,
 				<196608001>,
 				<(196608002 / 2)>,
 				<196608000>;
@@ -79,6 +76,8 @@
 
 &i2s0 {
 	status = "okay";
+	assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
+	assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
 };
 
 &pwm {
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index 6f258b5..502a361 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -274,20 +274,16 @@
 				read-only;
 			};
 			/*
-			 * Between the boot loader and the rootfs is the kernel
-			 * in a custom Storlink format flashed from the boot
-			 * menu. The rootfs is in squashfs format.
+			 * This firmware image contains the kernel catenated
+			 * with the squashfs root filesystem. For some reason
+			 * this is called "upgrade" on the vendor system.
 			 */
-			partition@1800c0 {
-				label = "rootfs";
-				reg = <0x001800c0 0x01dbff40>;
-				read-only;
-			};
-			partition@1f40000 {
+			partition@40000 {
 				label = "upgrade";
-				reg = <0x01f40000 0x00040000>;
+				reg = <0x00040000 0x01f40000>;
 				read-only;
 			};
+			/* RGDB, Residential Gateway Database? */
 			partition@1f80000 {
 				label = "rgdb";
 				reg = <0x01f80000 0x00040000>;
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index 469cce2..6e80254 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -477,6 +477,15 @@
 };
 
 &gpio1 {
+	gpio-line-names = "", "", "", "",
+			  "", "", "", "",
+			  "", "hp-amp-shutdown-b", "", "",
+			  "", "", "", "",
+			  "", "", "", "",
+			  "", "", "", "",
+			  "", "", "", "",
+			  "", "", "", "";
+
 	unused-sd3-wp-gpio {
 		/*
 		 * See pinctrl_esdhc1 below for more details on this
@@ -501,9 +510,6 @@
 	hpa1: amp@60 {
 		compatible = "ti,tpa6130a2";
 		reg = <0x60>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_ampgpio>;
-		power-gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
 		Vdd-supply = <&reg_3p3v>;
 	};
 
@@ -677,7 +683,10 @@
 };
 
 &iomuxc {
-	pinctrl_ampgpio: ampgpiogrp {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hog>;
+
+	pinctrl_hog: hoggrp {
 		fsl,pins = <
 			MX51_PAD_GPIO1_9__GPIO1_9		0x5e
 		>;
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 844caa3..50083ce 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -462,7 +462,7 @@
 			};
 
 			gpt: gpt@2098000 {
-				compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
+				compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
 				reg = <0x02098000 0x4000>;
 				interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6SX_CLK_GPT_BUS>,
diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
index d8aac4a..177d21f 100644
--- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
+++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
@@ -86,13 +86,17 @@
 		compatible = "regulator-fixed";
 		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
-		clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
-		clock-names = "slow";
 		regulator-name = "reg_wlan";
 		startup-delay-us = <70000>;
 		gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
 	};
+
+	usdhc2_pwrseq: usdhc2_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+		clock-names = "ext_clock";
+	};
 };
 
 &adc1 {
@@ -375,6 +379,7 @@
 	bus-width = <4>;
 	non-removable;
 	vmmc-supply = <&reg_wlan>;
+	mmc-pwrseq = <&usdhc2_pwrseq>;
 	cap-power-off-card;
 	keep-power-in-suspend;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
index 21973eb..f27b384 100644
--- a/arch/arm/boot/dts/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/imx7d-pico.dtsi
@@ -100,6 +100,19 @@
 		regulator-min-microvolt = <1800000>;
 		regulator-max-microvolt = <1800000>;
 	};
+
+	usdhc2_pwrseq: usdhc2_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+		clock-names = "ext_clock";
+	};
+};
+
+&clks {
+	assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
+			  <&clks IMX7D_CLKO2_ROOT_DIV>;
+	assigned-clock-parents = <&clks IMX7D_CKIL>;
+	assigned-clock-rates = <0>, <32768>;
 };
 
 &i2c4 {
@@ -199,12 +212,13 @@
 
 &usdhc2 { /* Wifi SDIO */
 	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_usdhc2>;
+	pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>;
 	no-1-8-v;
 	non-removable;
 	keep-power-in-suspend;
 	wakeup-source;
 	vmmc-supply = <&reg_ap6212>;
+	mmc-pwrseq = <&usdhc2_pwrseq>;
 	status = "okay";
 };
 
@@ -301,6 +315,12 @@
 };
 
 &iomuxc_lpsr {
+	pinctrl_wifi_clk: wificlkgrp {
+		fsl,pins = <
+			MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2	0x7d
+		>;
+	};
+
 	pinctrl_wdog: wdoggrp {
 		fsl,pins = <
 			MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B	0x74
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index cbaf06f..eb91746 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -36,8 +36,8 @@
 		compatible = "gpio-fan";
 		pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
 		pinctrl-names = "default";
-		gpios = <&gpio1 14 GPIO_ACTIVE_LOW
-			 &gpio1 13 GPIO_ACTIVE_LOW>;
+		gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
+			 &gpio1 13 GPIO_ACTIVE_HIGH>;
 		gpio-fan,speed-map = <0    0
 				      3000 1
 				      6000 2>;
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 0d9faf1..a86b890 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -263,7 +263,7 @@
 			compatible = "amlogic,meson6-dwmac", "snps,dwmac";
 			reg = <0xc9410000 0x10000
 			       0xc1108108 0x4>;
-			interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
+			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "macirq";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
index ef3177d3..8fdeeff 100644
--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
+++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
@@ -125,7 +125,6 @@
 		/* Realtek RTL8211F (0x001cc916) */
 		eth_phy: ethernet-phy@0 {
 			reg = <0>;
-			eee-broken-1000t;
 			interrupt-parent = <&gpio_intc>;
 			/* GPIOH_3 */
 			interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
@@ -172,8 +171,7 @@
 		cap-sd-highspeed;
 		disable-wp;
 
-		cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-		cd-inverted;
+		cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
 		vmmc-supply = <&tflash_vdd>;
 		vqmmc-supply = <&tf_io>;
diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
index f585361..6ac02be 100644
--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
@@ -206,8 +206,7 @@
 		cap-sd-highspeed;
 		disable-wp;
 
-		cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-		cd-inverted;
+		cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
 		vmmc-supply = <&vcc_3v3>;
 	};
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index 766bbb8..47e5b63 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -220,12 +220,15 @@
 				status = "disabled";
 			};
 
-			twsi2: i2c@d4025000 {
+			twsi2: i2c@d4031000 {
 				compatible = "mrvl,mmp-twsi";
-				reg = <0xd4025000 0x1000>;
-				interrupts = <58>;
+				reg = <0xd4031000 0x1000>;
+				interrupt-parent = <&intcmux17>;
+				interrupts = <0>;
 				clocks = <&soc_clocks MMP2_CLK_TWSI1>;
 				resets = <&soc_clocks MMP2_CLK_TWSI1>;
+				#address-cells = <1>;
+				#size-cells = <0>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index ddc7a7b..f57acf8 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -105,7 +105,7 @@
 			interrupts-extended = <
 				&cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
 				&cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
-				&cpcap 48 1
+				&cpcap 48 0
 			>;
 			interrupt-names =
 				"id_ground", "id_float", "se0conn", "vbusvld",
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 0d9b853..e142e6c 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -370,6 +370,19 @@
 		compatible = "ti,omap2-onenand";
 		reg = <0 0 0x20000>;	/* CS0, offset 0, IO size 128K */
 
+		/*
+		 * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
+		 * bootloader set values when booted with v4.19 using both N950
+		 * and N9 devices (OneNAND Manufacturer: Samsung):
+		 *
+		 *   gpmc cs0 before gpmc_cs_program_settings:
+		 *   cs0 GPMC_CS_CONFIG1: 0xfd001202
+		 *   cs0 GPMC_CS_CONFIG2: 0x00181800
+		 *   cs0 GPMC_CS_CONFIG3: 0x00030300
+		 *   cs0 GPMC_CS_CONFIG4: 0x18001804
+		 *   cs0 GPMC_CS_CONFIG5: 0x03171d1d
+		 *   cs0 GPMC_CS_CONFIG6: 0x97080000
+		 */
 		gpmc,sync-read;
 		gpmc,sync-write;
 		gpmc,burst-length = <16>;
@@ -379,26 +392,27 @@
 		gpmc,device-width = <2>;
 		gpmc,mux-add-data = <2>;
 		gpmc,cs-on-ns = <0>;
-		gpmc,cs-rd-off-ns = <87>;
-		gpmc,cs-wr-off-ns = <87>;
+		gpmc,cs-rd-off-ns = <122>;
+		gpmc,cs-wr-off-ns = <122>;
 		gpmc,adv-on-ns = <0>;
-		gpmc,adv-rd-off-ns = <10>;
-		gpmc,adv-wr-off-ns = <10>;
-		gpmc,oe-on-ns = <15>;
-		gpmc,oe-off-ns = <87>;
+		gpmc,adv-rd-off-ns = <15>;
+		gpmc,adv-wr-off-ns = <15>;
+		gpmc,oe-on-ns = <20>;
+		gpmc,oe-off-ns = <122>;
 		gpmc,we-on-ns = <0>;
-		gpmc,we-off-ns = <87>;
-		gpmc,rd-cycle-ns = <112>;
-		gpmc,wr-cycle-ns = <112>;
-		gpmc,access-ns = <81>;
+		gpmc,we-off-ns = <122>;
+		gpmc,rd-cycle-ns = <148>;
+		gpmc,wr-cycle-ns = <148>;
+		gpmc,access-ns = <117>;
 		gpmc,page-burst-access-ns = <15>;
 		gpmc,bus-turnaround-ns = <0>;
 		gpmc,cycle2cycle-delay-ns = <0>;
 		gpmc,wait-monitoring-ns = <0>;
-		gpmc,clk-activation-ns = <5>;
-		gpmc,wr-data-mux-bus-ns = <30>;
-		gpmc,wr-access-ns = <81>;
-		gpmc,sync-clk-ps = <15000>;
+		gpmc,clk-activation-ns = <10>;
+		gpmc,wr-data-mux-bus-ns = <40>;
+		gpmc,wr-access-ns = <117>;
+
+		gpmc,sync-clk-ps = <15000>; /* TBC; where did this value come from? */
 
 		/*
 		 * MTD partition table corresponding to Nokia's MeeGo 1.2
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 490726b..9dc7ec7 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -33,6 +33,7 @@
 		gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>;  /* gpio line 48 */
 		enable-active-high;
 		regulator-boot-on;
+		startup-delay-us = <25000>;
 	};
 
 	vbat: fixedregulator-vbat {
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index ab6f640..8b8db9d 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -317,7 +317,8 @@
 
 	palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
 		pinctrl-single,pins = <
-			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+			/* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
 		>;
 	};
 
@@ -385,7 +386,8 @@
 
 	palmas: palmas@48 {
 		compatible = "ti,palmas";
-		interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
+		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
 		reg = <0x48>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
@@ -651,7 +653,8 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&twl6040_pins>;
 
-		interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
+		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+		interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
 
 		/* audpwron gpio defined in the board specific dts */
 
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index 5e21fb4..e78d371 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -181,6 +181,13 @@
 			OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6)  /* llib_wakereqin.gpio1_wk15 */
 		>;
 	};
+
+	palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+		pinctrl-single,pins = <
+			/* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
+		>;
+	};
 };
 
 &omap5_pmx_core {
@@ -414,8 +421,11 @@
 
 	palmas: palmas@48 {
 		compatible = "ti,palmas";
-		interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
 		reg = <0x48>;
+		pinctrl-0 = <&palmas_sys_nirq_pins>;
+		pinctrl-names = "default";
+		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,system-power-controller;
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
index c7ce415..f250b20 100644
--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
@@ -309,8 +309,8 @@
 
 &reg_dldo3 {
 	regulator-always-on;
-	regulator-min-microvolt = <2500000>;
-	regulator-max-microvolt = <2500000>;
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
 	regulator-name = "vcc-pd";
 };
 
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
index 5d23667..25540b7 100644
--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
@@ -53,7 +53,7 @@
 
 	aliases {
 		serial0 = &uart0;
-		/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+		ethernet0 = &emac;
 		ethernet1 = &sdiowifi;
 	};
 
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index b17ee03..88286dd 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -467,6 +467,17 @@
 #endif
 	.endm
 
+	.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+	sub	\tmp, \limit, #1
+	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
+	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
+	subhss	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
+	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
+	csdb
+#endif
+	.endm
+
 	.macro	uaccess_disable, tmp, isb=1
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/*
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 0d28924..775cac3 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -111,6 +111,7 @@
 #include <linux/kernel.h>
 
 extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
 
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)							\
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 92fd2c8..12659ce 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -10,7 +10,7 @@
 #ifndef _ASM_PGTABLE_2LEVEL_H
 #define _ASM_PGTABLE_2LEVEL_H
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 /*
  * Hardware-wise, we have a two level page table structure, where the first
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index e25f439..e1b6f28 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -23,7 +23,7 @@
 /*
  * Don't change this structure - ASM code relies on it.
  */
-extern struct processor {
+struct processor {
 	/* MISC
 	 * get data abort address/flags
 	 */
@@ -79,9 +79,13 @@
 	unsigned int suspend_size;
 	void (*do_suspend)(void *);
 	void (*do_resume)(void *);
-} processor;
+};
 
 #ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
 extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@
 extern void cpu_do_suspend(void *);
 extern void cpu_do_resume(void *);
 #else
-#define cpu_proc_init			processor._proc_init
-#define cpu_proc_fin			processor._proc_fin
-#define cpu_reset			processor.reset
-#define cpu_do_idle			processor._do_idle
-#define cpu_dcache_clean_area		processor.dcache_clean_area
-#define cpu_set_pte_ext			processor.set_pte_ext
-#define cpu_do_switch_mm		processor.switch_mm
 
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend			processor.do_suspend
-#define cpu_do_resume			processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised.  We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f)			cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f)			cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+	unsigned int cpu = smp_processor_id();
+	*cpu_vtable[cpu] = *p;
+	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+		     cpu_vtable[0]->dcache_clean_area);
+	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+		     cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f)			processor.f
+#define PROC_TABLE(f)			processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+	processor = *p;
+}
+#endif
+
+#define cpu_proc_init			PROC_VTABLE(_proc_init)
+#define cpu_check_bugs			PROC_VTABLE(check_bugs)
+#define cpu_proc_fin			PROC_VTABLE(_proc_fin)
+#define cpu_reset			PROC_VTABLE(reset)
+#define cpu_do_idle			PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area		PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext			PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm		PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend			PROC_VTABLE(do_suspend)
+#define cpu_do_resume			PROC_VTABLE(do_resume)
 #endif
 
 extern void cpu_resume(void);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 9b37b6a..8f55dc5 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -121,8 +121,8 @@
 struct user_vfp;
 struct user_vfp_exc;
 
-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
-					   struct user_vfp_exc __user *);
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
+					   struct user_vfp_exc *);
 extern int vfp_restore_user_hwstate(struct user_vfp *,
 				    struct user_vfp_exc *);
 #endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 5451e1f..c136eef 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -69,6 +69,14 @@
 static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
+
+	/*
+	 * Prevent a mispredicted conditional call to set_fs from forwarding
+	 * the wrong address limit to access_ok under speculation.
+	 */
+	dsb(nsh);
+	isb();
+
 	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
 }
 
@@ -92,6 +100,32 @@
 	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
 /*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size)			\
+	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+						    size_t size)
+{
+	void __user *safe_ptr = (void __user *)ptr;
+	unsigned long tmp;
+
+	asm volatile(
+	"	sub	%1, %3, #1\n"
+	"	subs	%1, %1, %0\n"
+	"	addhs	%1, %1, #1\n"
+	"	subhss	%1, %1, %2\n"
+	"	movlo	%0, #0\n"
+	: "+r" (safe_ptr), "=&r" (tmp)
+	: "r" (size), "r" (current_thread_info()->addr_limit)
+	: "cc");
+
+	csdb();
+	return safe_ptr;
+}
+
+/*
  * Single-value transfer routines.  They automatically use the right
  * size if we just have the right pointer type.  Note that the functions
  * which read from user space (*get_*) need to take care not to leak
@@ -362,6 +396,14 @@
 	__pu_err;							\
 })
 
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
 #define __put_user(x, ptr)						\
 ({									\
 	long __pu_err = 0;						\
@@ -369,12 +411,6 @@
 	__pu_err;							\
 })
 
-#define __put_user_error(x, ptr, err)					\
-({									\
-	__put_user_switch((x), (ptr), (err), __put_user_nocheck);	\
-	(void) 0;							\
-})
-
 #define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
 	do {								\
 		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
@@ -454,6 +490,7 @@
 	: "r" (x), "i" (-EFAULT)				\
 	: "cc")
 
+#endif /* !CONFIG_CPU_SPECTRE */
 
 #ifdef CONFIG_MMU
 extern unsigned long __must_check
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
index 7be5113..d41d359 100644
--- a/arch/arm/kernel/bugs.c
+++ b/arch/arm/kernel/bugs.c
@@ -6,8 +6,8 @@
 void check_other_bugs(void)
 {
 #ifdef MULTI_CPU
-	if (processor.check_bugs)
-		processor.check_bugs();
+	if (cpu_check_bugs)
+		cpu_check_bugs();
 #endif
 }
 
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 6e0375e..997b023 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -145,6 +145,9 @@
 #endif
 	.size	__mmap_switched_data, . - __mmap_switched_data
 
+	__FINIT
+	.text
+
 /*
  * This provides a C-API version of __lookup_processor_type
  */
@@ -156,9 +159,6 @@
 	ldmfd	sp!, {r4 - r6, r9, pc}
 ENDPROC(lookup_processor_type)
 
-	__FINIT
-	.text
-
 /*
  * Read processor ID register (CP#15, CR0), and look up in the linker-built
  * supported processor list.  Note that we can't use the absolute addresses
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 2bc3bae..632f129 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -124,6 +124,11 @@
 
 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+	[0] = &processor,
+};
+#endif
 #endif
 #ifdef MULTI_TLB
 struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -676,28 +681,33 @@
 }
 #endif
 
+/*
+ * locate processor in the list of supported processor types.  The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
+{
+	struct proc_info_list *list = lookup_processor_type(midr);
+
+	if (!list) {
+		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+		       smp_processor_id(), midr);
+		while (1)
+		/* can't use cpu_relax() here as it may require MMU setup */;
+	}
+
+	return list;
+}
+
 static void __init setup_processor(void)
 {
-	struct proc_info_list *list;
-
-	/*
-	 * locate processor in the list of supported processor
-	 * types.  The linker builds this table for us from the
-	 * entries in arch/arm/mm/proc-*.S
-	 */
-	list = lookup_processor_type(read_cpuid_id());
-	if (!list) {
-		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
-		       read_cpuid_id());
-		while (1);
-	}
+	unsigned int midr = read_cpuid_id();
+	struct proc_info_list *list = lookup_processor(midr);
 
 	cpu_name = list->cpu_name;
 	__cpu_architecture = __get_cpu_architecture();
 
-#ifdef MULTI_CPU
-	processor = *list->proc;
-#endif
+	init_proc_vtable(list->proc);
 #ifdef MULTI_TLB
 	cpu_tlb = *list->tlb;
 #endif
@@ -709,7 +719,7 @@
 #endif
 
 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+		list->cpu_name, midr, midr & 15,
 		proc_arch[cpu_architecture()], get_cr());
 
 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
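The setup.c change above splits lookup_processor() out of setup_processor() and, on big.LITTLE builds with branch-predictor hardening, gives each CPU its own cpu_vtable slot so a secondary can install per-CPU workarounds without clobbering the boot CPU's function pointers. A toy standalone model of that arrangement (struct proc_ops, the ops instances, and the function names are all invented for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	#define NR_CPUS 4

	struct proc_ops {
		void (*switch_mm)(void);
	};

	static void generic_switch_mm(void)  { puts("generic switch_mm"); }
	static void hardened_switch_mm(void) { puts("hardened switch_mm"); }

	static struct proc_ops boot_ops = { .switch_mm = generic_switch_mm };

	/* boot CPU uses the statically initialised entry, like slot 0 above */
	static struct proc_ops *cpu_vtable[NR_CPUS] = { [0] = &boot_ops };

	int main(void)
	{
		/* a "secondary CPU" gets its own copy and patches it */
		cpu_vtable[1] = malloc(sizeof(*cpu_vtable[1]));
		if (!cpu_vtable[1])
			return 1;
		*cpu_vtable[1] = boot_ops;
		cpu_vtable[1]->switch_mm = hardened_switch_mm;

		cpu_vtable[0]->switch_mm();	/* boot CPU unaffected */
		cpu_vtable[1]->switch_mm();
		return 0;
	}
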
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index b8f766c..b908382 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -77,8 +77,6 @@
 		kframe->magic = IWMMXT_MAGIC;
 		kframe->size = IWMMXT_STORAGE_SIZE;
 		iwmmxt_task_copy(current_thread_info(), &kframe->storage);
-
-		err = __copy_to_user(frame, kframe, sizeof(*frame));
 	} else {
 		/*
 		 * For bug-compatibility with older kernels, some space
@@ -86,10 +84,14 @@
 		 * Set the magic and size appropriately so that properly
 		 * written userspace can skip it reliably:
 		 */
-		__put_user_error(DUMMY_MAGIC, &frame->magic, err);
-		__put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
+		*kframe = (struct iwmmxt_sigframe) {
+			.magic = DUMMY_MAGIC,
+			.size  = IWMMXT_STORAGE_SIZE,
+		};
 	}
 
+	err = __copy_to_user(frame, kframe, sizeof(*kframe));
+
 	return err;
 }
 
@@ -135,17 +137,18 @@
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	const unsigned long magic = VFP_MAGIC;
-	const unsigned long size = VFP_STORAGE_SIZE;
+	struct vfp_sigframe kframe;
 	int err = 0;
 
-	__put_user_error(magic, &frame->magic, err);
-	__put_user_error(size, &frame->size, err);
+	memset(&kframe, 0, sizeof(kframe));
+	kframe.magic = VFP_MAGIC;
+	kframe.size = VFP_STORAGE_SIZE;
 
+	err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
 	if (err)
-		return -EFAULT;
+		return err;
 
-	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+	return __copy_to_user(frame, &kframe, sizeof(kframe));
 }
 
 static int restore_vfp_context(char __user **auxp)
@@ -288,30 +291,35 @@
 setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 {
 	struct aux_sigframe __user *aux;
+	struct sigcontext context;
 	int err = 0;
 
-	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
-	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
-	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
-	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
-	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
-	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
-	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
-	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
-	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
-	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
-	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
-	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
-	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
-	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
-	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
-	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
-	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+	context = (struct sigcontext) {
+		.arm_r0        = regs->ARM_r0,
+		.arm_r1        = regs->ARM_r1,
+		.arm_r2        = regs->ARM_r2,
+		.arm_r3        = regs->ARM_r3,
+		.arm_r4        = regs->ARM_r4,
+		.arm_r5        = regs->ARM_r5,
+		.arm_r6        = regs->ARM_r6,
+		.arm_r7        = regs->ARM_r7,
+		.arm_r8        = regs->ARM_r8,
+		.arm_r9        = regs->ARM_r9,
+		.arm_r10       = regs->ARM_r10,
+		.arm_fp        = regs->ARM_fp,
+		.arm_ip        = regs->ARM_ip,
+		.arm_sp        = regs->ARM_sp,
+		.arm_lr        = regs->ARM_lr,
+		.arm_pc        = regs->ARM_pc,
+		.arm_cpsr      = regs->ARM_cpsr,
 
-	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
-	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
-	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
-	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+		.trap_no       = current->thread.trap_no,
+		.error_code    = current->thread.error_code,
+		.fault_address = current->thread.address,
+		.oldmask       = set->sig[0],
+	};
+
+	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
 
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
 
@@ -328,7 +336,7 @@
 	if (err == 0)
 		err |= preserve_vfp_context(&aux->vfp);
 #endif
-	__put_user_error(0, &aux->end_magic, err);
+	err |= __put_user(0, &aux->end_magic);
 
 	return err;
 }
@@ -491,7 +499,7 @@
 	/*
 	 * Set uc.uc_flags to a value which sc.trap_no would never have.
 	 */
-	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+	err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
 
 	err |= setup_sigframe(frame, regs, set);
 	if (err == 0)
@@ -511,8 +519,8 @@
 
 	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
 
-	__put_user_error(0, &frame->sig.uc.uc_flags, err);
-	__put_user_error(NULL, &frame->sig.uc.uc_link, err);
+	err |= __put_user(0, &frame->sig.uc.uc_flags);
+	err |= __put_user(NULL, &frame->sig.uc.uc_link);
 
 	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
 	err |= setup_sigframe(&frame->sig, regs, set);
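Every conversion in signal.c above follows the same shape: build the frame in a kernel-stack copy first, then publish it with one __copy_to_user(), rather than issuing a run of __put_user_error() stores to user memory that each needed separate checking, which is what made them attractive Spectre-v1.1 gadgets. A compilable sketch of the pattern (struct sigctx and copy_out() are stand-ins, the latter for __copy_to_user()):

	#include <stdio.h>
	#include <string.h>

	struct sigctx { unsigned long r0, r1, pc; };

	/* stand-in for __copy_to_user(); returns 0 on success */
	static int copy_out(void *dst, const void *src, size_t n)
	{
		memcpy(dst, src, n);
		return 0;
	}

	static int setup_frame(struct sigctx *dst, unsigned long r0,
			       unsigned long r1, unsigned long pc)
	{
		struct sigctx k = { .r0 = r0, .r1 = r1, .pc = pc };

		/* one bulk, checked copy instead of one store per field */
		return copy_out(dst, &k, sizeof(k));
	}

	int main(void)
	{
		struct sigctx frame;

		if (!setup_frame(&frame, 1, 2, 0x8000))
			printf("pc=%#lx\n", frame.pc);
		return 0;
	}
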
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2a26e31..5bff527 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -102,6 +103,30 @@
 #endif
 }
 
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	if (!cpu_vtable[cpu])
+		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+	return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
@@ -109,6 +134,10 @@
 	if (!smp_ops.smp_boot_secondary)
 		return -ENOSYS;
 
+	ret = secondary_biglittle_prepare(cpu);
+	if (ret)
+		return ret;
+
 	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
@@ -359,6 +388,8 @@
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;
 
+	secondary_biglittle_init();
+
 	/*
 	 * The identity mapping is uncached (strongly ordered), so
 	 * switch away from it before attempting any exclusive accesses.
@@ -708,6 +739,21 @@
 		pr_warn("SMP: failed to stop secondary CPUs\n");
 }
 
+/* In case panic() is called at the same time on CPU1 and CPU2, and CPU1
+ * calls panic_smp_self_stop() before crash_smp_send_stop(), CPU1 cannot
+ * receive the IPI IRQs from CPU2; CPU1 then stays online forever and
+ * kdump fails. So split out panic_smp_self_stop() and add
+ * set_cpu_online(smp_processor_id(), false).
+ */
+void panic_smp_self_stop(void)
+{
+	pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
+	         smp_processor_id());
+	set_cpu_online(smp_processor_id(), false);
+	while (1)
+		cpu_relax();
+}
+
 /*
  * not supported here
  */
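The new panic_smp_self_stop() above matters only for its ordering: the panicking CPU marks itself offline before it spins, so a later crash/kdump pass never waits on it. A minimal sequential model of that ordering, with stand-in names (the cpu_online array and helpers here are illustrative, not the kernel's):

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS 2
	static bool cpu_online[NR_CPUS] = { true, true };

	/* stand-in for panic_smp_self_stop(): mark ourselves offline
	 * first, so the kdump check below never waits on this CPU */
	static void self_stop(int cpu)
	{
		cpu_online[cpu] = false;
		/* the real code now spins in cpu_relax() forever */
	}

	static bool kdump_can_proceed(int crashing_cpu)
	{
		for (int i = 0; i < NR_CPUS; i++)
			if (i != crashing_cpu && cpu_online[i])
				return false;
		return true;
	}

	int main(void)
	{
		self_stop(1);			/* CPU1 panicked first */
		printf("kdump ok: %d\n", kdump_can_proceed(0));
		return 0;
	}
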
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index f0dd4b6..40da087 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -277,6 +277,7 @@
 				    int maxevents, int timeout)
 {
 	struct epoll_event *kbuf;
+	struct oabi_epoll_event e;
 	mm_segment_t fs;
 	long ret, err, i;
 
@@ -295,8 +296,11 @@
 	set_fs(fs);
 	err = 0;
 	for (i = 0; i < ret; i++) {
-		__put_user_error(kbuf[i].events, &events->events, err);
-		__put_user_error(kbuf[i].data,   &events->data,   err);
+		e.events = kbuf[i].events;
+		e.data = kbuf[i].data;
+		err = __copy_to_user(events, &e, sizeof(e));
+		if (err)
+			break;
 		events++;
 	}
 	kfree(kbuf);
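The OABI epoll path above now marshals each event through a kernel-local struct oabi_epoll_event and copies it out whole, stopping at the first failed copy instead of accumulating per-field errors. The same loop, modelled standalone (the event layouts are illustrative, and copy_out() stands in for __copy_to_user(); the real OABI struct is packed, hence the attribute):

	#include <stdint.h>
	#include <string.h>

	struct kevent { uint32_t events; uint64_t data; };
	struct oevent { uint32_t events; uint64_t data; } __attribute__((packed));

	/* stand-in for __copy_to_user(); returns 0 on success */
	static int copy_out(void *dst, const void *src, size_t n)
	{
		memcpy(dst, src, n);
		return 0;
	}

	static int copy_events(struct oevent *user, const struct kevent *kbuf, int n)
	{
		struct oevent e;

		for (int i = 0; i < n; i++) {
			e.events = kbuf[i].events;
			e.data   = kbuf[i].data;
			if (copy_out(&user[i], &e, sizeof(e)))
				return -1;	/* stop on first failure */
		}
		return 0;
	}

	int main(void)
	{
		struct kevent k = { .events = 1, .data = 42 };
		struct oevent out[1];

		return copy_events(out, &k, 1);
	}
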
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index a826df3..6709a8d 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -93,11 +93,7 @@
 #ifdef CONFIG_CPU_SPECTRE
 	get_thread_info r3
 	ldr	r3, [r3, #TI_ADDR_LIMIT]
-	adds	ip, r1, r2	@ ip=addr+size
-	sub	r3, r3, #1	@ addr_limit - 1
-	cmpcc	ip, r3		@ if (addr+size > addr_limit - 1)
-	movcs	r1, #0		@ addr = NULL
-	csdb
+	uaccess_mask_range_ptr r1, r2, r3, ip
 #endif
 
 #include "copy_template.S"
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index caf5019..970abe5 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -94,6 +94,11 @@
 
 ENTRY(__copy_to_user_std)
 WEAK(arm_copy_to_user)
+#ifdef CONFIG_CPU_SPECTRE
+	get_thread_info r3
+	ldr	r3, [r3, #TI_ADDR_LIMIT]
+	uaccess_mask_range_ptr r0, r2, r3, ip
+#endif
 
 #include "copy_template.S"
 
@@ -108,4 +113,3 @@
 	rsb	r0, r0, r2
 	copy_abort_end
 	.popsection
-
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 9b4ed17..73dc736 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -152,7 +152,8 @@
 		n = __copy_to_user_std(to, from, n);
 		uaccess_restore(ua_flags);
 	} else {
-		n = __copy_to_user_memcpy(to, from, n);
+		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
+					  from, n);
 	}
 	return n;
 }
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394e..5e11ad3 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@
 	} else /* remote PCI bus */
 		base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
 
-	return base + (where & 0xffc) + (devfn << 12);
+	return base + where + (devfn << 12);
 }
 
 static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 243a108..fd0053e 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -110,7 +110,7 @@
 	 * except for power up sw2iso which need to be
 	 * larger than LDO ramp up time.
 	 */
-	imx_gpc_set_arm_power_up_timing(2, 1);
+	imx_gpc_set_arm_power_up_timing(0xf, 1);
 	imx_gpc_set_arm_power_down_timing(1, 1);
 
 	return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index a109f648..0f916c2 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -393,7 +393,11 @@
 					      sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
 					      GFP_KERNEL);
 			chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
-			mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
+			mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
+						  "lm%x:00700", dev->id);
+			if (!lookup || !chipname || !mmciname)
+				return -ENOMEM;
+
 			lookup->dev_id = mmciname;
 			/*
 			 * Offsets on GPIO block 1:
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 3b73813..23e8c93 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -75,8 +75,7 @@
 /*
  * N2100 PCI.
  */
-static int __init
-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	int irq;
 
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index fc5fb77..17558be 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -50,6 +50,9 @@
 #define OMAP4_NR_BANKS		4
 #define OMAP4_NR_IRQS		128
 
+#define SYS_NIRQ1_EXT_SYS_IRQ_1	7
+#define SYS_NIRQ2_EXT_SYS_IRQ_2	119
+
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
 static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
@@ -153,6 +156,37 @@
 	irq_chip_unmask_parent(d);
 }
 
+/*
+ * The sys_nirq pins bypass peripheral modules and are wired directly
+ * to MPUSS wakeupgen. They get automatically inverted for GIC.
+ */
+static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	bool inverted = false;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_LOW:
+		type &= ~IRQ_TYPE_LEVEL_MASK;
+		type |= IRQ_TYPE_LEVEL_HIGH;
+		inverted = true;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		type &= ~IRQ_TYPE_EDGE_BOTH;
+		type |= IRQ_TYPE_EDGE_RISING;
+		inverted = true;
+		break;
+	default:
+		break;
+	}
+
+	if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
+	    d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
+		pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
+			d->hwirq);
+
+	return irq_chip_set_type_parent(d, type);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
 
@@ -446,7 +480,7 @@
 	.irq_mask		= wakeupgen_mask,
 	.irq_unmask		= wakeupgen_unmask,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
-	.irq_set_type		= irq_chip_set_type_parent,
+	.irq_set_type		= wakeupgen_irq_set_type,
 	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
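The wakeupgen change above teaches .irq_set_type to normalize inverted polarities: the sys_nirq pins arrive at the GIC inverted, so a LEVEL_LOW or EDGE_FALLING request is rewritten to its opposite-polarity equivalent before being handed to the parent chip, and any other line that needed inversion gets flagged as a likely DT bug. The flag rewriting, reduced to a standalone function (the IRQ_TYPE_* values follow the kernel's numbering but are re-declared here for illustration):

	#include <stdio.h>

	#define IRQ_TYPE_EDGE_RISING	0x1
	#define IRQ_TYPE_EDGE_FALLING	0x2
	#define IRQ_TYPE_EDGE_BOTH	(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)
	#define IRQ_TYPE_LEVEL_HIGH	0x4
	#define IRQ_TYPE_LEVEL_LOW	0x8
	#define IRQ_TYPE_LEVEL_MASK	(IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)

	/* rewrite an inverted trigger type to its uninverted equivalent */
	static unsigned int normalize(unsigned int type, int *inverted)
	{
		*inverted = 0;
		switch (type) {
		case IRQ_TYPE_LEVEL_LOW:
			type = (type & ~IRQ_TYPE_LEVEL_MASK) | IRQ_TYPE_LEVEL_HIGH;
			*inverted = 1;
			break;
		case IRQ_TYPE_EDGE_FALLING:
			type = (type & ~IRQ_TYPE_EDGE_BOTH) | IRQ_TYPE_EDGE_RISING;
			*inverted = 1;
			break;
		}
		return type;
	}

	int main(void)
	{
		int inv;

		printf("%#x inv=%d\n", normalize(IRQ_TYPE_LEVEL_LOW, &inv), inv);
		return 0;
	}
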
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index cd65ea4..ec3789b 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2397,7 +2397,7 @@
  * a stub; implementing this properly requires iclk autoidle usecounting in
  * the clock code.   No return value.
  */
-static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
+static void _setup_iclk_autoidle(struct omap_hwmod *oh)
 {
 	struct omap_hwmod_ocp_if *os;
 
@@ -2428,7 +2428,7 @@
  * reset.  Returns 0 upon success or a negative error code upon
  * failure.
  */
-static int __init _setup_reset(struct omap_hwmod *oh)
+static int _setup_reset(struct omap_hwmod *oh)
 {
 	int r;
 
@@ -2489,7 +2489,7 @@
  *
  * No return value.
  */
-static void __init _setup_postsetup(struct omap_hwmod *oh)
+static void _setup_postsetup(struct omap_hwmod *oh)
 {
 	u8 postsetup_state;
 
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index c5c0ab8..024c1fb 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -558,7 +558,7 @@
 	.exit		= cm_x300_u2d_exit,
 };
 
-static void cm_x300_init_u2d(void)
+static void __init cm_x300_init_u2d(void)
 {
 	pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data);
 }
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 9e132b3..9960ea1 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -184,7 +184,7 @@
 	.lcd_conn		= LCD_COLOR_TFT_16BPP,
 };
 
-static void littleton_init_lcd(void)
+static void __init littleton_init_lcd(void)
 {
 	pxa_set_fb_info(NULL, &littleton_lcd_info);
 }
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index e385179..68a536d 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -559,7 +559,7 @@
 	.flags		= ENABLE_PORT_ALL | POWER_SENSE_LOW,
 };
 
-static void zeus_register_ohci(void)
+static void __init zeus_register_ohci(void)
 {
 	/* Port 2 is shared between host and client interface. */
 	UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
index 028e50c..a32c3b6 100644
--- a/arch/arm/mach-tango/pm.c
+++ b/arch/arm/mach-tango/pm.c
@@ -3,6 +3,7 @@
 #include <linux/suspend.h>
 #include <asm/suspend.h>
 #include "smc.h"
+#include "pm.h"
 
 static int tango_pm_powerdown(unsigned long arg)
 {
@@ -24,10 +25,7 @@
 	.valid = suspend_valid_only_mem,
 };
 
-static int __init tango_pm_init(void)
+void __init tango_pm_init(void)
 {
 	suspend_set_ops(&tango_pm_ops);
-	return 0;
 }
-
-late_initcall(tango_pm_init);
diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
new file mode 100644
index 0000000..35ea705
--- /dev/null
+++ b/arch/arm/mach-tango/pm.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CONFIG_SUSPEND
+void __init tango_pm_init(void);
+#else
+#define tango_pm_init NULL
+#endif
diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
index 677dd7b..824f907 100644
--- a/arch/arm/mach-tango/setup.c
+++ b/arch/arm/mach-tango/setup.c
@@ -2,6 +2,7 @@
 #include <asm/mach/arch.h>
 #include <asm/hardware/cache-l2x0.h>
 #include "smc.h"
+#include "pm.h"
 
 static void tango_l2c_write(unsigned long val, unsigned int reg)
 {
@@ -15,4 +16,5 @@
 	.dt_compat	= tango_dt_compat,
 	.l2c_aux_mask	= ~0,
 	.l2c_write_sec	= tango_l2c_write,
+	.init_late	= tango_pm_init,
 MACHINE_END
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 81d0efb..5461d58 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -274,6 +274,13 @@
 	.endm
 
 .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+/*
+ * If we are building for big.LITTLE with branch predictor hardening,
+ * we need the processor function tables to remain available after boot.
+ */
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+	.section ".rodata"
+#endif
 	.type	\name\()_processor_functions, #object
 	.align 2
 ENTRY(\name\()_processor_functions)
@@ -309,6 +316,9 @@
 	.endif
 
 	.size	\name\()_processor_functions, . - \name\()_processor_functions
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+	.previous
+#endif
 .endm
 
 .macro define_cache_functions name:req
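The proc-macros.S comment above is the hinge of the whole series: on big.LITTLE with CONFIG_HARDEN_BRANCH_PREDICTOR, the per-processor function tables are emitted into .rodata so they both survive past boot (the per-CPU vtables point into them) and stay read-only against function-pointer overwrites. The C-level analogue of that placement is simply a const table of function pointers, sketched here with invented names:

	#include <stdio.h>

	struct processor_fns {
		void (*switch_mm)(void);
		void (*set_pte)(void);
	};

	static void v7_switch_mm(void) { puts("switch_mm"); }
	static void v7_set_pte(void)   { puts("set_pte"); }

	/* const => placed in .rodata: entries cannot be rewritten at
	 * run time, and the table is not discarded after init */
	static const struct processor_fns v7_processor_functions = {
		.switch_mm = v7_switch_mm,
		.set_pte   = v7_set_pte,
	};

	int main(void)
	{
		v7_processor_functions.switch_mm();
		return 0;
	}
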
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 5544b82..9a07916 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -52,8 +52,6 @@
 	case ARM_CPU_PART_CORTEX_A17:
 	case ARM_CPU_PART_CORTEX_A73:
 	case ARM_CPU_PART_CORTEX_A75:
-		if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			harden_branch_predictor_bpiall;
 		spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@
 
 	case ARM_CPU_PART_CORTEX_A15:
 	case ARM_CPU_PART_BRAHMA_B15:
-		if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			harden_branch_predictor_iciallu;
 		spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@
 					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 			if ((int)res.a0 != 0)
 				break;
-			if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
-				goto bl_error;
 			per_cpu(harden_branch_predictor_fn, cpu) =
 				call_hvc_arch_workaround_1;
-			processor.switch_mm = cpu_v7_hvc_switch_mm;
+			cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
 			spectre_v2_method = "hypervisor";
 			break;
 
@@ -101,11 +95,9 @@
 					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 			if ((int)res.a0 != 0)
 				break;
-			if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
-				goto bl_error;
 			per_cpu(harden_branch_predictor_fn, cpu) =
 				call_smc_arch_workaround_1;
-			processor.switch_mm = cpu_v7_smc_switch_mm;
+			cpu_do_switch_mm = cpu_v7_smc_switch_mm;
 			spectre_v2_method = "firmware";
 			break;
 
@@ -119,11 +111,6 @@
 	if (spectre_v2_method)
 		pr_info("CPU%u: Spectre v2: using %s workaround\n",
 			smp_processor_id(), spectre_v2_method);
-	return;
-
-bl_error:
-	pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
-		cpu);
 }
 #else
 static void cpu_v7_spectre_init(void)
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ed36dca..f519199 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -190,8 +190,6 @@
 	if (ssp == NULL)
 		return -ENODEV;
 
-	iounmap(ssp->mmio_base);
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(res->start, resource_size(res));
 
@@ -201,7 +199,6 @@
 	list_del(&ssp->node);
 	mutex_unlock(&ssp_lock);
 
-	kfree(ssp);
 	return 0;
 }
 
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 2c118a6..0dc23fc 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -247,7 +247,7 @@
 	}
 
 	/* Copy arch-dep-instance from template. */
-	memcpy(code, (unsigned char *)optprobe_template_entry,
+	memcpy(code, (unsigned long *)&optprobe_template_entry,
 			TMPL_END_IDX * sizeof(kprobe_opcode_t));
 
 	/* Adjust buffer according to instruction. */
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index dc7e6b5..66c5e69 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -553,12 +553,11 @@
  * Save the current VFP state into the provided structures and prepare
  * for entry into a new function (signal handler).
  */
-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
-				    struct user_vfp_exc __user *ufp_exc)
+int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
+				    struct user_vfp_exc *ufp_exc)
 {
 	struct thread_info *thread = current_thread_info();
 	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
-	int err = 0;
 
 	/* Ensure that the saved hwstate is up-to-date. */
 	vfp_sync_hwstate(thread);
@@ -567,22 +566,19 @@
 	 * Copy the floating point registers. There can be unused
 	 * registers see asm/hwcap.h for details.
 	 */
-	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
-			      sizeof(hwstate->fpregs));
+	memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
+
 	/*
 	 * Copy the status and control register.
 	 */
-	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+	ufp->fpscr = hwstate->fpscr;
 
 	/*
 	 * Copy the exception registers.
 	 */
-	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
-	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
-	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
-
-	if (err)
-		return -EFAULT;
+	ufp_exc->fpexc = hwstate->fpexc;
+	ufp_exc->fpinst = hwstate->fpinst;
+	ufp_exc->fpinst2 = hwstate->fpinst2;
 
 	/* Ensure that VFP is disabled. */
 	vfp_flush_hwstate(thread);
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 60f3dd7..1e35170 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -18,7 +18,7 @@
 # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
 # for relative relocs, since this leads to better Image compression
 # with the relocation offsets always being zero.
-LDFLAGS_vmlinux		+= -pie -shared -Bsymbolic \
+LDFLAGS_vmlinux		+= -shared -Bsymbolic -z notext -z norelro \
 			$(call ld-option, --no-apply-dynamic-relocs)
 endif
 
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index f4964be..e80a792 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -118,6 +118,7 @@
 		reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
 		clocks = <&pmic>;
 		clock-names = "ext_clock";
+		post-power-on-delay-ms = <10>;
 		power-off-delay-us = <10>;
 	};
 
@@ -300,7 +301,6 @@
 
 		dwmmc_0: dwmmc0@f723d000 {
 			cap-mmc-highspeed;
-			mmc-hs200-1_8v;
 			non-removable;
 			bus-width = <0x8>;
 			vmmc-supply = <&ldo19>;
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 176e38d..ec0da5b 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -27,6 +27,23 @@
 		method = "smc";
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/*
+		 * This area matches the mapping done with a
+		 * mainline U-Boot, and should be updated by the
+		 * bootloader.
+		 */
+
+		psci-area@4000000 {
+			reg = <0x0 0x4000000 0x0 0x200000>;
+			no-map;
+		};
+	};
+
 	ap806 {
 		#address-cells = <2>;
 		#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index a747b7b..387be39 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -17,8 +17,13 @@
 	model = "MediaTek MT7622 RFB1 board";
 	compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
 
+	aliases {
+		serial0 = &uart0;
+	};
+
 	chosen {
-		bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+		stdout-path = "serial0:115200n8";
+		bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
 	};
 
 	cpus {
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 8ffb521..a38031f 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -14,15 +14,19 @@
 		kona-rumi-overlay.dtbo \
 		kona-qrd-overlay.dtbo
 
-kona-cdp-overlay.dtbo-base := kona.dtb
-kona-mtp-overlay.dtbo-base := kona.dtb
-kona-rumi-overlay.dtbo-base := kona.dtb
-kona-qrd-overlay.dtbo-base := kona.dtb
+kona-cdp-overlay.dtbo-base := kona.dtb kona-v2.dtb
+kona-mtp-overlay.dtbo-base := kona.dtb kona-v2.dtb
+kona-rumi-overlay.dtbo-base := kona.dtb kona-v2.dtb
+kona-qrd-overlay.dtbo-base := kona.dtb kona-v2.dtb
 else
 dtb-$(CONFIG_ARCH_KONA)	+= kona-rumi.dtb \
 	kona-mtp.dtb \
 	kona-cdp.dtb \
-	kona-qrd.dtb
+	kona-qrd.dtb \
+	kona-v2-rumi.dtb \
+	kona-v2-mtp.dtb \
+	kona-v2-cdp.dtb \
+	kona-v2-qrd.dtb
 endif
 
 ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
index 71b39de..da87cc9 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 &mdss_mdp {
@@ -9,9 +9,9 @@
 				"nt35695b truly fhd command mode dsi panel";
 		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
 
-		qcom,dsi-ctrl-num = <1>;
-		qcom,dsi-phy-num = <1>;
-		qcom,dsi-select-clocks = "src_byte_clk1", "src_pixel_clk1";
+		qcom,dsi-ctrl-num = <0>;
+		qcom,dsi-phy-num = <0>;
+		qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0";
 
 		qcom,mdss-dsi-virtual-channel-id = <0>;
 		qcom,mdss-dsi-stream = <0>;
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-dsc375-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-dsc375-cmd.dtsi
index 249279f..748be1b 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-dsc375-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-dsc375-cmd.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 &mdss_mdp {
@@ -275,6 +275,58 @@
 				qcom,mdss-dsc-bit-per-pixel = <8>;
 				qcom,mdss-dsc-block-prediction-enable;
 			};
+
+			timing@2 {
+				qcom,mdss-dsi-panel-width = <2520>;
+				qcom,mdss-dsi-panel-height = <2160>;
+				qcom,mdss-dsi-h-front-porch = <30>;
+				qcom,mdss-dsi-h-back-porch = <100>;
+				qcom,mdss-dsi-h-pulse-width = <4>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <7>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-panel-framerate = <120>;
+
+				qcom,mdss-dsi-on-command = [
+					39 01 00 00 00 00 11 91 09 20 00 20 02
+					00 03 1c 04 21 00
+					0f 03 19 01 97
+					39 01 00 00 00 00 03 92 10 f0
+					15 01 00 00 00 00 02 90 03
+					15 01 00 00 00 00 02 03 01
+					39 01 00 00 00 00 06 f0 55 aa 52 08 04
+					15 01 00 00 00 00 02 c0 03
+					39 01 00 00 00 00 06 f0 55 aa 52 08 07
+					15 01 00 00 00 00 02 ef 01
+					39 01 00 00 00 00 06 f0 55 aa 52 08 00
+					15 01 00 00 00 00 02 b4 01
+					15 01 00 00 00 00 02 35 00
+					39 01 00 00 00 00 06 f0 55 aa 52 08 01
+					39 01 00 00 00 00 05 ff aa 55 a5 80
+					15 01 00 00 00 00 02 6f 01
+					15 01 00 00 00 00 02 f3 10
+					39 01 00 00 00 00 05 ff aa 55 a5 00
+					/* sleep out + delay 120ms */
+					05 01 00 00 78 00 01 11
+					/* display on + delay 120ms */
+					05 01 00 00 78 00 01 29
+					];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 78 00 02 28 00
+					 05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <1080>;
+				qcom,mdss-dsc-slice-width = <1260>;
+				qcom,mdss-dsc-slice-per-pkt = <2>;
+				qcom,mdss-dsc-bit-per-component = <10>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi
index 3fc0c8d..9e555b4 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi
@@ -83,7 +83,7 @@
 				  39 01 00 00 00 00 06 b9 bf 11 40 00 30
 				  39 01 00 00 00 00 09 F8 00 08 10 08 2D
 					   00 00 2D
-				  15 01 00 00 00 00 02 55 0c
+				  15 01 00 00 00 00 02 55 00
 				  05 01 00 00 1e 00 02 11 00
 				  15 01 00 00 78 00 02 3d 01
 				  39 01 00 00 00 00 03 b0 a5 00
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi
index 6762017..e96c7ba 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi
@@ -79,7 +79,7 @@
 					39 01 00 00 00 00 03 b0 a5 00
 					39 01 00 00 00 00 09 F8 00 08 10 08 2D
 					   00 00 2D
-					15 01 00 00 00 00 02 55 0c
+					15 01 00 00 00 00 02 55 00
 					05 01 00 00 1e 00 02 11 00
 					39 01 00 00 00 00 03 b0 a5 00
 					15 01 00 00 00 00 02 e0 18
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi
index 7c72021..68822a9 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi
@@ -53,7 +53,7 @@
 			  39 01 00 00 00 00 06 b2 00 5d 04 80 49
 			  15 01 00 00 00 00 02 3d 10
 			  15 01 00 00 00 00 02 36 00
-			  15 01 00 00 00 00 02 55 0c
+			  15 01 00 00 00 00 02 55 00
 			  39 01 00 00 00 00 09 f8 00 08 10 08 2d
 			     00 00 2d
 			  39 01 00 00 3c 00 03 51 00 00
diff --git a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-ascent-3450mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-ascent-3450mah.dtsi
new file mode 100644
index 0000000..6a00deb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-ascent-3450mah.dtsi
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *  Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+qcom,ascent_wconn_3450mah_fresh_averaged_masterslave_feb28th2019 {
+	qcom,profile-revision = <24>;
+	/* #Ascent_wConn_3450mAh_Fresh_averaged_MasterSlave_Feb28th2019*/
+	qcom,max-voltage-uv = <4350000>;
+	qcom,fastchg-current-ma = <3450>;
+	qcom,jeita-fcc-ranges = <0   100  1725000
+				101  400  3450000
+				401  450  2760000>;
+	qcom,jeita-fv-ranges = <0   100  4250000
+				101 400  4350000
+				401 450  4250000>;
+	/* COLD = 0 DegC, HOT = 45 DegC */
+	qcom,jeita-hard-thresholds = <0x58cd 0x20b8>;
+	/* COOL = 10 DegC, WARM = 40 DegC */
+	qcom,jeita-soft-thresholds = <0x4ccc 0x25e3>;
+	/* COLD hys = 13 DegC, WARM hys = 37 DegC */
+	qcom,jeita-soft-hys-thresholds = <0x48d4 0x2943>;
+	qcom,jeita-soft-fcc-ua = <1725000 2760000>;
+	qcom,jeita-soft-fv-uv = <4250000 4250000>;
+	qcom,fg-cc-cv-threshold-mv = <4340>;
+	qcom,nom-batt-capacity-mah = <3450>;
+	qcom,batt-id-kohm = <60>;
+	qcom,battery-beta = <3435>;
+	qcom,therm-room-temp = <68000>;
+	qcom,battery-type = "ascent_3450mah_averaged_masterslave_feb28th2019";
+	qcom,therm-coefficients = <0x2313 0xc42 0xea62 0xcc3d 0x8313>;
+	qcom,therm-center-offset = <0x5b>;
+	qcom,therm-pull-up = <100>;
+	qcom,rslow-normal-coeffs = <0x43 0x0a 0x7b 0x1a>;
+	qcom,rslow-low-coeffs = <0xd0 0x13 0x18 0x22>;
+	qcom,checksum = <0xC0ED>;
+	qcom,gui-version = "PM855GUI - 1.0.0.13";
+	qcom,fg-profile-data = [
+		09 00 63 EA
+		65 DD F5 DB
+		02 D4 00 00
+		A5 BD 62 8A
+		FA 87 3A A4
+		16 9A D5 80
+		0E 00 43 0A
+		7B 1A 3B F4
+		4D F2 CE 07
+		32 00 1F F3
+		18 D4 81 DA
+		D4 02 0B E4
+		F3 C4 F6 1B
+		AB F3 AF C4
+		60 00 4A 00
+		42 00 43 00
+		42 00 3A 00
+		3C 00 49 00
+		3D 00 39 00
+		3A 00 60 00
+		26 00 24 00
+		33 00 3D 00
+		36 00 94 00
+		58 64 41 00
+		3A 00 35 08
+		60 F8 18 00
+		25 00 3B 08
+		3C 08 3D 00
+		83 20 4E 40
+		44 50 42 12
+		3E 00 D8 00
+		6D 20 B5 0C
+		E5 FA 2B 04
+		7C 1C F0 0A
+		55 0C A7 23
+		95 17 74 43
+		11 55 74 03
+		79 14 A1 1F
+		9B 05 5A 02
+		EF F4 AE 1C
+		34 02 90 05
+		8E 0A 1D 17
+		66 23 70 45
+		A8 52 7B 14
+		DE 1E 75 EE
+		7D D3 02 C4
+		AA 1C F8 C1
+		06 04 25 BA
+		33 18 BD 8A
+		F2 85 21 A2
+		78 98 09 80
+		3D FA AD 0D
+		2F 02 61 03
+		00 F8 DF D5
+		6D EA F9 0F
+		E8 F5 6A D5
+		0F 11 0C 18
+		03 F5 6A 03
+		B0 05 D8 01
+		CE 07 32 00
+		9F 03 19 04
+		0B 05 5D 02
+		79 03 E4 05
+		4A 03 FB 05
+		AB 02 55 00
+		3F 00 41 00
+		40 64 40 00
+		44 F8 37 00
+		3B F0 41 00
+		43 00 36 10
+		60 10 3E 00
+		4A 20 4E 40
+		52 58 5D 0F
+		45 00 46 00
+		4B 08 5E F8
+		43 00 5E 00
+		42 08 52 10
+		50 00 65 20
+		78 40 59 50
+		65 12 66 00
+		5E 00 47 08
+		D8 00 A8 1F
+		53 04 7D 0B
+		52 0C A9 1C
+		7D 23 B8 45
+		44 52 5E 18
+		A8 03 4D 04
+		9D 02 6C 13
+		3F 0A 85 1F
+		F5 05 11 02
+		6D 05 A7 1C
+		0E 03 06 04
+		11 02 47 18
+		1C 03 61 05
+		15 03 6C 00
+		6C 20 DD 04
+		E4 02 EF 05
+		C4 1C 1F 02
+		D9 05 31 02
+		7B 18 C5 02
+		D2 05 60 02
+		85 00 A4 01
+		C0 00 FA 00
+		A4 0D 00 00
+	];
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
index d9d4054..657fd87 100644
--- a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
@@ -4,12 +4,28 @@
  */
 
 #include <dt-bindings/clock/qcom,audio-ext-clk.h>
+#include <dt-bindings/sound/qcom,bolero-clk-rsc.h>
 #include <dt-bindings/sound/audio-codec-port-types.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "kona-va-bolero.dtsi"
 
 &bolero {
 	qcom,num-macros = <4>;
+	bolero-clk-rsc-mngr {
+		compatible = "qcom,bolero-clk-rsc-mngr";
+		qcom,fs-gen-sequence = <0x3000 0x1>,
+					<0x3004 0x1>, <0x3080 0x2>;
+	qcom,rx_mclk_mode_muxsel = <0x033240D8>;
+	qcom,wsa_mclk_mode_muxsel = <0x033220D8>;
+	qcom,va_mclk_mode_muxsel = <0x033A0000>;
+	clock-names = "tx_core_clk", "tx_npl_clk", "rx_core_clk", "rx_npl_clk",
+		 "wsa_core_clk", "wsa_npl_clk", "va_core_clk", "va_npl_clk";
+	clocks = <&clock_audio_tx_1 0>, <&clock_audio_tx_2 0>,
+		<&clock_audio_rx_1 0>, <&clock_audio_rx_2 0>,
+		<&clock_audio_wsa_1 0>, <&clock_audio_wsa_2 0>,
+		<&clock_audio_va_1 0>, <&clock_audio_va_2 0>;
+	};
+
 	tx_macro: tx-macro@3220000 {
 		compatible = "qcom,tx-macro";
 		reg = <0x3220000 0x0>;
@@ -55,6 +71,7 @@
 		qcom,rx-swr-gpios = <&rx_swr_gpios>;
 		qcom,rx_mclk_mode_muxsel = <0x033240D8>;
 		qcom,rx-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 		swr1: rx_swr_master {
 			compatible = "qcom,swr-mstr";
 			#address-cells = <2>;
@@ -87,6 +104,7 @@
 			 <&clock_audio_wsa_2 0>;
 		qcom,wsa-swr-gpios = <&wsa_swr_gpios>;
 		qcom,wsa-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 		swr0: wsa_swr_master {
 			compatible = "qcom,swr-mstr";
 			#address-cells = <2>;
@@ -181,6 +199,8 @@
 &kona_snd {
 	qcom,model = "kona-mtp-snd-card";
 	qcom,msm-mi2s-master = <1>, <1>, <1>;
+	qcom,wcn-bt = <1>;
+	qcom,ext-disp-audio-rx = <1>;
 	qcom,audio-routing =
 		"AMIC1", "MIC BIAS1",
 		"MIC BIAS1", "Analog Mic1",
@@ -208,9 +228,9 @@
 		"IN2_HPHR", "HPHR_OUT",
 		"IN3_AUX", "AUX_OUT",
 		"TX SWR_ADC0", "ADC1_OUTPUT",
-		"TX SWR_ADC2", "ADC2_OUTPUT",
-		"TX SWR_ADC3", "ADC3_OUTPUT",
-		"TX SWR_ADC4", "ADC4_OUTPUT",
+		"TX SWR_ADC1", "ADC2_OUTPUT",
+		"TX SWR_ADC2", "ADC3_OUTPUT",
+		"TX SWR_ADC3", "ADC4_OUTPUT",
 		"TX SWR_DMIC0", "DMIC1_OUTPUT",
 		"TX SWR_DMIC1", "DMIC2_OUTPUT",
 		"TX SWR_DMIC2", "DMIC3_OUTPUT",
@@ -227,14 +247,36 @@
 		"RX_TX DEC2_INP", "TX DEC2 MUX",
 		"RX_TX DEC3_INP", "TX DEC3 MUX",
 		"SpkrLeft IN", "WSA_SPK1 OUT",
-		"SpkrRight IN", "WSA_SPK2 OUT";
+		"SpkrRight IN", "WSA_SPK2 OUT",
+		"VA_AIF1 CAP", "VA_SWR_CLK",
+		"VA_AIF2 CAP", "VA_SWR_CLK",
+		"VA_AIF3 CAP", "VA_SWR_CLK",
+		"VA DMIC0", "MIC BIAS3",
+		"VA DMIC1", "MIC BIAS3",
+		"VA DMIC2", "MIC BIAS1",
+		"VA DMIC3", "MIC BIAS1",
+		"VA DMIC4", "MIC BIAS4",
+		"VA DMIC5", "MIC BIAS4",
+		"VA SWR_ADC0", "ADC1_OUTPUT",
+		"VA SWR_ADC1", "ADC2_OUTPUT",
+		"VA SWR_ADC2", "ADC3_OUTPUT",
+		"VA SWR_ADC3", "ADC4_OUTPUT",
+		"VA SWR_MIC0", "DMIC1_OUTPUT",
+		"VA SWR_MIC1", "DMIC2_OUTPUT",
+		"VA SWR_MIC2", "DMIC3_OUTPUT",
+		"VA SWR_MIC3", "DMIC4_OUTPUT",
+		"VA SWR_MIC4", "DMIC5_OUTPUT",
+		"VA SWR_MIC5", "DMIC6_OUTPUT",
+		"VA SWR_MIC6", "DMIC7_OUTPUT",
+		"VA SWR_MIC7", "DMIC8_OUTPUT";
 	qcom,msm-mbhc-hphl-swh = <1>;
 	qcom,msm-mbhc-gnd-swh = <1>;
 	qcom,cdc-dmic01-gpios = <&cdc_dmic01_gpios>;
 	qcom,cdc-dmic23-gpios = <&cdc_dmic23_gpios>;
 	qcom,cdc-dmic45-gpios = <&cdc_dmic45_gpios>;
-	asoc-codec  = <&stub_codec>, <&bolero>;
-	asoc-codec-names = "msm-stub-codec.1", "bolero_codec";
+	asoc-codec  = <&stub_codec>, <&bolero>, <&ext_disp_audio_codec>;
+	asoc-codec-names = "msm-stub-codec.1", "bolero_codec",
+			   "msm-ext-disp-audio-codec-rx";
 	qcom,wsa-max-devs = <2>;
 	qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
 			<&wsa881x_0213>, <&wsa881x_0214>;
@@ -366,4 +408,20 @@
 		qcom,codec-lpass-clk-id = <0x30D>;
 		#clock-cells = <1>;
 	};
+
+	clock_audio_va_1: va_core_clk {
+		compatible = "qcom,audio-ref-clk";
+		qcom,codec-ext-clk-src = <AUDIO_LPASS_MCLK>;
+		qcom,codec-lpass-ext-clk-freq = <19200000>;
+		qcom,codec-lpass-clk-id = <0x30B>;
+		#clock-cells = <1>;
+	};
+
+	clock_audio_va_2: va_npl_clk {
+		compatible = "qcom,audio-ref-clk";
+		qcom,codec-ext-clk-src = <AUDIO_LPASS_MCLK_8>;
+		qcom,codec-lpass-ext-clk-freq = <19200000>;
+		qcom,codec-lpass-clk-id = <0x310>;
+		#clock-cells = <1>;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-audio.dtsi b/arch/arm64/boot/dts/qcom/kona-audio.dtsi
index 604d389..430def3 100644
--- a/arch/arm64/boot/dts/qcom/kona-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-audio.dtsi
@@ -3,6 +3,8 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+#include <dt-bindings/clock/qcom,audio-ext-clk.h>
+
 #include "kona-lpi.dtsi"
 #include "msm-audio-lpass.dtsi"
 
@@ -25,8 +27,21 @@
 &audio_apr {
 	q6core: qcom,q6core-audio {
 		compatible = "qcom,q6core-audio";
+
+		lpass_core_hw_vote: vote_lpass_core_hw {
+			compatible = "qcom,audio-ref-clk";
+			qcom,codec-ext-clk-src = <AUDIO_LPASS_CORE_HW_VOTE>;
+			#clock-cells = <1>;
+		};
+
 		bolero: bolero-cdc {
 			compatible = "qcom,bolero-codec";
+			clock-names = "lpass_core_hw_vote";
+			clocks = <&lpass_core_hw_vote 0>;
+			bolero-clk-rsc-mngr {
+				compatible = "qcom,bolero-clk-rsc-mngr";
+			};
+
 			tx_macro: tx-macro@3220000 {
 				swr2: tx_swr_master {
 				};
diff --git a/arch/arm64/boot/dts/qcom/kona-bus.dtsi b/arch/arm64/boot/dts/qcom/kona-bus.dtsi
index 19edc89..717c829 100644
--- a/arch/arm64/boot/dts/qcom/kona-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-bus.dtsi
@@ -1309,6 +1309,15 @@
 			qcom,forwarding;
 		};
 
+		mas_alc: mas-alc {
+			cell-id = <MSM_BUS_MASTER_ALC>;
+			label = "mas-alc";
+			qcom,buswidth = <1>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mc_virt>;
+			qcom,bcms = <&bcm_alc>;
+		};
+
 		mas_qnm_mnoc_hf_display: mas-qnm-mnoc-hf_display {
 			cell-id = <MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY>;
 			label = "mas-qnm-mnoc-hf_display";
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
index 4e6df1f..434c601 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
@@ -38,7 +38,55 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <2856000>;
 		rgltr-max-voltage = <3104000>;
-		rgltr-load-current = <0>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_rear_aux: qcom,actuator1 {
+		cell-index = <1>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_wide: qcom,actuator4 {
+		cell-index = <4>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_tele: qcom,actuator5 {
+		cell-index = <5>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_uw: qcom,actuator6 {
+		cell-index = <6>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
 	};
 
 	eeprom_rear: qcom,eeprom0 {
@@ -54,7 +102,7 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -91,7 +139,7 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
@@ -115,6 +163,117 @@
 		clock-rates = <24000000>;
 	};
 
+	eeprom_triple_wide: qcom,eeprom4 {
+		cell-index = <4>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_triple_tele: qcom,eeprom5 {
+		cell-index = <1>;
+		compatible = "qcom,eeprom";
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_triple_uw: qcom,eeprom6 {
+		cell-index = <6>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_rst2>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_rst2>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
 	qcom,cam-sensor0 {
 		cell-index = <0>;
 		compatible = "qcom,cam-sensor";
@@ -123,6 +282,7 @@
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
 		actuator-src = <&actuator_rear>;
+		led-flash-src = <&led_flash_rear>;
 		eeprom-src = <&eeprom_rear>;
 		cam_vio-supply = <&pm8009_l7>;
 		cam_bob-supply = <&pm8150a_bob>;
@@ -166,6 +326,7 @@
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
 		eeprom-src = <&eeprom_rear_aux>;
+		led-flash-src = <&led_flash_rear_aux>;
 		cam_bob-supply = <&pm8150a_bob>;
 		cam_vdig-supply = <&pm8009_l2>;
 		cam_vio-supply = <&pm8009_l7>;
@@ -199,6 +360,137 @@
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
 	};
+
+	qcom,cam-sensor4 {
+		cell-index = <4>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		actuator-src = <&actuator_triple_wide>;
+		led-flash-src = <&led_flash_rear>;
+		eeprom-src = <&eeprom_triple_wide>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1104000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor5 {
+		cell-index = <5>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_triple_tele>;
+		actuator-src = <&actuator_triple_tele>;
+		led-flash-src = <&led_flash_rear_aux>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1200000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor6 {
+		cell-index = <6>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_triple_uw>;
+		actuator-src = <&actuator_triple_uw>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1056000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_rst2>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_rst2>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
 };
 
 &cam_cci1 {
@@ -215,13 +507,13 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
+				 &cam_sensor_active_rst2>;
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
+				 &cam_sensor_suspend_rst2>;
 		gpios = <&tlmm 96 0>,
 			<&tlmm 78 0>;
 		gpio-reset = <1>;
@@ -243,7 +535,7 @@
 		cell-index = <2>;
 		compatible = "qcom,cam-sensor";
 		csiphy-sd-index = <2>;
-		sensor-position-roll = <90>;
+		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <0>;
 		eeprom-src = <&eeprom_front>;
@@ -262,9 +554,9 @@
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
+				 &cam_sensor_active_rst2>;
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
+				 &cam_sensor_suspend_rst2>;
 		gpios = <&tlmm 96 0>,
 			<&tlmm 78 0>;
 		gpio-reset = <1>;
@@ -304,14 +596,12 @@
 		pinctrl-1 = <&cam_sensor_mclk3_suspend
 				 &cam_sensor_suspend_3>;
 		gpios = <&tlmm 97 0>,
-			<&tlmm 109 0>,
-			<&pm8009_gpios 1 0>;
+			<&tlmm 109 0>;
 		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1 2>;
-		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
 		gpio-req-tbl-label = "CAMIF_MCLK3",
-					"CAM_RESET3",
-					"TOF_VDD_EN";
+					"CAM_RESET3";
 		sensor-mode = <0>;
 		cci-master = <1>;
 		status = "ok";
@@ -321,4 +611,3 @@
 		clock-rates = <24000000>;
 	};
 };
-
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
index 4e6df1f..120fcc1 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
@@ -38,7 +38,55 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <2856000>;
 		rgltr-max-voltage = <3104000>;
-		rgltr-load-current = <0>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_rear_aux: qcom,actuator1 {
+		cell-index = <1>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_wide: qcom,actuator4 {
+		cell-index = <4>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_tele: qcom,actuator5 {
+		cell-index = <5>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_uw: qcom,actuator6 {
+		cell-index = <6>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
 	};
 
 	eeprom_rear: qcom,eeprom0 {
@@ -54,7 +102,7 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -91,7 +139,7 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
@@ -115,6 +163,117 @@
 		clock-rates = <24000000>;
 	};
 
+	eeprom_triple_wide: qcom,eeprom4 {
+		cell-index = <4>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_triple_tele: qcom,eeprom5 {
+		cell-index = <1>;
+		compatible = "qcom,eeprom";
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_triple_uw: qcom,eeprom6 {
+		cell-index = <6>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_rst2>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_rst2>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
 	qcom,cam-sensor0 {
 		cell-index = <0>;
 		compatible = "qcom,cam-sensor";
@@ -123,6 +282,7 @@
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
 		actuator-src = <&actuator_rear>;
+		led-flash-src = <&led_flash_rear>;
 		eeprom-src = <&eeprom_rear>;
 		cam_vio-supply = <&pm8009_l7>;
 		cam_bob-supply = <&pm8150a_bob>;
@@ -166,6 +326,7 @@
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
 		eeprom-src = <&eeprom_rear_aux>;
+		led-flash-src = <&led_flash_rear_aux>;
 		cam_bob-supply = <&pm8150a_bob>;
 		cam_vdig-supply = <&pm8009_l2>;
 		cam_vio-supply = <&pm8009_l7>;
@@ -199,6 +360,137 @@
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
 	};
+
+	qcom,cam-sensor4 {
+		cell-index = <4>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		actuator-src = <&actuator_triple_wide>;
+		led-flash-src = <&led_flash_rear>;
+		eeprom-src = <&eeprom_triple_wide>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1104000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor5 {
+		cell-index = <5>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_triple_tele>;
+		actuator-src = <&actuator_triple_tele>;
+		led-flash-src = <&led_flash_rear_aux>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1200000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor6 {
+		cell-index = <6>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_triple_uw>;
+		actuator-src = <&actuator_triple_uw>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1056000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_rst2>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_rst2>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
 };
 
 &cam_cci1 {
@@ -215,13 +507,13 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
+				 &cam_sensor_active_rst2>;
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
+				 &cam_sensor_suspend_rst2>;
 		gpios = <&tlmm 96 0>,
 			<&tlmm 78 0>;
 		gpio-reset = <1>;
@@ -243,7 +535,7 @@
 		cell-index = <2>;
 		compatible = "qcom,cam-sensor";
 		csiphy-sd-index = <2>;
-		sensor-position-roll = <90>;
+		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <0>;
 		eeprom-src = <&eeprom_front>;
@@ -262,9 +554,9 @@
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
+				 &cam_sensor_active_rst2>;
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
+				 &cam_sensor_suspend_rst2>;
 		gpios = <&tlmm 96 0>,
 			<&tlmm 78 0>;
 		gpio-reset = <1>;
@@ -304,14 +596,12 @@
 		pinctrl-1 = <&cam_sensor_mclk3_suspend
 				 &cam_sensor_suspend_3>;
 		gpios = <&tlmm 97 0>,
-			<&tlmm 109 0>,
-			<&pm8009_gpios 1 0>;
+			<&tlmm 109 0>;
 		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1 2>;
-		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
 		gpio-req-tbl-label = "CAMIF_MCLK3",
-					"CAM_RESET3",
-					"TOF_VDD_EN";
+					"CAM_RESET3";
 		sensor-mode = <0>;
 		cci-master = <1>;
 		status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
new file mode 100644
index 0000000..01f5771
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+&soc {
+	led_flash_rear: qcom,camera-flash0 {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm8150l_flash0 &pm8150l_flash1>;
+		torch-source = <&pm8150l_torch0 &pm8150l_torch1>;
+		switch-source = <&pm8150l_switch2>;
+		status = "ok";
+	};
+
+	led_flash_rear_aux: qcom,camera-flash1 {
+		cell-index = <1>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm8150l_flash0 &pm8150l_flash1>;
+		torch-source = <&pm8150l_torch0 &pm8150l_torch1>;
+		switch-source = <&pm8150l_switch2>;
+		status = "ok";
+	};
+
+	qcom,cam-res-mgr {
+		compatible = "qcom,cam-res-mgr";
+		status = "ok";
+	};
+};
+
+&cam_cci0 {
+	actuator_rear: qcom,actuator0 {
+		cell-index = <0>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	eeprom_rear: qcom,eeprom0 {
+		cell-index = <0>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_rear_aux: qcom,eeprom1 {
+		cell-index = <1>;
+		compatible = "qcom,eeprom";
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		actuator-src = <&actuator_rear>;
+		led-flash-src = <&led_flash_rear>;
+		eeprom-src = <&eeprom_rear>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1104000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_rear_aux>;
+		led-flash-src = <&led_flash_rear_aux>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1200000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
+
+&cam_cci1 {
+	eeprom_front: qcom,eeprom2 {
+		cell-index = <2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_rst2>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_rst2>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor2 {
+		cell-index = <2>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1056000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_rst2>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_rst2>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
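+	/*
+	 * No eeprom/actuator and a fixed 3.6 V vreg_tof rail below:
+	 * presumably the rear time-of-flight (ToF) module.
+	 */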
+	qcom,cam-sensor3 {
+		cell-index = <3>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <3>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vdig-supply = <&vreg_tof>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3600000 0>;
+		rgltr-max-voltage = <0 3600000 0>;
+		rgltr-load-current = <0 1200000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk3_active
+				 &cam_sensor_active_3>;
+		pinctrl-1 = <&cam_sensor_mclk3_suspend
+				 &cam_sensor_suspend_3>;
+		gpios = <&tlmm 97 0>,
+			<&tlmm 109 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK3",
+					"CAM_RESET3";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-camera.dtsi b/arch/arm64/boot/dts/qcom/kona-camera.dtsi
index d4abf24..d93c443 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera.dtsi
@@ -448,10 +448,10 @@
 				};
 
 				iova-mem-region-shared {
-					/* Shared region is 100MB long */
+					/* Shared region is 150MB long */
 					iova-region-name = "shared";
 					iova-region-start = <0x7400000>;
-					iova-region-len = <0x6400000>;
+					iova-region-len = <0x9600000>;
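+					/*
+					 * The map stays contiguous: shared ends at
+					 * 0x7400000 + 0x9600000 = 0x10a00000, where
+					 * secheap starts; qdss follows at 0x10b00000
+					 * and io at 0x10c00000.
+					 */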
 					iova-region-id = <0x1>;
 					status = "ok";
 				};
@@ -459,17 +459,17 @@
 				iova-mem-region-secondary-heap {
 					/* Secondary heap region is 1MB long */
 					iova-region-name = "secheap";
-					iova-region-start = <0xd800000>;
+					iova-region-start = <0x10a00000>;
 					iova-region-len = <0x100000>;
 					iova-region-id = <0x4>;
 					status = "ok";
 				};
 
 				iova-mem-region-io {
-					/* IO region is approximately 3 GB */
+					/* IO region is approximately 3.3 GB */
 					iova-region-name = "io";
-					iova-region-start = <0xda00000>;
-					iova-region-len = <0xace00000>;
+					iova-region-start = <0x10c00000>;
+					iova-region-len = <0xcf300000>;
 					iova-region-id = <0x3>;
 					status = "ok";
 				};
@@ -477,7 +477,7 @@
 				iova-mem-qdss-region {
 					/* QDSS region is approximately 1MB */
 					iova-region-name = "qdss";
-					iova-region-start = <0xd900000>;
+					iova-region-start = <0x10b00000>;
 					iova-region-len = <0x100000>;
 					iova-region-id = <0x5>;
 					qdss-phy-addr = <0x16790000>;
diff --git a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
index c8f902b..391ed85 100644
--- a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
@@ -3,11 +3,14 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
 
 #include "kona-pmic-overlay.dtsi"
 #include "kona-sde-display.dtsi"
 #include "kona-camera-sensor-cdp.dtsi"
 #include "kona-audio-overlay.dtsi"
+#include "kona-thermal-overlay.dtsi"
 
 &qupv3_se12_2uart {
 	status = "ok";
@@ -111,13 +114,42 @@
 			linux,can-disable;
 		};
 	};
+
+	qcom,qbt_handler {
+		compatible = "qcom,qbt-handler";
+		qcom,ipc-gpio = <&tlmm 23 0>;
+		qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+		status = "disabled";
+	};
+};
+
+&qupv3_se13_i2c {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	status = "ok";
+
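+	/* ST FTS touchscreen at i2c address 0x49; IRQ on TLMM GPIO 39, reset on GPIO 38 */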
+	st_fts@49 {
+		compatible = "st,fts";
+		reg = <0x49>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <39 0x2008>;
+		vdd-supply = <&pm8150a_l1>;
+		avdd-supply = <&pm8150_l13>;
+		pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+		pinctrl-0 = <&ts_active>;
+		pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+		st,irq-gpio = <&tlmm 39 0x2008>;
+		st,reset-gpio = <&tlmm 38 0x00>;
+		st,regulator_dvdd = "vdd";
+		st,regulator_avdd = "avdd";
+	};
 };
 
 &vendor {
 	bluetooth: bt_qca6390 {
 		compatible = "qca,qca6390";
 		pinctrl-names = "default";
-		pinctrl-0 = <&bt_en_active>;
+		pinctrl-0 = <&bt_en_sleep>;
 		qca,bt-reset-gpio = <&tlmm 21 0>; /* BT_EN */
 		qca,bt-vdd-aon-supply = <&pm8150_s6>;
 		qca,bt-vdd-dig-supply = <&pm8009_s2>;
@@ -129,6 +161,18 @@
 		qca,bt-vdd-rfa1-voltage-level = <1900000 1900000>;
 		qca,bt-vdd-rfa2-voltage-level = <1350000 1350000>;
 	};
+
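+	/*
+	 * GPIO-based VBUS/ID detection for the secondary USB controller;
+	 * consumed by &usb1 via the extcon handle below.
+	 */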
+	extcon_usb1: extcon_usb1 {
+		compatible = "linux,extcon-usb-gpio";
+		vbus-gpio = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
+		id-gpio = <&tlmm 91 GPIO_ACTIVE_HIGH>;
+		vbus-out-gpio = <&pm8150_gpios 9 GPIO_ACTIVE_HIGH>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb2_vbus_det_default
+			     &usb2_id_det_default
+			     &usb2_vbus_boost_default>;
+	};
 };
 
 &dsi_sw43404_amoled_cmd {
@@ -150,6 +194,114 @@
 	qcom,platform-reset-gpio = <&tlmm 75 0>;
 };
 
+&dsi_sw43404_amoled_fhd_plus_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <1023>;
+	qcom,mdss-brightness-max-level = <255>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-en-gpio = <&tlmm 60 0>;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-en-gpio = <&tlmm 60 0>;
+};
+
+&dsi_sharp_1080_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_nt35597_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_nt35597_truly_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_nt35695b_truly_fhd_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_nt35695b_truly_fhd_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_dsc_375_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_dsc_375_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
 &sde_dsi {
 	qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
 };
@@ -368,9 +520,9 @@
 		"IN2_HPHR", "HPHR_OUT",
 		"IN3_AUX", "AUX_OUT",
 		"TX SWR_ADC0", "ADC1_OUTPUT",
-		"TX SWR_ADC2", "ADC2_OUTPUT",
-		"TX SWR_ADC3", "ADC3_OUTPUT",
-		"TX SWR_ADC4", "ADC4_OUTPUT",
+		"TX SWR_ADC1", "ADC2_OUTPUT",
+		"TX SWR_ADC2", "ADC3_OUTPUT",
+		"TX SWR_ADC3", "ADC4_OUTPUT",
 		"TX SWR_DMIC0", "DMIC1_OUTPUT",
 		"TX SWR_DMIC1", "DMIC2_OUTPUT",
 		"TX SWR_DMIC2", "DMIC3_OUTPUT",
@@ -387,7 +539,28 @@
 		"RX_TX DEC2_INP", "TX DEC2 MUX",
 		"RX_TX DEC3_INP", "TX DEC3 MUX",
 		"SpkrLeft IN", "WSA_SPK1 OUT",
-		"SpkrRight IN", "WSA_SPK2 OUT";
+		"SpkrRight IN", "WSA_SPK2 OUT",
+		"VA_AIF1 CAP", "VA_SWR_CLK",
+		"VA_AIF2 CAP", "VA_SWR_CLK",
+		"VA_AIF3 CAP", "VA_SWR_CLK",
+		"VA DMIC0", "MIC BIAS3",
+		"VA DMIC1", "MIC BIAS3",
+		"VA DMIC2", "MIC BIAS1",
+		"VA DMIC3", "MIC BIAS1",
+		"VA DMIC4", "MIC BIAS4",
+		"VA DMIC5", "MIC BIAS4",
+		"VA SWR_ADC0", "ADC1_OUTPUT",
+		"VA SWR_ADC1", "ADC2_OUTPUT",
+		"VA SWR_ADC2", "ADC3_OUTPUT",
+		"VA SWR_ADC3", "ADC4_OUTPUT",
+		"VA SWR_MIC0", "DMIC1_OUTPUT",
+		"VA SWR_MIC1", "DMIC2_OUTPUT",
+		"VA SWR_MIC2", "DMIC3_OUTPUT",
+		"VA SWR_MIC3", "DMIC4_OUTPUT",
+		"VA SWR_MIC4", "DMIC5_OUTPUT",
+		"VA SWR_MIC5", "DMIC6_OUTPUT",
+		"VA SWR_MIC6", "DMIC7_OUTPUT",
+		"VA SWR_MIC7", "DMIC8_OUTPUT";
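+		/* VA macro capture paths reuse the ADC/DMIC outputs already routed to the TX macro above */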
 };
 
 &thermal_zones {
@@ -475,3 +648,25 @@
 		};
 	};
 };
+
+&sdhc_2 {
+	vdd-supply = <&pm8150a_l9>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8150a_l6>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
+
+	cd-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
+
+	status = "ok";
+};
+
+&usb1 {
+	extcon = <&extcon_usb1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
index 845a05e..6b80be3 100644
--- a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
@@ -19,7 +19,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				replicator0_out_tmc_etr: endpoint {
@@ -116,7 +115,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				tmc_etf_swao_out_replicator_swao: endpoint {
@@ -151,7 +149,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_swao_out_tmc_etf_swao: endpoint {
@@ -216,7 +213,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				tpda_swao_out_funnel_swao: endpoint {
@@ -335,7 +331,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_merg_out_funnel_swao: endpoint {
@@ -427,7 +422,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_in0_out_funnel_merg: endpoint {
@@ -470,7 +464,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_in1_out_funnel_merg: endpoint {
@@ -499,6 +492,107 @@
 		};
 	};
 
+	funnel_gpu: funnel@6902000 {
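+		/*
+		 * This funnel sits in the GPU power domain: the GPU clocks and
+		 * GDSC regulators below are proxy-voted so the trace path stays
+		 * powered while QDSS uses it.
+		 */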
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb908>;
+
+		reg = <0x6902000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-gpu";
+
+		clocks = <&clock_aop QDSS_CLK>,
+			<&clock_gpucc GPU_CC_CXO_CLK>,
+			<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+			<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+			<&clock_gpucc GPU_CC_CX_GMU_CLK>,
+			<&clock_gpucc GPU_CC_AHB_CLK>,
+			<&clock_cpucc L3_GPU_VOTE_CLK>;
+
+		clock-names = "apb_pclk",
+			"rbbmtimer_clk",
+			"mem_clk",
+			"mem_iface_clk",
+			"gmu_clk",
+			"gpu_cc_ahb",
+			"l3_vote";
+
+		qcom,proxy-clks = "rbbmtimer_clk",
+			"mem_clk",
+			"mem_iface_clk",
+			"gmu_clk",
+			"gpu_cc_ahb",
+			"l3_vote";
+
+		vddcx-supply = <&gpu_cx_gdsc>;
+		vdd-supply = <&gpu_gx_gdsc>;
+		regulator-names = "vddcx", "vdd";
+		qcom,proxy-regs = "vddcx", "vdd";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				funnel_gpu_out_tpda: endpoint {
+					remote-endpoint =
+					  <&tpda_in_funnel_gpu>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_gpu_in_tpdm_gpu: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&tpdm_gpu_out_funnel_gpu>;
+				};
+			};
+		};
+	};
+
+	tpdm_gpu: tpdm@6900000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x6900000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-gpu";
+
+		clocks = <&clock_aop QDSS_CLK>,
+			<&clock_gpucc GPU_CC_CXO_CLK>,
+			<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+			<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+			<&clock_gpucc GPU_CC_CX_GMU_CLK>,
+			<&clock_gpucc GPU_CC_AHB_CLK>,
+			<&clock_cpucc L3_GPU_VOTE_CLK>;
+		clock-names = "apb_pclk",
+			"rbbmtimer_clk",
+			"mem_clk",
+			"mem_iface_clk",
+			"gmu_clk",
+			"gpu_cc_ahb",
+			"l3_vote";
+
+		qcom,proxy-clks = "rbbmtimer_clk",
+			"mem_clk",
+			"mem_iface_clk",
+			"gmu_clk",
+			"gpu_cc_ahb",
+			"l3_vote";
+
+		vddcx-supply = <&gpu_cx_gdsc>;
+		vdd-supply = <&gpu_gx_gdsc>;
+		regulator-names = "vddcx", "vdd";
+		qcom,proxy-regs = "vddcx", "vdd";
+
+		port {
+			tpdm_gpu_out_funnel_gpu: endpoint {
+				remote-endpoint = <&funnel_gpu_in_tpdm_gpu>;
+			};
+		};
+	};
+
 	tpda: tpda@6004000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x000bb969>;
@@ -527,13 +621,12 @@
 				     <25 32>;
 		qcom,cmb-elem-size = <7 64>,
 				     <13 64>,
-				     <15 64>,
+				     <15 32>,
 				     <16 64>,
-				     <17 64>,
+				     <17 32>,
 				     <18 64>,
 				     <20 64>,
 				     <21 64>,
-				     <22 64>,
 				     <23 64>,
 				     <25 64>;
 
@@ -552,6 +645,15 @@
 			};
 
 			port@1 {
+				reg = <1>;
+				tpda_in_funnel_gpu: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_gpu_out_tpda>;
+				};
+			};
+
+			port@2 {
 				reg = <6>;
 				tpda_6_in_tpdm_venus: endpoint {
 					slave-mode;
@@ -560,7 +662,7 @@
 				};
 			};
 
-			port@2 {
+			port@3 {
 				reg = <7>;
 				tpda_7_in_tpdm_mdss: endpoint {
 					slave-mode;
@@ -569,7 +671,7 @@
 				};
 			};
 
-			port@3 {
+			port@4 {
 				reg = <9>;
 				tpda_9_in_tpdm_mm: endpoint {
 					slave-mode;
@@ -633,6 +735,33 @@
 			};
 
 			port@11 {
+				reg = <16>;
+				tpda_16_in_tpdm_npu: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_npu_out_tpda16>;
+				};
+			};
+
+			port@12 {
+				reg = <17>;
+				tpda_17_in_tpdm_npu_llm: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_npu_llm_out_tpda17>;
+				};
+			};
+
+			port@13 {
+				reg = <18>;
+				tpda_18_in_tpdm_npu_dpm: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_npu_dpm_out_tpda18>;
+				};
+			};
+
+			port@14 {
 				reg = <19>;
 				tpda_19_in_tpdm_dlct: endpoint {
 					slave-mode;
@@ -641,7 +770,7 @@
 				};
 			};
 
-			port@12 {
+			port@15 {
 				reg = <20>;
 				tpda_20_in_tpdm_ipcc: endpoint {
 					slave-mode;
@@ -650,7 +779,7 @@
 				};
 			};
 
-			port@13 {
+			port@16 {
 				reg = <21>;
 				tpda_in_tpdm_vsense: endpoint {
 					slave-mode;
@@ -659,7 +788,16 @@
 				};
 			};
 
-			port@14 {
+			port@17 {
+				reg = <22>;
+				tpda_in_tpdm_dcc: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_dcc_out_tpda>;
+				};
+			};
+
+			port@18 {
 				reg = <23>;
 				tpda_in_tpdm_prng: endpoint {
 					slave-mode;
@@ -668,7 +806,7 @@
 				};
 			};
 
-			port@15 {
+			port@19 {
 				reg = <24>;
 				tpda_in_tpdm_qm: endpoint {
 					slave-mode;
@@ -677,7 +815,7 @@
 				};
 			};
 
-			port@16 {
+			port@20 {
 				reg = <25>;
 				tpda_in_tpdm_pimem: endpoint {
 					slave-mode;
@@ -688,6 +826,26 @@
 		};
 	};
 
+	tpdm_dcc: tpdm@6870000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x6870000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-dcc";
+
+		qcom,hw-enable-check;
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		port {
+			tpdm_dcc_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_dcc>;
+			};
+		};
+	};
+
 	tpdm_vsense: tpdm@6840000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x000bb968>;
@@ -757,7 +915,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_lpass_out_funnel_dl_center: endpoint {
@@ -878,10 +1035,47 @@
 
 			port@1 {
 				reg = <0>;
-				funnel_dl_south_in_tpdm_dl_south: endpoint {
+				funnel_dl_south_in_tpda_dl_south: endpoint {
 					slave-mode;
 					remote-endpoint =
-					<&tpdm_dl_south_out_funnel_dl_south>;
+					<&tpda_dl_south_out_funnel_dl_south>;
+				};
+			};
+		};
+	};
+
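+	/* New TPDA (ATID 75) interposed between tpdm_dl_south and funnel_dl_south */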
+	tpda_dl_south: tpda@69c1000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb969>;
+		reg = <0x69c1000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-dl-south";
+
+		qcom,tpda-atid = <75>;
+		qcom,dsb-elem-size = <0 64>;
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_dl_south_out_funnel_dl_south: endpoint {
+					remote-endpoint =
+					<&funnel_dl_south_in_tpda_dl_south>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_dl_south_in_tpdm_dl_south: endpoint {
+					slave-mode;
+					remote-endpoint =
+					<&tpdm_dl_south_out_tpda_dl_south>;
 				};
 			};
 		};
@@ -899,9 +1093,9 @@
 		clock-names = "apb_pclk";
 
 		port {
-			tpdm_dl_south_out_funnel_dl_south: endpoint {
+			tpdm_dl_south_out_tpda_dl_south: endpoint {
 				remote-endpoint =
-					<&funnel_dl_south_in_tpdm_dl_south>;
+					<&tpda_dl_south_in_tpdm_dl_south>;
 			};
 		};
 	};
@@ -921,7 +1115,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_dl_north_out_funnel_in1: endpoint {
@@ -956,7 +1149,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_compute_out_funnel_dl_center: endpoint {
@@ -994,6 +1186,155 @@
 		};
 	};
 
+	tpdm_npu: tpdm@6c47000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb968>;
+		reg = <0x6c47000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-npu";
+
+		clocks = <&clock_aop QDSS_CLK>,
+			<&clock_gcc GCC_NPU_AXI_CLK>,
+			<&clock_gcc GCC_NPU_CFG_AHB_CLK>,
+			<&clock_npucc NPU_CC_XO_CLK>,
+			<&clock_npucc NPU_CC_CORE_CLK>,
+			<&clock_npucc NPU_CC_CORE_CLK_SRC>,
+			<&clock_npucc NPU_CC_ATB_CLK>;
+
+		clock-names = "apb_pclk",
+			"gcc_npu_axi_clk",
+			"gcc_npu_cfg_ahb_clk",
+			"npu_cc_xo_clk",
+			"npu_core_clk",
+			"npu_core_clk_src",
+			"npu_cc_atb_clk";
+
+		qcom,proxy-clks = "gcc_npu_axi_clk",
+			"gcc_npu_cfg_ahb_clk",
+			"npu_cc_xo_clk",
+			"npu_core_clk",
+			"npu_core_clk_src",
+			"npu_cc_atb_clk";
+
+		vdd-supply = <&npu_core_gdsc>;
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		qcom,proxy-regs = "vdd", "vdd_cx";
+
+		port {
+			tpdm_npu_out_funnel_npu: endpoint {
+				remote-endpoint = <&funnel_npu_in_tpdm_npu>;
+			};
+		};
+	};
+
+	tpdm_npu_llm: tpdm@6c40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb968>;
+		reg = <0x6c40000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-npu-llm";
+		clocks = <&clock_aop QDSS_CLK>,
+			<&clock_gcc GCC_NPU_AXI_CLK>,
+			<&clock_gcc GCC_NPU_CFG_AHB_CLK>,
+			<&clock_npucc NPU_CC_XO_CLK>,
+			<&clock_npucc NPU_CC_CORE_CLK>,
+			<&clock_npucc NPU_CC_CORE_CLK_SRC>,
+			<&clock_npucc NPU_CC_ATB_CLK>,
+			<&clock_npucc NPU_CC_LLM_CLK>,
+			<&clock_npucc NPU_CC_LLM_XO_CLK>,
+			<&clock_npucc NPU_CC_LLM_TEMP_CLK>,
+			<&clock_npucc NPU_CC_LLM_CURR_CLK>,
+			<&clock_npucc NPU_CC_DL_LLM_CLK>;
+
+		clock-names = "apb_pclk",
+			"gcc_npu_axi_clk",
+			"gcc_npu_cfg_ahb_clk",
+			"npu_cc_xo_clk",
+			"npu_core_clk",
+			"npu_core_clk_src",
+			"npu_cc_atb_clk",
+			"npu_cc_llm_clk",
+			"npu_cc_llm_xo_clk",
+			"npu_cc_llm_temp_clk",
+			"npu_cc_llm_curr_clk",
+			"npu_cc_dl_llm_clk";
+
+		qcom,proxy-clks = "gcc_npu_axi_clk",
+			"gcc_npu_cfg_ahb_clk",
+			"npu_cc_xo_clk",
+			"npu_core_clk",
+			"npu_core_clk_src",
+			"npu_cc_atb_clk",
+			"npu_cc_llm_clk",
+			"npu_cc_llm_xo_clk",
+			"npu_cc_llm_temp_clk",
+			"npu_cc_llm_curr_clk",
+			"npu_cc_dl_llm_clk";
+
+		vdd-supply = <&npu_core_gdsc>;
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		qcom,proxy-regs = "vdd", "vdd_cx";
+
+		port {
+			tpdm_npu_llm_out_funnel_npu: endpoint {
+				remote-endpoint = <&funnel_npu_in_tpdm_npu_llm>;
+			};
+		};
+	};
+
+	tpdm_npu_dpm: tpdm@6c41000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb968>;
+		reg = <0x6c41000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-npu-dpm";
+
+		clocks = <&clock_aop QDSS_CLK>,
+			<&clock_gcc GCC_NPU_AXI_CLK>,
+			<&clock_gcc GCC_NPU_CFG_AHB_CLK>,
+			<&clock_npucc NPU_CC_XO_CLK>,
+			<&clock_npucc NPU_CC_CORE_CLK>,
+			<&clock_npucc NPU_CC_CORE_CLK_SRC>,
+			<&clock_npucc NPU_CC_ATB_CLK>,
+			<&clock_npucc NPU_CC_DPM_CLK>,
+			<&clock_npucc NPU_CC_DPM_XO_CLK>,
+			<&clock_npucc NPU_CC_DL_DPM_CLK>;
+
+		clock-names = "apb_pclk",
+			"gcc_npu_axi_clk",
+			"gcc_npu_cfg_ahb_clk",
+			"npu_cc_xo_clk",
+			"npu_core_clk",
+			"npu_core_clk_src",
+			"npu_cc_atb_clk",
+			"npu_cc_dpm_clk",
+			"npu_cc_dpm_xo_clk",
+			"npu_cc_dl_dpm_clk";
+
+		qcom,proxy-clks = "gcc_npu_axi_clk",
+			"gcc_npu_cfg_ahb_clk",
+			"npu_cc_xo_clk",
+			"npu_core_clk",
+			"npu_core_clk_src",
+			"npu_cc_atb_clk",
+			"npu_cc_dpm_clk",
+			"npu_cc_dpm_xo_clk",
+			"npu_cc_dl_dpm_clk";
+
+		vdd-supply = <&npu_core_gdsc>;
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		qcom,proxy-regs = "vdd", "vdd_cx";
+
+		port {
+			tpdm_npu_dpm_out_funnel_npu: endpoint {
+				remote-endpoint = <&funnel_npu_in_tpdm_npu_dpm>;
+			};
+		};
+	};
+
 	funnel_dl_center: funnel@6c2d000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x000bb908>;
@@ -1009,18 +1350,8 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
-				tpda_dl_south_out_qatb_3: endpoint {
-					remote-endpoint =
-					    <&qatb_3_in_tpda_dl_south>;
-					source = <&tpdm_dl_south>;
-				};
-			};
-
-			port@1 {
-				reg = <0>;
 				tpdm_venus_out_tpda6: endpoint {
 					remote-endpoint =
 					    <&tpda_6_in_tpdm_venus>;
@@ -1028,7 +1359,7 @@
 				};
 			};
 
-			port@2 {
+			port@1 {
 				reg = <0>;
 				tpdm_mdss_out_tpda7: endpoint {
 					remote-endpoint =
@@ -1037,7 +1368,7 @@
 				};
 			};
 
-			port@3 {
+			port@2 {
 				reg = <0>;
 				tpdm_mm_out_tpda9: endpoint {
 					remote-endpoint =
@@ -1046,7 +1377,7 @@
 				};
 			};
 
-			port@4 {
+			port@3 {
 				reg = <0>;
 				funnel_dl_center_out_tpda_10: endpoint {
 					remote-endpoint =
@@ -1055,7 +1386,7 @@
 				};
 			};
 
-			port@5 {
+			port@4 {
 				reg = <0>;
 				tpdm_ddr_ch02_out_tpda11: endpoint {
 					remote-endpoint =
@@ -1064,7 +1395,7 @@
 				};
 			};
 
-			port@6 {
+			port@5 {
 				reg = <0>;
 				tpdm_ddr_ch13_out_tpda12: endpoint {
 					remote-endpoint =
@@ -1073,7 +1404,7 @@
 				};
 			};
 
-			port@7 {
+			port@6 {
 				reg = <0>;
 				tpdm_ddr_out_tpda13: endpoint {
 					remote-endpoint =
@@ -1082,7 +1413,7 @@
 				};
 			};
 
-			port@8 {
+			port@7 {
 				reg = <0>;
 				tpdm_turing_out_tpda14: endpoint {
 					remote-endpoint =
@@ -1091,7 +1422,7 @@
 				};
 			};
 
-			port@9 {
+			port@8 {
 				reg = <0>;
 				tpdm_llm_turing_out_tpda15: endpoint {
 					remote-endpoint =
@@ -1100,8 +1431,35 @@
 				};
 			};
 
+			port@9 {
+				reg = <0>;
+				tpdm_npu_out_tpda16: endpoint {
+					remote-endpoint =
+						<&tpda_16_in_tpdm_npu>;
+					source = <&tpdm_npu>;
+				};
+			};
+
 			port@10 {
 				reg = <0>;
+				tpdm_npu_llm_out_tpda17: endpoint {
+					remote-endpoint =
+						<&tpda_17_in_tpdm_npu_llm>;
+					source = <&tpdm_npu_llm>;
+				};
+			};
+
+			port@11 {
+				reg = <0>;
+				tpdm_npu_dpm_out_tpda18: endpoint {
+					remote-endpoint =
+					    <&tpda_18_in_tpdm_npu_dpm>;
+					source = <&tpdm_npu_dpm>;
+				};
+			};
+
+			port@12 {
+				reg = <0>;
 				tpdm_dlct_out_tpda19: endpoint {
 					remote-endpoint =
 					    <&tpda_19_in_tpdm_dlct>;
@@ -1109,7 +1467,7 @@
 				};
 			};
 
-			port@11 {
+			port@13 {
 				reg = <0>;
 				tpdm_ipcc_out_tpda20: endpoint {
 					remote-endpoint =
@@ -1118,7 +1476,15 @@
 				};
 			};
 
-			port@12 {
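+			/*
+			 * QATB port 3 is now fed by funnel_dl_center; the old
+			 * tpda_dl_south link moved behind funnel_dl_south.
+			 */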
+			port@14 {
+				reg = <0>;
+				funnel_dl_center_out_qatb3: endpoint {
+					remote-endpoint =
+					<&qatb3_in_funnel_dl_center>;
+				};
+			};
+
+			port@15 {
 				reg = <2>;
 				funnel_dl_center_in_funnel_dl_mm: endpoint {
 					slave-mode;
@@ -1127,7 +1493,7 @@
 				};
 			};
 
-			port@13 {
+			port@16 {
 				reg = <3>;
 				funnel_dl_center_in_funnel_lpass: endpoint {
 					slave-mode;
@@ -1136,7 +1502,7 @@
 				};
 			};
 
-			port@14 {
+			port@17 {
 				reg = <4>;
 				funnel_dl_center_in_funnel_ddr_0: endpoint {
 					slave-mode;
@@ -1145,7 +1511,7 @@
 				};
 			};
 
-			port@15 {
+			port@18 {
 				reg = <5>;
 				funnel_dl_center_in_funnel_compute: endpoint {
 					slave-mode;
@@ -1154,7 +1520,7 @@
 				};
 			};
 
-			port@16 {
+			port@19 {
 				reg = <6>;
 				funnel_center_in_tpdm_dlct: endpoint {
 					slave-mode;
@@ -1163,7 +1529,7 @@
 				};
 			};
 
-			port@17 {
+			port@20 {
 				reg = <7>;
 				funnel_center_in_tpdm_ipcc: endpoint {
 					slave-mode;
@@ -1385,7 +1751,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_dl_mm_out_funnel_dl_center: endpoint {
@@ -1438,7 +1803,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_venus_out_funnel_dl_mm: endpoint {
@@ -1527,13 +1891,37 @@
 
 		coresight-name = "coresight-funnel-npu";
 
-		clocks = <&clock_aop QDSS_CLK>;
-		clock-names = "apb_pclk";
+		clocks = <&clock_aop QDSS_CLK>,
+			<&clock_gcc GCC_NPU_AXI_CLK>,
+			<&clock_gcc GCC_NPU_CFG_AHB_CLK>,
+			<&clock_npucc NPU_CC_XO_CLK>,
+			<&clock_npucc NPU_CC_CORE_CLK>,
+			<&clock_npucc NPU_CC_CORE_CLK_SRC>,
+			<&clock_npucc NPU_CC_ATB_CLK>;
+
+		clock-names = "apb_pclk",
+			"gcc_npu_axi_clk",
+			"gcc_npu_cfg_ahb_clk",
+			"npu_cc_xo_clk",
+			"npu_core_clk",
+			"npu_core_clk_src",
+			"npu_cc_atb_clk";
+
+		qcom,proxy-clks = "gcc_npu_axi_clk",
+			"gcc_npu_cfg_ahb_clk",
+			"npu_cc_xo_clk",
+			"npu_core_clk",
+			"npu_core_clk_src",
+			"npu_cc_atb_clk";
+
+		vdd-supply = <&npu_core_gdsc>;
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		regulator-names = "vdd", "vdd_cx";
+		qcom,proxy-regs = "vdd", "vdd_cx";
 
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_npu_out_funnel_dl_compute: endpoint {
@@ -1543,6 +1931,33 @@
 			};
 
 			port@1 {
+				reg = <0>;
+				funnel_npu_in_tpdm_npu: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_npu_out_funnel_npu>;
+				};
+			};
+
+			port@2 {
+				reg = <1>;
+				funnel_npu_in_tpdm_npu_llm: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_npu_llm_out_funnel_npu>;
+				};
+			};
+
+			port@3 {
+				reg = <2>;
+				funnel_npu_in_tpdm_npu_dpm: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpdm_npu_dpm_out_funnel_npu>;
+				};
+			};
+
+			port@4 {
 				reg = <3>;
 				funnel_npu_in_npu_etm0: endpoint {
 					slave-mode;
@@ -1568,7 +1983,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_turing_out_funnel_dl_compute: endpoint {
@@ -1633,7 +2047,7 @@
 		reg = <0x6981000 0x1000>;
 		reg-names = "tpdm-base";
 
-		coresight-name = "coresight-tpdm-llm-turing";
+		coresight-name = "coresight-tpdm-turing-llm";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1661,7 +2075,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_ddr_0_out_funnel_dl_center: endpoint {
@@ -1714,7 +2127,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_ddr_ch02_out_funnel_ddr_0: endpoint {
@@ -1749,7 +2161,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_ddr_ch13_out_funnel_ddr_0: endpoint {
@@ -1846,7 +2257,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_qatb_out_funnel_in0: endpoint {
@@ -1866,10 +2276,10 @@
 
 			port@2 {
 				reg = <3>;
-				qatb_3_in_tpda_dl_south: endpoint {
+				qatb3_in_funnel_dl_center: endpoint {
 					slave-mode;
 					remote-endpoint =
-					    <&tpda_dl_south_out_qatb_3>;
+					    <&funnel_dl_center_out_qatb3>;
 				};
 			};
 		};
@@ -2383,6 +2793,193 @@
 		clock-names = "apb_pclk";
 	};
 
+	cti_gpu_m3: cti@6962000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6962000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-gpu_cortex_m3";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_gpu_isdb: cti@6961000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6961000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-gpu_isdb_cti";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_iris: cti@6831000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6831000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-iris_dl_cti";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_lpass: cti@6845000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6845000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-lpass_dl_cti";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_lpass_lpi: cti@6b21000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6b21000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-lpass_lpi_cti";
+		status = "disabled";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_lpass_q6: cti@6b2b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6b2b000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-lpass_q6_cti";
+		status = "disabled";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_mdss: cti@6c61000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6c61000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-mdss_dl_cti";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_npu_dl0: cti@6c42000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6c42000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-npu_dl_cti_0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_npu_dl1: cti@6c43000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6c43000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-npu_dl_cti_1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_npu: cti@6c4b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6c4b000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-npu_q6_cti";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_titan: cti@6c13000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6c13000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-sierra_a6_cti";
+		status = "disabled";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_sdc: cti@6b40000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6b40000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ssc_cortex_m3";
+		status = "disabled";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_ssc0: cti@6b4b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6b4b000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ssc_cti0_q6";
+		status = "disabled";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_ssc1: cti@6b41000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6b41000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ssc_cti1";
+		status = "disabled";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_ssc4: cti@6b4e000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6b4e000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ssc_cti_noc";
+		status = "disabled";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
 	cti0_swao:cti@6b00000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x000bb966>;
@@ -2395,6 +2992,78 @@
 		clock-names = "apb_pclk";
 	};
 
+	cti1_swao:cti@6b01000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6b01000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-swao_cti1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti2_swao:cti@6b02000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6b02000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-swao_cti2";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti3_swao:cti@6b03000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6b03000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-swao_cti3";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_turing:cti@6982000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6982000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-turing_dl_cti";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_turing_q6:cti@698b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x698b000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-turing_q6_cti";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_compute:cti@6c38000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6c38000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-compute_dl_cti";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
 	ipcb_tgu: tgu@6b0b000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x000bb999>;
@@ -2411,7 +3080,7 @@
 		clock-names = "apb_pclk";
 	};
 
-	turing_etm0 {
+	etm_turing: turing_etm0 {
 		compatible = "qcom,coresight-remote-etm";
 
 		coresight-name = "coresight-turing-etm0";
@@ -2419,8 +3088,8 @@
 
 		port {
 			turing_etm0_out_funnel_turing: endpoint {
-				remote-endpoint =
-					<&funnel_turing_in_turing_etm0>;
+			remote-endpoint =
+				<&funnel_turing_in_turing_etm0>;
 			};
 		};
 	};
@@ -2482,7 +3151,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_apss_merg_out_funnel_in1: endpoint {
@@ -2687,7 +3355,6 @@
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-
 			port@0 {
 				reg = <0>;
 				funnel_apss_out_funnel_apss_merg: endpoint {
diff --git a/arch/arm64/boot/dts/qcom/kona-cvp.dtsi b/arch/arm64/boot/dts/qcom/kona-cvp.dtsi
index d1c93ab..7092ba6 100644
--- a/arch/arm64/boot/dts/qcom/kona-cvp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-cvp.dtsi
@@ -20,16 +20,17 @@
 
 		/* Clocks */
 		clock-names =  "gcc_video_axi0",
-			"gcc_video_axi1", "cvp_clk";
+			"gcc_video_axi1", "cvp_clk", "core_clk";
 		clocks = <&clock_gcc GCC_VIDEO_AXI0_CLK>,
 			<&clock_gcc GCC_VIDEO_AXI1_CLK>,
-			<&clock_videocc VIDEO_CC_MVS1C_CLK>;
+			<&clock_videocc VIDEO_CC_MVS1C_CLK>,
+			<&clock_videocc VIDEO_CC_MVS1_CLK>;
 		qcom,proxy-clock-names = "gcc_video_axi0", "gcc_video_axi1",
-			"cvp_clk";
+			"cvp_clk", "core_clk";
 
-		qcom,clock-configs = <0x0 0x0 0x1>;
-		qcom,allowed-clock-rates = <403000000 520000000
-			549000000 666000000 800000000>;
+		qcom,clock-configs = <0x0 0x0 0x1 0x1>;
+		qcom,allowed-clock-rates = <239999999 338000000
+			366000000 444000000>;
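+		/* VIDEO_CC_MVS1_CLK joins as scalable "core_clk" (config 0x1); allowed rates updated to match */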
 
 		/* Buses */
 		bus_cnoc {
diff --git a/arch/arm64/boot/dts/qcom/kona-gpu.dtsi b/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
index bef41c1..69df65a 100644
--- a/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
@@ -25,21 +25,6 @@
 	gpu_opp_table: gpu-opp-table {
 		compatible = "operating-points-v2";
 
-		opp-700000000 {
-			opp-hz = /bits/ 64 <700000000>;
-			opp-microvolt = <RPMH_REGULATOR_LEVEL_TURBO>;
-		};
-
-		opp-670000000 {
-			opp-hz = /bits/ 64 <670000000>;
-			opp-microvolt = <RPMH_REGULATOR_LEVEL_NOM_L1>;
-		};
-
-		opp-625000000 {
-			opp-hz = /bits/ 64 <625000000>;
-			opp-microvolt = <RPMH_REGULATOR_LEVEL_NOM>;
-		};
-
 		opp-480000000 {
 			opp-hz = /bits/ 64 <480000000>;
 			opp-microvolt = <RPMH_REGULATOR_LEVEL_SVS_L1>;
@@ -61,17 +46,18 @@
 		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
 		status = "ok";
 		reg = <0x3d00000 0x40000>, <0x3d61000 0x800>,
-			<0x3de0000 0x10000>;
-		reg-names = "kgsl_3d0_reg_memory", "cx_dbgc", "rscc";
-		interrupts = <0 300 IRQ_TYPE_NONE>;
+			<0x3de0000 0x10000>, <0x3d8b000 0x2000>;
+		reg-names = "kgsl_3d0_reg_memory", "cx_dbgc", "rscc",
+				"isense_cntl";
+		interrupts = <0 300 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "kgsl_3d0_irq";
 		qcom,id = <0>;
 
 		qcom,chipid = <0x06050000>;
 
-		qcom,initial-pwrlevel = <5>;
+		qcom,initial-pwrlevel = <2>;
 
-		qcom,idle-timeout = <1000000>; /* msecs */
+		qcom,idle-timeout = <80>; /* msecs */
 
 		qcom,no-nap;
 
@@ -105,7 +91,7 @@
 
 		/* Bus Scale Settings */
 		qcom,gpubw-dev = <&gpubw>;
-		//qcom,bus-control;
+		qcom,bus-control;
 		qcom,msm-bus,name = "grp3d";
 		qcom,bus-width = <32>;
 		qcom,msm-bus,num-cases = <13>;
@@ -199,54 +185,30 @@
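+			/*
+			 * Power levels track the trimmed OPP table above:
+			 * 480, 381 and 290 MHz plus the 0 MHz off level.
+			 */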
 
 			qcom,gpu-pwrlevel@0 {
 				reg = <0>;
-				qcom,gpu-freq = <700000000>;
-				qcom,bus-freq = <12>;
-				qcom,bus-min = <10>;
-				qcom,bus-max = <12>;
-			};
-
-			qcom,gpu-pwrlevel@1 {
-				reg = <1>;
-				qcom,gpu-freq = <670000000>;
-				qcom,bus-freq = <11>;
-				qcom,bus-min = <9>;
-				qcom,bus-max = <11>;
-			};
-
-			qcom,gpu-pwrlevel@2 {
-				reg = <2>;
-				qcom,gpu-freq = <625000000>;
-				qcom,bus-freq = <10>;
-				qcom,bus-min = <8>;
-				qcom,bus-max = <10>;
-			};
-
-			qcom,gpu-pwrlevel@3 {
-				reg = <3>;
 				qcom,gpu-freq = <480000000>;
 				qcom,bus-freq = <8>;
 				qcom,bus-min = <7>;
 				qcom,bus-max = <9>;
 			};
 
-			qcom,gpu-pwrlevel@4 {
-				reg = <4>;
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
 				qcom,gpu-freq = <381000000>;
 				qcom,bus-freq = <5>;
 				qcom,bus-min = <5>;
 				qcom,bus-max = <7>;
 			};
 
-			qcom,gpu-pwrlevel@5 {
-				reg = <5>;
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
 				qcom,gpu-freq = <290000000>;
 				qcom,bus-freq = <4>;
 				qcom,bus-min = <3>;
 				qcom,bus-max = <5>;
 			};
 
-			qcom,gpu-pwrlevel@6 {
-				reg = <6>;
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
 				qcom,gpu-freq = <0>;
 				qcom,bus-freq = <0>;
 				qcom,bus-min = <0>;
@@ -262,14 +224,14 @@
 		/* CB5(ATOS) & CB5/6/7 are protected by HYP */
 		qcom,protect = <0xa0000 0xc000>;
 
-		clocks =<&clock_gcc GCC_GPU_CFG_AHB_CLK>,
-			<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
-			<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
-
-		clock-names = "iface_clk", "mem_clk", "mem_iface_clk";
+		clocks = <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+			<&clock_gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
+			<&clock_gpucc GPU_CC_AHB_CLK>;
+		clock-names = "gcc_gpu_memnoc_gfx",
+			"gcc_gpu_snoc_dvm_gfx",
+			"gpu_cc_ahb";
 
 		qcom,secure_align_mask = <0xfff>;
-		qcom,global_pt;
 		qcom,retention;
 		qcom,hyp_secure_alloc;
 
@@ -300,7 +262,8 @@
 			"kgsl_gmu_pdc_cfg",
 			"kgsl_gmu_pdc_seq";
 
-		interrupts = <0 304 IRQ_TYPE_NONE>, <0 305 IRQ_TYPE_NONE>;
+		interrupts = <0 304 IRQ_TYPE_LEVEL_HIGH>,
+						<0 305 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "kgsl_hfi_irq", "kgsl_gmu_irq";
 
 		qcom,msm-bus,name = "cnoc";
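The kona-gpu.dtsi changes drop the three highest GPU operating points (700, 670 and 625 MHz) and renumber the surviving power levels so their reg indices stay contiguous from 0, with qcom,initial-pwrlevel re-pointed from 5 to 2 to keep naming the same frequency tier. Each retained OPP follows the standard operating-points-v2 shape, one child node per frequency. A minimal sketch: the 480 MHz entry mirrors the patch, while the second entry is purely illustrative and assumes the RPMH regulator-level macros from <dt-bindings/regulator/qcom,rpmh-regulator.h>:

	gpu_opp_table: gpu-opp-table {
		compatible = "operating-points-v2";

		opp-480000000 {
			opp-hz = /bits/ 64 <480000000>;
			/* as in the patch, downstream kgsl stores an RPMh
			 * voltage corner here rather than literal microvolts
			 */
			opp-microvolt = <RPMH_REGULATOR_LEVEL_SVS_L1>;
		};

		opp-290000000 {
			opp-hz = /bits/ 64 <290000000>;
			opp-microvolt = <RPMH_REGULATOR_LEVEL_LOW_SVS>; /* assumed level */
		};
	};
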
diff --git a/arch/arm64/boot/dts/qcom/kona-ion.dtsi b/arch/arm64/boot/dts/qcom/kona-ion.dtsi
index 92c2de0..facc644 100644
--- a/arch/arm64/boot/dts/qcom/kona-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-ion.dtsi
@@ -42,5 +42,11 @@
 			memory-region = <&sp_mem>;
 			qcom,ion-heap-type = "HYP_CMA";
 		};
+
+		qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+			reg = <10>;
+			memory-region = <&secure_display_memory>;
+			qcom,ion-heap-type = "HYP_CMA";
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-lpi.dtsi b/arch/arm64/boot/dts/qcom/kona-lpi.dtsi
index 687ffe0..04b591c 100644
--- a/arch/arm64/boot/dts/qcom/kona-lpi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-lpi.dtsi
@@ -7,6 +7,7 @@
 	lpi_tlmm: lpi_pinctrl@33c0000 {
 		compatible = "qcom,lpi-pinctrl";
 		reg = <0x33c0000 0x0>;
+		qcom,slew-reg = <0x355a000 0x0>;
 		qcom,num-gpios = <14>;
 		gpio-controller;
 		#gpio-cells = <2>;
@@ -17,6 +18,16 @@
 				      <0x00008000>, <0x00009000>,
 				      <0x0000A000>, <0x0000B000>,
 				      <0x0000C000>, <0x0000D000>;
+		qcom,lpi-slew-offset-tbl = <0x00000000>, <0x00000002>,
+					   <0x00000004>, <0x00000008>,
+					   <0x0000000A>, <0x0000000C>,
+					   <0x00000000>, <0x00000000>,
+					   <0x00000000>, <0x00000000>,
+					   <0x00000010>, <0x00000012>,
+					   <0x00000000>, <0x00000000>;
+
+		clock-names = "lpass_core_hw_vote";
+		clocks = <&lpass_core_hw_vote 0>;
 
 		quat_mi2s_sck {
 			quat_mi2s_sck_sleep: quat_mi2s_sck_sleep {
@@ -1288,7 +1299,8 @@
 				config {
 					pins = "gpio10";
 					drive-strength = <2>;
-					bias-bus-hold;
+					input-enable;
+					bias-pull-down;
 				};
 			};
 
@@ -1301,7 +1313,8 @@
 				config {
 					pins = "gpio10";
 					drive-strength = <2>;
-					bias-bus-hold;
+					slew-rate = <1>;
+					bias-disable;
 				};
 			};
 		};
@@ -1315,8 +1328,9 @@
 
 				config {
 					pins = "gpio11";
-					drive-strength = <4>;
-					bias-bus-hold;
+					drive-strength = <2>;
+					input-enable;
+					bias-pull-down;
 				};
 			};
 
@@ -1328,7 +1342,8 @@
 
 				config {
 					pins = "gpio11";
-					drive-strength = <4>;
+					drive-strength = <2>;
+					slew-rate = <1>;
 					bias-bus-hold;
 				};
 			};
@@ -1338,12 +1353,13 @@
 			mux {
 				pins = "gpio0";
 				function = "func1";
+				input-enable;
+				bias-pull-down;
 			};
 
 			config {
 				pins = "gpio0";
 				drive-strength = <2>;
-				bias-bus-hold;
 			};
 		};
 
@@ -1355,8 +1371,9 @@
 
 			config {
 				pins = "gpio0";
-				drive-strength = <8>;
-				bias-bus-hold;
+				drive-strength = <2>;
+				slew-rate = <1>;
+				bias-disable;
 			};
 		};
 
@@ -1369,7 +1386,8 @@
 			config {
 				pins = "gpio1";
 				drive-strength = <2>;
-				bias-bus-hold;
+				input-enable;
+				bias-pull-down;
 			};
 		};
 
@@ -1381,7 +1399,8 @@
 
 			config {
 				pins = "gpio1";
-				drive-strength = <8>;
+				drive-strength = <2>;
+				slew-rate = <1>;
 				bias-bus-hold;
 			};
 		};
@@ -1395,7 +1414,8 @@
 			config {
 				pins = "gpio2";
 				drive-strength = <2>;
-				bias-bus-hold;
+				input-enable;
+				bias-pull-down;
 			};
 		};
 
@@ -1407,7 +1427,8 @@
 
 			config {
 				pins = "gpio2";
-				drive-strength = <8>;
+				drive-strength = <2>;
+				slew-rate = <1>;
 				bias-bus-hold;
 			};
 		};
@@ -1421,7 +1442,8 @@
 			config {
 				pins = "gpio3";
 				drive-strength = <2>;
-				bias-bus-hold;
+				input-enable;
+				bias-pull-down;
 			};
 		};
 
@@ -1433,8 +1455,9 @@
 
 			config {
 				pins = "gpio3";
-				drive-strength = <8>;
-				bias-bus-hold;
+				drive-strength = <2>;
+				slew-rate = <1>;
+				bias-disable;
 			};
 		};
 
@@ -1447,7 +1470,8 @@
 			config {
 				pins = "gpio4";
 				drive-strength = <2>;
-				bias-bus-hold;
+				input-enable;
+				bias-pull-down;
 			};
 		};
 
@@ -1459,7 +1483,8 @@
 
 			config {
 				pins = "gpio4";
-				drive-strength = <8>;
+				drive-strength = <2>;
+				slew-rate = <1>;
 				bias-bus-hold;
 			};
 		};
@@ -1473,7 +1498,8 @@
 			config {
 				pins = "gpio5";
 				drive-strength = <2>;
-				bias-bus-hold;
+				input-enable;
+				bias-pull-down;
 			};
 		};
 
@@ -1485,7 +1511,8 @@
 
 			config {
 				pins = "gpio5";
-				drive-strength = <8>;
+				drive-strength = <2>;
+				slew-rate = <1>;
 				bias-bus-hold;
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/kona-mhi.dtsi b/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
index 009fbce..8cd7629 100644
--- a/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
@@ -19,7 +19,7 @@
 		esoc-0 = <&mdm0>;
 
 		/* mhi bus specific settings */
-		mhi,max-channels = <106>;
+		mhi,max-channels = <110>;
 		mhi,timeout = <2000>;
 
 		mhi_channels: mhi_channels {
@@ -253,8 +253,8 @@
 			mhi_chan@25 {
 				reg = <25>;
 				label = "BL";
-				mhi,num-elements = <64>;
-				mhi,event-ring = <2>;
+				mhi,num-elements = <32>;
+				mhi,event-ring = <1>;
 				mhi,chan-dir = <2>;
 				mhi,data-type = <0>;
 				mhi,doorbell-mode = <2>;
@@ -330,7 +330,7 @@
 			mhi_chan@52 {
 				reg = <52>;
 				label = "SLPI_0";
-				mhi,event-ring = <4>;
+				mhi,event-ring = <5>;
 				mhi,chan-dir = <0>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -340,7 +340,7 @@
 			mhi_chan@53 {
 				reg = <53>;
 				label = "SLPI_1";
-				mhi,event-ring = <4>;
+				mhi,event-ring = <5>;
 				mhi,chan-dir = <0>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -350,7 +350,7 @@
 			mhi_chan@70 {
 				reg = <70>;
 				label = "ADSP_2";
-				mhi,event-ring = <5>;
+				mhi,event-ring = <4>;
 				mhi,chan-dir = <0>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -360,7 +360,7 @@
 			mhi_chan@71 {
 				reg = <71>;
 				label = "ADSP_3";
-				mhi,event-ring = <5>;
+				mhi,event-ring = <4>;
 				mhi,chan-dir = <0>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -387,11 +387,22 @@
 				mhi,offload-chan;
 			};
 
+			mhi_chan@80 {
+				reg = <80>;
+				label = "AUDIO_VOICE_0";
+				mhi,event-ring = <0>;
+				mhi,chan-dir = <0>;
+				mhi,ee = <0x4>;
+				mhi,data-type = <3>;
+				mhi,offload-chan;
+				status = "ok";
+			};
+
 			mhi_chan@100 {
 				reg = <100>;
 				label = "IP_HW0";
 				mhi,num-elements = <512>;
-				mhi,event-ring = <4>;
+				mhi,event-ring = <6>;
 				mhi,chan-dir = <1>;
 				mhi,data-type = <1>;
 				mhi,doorbell-mode = <3>;
@@ -403,12 +414,68 @@
 				reg = <101>;
 				label = "IP_HW0";
 				mhi,num-elements = <512>;
-				mhi,event-ring = <5>;
+				mhi,event-ring = <7>;
 				mhi,chan-dir = <2>;
-				mhi,data-type = <1>;
+				mhi,data-type = <4>;
 				mhi,doorbell-mode = <3>;
 				mhi,ee = <0x4>;
 			};
+
+			mhi_chan@102 {
+				reg = <102>;
+				label = "IP_HW_ADPL";
+				mhi,event-ring = <8>;
+				mhi,chan-dir = <2>;
+				mhi,data-type = <3>;
+				mhi,ee = <0x4>;
+				mhi,offload-chan;
+				mhi,lpm-notify;
+			};
+
+			mhi_chan@103 {
+				reg = <103>;
+				label = "IP_HW_QDSS";
+				mhi,num-elements = <128>;
+				mhi,event-ring = <9>;
+				mhi,chan-dir = <2>;
+				mhi,data-type = <0>;
+				mhi,doorbell-mode = <2>;
+				mhi,ee = <0x4>;
+			};
+
+			mhi_chan@104 {
+				reg = <104>;
+				label = "IP_HW0_RSC";
+				mhi,num-elements = <512>;
+				mhi,local-elements = <3078>;
+				mhi,event-ring = <7>;
+				mhi,chan-dir = <2>;
+				mhi,data-type = <5>;
+				mhi,doorbell-mode = <3>;
+				mhi,ee = <0x4>;
+				mhi,chan-type = <3>;
+			};
+
+			mhi_chan@107 {
+				reg = <107>;
+				label = "IP_HW_MHIP_1";
+				mhi,event-ring = <10>;
+				mhi,chan-dir = <1>;
+				mhi,data-type = <3>;
+				mhi,ee = <0x4>;
+				mhi,offload-chan;
+			};
+
+			mhi_chan@108 {
+				reg = <108>;
+				label = "IP_HW_MHIP_1";
+				mhi,event-ring = <11>;
+				mhi,chan-dir = <2>;
+				mhi,data-type = <3>;
+				mhi,ee = <0x4>;
+				mhi,offload-chan;
+				mhi,lpm-notify;
+			};
 		};
 
 		mhi_events: mhi_events {
@@ -450,6 +517,26 @@
 			};
 
 			mhi_event@4 {
+				mhi,num-elements = <512>;
+				mhi,intmod = <5>;
+				mhi,msi = <0>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@5 {
+				mhi,num-elements = <512>;
+				mhi,intmod = <5>;
+				mhi,msi = <0>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@6 {
 				mhi,num-elements = <1024>;
 				mhi,intmod = <5>;
 				mhi,msi = <5>;
@@ -457,11 +544,9 @@
 				mhi,priority = <1>;
 				mhi,brstmode = <3>;
 				mhi,hw-ev;
-				mhi,client-manage;
-				mhi,offload;
 			};
 
-			mhi_event@5 {
+			mhi_event@7 {
 				mhi,num-elements = <1024>;
 				mhi,intmod = <5>;
 				mhi,msi = <6>;
@@ -470,6 +555,51 @@
 				mhi,brstmode = <3>;
 				mhi,hw-ev;
 				mhi,client-manage;
+			};
+
+			mhi_event@8 {
+				mhi,num-elements = <0>;
+				mhi,intmod = <0>;
+				mhi,msi = <0>;
+				mhi,chan = <102>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,hw-ev;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@9 {
+				mhi,num-elements = <1024>;
+				mhi,intmod = <5>;
+				mhi,msi = <7>;
+				mhi,chan = <103>;
+				mhi,priority = <1>;
+				mhi,brstmode = <2>;
+				mhi,hw-ev;
+			};
+
+			mhi_event@10 {
+				mhi,num-elements = <0>;
+				mhi,intmod = <0>;
+				mhi,msi = <0>;
+				mhi,chan = <107>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,hw-ev;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@11 {
+				mhi,num-elements = <0>;
+				mhi,intmod = <0>;
+				mhi,msi = <0>;
+				mhi,chan = <108>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,hw-ev;
+				mhi,client-manage;
 				mhi,offload;
 			};
 		};
@@ -483,13 +613,23 @@
 				mhi,chan = "IP_HW0";
 				mhi,interface-name = "rmnet_mhi";
 				mhi,mru = <0x4000>;
+				mhi,chain-skb;
 			};
 
-			mhi_netdev_1: mhi_rmnet@1 {
+			mhi_rmnet@1 {
 				reg = <0x1>;
-				mhi,chan = "IP_HW_ADPL";
-				mhi,interface-name = "rmnet_mhi";
-				mhi,mru = <0x4000>;
+				mhi,chan = "IP_HW0_RSC";
+				mhi,mru = <0x8000>;
+				mhi,rsc-parent = <&mhi_netdev_0>;
+			};
+
+			mhi_qdss_dev_0 {
+				mhi,chan = "QDSS";
+				mhi,default-channel;
+			};
+
+			mhi_qdss_dev_1 {
+				mhi,chan = "IP_HW_QDSS";
 			};
 
 			mhi_qrtr {
@@ -500,13 +640,13 @@
 			mhi_subsys_adsp_0: mhi_dev@2 {
 				reg = <0x2>;
 				mhi,chan = "ADSP_0";
-				mhi,num-devices = <4>;
+				mhi,max-devices = <4>;
 			};
 
 			mhi_subsys_slpi_0: mhi_dev@3 {
 				reg = <0x3>;
 				mhi,chan = "SLPI_0";
-				mhi,num-devices = <4>;
+				mhi,max-devices = <4>;
 			};
 		};
 	};
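In kona-mhi.dtsi the channel table grows from 106 to 110 entries and the event rings are repacked: the two hardware IP_HW0 rings move from indices 4/5 to 6/7 so that 4/5 can become client-managed offload rings for the relocated ADSP_2/3 and SLPI_0/1 channels, and new rings 8 through 11 serve the added ADPL, QDSS and MHIP channels. The pairing is purely index based; each mhi_chan node names the completion ring it uses via mhi,event-ring, as in this illustrative fragment (channel number and sizes invented for the sketch; per the downstream convention, chan-dir 1 is host-to-device and 2 is device-to-host):

	mhi_chan@42 {
		reg = <42>;                /* channel number */
		label = "EXAMPLE";
		mhi,num-elements = <64>;   /* transfer-ring length */
		mhi,event-ring = <2>;      /* completions land on mhi_event@2 */
		mhi,chan-dir = <1>;        /* host-to-device */
		mhi,data-type = <0>;
		mhi,doorbell-mode = <2>;
		mhi,ee = <0x4>;            /* allowed execution environments */
	};
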
diff --git a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
index edaf412..67ae102 100644
--- a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
@@ -3,11 +3,14 @@
  *  Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
 
 #include "kona-pmic-overlay.dtsi"
 #include "kona-sde-display.dtsi"
 #include "kona-camera-sensor-mtp.dtsi"
 #include "kona-audio-overlay.dtsi"
+#include "kona-thermal-overlay.dtsi"
 
 &qupv3_se12_2uart {
 	status = "ok";
@@ -100,13 +103,46 @@
 			linux,can-disable;
 		};
 	};
+
+	qcom,qbt_handler {
+		compatible = "qcom,qbt-handler";
+		qcom,ipc-gpio = <&tlmm 23 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_home_default>;
+		qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+	};
+};
+
+&qupv3_se13_i2c {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	status = "ok";
+
+	st_fts@49 {
+		compatible = "st,fts";
+		reg = <0x49>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <39 0x2008>;
+		vdd-supply = <&pm8150a_l1>;
+		avdd-supply = <&pm8150_l13>;
+		pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+		pinctrl-0 = <&ts_active>;
+		pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+		st,irq-gpio = <&tlmm 39 0x2008>;
+		st,reset-gpio = <&tlmm 38 0x00>;
+		st,regulator_dvdd = "vdd";
+		st,regulator_avdd = "avdd";
+		st,x-flip = <1>;
+		st,y-flip = <1>;
+	};
 };
 
 &vendor {
 	bluetooth: bt_qca6390 {
 		compatible = "qca,qca6390";
 		pinctrl-names = "default";
-		pinctrl-0 = <&bt_en_active>;
+		pinctrl-0 = <&bt_en_sleep>;
 		qca,bt-reset-gpio = <&tlmm 21 0>; /* BT_EN */
 		qca,bt-vdd-aon-supply = <&pm8150_s6>;
 		qca,bt-vdd-dig-supply = <&pm8009_s2>;
@@ -122,7 +158,28 @@
 	kona_mtp_batterydata: qcom,battery-data {
 		qcom,batt-id-range-pct = <15>;
 		#include "fg-gen4-batterydata-alium-3600mah.dtsi"
+		#include "fg-gen4-batterydata-ascent-3450mah.dtsi"
 	};
+
+	extcon_usb1: extcon_usb1 {
+		compatible = "linux,extcon-usb-gpio";
+		vbus-gpio = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
+		id-gpio = <&tlmm 91 GPIO_ACTIVE_HIGH>;
+		vbus-out-gpio = <&pm8150_gpios 9 GPIO_ACTIVE_HIGH>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb2_vbus_det_default
+			     &usb2_id_det_default
+			     &usb2_vbus_boost_default>;
+	};
+};
+
+&vreg_hap_boost {
+	status = "ok";
+};
+
+&pm8150b_haptics {
+	vdd-supply = <&vreg_hap_boost>;
 };
 
 &pm8150b_vadc {
@@ -212,6 +269,24 @@
 	qcom,fg-esr-cal-temp-thresh = <10 40>;
 };
 
+&qupv3_se15_i2c {
+	#address-cells = <1>;
+	#size-cells = <0>;
+#include "smb1390.dtsi"
+};
+
+&smb1390 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_stat_default>;
+	status = "ok";
+};
+
+&smb1390_charger {
+	io-channels = <&pm8150b_vadc ADC_AMUX_THM2>;
+	io-channel-names = "cp_die_temp";
+	status = "ok";
+};
+
 &pm8150_vadc {
 	#address-cells = <1>;
 	#size-cells = <0>;
@@ -350,6 +425,52 @@
 	qcom,platform-reset-gpio = <&tlmm 75 0>;
 };
 
+&dsi_sw43404_amoled_fhd_plus_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <1023>;
+	qcom,mdss-brightness-max-level = <255>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_dsc_375_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_dsc_375_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
 &sde_dsi {
 	qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
 };
@@ -439,3 +560,25 @@
 		};
 	};
 };
+
+&sdhc_2 {
+	vdd-supply = <&pm8150a_l9>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8150a_l6>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
+
+	cd-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
+
+	status = "ok";
+};
+
+&usb1 {
+	extcon = <&extcon_usb1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-npu.dtsi b/arch/arm64/boot/dts/qcom/kona-npu.dtsi
index 685afca..bf79abb 100644
--- a/arch/arm64/boot/dts/qcom/kona-npu.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-npu.dtsi
@@ -41,17 +41,13 @@
 				<&clock_npucc NPU_CC_DPM_TEMP_CLK>,
 				<&clock_npucc NPU_CC_CAL_HM0_DPM_IP_CLK>,
 				<&clock_npucc NPU_CC_CAL_HM1_DPM_IP_CLK>,
-				<&clock_npucc NPU_CC_DSP_AHBS_CLK>,
-				<&clock_npucc NPU_CC_DSP_AHBM_CLK>,
-				<&clock_npucc NPU_CC_DSP_AXI_CLK>,
-				<&clock_npucc NPU_CC_DSP_BWMON_CLK>,
-				<&clock_npucc NPU_CC_DSP_BWMON_AHB_CLK>,
 				<&clock_npucc NPU_CC_ATB_CLK>,
 				<&clock_npucc NPU_CC_S2P_CLK>,
 				<&clock_npucc NPU_CC_BWMON_CLK>,
 				<&clock_npucc NPU_CC_CAL_HM0_PERF_CNT_CLK>,
 				<&clock_npucc NPU_CC_CAL_HM1_PERF_CNT_CLK>,
-				<&clock_npucc NPU_CC_BTO_CORE_CLK>;
+				<&clock_npucc NPU_CC_BTO_CORE_CLK>,
+				<&clock_npucc NPU_DSP_CORE_CLK_SRC>;
 		clock-names = "xo_clk",
 				"npu_core_clk",
 				"cal_hm0_clk",
@@ -74,17 +70,13 @@
 				"dpm_temp_clk",
 				"cal_hm0_dpm_ip_clk",
 				"cal_hm1_dpm_ip_clk",
-				"dsp_ahbs_clk",
-				"dsp_ahbm_clk",
-				"dsp_axi_clk",
-				"dsp_bwmon_clk",
-				"dsp_bwmon_ahb_clk",
 				"atb_clk",
 				"s2p_clk",
 				"bwmon_clk",
 				"cal_hm0_perf_cnt_clk",
 				"cal_hm1_perf_cnt_clk",
-				"bto_core_clk";
+				"bto_core_clk",
+				"dsp_core_clk_src";
 
 		vdd-supply = <&npu_core_gdsc>;
 		vdd_cx-supply = <&VDD_CX_LEVEL>;
@@ -98,44 +90,6 @@
 			initial-pwrlevel = <4>;
 			qcom,npu-pwrlevel@0 {
 				reg = <0>;
-				vreg = <0>;
-				clk-freq = <19200000
-					60000000
-					200000000
-					200000000
-					200000000
-					200000000
-					120000000
-					20000000
-					200000000
-					60000000
-					19200000
-					50000000
-					50000000
-					60000000
-					60000000
-					60000000
-					19200000
-					60000000
-					19200000
-					50000000
-					200000000
-					200000000
-					60000000
-					60000000
-					120000000
-					19200000
-					60000000
-					30000000
-					50000000
-					19200000
-					200000000
-					200000000
-					19200000>;
-			};
-
-			qcom,npu-pwrlevel@1 {
-				reg = <1>;
 				vreg = <1>;
 				clk-freq = <19200000
 					100000000
@@ -159,21 +113,17 @@
 					50000000
 					200000000
 					200000000
-					100000000
-					100000000
-					200000000
-					19200000
-					100000000
 					60000000
 					50000000
 					19200000
 					300000000
 					300000000
-					19200000>;
+					19200000
+					300000000>;
 			};
 
-			qcom,npu-pwrlevel@2 {
-				reg = <2>;
+			qcom,npu-pwrlevel@1 {
+				reg = <1>;
 				vreg = <2>;
 				clk-freq = <19200000
 					200000000
@@ -197,21 +147,17 @@
 					50000000
 					466000000
 					466000000
-					200000000
-					200000000
-					267000000
-					19200000
-					200000000
 					120000000
 					50000000
 					19200000
 					466000000
 					466000000
-					19200000>;
+					19200000
+					400000000>;
 			};
 
-			qcom,npu-pwrlevel@3 {
-				reg = <3>;
+			qcom,npu-pwrlevel@2 {
+				reg = <2>;
 				vreg = <3>;
 				clk-freq = <19200000
 					333000000
@@ -235,21 +181,17 @@
 					50000000
 					533000000
 					533000000
-					333000000
-					333000000
-					403000000
-					19200000
-					333000000
 					240000000
 					50000000
 					19200000
 					533000000
 					533000000
-					19200000>;
+					19200000
+					500000000>;
 			};
 
-			qcom,npu-pwrlevel@4 {
-				reg = <4>;
+			qcom,npu-pwrlevel@3 {
+				reg = <3>;
 				vreg = <4>;
 				clk-freq = <19200000
 					428000000
@@ -273,21 +215,17 @@
 					100000000
 					850000000
 					850000000
-					428000000
-					428000000
-					533000000
-					19200000
-					428000000
 					240000000
 					100000000
 					19200000
 					850000000
 					850000000
-					19200000>;
+					19200000
+					660000000>;
 			};
 
-			qcom,npu-pwrlevel@5 {
-				reg = <5>;
+			qcom,npu-pwrlevel@4 {
+				reg = <4>;
 				vreg = <6>;
 				clk-freq = <19200000
 					500000000
@@ -311,17 +249,13 @@
 					100000000
 					1000000000
 					1000000000
-					500000000
-					500000000
-					700000000
-					19200000
-					500000000
 					30000000
 					100000000
 					19200000
 					1000000000
 					1000000000
-					19200000>;
+					19200000
+					800000000>;
 			};
 		};
 	};
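The NPU power levels encode one clock rate per entry of clock-names, in order, so removing the five dsp_* clocks and appending dsp_core_clk_src deletes five mid-list rates from every level and appends a trailing one; the levels themselves are then renumbered 0 through 4 once the old lowest level is dropped. A cut-down sketch of the positional pairing, showing only three of the clocks, with rates taken from the patch's new level 0:

	clock-names = "xo_clk", "npu_core_clk", "dsp_core_clk_src";

	qcom,npu-pwrlevel@0 {
		reg = <0>;
		vreg = <1>;
		/* one rate per clock-names entry, same order */
		clk-freq = <19200000	/* xo_clk */
			    100000000	/* npu_core_clk */
			    300000000>;	/* dsp_core_clk_src */
	};
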
diff --git a/arch/arm64/boot/dts/qcom/kona-pcie.dtsi b/arch/arm64/boot/dts/qcom/kona-pcie.dtsi
index a13400a..694c5fc 100644
--- a/arch/arm64/boot/dts/qcom/kona-pcie.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pcie.dtsi
@@ -67,7 +67,7 @@
 			<&clock_gcc GCC_PCIE_0_CFG_AHB_CLK>,
 			<&clock_gcc GCC_PCIE_0_MSTR_AXI_CLK>,
 			<&clock_gcc GCC_PCIE_0_SLV_AXI_CLK>,
-			<&clock_gcc GCC_PCIE_MDM_CLKREF_EN>,
+			<&clock_gcc GCC_PCIE_WIFI_CLKREF_EN>,
 			<&clock_gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>,
 			<&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
 			<&clock_gcc GCC_PCIE0_PHY_REFGEN_CLK>,
@@ -307,7 +307,7 @@
 			<&clock_gcc GCC_PCIE_1_CFG_AHB_CLK>,
 			<&clock_gcc GCC_PCIE_1_MSTR_AXI_CLK>,
 			<&clock_gcc GCC_PCIE_1_SLV_AXI_CLK>,
-			<&clock_gcc GCC_PCIE_MDM_CLKREF_EN>,
+			<&clock_gcc GCC_PCIE_WIGIG_CLKREF_EN>,
 			<&clock_gcc GCC_PCIE_1_SLV_Q2A_AXI_CLK>,
 			<&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
 			<&clock_gcc GCC_PCIE1_PHY_REFGEN_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
index 6f4fc2a..417c45b 100644
--- a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
@@ -192,6 +192,51 @@
 			};
 		};
 
+		pmx_ts_active {
+			ts_active: ts_active {
+					mux {
+						pins = "gpio38", "gpio39";
+						function = "gpio";
+					};
+
+					config {
+						pins = "gpio38", "gpio39";
+						drive-strength = <8>;
+						bias-pull-up;
+					};
+			};
+		};
+
+		pmx_ts_int_suspend {
+			ts_int_suspend: ts_int_suspend {
+				mux {
+					pins = "gpio39";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio39";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		pmx_ts_reset_suspend {
+			ts_reset_suspend: ts_reset_suspend {
+				mux {
+					pins = "gpio38";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio38";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
 		ufs_dev_reset_assert: ufs_dev_reset_assert {
 			config {
 				pins = "ufs_reset";
@@ -667,12 +712,12 @@
 		pmx_sde: pmx_sde {
 			sde_dsi_active: sde_dsi_active {
 				mux {
-					pins = "gpio75";
+					pins = "gpio75", "gpio60";
 					function = "gpio";
 				};
 
 				config {
-					pins = "gpio75";
+					pins = "gpio75", "gpio60";
 					drive-strength = <8>;   /* 8 mA */
 					bias-disable = <0>;   /* no pull */
 				};
@@ -680,12 +725,12 @@
 
 			sde_dsi_suspend: sde_dsi_suspend {
 				mux {
-					pins = "gpio75";
+					pins = "gpio75", "gpio60";
 					function = "gpio";
 				};
 
 				config {
-					pins = "gpio75";
+					pins = "gpio75", "gpio60";
 					drive-strength = <2>;   /* 2 mA */
 					bias-pull-down;         /* PULL DOWN */
 				};
@@ -693,12 +738,12 @@
 
 			sde_dsi1_active: sde_dsi1_active {
 				mux {
-					pins = "gpio128";
+					pins = "gpio128", "gpio60";
 					function = "gpio";
 				};
 
 				config {
-					pins = "gpio128";
+					pins = "gpio128", "gpio60";
 					drive-strength = <8>;   /* 8 mA */
 					bias-disable = <0>;   /* no pull */
 				};
@@ -706,12 +751,12 @@
 
 			sde_dsi1_suspend: sde_dsi1_suspend {
 				mux {
-					pins = "gpio128";
+					pins = "gpio128", "gpio60";
 					function = "gpio";
 				};
 
 				config {
-					pins = "gpio128";
+					pins = "gpio128", "gpio60";
 					drive-strength = <2>;   /* 2 mA */
 					bias-pull-down;         /* PULL DOWN */
 				};
@@ -2222,8 +2267,8 @@
 			};
 		};
 
-		cam_sensor_active_front: cam_sensor_active_front {
-			/* RESET FRONT */
+		cam_sensor_active_rst2: cam_sensor_active_rst2 {
+			/* RESET 2 */
 			mux {
 				pins = "gpio78";
 				function = "gpio";
@@ -2236,8 +2281,8 @@
 			};
 		};
 
-		cam_sensor_suspend_front: cam_sensor_suspend_front {
-			/* RESET FRONT */
+		cam_sensor_suspend_rst2: cam_sensor_suspend_rst2 {
+			/* RESET 2 */
 			mux {
 				pins = "gpio78";
 				function = "gpio";
@@ -2479,7 +2524,7 @@
 			};
 		};
 
-		bt_en_active: bt_en_active {
+		bt_en_sleep: bt_en_sleep {
 			mux {
 			pins = "gpio21";
 			function = "gpio";
@@ -2488,11 +2533,40 @@
 			config {
 			pins = "gpio21";
 			drive-strength = <2>;
-			bias-pull-up;
+			output-low;
+			bias-pull-down;
 			};
 		};
 
-		/* QUPv3_0 North SE mappings */
+		/* QUPv3_0 North SE0 mappings */
+		qupv3_se0_i3c_pins: qupv3_se0_i3c_pins {
+			qupv3_se0_i3c_active: qupv3_se0_i3c_active {
+				mux {
+					pins = "gpio28", "gpio29";
+					function = "ibi_i3c";
+				};
+
+				config {
+					pins = "gpio28", "gpio29";
+					drive-strength = <16>;
+					bias-pull-up;
+				};
+			};
+
+			qupv3_se0_i3c_sleep: qupv3_se0_i3c_sleep {
+				mux {
+					pins = "gpio28", "gpio29";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio28", "gpio29";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
 		/* SE 0 pin mappings */
 		qupv3_se0_i2c_pins: qupv3_se0_i2c_pins {
 			qupv3_se0_i2c_active: qupv3_se0_i2c_active {
@@ -3806,5 +3880,14 @@
 				};
 			};
 		};
+
+		usb2_id_det_default: usb2_id_det_default {
+			config {
+				pins = "gpio91";
+				function = "gpio";
+				input-enable;
+				bias-pull-up;
+			};
+		};
 	};
 };
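The new pmx_ts_* entries follow the usual TLMM split: the pin controller exposes one labelled state node per configuration, each with a mux stanza selecting the function and a config stanza setting drive strength and bias, and the client device then binds those labels through pinctrl-names/pinctrl-N, where index N pairs with the Nth name. This is the consumer side as wired up in the kona-mtp.dtsi and kona-qrd.dtsi hunks elsewhere in this patch (note that a single state may compose several provider nodes):

	st_fts@49 {
		compatible = "st,fts";
		reg = <0x49>;
		/* state names are defined by the driver */
		pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
		pinctrl-0 = <&ts_active>;
		pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
	};
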
diff --git a/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi
index eff509c..634c688 100644
--- a/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi
@@ -56,9 +56,68 @@
 			power-source = <1>;
 		};
 	};
+
+	usb2_vbus_boost {
+		usb2_vbus_boost_default: usb2_vbus_boost_default {
+			pins = "gpio9";
+			function = "normal";
+			output-low;
+			power-source = <1>;	/* 1.8V input supply */
+		};
+	};
+
+	usb2_vbus_det {
+		usb2_vbus_det_default: usb2_vbus_det_default {
+			pins = "gpio10";
+			function = "normal";
+			input-enable;
+			bias-pull-down;
+			power-source = <1>;	/* 1.8V input supply */
+		};
+	};
+};
+
+&pm8150b_gpios {
+	qnovo_fet_ctrl {
+		qnovo_fet_ctrl_state1: qnovo_fet_ctrl_state1 {
+			pins = "gpio8";
+			function = "normal";
+			input-enable;
+			output-disable;
+			bias-disable;
+			power-source = <0>;
+		};
+
+		qnovo_fet_ctrl_state2: qnovo_fet_ctrl_state2 {
+			pins = "gpio8";
+			function = "normal";
+			input-enable;
+			output-disable;
+			bias-pull-down;
+			power-source = <0>;
+		};
+	};
+
+	smb_stat {
+		smb_stat_default: smb_stat_default {
+			pins = "gpio6";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			qcom,pull-up-strength = <PMIC_GPIO_PULL_UP_30>;
+			power-source = <0>;
+		};
+	};
+};
+
+&pm8150b_qnovo {
+	pinctrl-names = "q_state1", "q_state2";
+	pinctrl-0 = <&qnovo_fet_ctrl_state1>;
+	pinctrl-1 = <&qnovo_fet_ctrl_state2>;
 };
 
 &pm8150b_charger {
+	dpdm-supply = <&usb2_phy0>;
 	smb5_vconn: qcom,smb5-vconn {
 		regulator-name = "smb5-vconn";
 	};
@@ -74,6 +133,20 @@
 	vconn-supply = <&smb5_vconn>;
 };
 
+&pm8150b_gpios {
+	haptics_boost {
+		haptics_boost_default: haptics_boost_default {
+			pins = "gpio5";
+			function = "normal";
+			output-enable;
+			input-disable;
+			bias-disable;
+			qcom,drive-strength = <3>; /* high */
+			power-source = <1>; /* 1.8 V */
+		};
+	};
+};
+
 &soc {
 	vreg_tof: regulator-dbb1 {
 		compatible = "regulator-fixed";
@@ -84,4 +157,19 @@
 		startup-delay-us = <1000>;
 		enable-active-high;
 	};
+
+	vreg_hap_boost: regulator-haptics-boost {
+		compatible = "regulator-fixed";
+		regulator-name = "vdd_hap_boost";
+		gpio = <&pm8150b_gpios 5 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&haptics_boost_default>;
+		startup-delay-us = <1000>;
+		enable-active-high;
+		status = "disabled";
+	};
+};
+
+&usb0 {
+	extcon = <&pm8150b_pdphy>;
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
index 8d50d20..c08d2ba 100644
--- a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
@@ -3,9 +3,12 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+#include <dt-bindings/gpio/gpio.h>
 #include "kona-pmic-overlay.dtsi"
 #include "kona-sde-display.dtsi"
+#include "kona-camera-sensor-qrd.dtsi"
 #include "kona-audio-overlay.dtsi"
+#include "kona-thermal-overlay.dtsi"
 
 &vendor {
 	kona_qrd_batterydata: qcom,battery-data {
@@ -22,6 +25,10 @@
 	status = "ok";
 };
 
+&qupv3_se6_4uart {
+	status = "ok";
+};
+
 &dai_mi2s2 {
 	qcom,msm-mi2s-tx-lines = <1>;
 	pinctrl-names = "default", "sleep";
@@ -36,6 +43,8 @@
 	qcom,audio-routing =
 		"AMIC2", "MIC BIAS2",
 		"MIC BIAS2", "Analog Mic2",
+		"TX DMIC0", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic0",
 		"TX DMIC1", "MIC BIAS3",
 		"MIC BIAS3", "Digital Mic1",
 		"TX DMIC2", "MIC BIAS1",
@@ -48,7 +57,17 @@
 		"IN2_HPHR", "HPHR_OUT",
 		"IN3_AUX", "AUX_OUT",
 		"TX SWR_ADC0", "ADC1_OUTPUT",
-		"TX SWR_ADC2", "ADC2_OUTPUT",
+		"TX SWR_ADC1", "ADC2_OUTPUT",
+		"TX SWR_ADC2", "ADC3_OUTPUT",
+		"TX SWR_ADC3", "ADC4_OUTPUT",
+		"TX SWR_DMIC0", "DMIC1_OUTPUT",
+		"TX SWR_DMIC1", "DMIC2_OUTPUT",
+		"TX SWR_DMIC2", "DMIC3_OUTPUT",
+		"TX SWR_DMIC3", "DMIC4_OUTPUT",
+		"TX SWR_DMIC4", "DMIC5_OUTPUT",
+		"TX SWR_DMIC5", "DMIC6_OUTPUT",
+		"TX SWR_DMIC6", "DMIC7_OUTPUT",
+		"TX SWR_DMIC7", "DMIC8_OUTPUT",
 		"WSA SRC0_INP", "SRC0",
 		"WSA_TX DEC0_INP", "TX DEC0 MUX",
 		"WSA_TX DEC1_INP", "TX DEC1 MUX",
@@ -56,10 +75,31 @@
 		"RX_TX DEC1_INP", "TX DEC1 MUX",
 		"RX_TX DEC2_INP", "TX DEC2 MUX",
 		"RX_TX DEC3_INP", "TX DEC3 MUX",
-		"SpkrRight IN", "WSA_SPK2 OUT";
+		"SpkrRight IN", "WSA_SPK2 OUT",
+		"VA_AIF1 CAP", "VA_SWR_CLK",
+		"VA_AIF2 CAP", "VA_SWR_CLK",
+		"VA_AIF3 CAP", "VA_SWR_CLK",
+		"VA DMIC0", "MIC BIAS3",
+		"VA DMIC1", "MIC BIAS3",
+		"VA DMIC2", "MIC BIAS1",
+		"VA DMIC3", "MIC BIAS1",
+		"VA DMIC5", "MIC BIAS4",
+		"VA SWR_MIC0", "DMIC1_OUTPUT",
+		"VA SWR_MIC1", "DMIC2_OUTPUT",
+		"VA SWR_MIC2", "DMIC3_OUTPUT",
+		"VA SWR_MIC3", "DMIC4_OUTPUT",
+		"VA SWR_MIC4", "DMIC5_OUTPUT",
+		"VA SWR_MIC5", "DMIC6_OUTPUT",
+		"VA SWR_MIC6", "DMIC7_OUTPUT",
+		"VA SWR_MIC7", "DMIC8_OUTPUT",
+		"VA SWR_ADC1", "ADC2_OUTPUT";
 	qcom,wsa-max-devs = <1>;
 	qcom,wsa-devs = <&wsa881x_0212>, <&wsa881x_0214>;
 	qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
+
+	qcom,msm-mbhc-usbc-audio-supported = <1>;
+	qcom,msm-mbhc-hphl-swh = <0>;
+	qcom,msm-mbhc-gnd-swh = <0>;
 };
 
 &qupv3_se1_i2c {
@@ -85,6 +125,29 @@
 	};
 };
 
+&qupv3_se13_i2c {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	status = "ok";
+
+	st_fts@49 {
+		compatible = "st,fts";
+		reg = <0x49>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <39 0x2008>;
+		vdd-supply = <&pm8150a_l1>;
+		avdd-supply = <&pm8150_l13>;
+		pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+		pinctrl-0 = <&ts_active>;
+		pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+		st,irq-gpio = <&tlmm 39 0x2008>;
+		st,reset-gpio = <&tlmm 38 0x00>;
+		st,regulator_dvdd = "vdd";
+		st,regulator_avdd = "avdd";
+	};
+};
+
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v4";
 
@@ -132,6 +195,60 @@
 			linux,can-disable;
 		};
 	};
+
+	qcom,qbt_handler {
+		compatible = "qcom,qbt-handler";
+		qcom,ipc-gpio = <&tlmm 23 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_home_default>;
+		qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+	};
+};
+
+&vreg_hap_boost {
+	status = "ok";
+};
+
+&pm8150b_haptics {
+	qcom,vmax-mv = <1697>;
+	qcom,play-rate-us = <5882>;
+	vdd-supply = <&vreg_hap_boost>;
+
+	wf_0 {
+		/* CLICK */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+
+	wf_1 {
+		/* DOUBLE CLICK */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+
+	wf_2 {
+		/* TICK */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+
+	wf_3 {
+		/* THUD */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+
+	wf_4 {
+		/* POP */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+
+	wf_5 {
+		/* HEAVY CLICK */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
 };
 
 &pm8150b_vadc {
@@ -349,6 +466,25 @@
 	qcom,platform-reset-gpio = <&tlmm 75 0>;
 };
 
+&dsi_sw43404_amoled_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <1023>;
+	qcom,mdss-brightness-max-level = <255>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sw43404_amoled_fhd_plus_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <1023>;
+	qcom,mdss-brightness-max-level = <255>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
 &sde_dsi {
 	qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
 };
@@ -438,3 +574,45 @@
 		};
 	};
 };
+
+&sdhc_2 {
+	vdd-supply = <&pm8150a_l9>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8150a_l6>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
+
+	cd-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
+
+	status = "ok";
+};
+
+&vendor {
+	bluetooth: bt_qca6390 {
+		compatible = "qca,qca6390";
+		pinctrl-names = "default";
+		pinctrl-0 = <&bt_en_sleep>;
+		qca,bt-reset-gpio = <&tlmm 21 0>; /* BT_EN */
+		qca,bt-vdd-aon-supply = <&pm8150_s6>;
+		qca,bt-vdd-dig-supply = <&pm8009_s2>;
+		qca,bt-vdd-rfa1-supply = <&pm8150_s5>;
+		qca,bt-vdd-rfa2-supply = <&pm8150a_s8>;
+
+		qca,bt-vdd-aon-voltage-level = <950000 950000>;
+		qca,bt-vdd-dig-voltage-level = <950000 952000>;
+		qca,bt-vdd-rfa1-voltage-level = <1900000 1900000>;
+		qca,bt-vdd-rfa2-voltage-level = <1350000 1350000>;
+	};
+};
+
+&usb0 {
+	dwc3@a600000 {
+		maximum-speed = "high-speed";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi b/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
index 85e332c..a7df830 100644
--- a/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
@@ -17,6 +17,24 @@
 		qcom,iommu-dma = "bypass";
 	};
 
+	/* QUPV3_0_SE0 */
+	i3c0: i3c@980000 {
+		compatible = "qcom,geni-i3c";
+		reg = <0x980000 0x4000>;
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se0_i3c_active>;
+		pinctrl-1 = <&qupv3_se0_i3c_sleep>;
+		interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <3>;
+		#size-cells = <0>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
 	/* Debug UART Instance for RUMI platform */
 	qupv3_se2_2uart: qcom,qup_uart@988000 {
 		compatible = "qcom,msm-geni-console";
diff --git a/arch/arm64/boot/dts/qcom/kona-regulators.dtsi b/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
index c7bb1af..233b71a 100644
--- a/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
@@ -315,9 +315,12 @@
 			<RPMH_REGULATOR_MODE_LPM
 			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
+		proxy-supply = <&pm8150_l14>;
 		L14A: pm8150_l14: regulator-pm8150-l14 {
 			regulator-name = "pm8150_l14";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			qcom,proxy-consumer-enable;
+			qcom,proxy-consumer-current = <62000>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1880000>;
 			qcom,init-voltage = <1800000>;
@@ -683,9 +686,12 @@
 			<RPMH_REGULATOR_MODE_LPM
 			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
+		proxy-supply = <&pm8150a_l11>;
 		L11C: pm8150a_l11: regulator-pm8150a-l11 {
 			regulator-name = "pm8150a_l11";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			qcom,proxy-consumer-enable;
+			qcom,proxy-consumer-current = <857000>;
 			regulator-min-microvolt = <3104000>;
 			regulator-max-microvolt = <3304000>;
 			qcom,init-voltage = <3104000>;
diff --git a/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi b/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
index 9266d94..f6a5e98 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
@@ -5,8 +5,38 @@
 
 #include "dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi"
 #include "dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi"
+#include "dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi"
+#include "dsi-panel-sharp-dsc-4k-cmd.dtsi"
+#include "dsi-panel-sharp-dsc-4k-video.dtsi"
+#include "dsi-panel-sharp-1080p-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi"
+#include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
+#include "dsi-panel-nt35695b-truly-fhd-video.dtsi"
+#include "dsi-panel-sim-cmd.dtsi"
+#include "dsi-panel-sim-video.dtsi"
+#include "dsi-panel-sim-dsc375-cmd.dtsi"
+#include "dsi-panel-sim-dualmipi-cmd.dtsi"
+#include "dsi-panel-sim-dualmipi-video.dtsi"
+#include "dsi-panel-sim-dualmipi-dsc375-cmd.dtsi"
 #include <dt-bindings/clock/mdss-7nm-pll-clk.h>
 
+&tlmm {
+	display_panel_avdd_default: display_panel_avdd_default {
+		mux {
+			pins = "gpio61";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio61";
+			drive-strength = <8>;
+			bias-disable = <0>;
+			output-high;
+		};
+	};
+};
+
 &soc {
 	ext_disp: qcom,msm-ext-disp {
 		compatible = "qcom,msm-ext-disp";
@@ -41,6 +71,43 @@
 		};
 	};
 
+	dsi_panel_pwr_supply_avdd: dsi_panel_pwr_supply_avdd {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+
+		qcom,panel-supply-entry@1 {
+			reg = <1>;
+			qcom,supply-name = "avdd";
+			qcom,supply-min-voltage = <4600000>;
+			qcom,supply-max-voltage = <6000000>;
+			qcom,supply-enable-load = <100000>;
+			qcom,supply-disable-load = <100>;
+		};
+	};
+
+	display_panel_avdd: display_gpio_regulator@1 {
+		compatible = "regulator-fixed";
+		regulator-name = "display_panel_avdd";
+		regulator-min-microvolt = <5500000>;
+		regulator-max-microvolt = <5500000>;
+		regulator-enable-ramp-delay = <233>;
+		gpio = <&tlmm 61 0>;
+		enable-active-high;
+		regulator-boost-on;
+		pinctrl-names = "default";
+		pinctrl-0 = <&display_panel_avdd_default>;
+	};
+
 	sde_dsi: qcom,dsi-display-primary {
 		compatible = "qcom,dsi-display";
 		label = "primary";
@@ -64,6 +131,7 @@
 
 		vddio-supply = <&pm8150_l14>;
 		vdd-supply = <&pm8150a_l11>;
+		avdd-supply = <&display_panel_avdd>;
 
 		qcom,mdp = <&mdss_mdp>;
 		qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
@@ -89,10 +157,12 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_wb &sde_dsi>;
+	connectors = <&sde_dp &sde_wb &sde_dsi>;
 };
 
+/* PHY TIMINGS REVISION W */
 &dsi_sw43404_amoled_cmd {
+	qcom,ulps-enabled;
 	qcom,mdss-dsi-display-timings {
 		timing@0 {
 			qcom,mdss-dsi-panel-phy-timings = [00 14 05 05 1f 1e 05
@@ -113,3 +183,230 @@
 		};
 	};
 };
+
+&dsi_sw43404_amoled_fhd_plus_cmd {
+	qcom,ulps-enabled;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 12 04 04 1e 1e 04
+				05 02 03 04 00 11 14];
+			qcom,display-topology = <2 2 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sharp_4k_dsc_cmd {
+	qcom,ulps-enabled;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22 08
+				08 05 02 04 00 19 18];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22 08
+				08 05 02 04 00 19 18];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sharp_1080_cmd {
+	qcom,ulps-enabled;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1E 08 08 24 22 08
+				08 05 02 04 00 19 18];
+			qcom,display-topology = <1 0 1>;
+			qcom,default-topology-index = <0>;
+			qcom,mdss-dsi-panel-clockrate = <900000000>;
+		};
+	};
+};
+
+&dsi_dual_nt35597_truly_cmd {
+	qcom,ulps-enabled;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_dual_nt35597_truly_video {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_nt35695b_truly_fhd_cmd {
+	qcom,ulps-enabled;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22
+				08 08 05 02 04 00 19 17];
+			qcom,display-topology = <1 0 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_nt35695b_truly_fhd_video {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22
+				08 08 05 02 04 00 19 17];
+			qcom,display-topology = <1 0 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sim_cmd {
+	qcom,ulps-enabled;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <1 1 1>,
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <720 40 720 40 720 40>;
+			qcom,partial-update-enabled = "single_roi";
+		};
+
+		timing@1 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <1 1 1>,
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <540 40 540 40 540 40>;
+			qcom,partial-update-enabled = "single_roi";
+		};
+
+		timing@2 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <1 1 1>,
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <360 40 360 40 360 40>;
+			qcom,partial-update-enabled = "single_roi";
+		};
+	};
+};
+
+&dsi_sim_vid {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <1 0 1>,
+						<2 0 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sim_dsc_375_cmd {
+	qcom,ulps-enabled;
+	qcom,mdss-dsi-display-timings {
+		timing@0 { /* 1080p */
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <1 1 1>;
+			qcom,default-topology-index = <0>;
+		};
+
+		timing@1 { /* qhd */
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22 08
+				08 05 02 04 00 19 18];
+			qcom,display-topology = <1 1 1>,
+						<2 2 1>, /* dsc merge */
+						<2 1 1>; /* 3d mux */
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_dual_sim_cmd {
+	qcom,ulps-enabled;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 24 09 09 26 24 09
+				09 06 02 04 00 18 17];
+			qcom,display-topology = <2 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+
+		timing@1 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <2 0 2>,
+						<1 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+
+		timing@2 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22 08
+				08 05 02 04 00 19 18];
+			qcom,display-topology = <2 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_dual_sim_vid {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <2 0 2>,
+						<1 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_dual_sim_dsc_375_cmd {
+	qcom,ulps-enabled;
+	qcom,mdss-dsi-display-timings {
+		timing@0 { /* qhd */
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+
+		timing@1 { /* 4k */
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22 08
+				08 05 02 04 00 19 18];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+
+		timing@2 { /* 5k */
+			qcom,mdss-dsi-panel-phy-timings = [00 46 13 14 33 30 12
+				14 0e 02 04 00 37 22];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
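Every panel timing added above carries a qcom,display-topology tuple; in the downstream SDE binding the three cells give the number of layer mixers, DSC compression encoders and DSI interfaces for that mode, which is why the patch's own comments flag <2 2 1> as "dsc merge" and <2 1 1> as "3d mux". An annotated sketch against a hypothetical panel label:

	&dsi_example_panel {
		qcom,mdss-dsi-display-timings {
			timing@0 {
				/* <LM DSC INTF>: dual mixer, DSC merge, one DSI port */
				qcom,display-topology = <2 2 1>;
				qcom,default-topology-index = <0>;
			};
		};
	};
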
diff --git a/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
index 6ee5fd6..373150d 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
@@ -5,7 +5,7 @@
 
 &soc {
 	mdss_dsi0_pll: qcom,mdss_dsi_pll@ae94900 {
-		compatible = "qcom,mdss_dsi_pll_7nm";
+		compatible = "qcom,mdss_dsi_pll_7nm_v4_1";
 		label = "MDSS DSI 0 PLL";
 		cell-index = <0>;
 		#clock-cells = <1>;
@@ -17,6 +17,8 @@
 		clock-names = "iface_clk";
 		clock-rate = <0>;
 		gdsc-supply = <&mdss_core_gdsc>;
+		qcom,dsi-pll-ssc-en;
+		qcom,dsi-pll-ssc-mode = "down-spread";
 		qcom,platform-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -32,7 +34,7 @@
 	};
 
 	mdss_dsi1_pll: qcom,mdss_dsi_pll@ae96900 {
-		compatible = "qcom,mdss_dsi_pll_7nm";
+		compatible = "qcom,mdss_dsi_pll_7nm_v4_1";
 		label = "MDSS DSI 1 PLL";
 		cell-index = <1>;
 		#clock-cells = <1>;
@@ -44,6 +46,8 @@
 		clock-names = "iface_clk";
 		clock-rate = <0>;
 		gdsc-supply = <&mdss_core_gdsc>;
+		qcom,dsi-pll-ssc-en;
+		qcom,dsi-pll-ssc-mode = "down-spread";
 		qcom,platform-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/kona-sde.dtsi b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
index 4924a2a..e28a2c5 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
@@ -74,7 +74,7 @@
 		qcom,sde-wb-size = <0x2c8>;
 		qcom,sde-wb-xin-id = <6>;
 		qcom,sde-wb-id = <2>;
-		qcom,sde-wb-clk-ctrl = <0x3b8 24>;
+		qcom,sde-wb-clk-ctrl = <0x2bc 16>;
 
 		qcom,sde-intf-off = <0x6b000 0x6b800
 					0x6c000 0x6c800>;
@@ -177,19 +177,21 @@
 
 		qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
 		qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+		qcom,sde-vbif-qos-cwb-remap = <3 3 4 4 5 5 6 3>;
+		qcom,sde-vbif-qos-lutdma-remap = <3 3 3 3 4 4 4 4>;
 
 		/* macrotile & macrotile-qseed has the same configs */
-		qcom,sde-danger-lut = <0x0000000f 0x0000ffff
+		qcom,sde-danger-lut = <0x000000ff 0x0000ffff
 			0x00000000 0x00000000 0x0000ffff>;
 
-		qcom,sde-safe-lut-linear = <0 0xfffc>;
+		qcom,sde-safe-lut-linear = <0 0xfff0>;
 		qcom,sde-safe-lut-macrotile = <0 0xff00>;
 		/* same as safe-lut-macrotile */
 		qcom,sde-safe-lut-macrotile-qseed = <0 0xff00>;
 		qcom,sde-safe-lut-nrt = <0 0xffff>;
 		qcom,sde-safe-lut-cwb = <0 0x3ff>;
 
-		qcom,sde-qos-lut-linear = <0 0x00112222 0x22223357>;
+		qcom,sde-qos-lut-linear = <0 0x00112222 0x22335777>;
 		qcom,sde-qos-lut-macrotile = <0 0x00112233 0x44556677>;
 		qcom,sde-qos-lut-macrotile-qseed = <0 0x00112233 0x66777777>;
 		qcom,sde-qos-lut-nrt = <0 0x00000000 0x00000000>;
@@ -205,8 +207,10 @@
 		qcom,sde-reg-dma-off = <0>;
 		qcom,sde-reg-dma-version = <0x00010002>;
 		qcom,sde-reg-dma-trigger-off = <0x119c>;
+		qcom,sde-reg-dma-xin-id = <7>;
+		qcom,sde-reg-dma-clk-ctrl = <0x2bc 20>;
 
-		qcom,sde-secure-sid-mask = <0x4200801>;
+		qcom,sde-secure-sid-mask = <0x821>;
 
 		qcom,sde-sspp-vig-blocks {
 			qcom,sde-vig-csc-off = <0x1a00>;
@@ -341,7 +345,6 @@
 			<&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
-			<&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
 			<&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>,
 			<&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK_SRC>,
@@ -350,7 +353,7 @@
 			<&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK>;
 		clock-names = "core_aux_clk", "core_usb_ref_clk_src",
 			"core_usb_pipe_clk", "link_clk", "link_iface_clk",
-			"crypto_clk", "pixel_clk_rcg", "pixel_parent",
+			"pixel_clk_rcg", "pixel_parent",
 			"pixel1_clk_rcg", "pixel1_parent",
 			"strm0_pixel_clk", "strm1_pixel_clk";
 
@@ -370,6 +373,10 @@
 
 		qcom,mst-enable;
 		qcom,widebus-enable;
+		qcom,dsc-feature-enable;
+		qcom,fec-feature-enable;
+		qcom,max-dp-dsc-blks = <2>;
+		qcom,max-dp-dsc-input-width-pixs = <2048>;
 
 		qcom,ctrl-supply-entries {
 			#address-cells = <1>;
@@ -472,6 +479,7 @@
 		      <0x0aeb8000 0x3000>;
 		reg-names = "mdp_phys",
 			"rot_vbif_phys";
+		status = "disabled";
 
 		#list-cells = <1>;
 
diff --git a/arch/arm64/boot/dts/qcom/kona-thermal-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-thermal-overlay.dtsi
new file mode 100644
index 0000000..8171492
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-thermal-overlay.dtsi
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+&mdss_mdp {
+	#cooling-cells = <2>;
+};
+
+&thermal_zones {
+	soc {
+		cooling-maps {
+			soc_cpu4 {
+				trip = <&soc_trip>;
+				cooling-device = <&cpu4_isolate 1 1>;
+			};
+
+			soc_cpu5 {
+				trip = <&soc_trip>;
+				cooling-device = <&cpu5_isolate 1 1>;
+			};
+
+			soc_cpu6 {
+				trip = <&soc_trip>;
+				cooling-device = <&cpu6_isolate 1 1>;
+			};
+
+			soc_cpu7 {
+				trip = <&soc_trip>;
+				cooling-device = <&cpu7_isolate 1 1>;
+			};
+		};
+	};
+
+	pm8150b-vbat-lvl0 {
+		cooling-maps {
+			vbat_cpu4 {
+				trip = <&vbat_lvl0>;
+				cooling-device = <&cpu4_isolate 1 1>;
+			};
+
+			vbat_cpu5 {
+				trip = <&vbat_lvl0>;
+				cooling-device = <&cpu5_isolate 1 1>;
+			};
+
+			vbat_gpu0 {
+				trip = <&vbat_lvl0>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+		};
+	};
+
+	pm8150b-vbat-lvl1 {
+		cooling-maps {
+			vbat_cpu6 {
+				trip = <&vbat_lvl1>;
+				cooling-device = <&cpu6_isolate 1 1>;
+			};
+
+			vbat_cpu7 {
+				trip = <&vbat_lvl1>;
+				cooling-device = <&cpu7_isolate 1 1>;
+			};
+
+			vbat_gpu1 {
+				trip = <&vbat_lvl1>;
+				cooling-device = <&msm_gpu 4 4>;
+			};
+		};
+	};
+
+	pm8150b-vbat-lvl2 {
+		cooling-maps {
+			vbat_gpu2 {
+				trip = <&vbat_lvl2>;
+				cooling-device = <&msm_gpu THERMAL_MAX_LIMIT
+							THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	pm8150b-ibat-lvl0 {
+		cooling-maps {
+			ibat_cpu4 {
+				trip = <&ibat_lvl0>;
+				cooling-device = <&cpu4_isolate 1 1>;
+			};
+
+			ibat_cpu5 {
+				trip = <&ibat_lvl0>;
+				cooling-device = <&cpu5_isolate 1 1>;
+			};
+
+			ibat_gpu0 {
+				trip = <&ibat_lvl0>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+		};
+	};
+
+	pm8150b-ibat-lvl1 {
+		cooling-maps {
+			ibat_cpu6 {
+				trip = <&ibat_lvl1>;
+				cooling-device = <&cpu6_isolate 1 1>;
+			};
+
+			ibat_cpu7 {
+				trip = <&ibat_lvl1>;
+				cooling-device = <&cpu7_isolate 1 1>;
+			};
+
+			ibat_gpu1 {
+				trip = <&ibat_lvl1>;
+				cooling-device = <&msm_gpu 4 4>;
+			};
+		};
+	};
+
+	pm8150l-vph-lvl0 {
+		disable-thermal-zone;
+		cooling-maps {
+			vph_cpu4 {
+				trip = <&vph_lvl0>;
+				cooling-device = <&cpu4_isolate 1 1>;
+			};
+
+			vph_cpu5 {
+				trip = <&vph_lvl0>;
+				cooling-device = <&cpu5_isolate 1 1>;
+			};
+
+			vph_gpu0 {
+				trip = <&vph_lvl0>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+		};
+	};
+
+	pm8150l-vph-lvl1 {
+		disable-thermal-zone;
+		cooling-maps {
+			vph_cpu6 {
+				trip = <&vph_lvl1>;
+				cooling-device = <&cpu6_isolate 1 1>;
+			};
+
+			vph_cpu7 {
+				trip = <&vph_lvl1>;
+				cooling-device = <&cpu7_isolate 1 1>;
+			};
+
+			vph_gpu1 {
+				trip = <&vph_lvl1>;
+				cooling-device = <&msm_gpu 4 4>;
+			};
+		};
+	};
+
+	pm8150l-vph-lvl2 {
+		disable-thermal-zone;
+		cooling-maps {
+			vph_gpu2 {
+				trip = <&vph_lvl2>;
+				cooling-device = <&msm_gpu THERMAL_MAX_LIMIT
+							THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+};
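The new kona-thermal-overlay.dtsi adds only cooling-maps, relying on trip labels (soc_trip, vbat_lvl*, ibat_lvl*, vph_lvl*) defined elsewhere and on the cpuN_isolate cooling devices added in the kona-thermal.dtsi hunk that follows. Because those devices advertise #cooling-cells = <2>, each cooling-device reference carries a minimum and a maximum cooling state: <&cpu4_isolate 1 1> pins the isolation device to state 1 while the trip is active, <&msm_gpu 2 2> holds the GPU at exactly level 2, and THERMAL_MAX_LIMIT requests the deepest available state. A self-contained sketch of the trip-to-map linkage, with the zone name, sensor and thresholds invented for illustration:

	example-zone {
		polling-delay-passive = <0>;
		polling-delay = <0>;
		thermal-sensors = <&qmi_sensor 0>;	/* hypothetical sensor */

		trips {
			example_trip: example-trip {
				temperature = <95000>;	/* millidegrees C */
				hysteresis = <5000>;
				type = "passive";
			};
		};

		cooling-maps {
			map0 {
				trip = <&example_trip>;
				/* <phandle min-state max-state> */
				cooling-device = <&cpu4_isolate 1 1>;
			};
		};
	};
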
diff --git a/arch/arm64/boot/dts/qcom/kona-thermal.dtsi b/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
index 34786e4..9102f4f 100644
--- a/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
@@ -5,6 +5,148 @@
 
 #include <dt-bindings/thermal/thermal.h>
 
+&cpufreq_hw {
+	qcom,cpu-isolation {
+		compatible = "qcom,cpu-isolate";
+		cpu0_isolate: cpu0-isolate {
+			qcom,cpu = <&CPU0>;
+			#cooling-cells = <2>;
+		};
+
+		cpu1_isolate: cpu1-isolate {
+			qcom,cpu = <&CPU1>;
+			#cooling-cells = <2>;
+		};
+
+		cpu2_isolate: cpu2-isolate {
+			qcom,cpu = <&CPU2>;
+			#cooling-cells = <2>;
+		};
+
+		cpu3_isolate: cpu3-isolate {
+			qcom,cpu = <&CPU3>;
+			#cooling-cells = <2>;
+		};
+
+		cpu4_isolate: cpu4-isolate {
+			qcom,cpu = <&CPU4>;
+			#cooling-cells = <2>;
+		};
+
+		cpu5_isolate: cpu5-isolate {
+			qcom,cpu = <&CPU5>;
+			#cooling-cells = <2>;
+		};
+
+		cpu6_isolate: cpu6-isolate {
+			qcom,cpu = <&CPU6>;
+			#cooling-cells = <2>;
+		};
+
+		cpu7_isolate: cpu7-isolate {
+			qcom,cpu = <&CPU7>;
+			#cooling-cells = <2>;
+		};
+	};
+};
+
+&soc {
+	qmi-tmd-devices {
+		compatible = "qcom,qmi-cooling-devices";
+
+		modem {
+			qcom,instance-id = <0x64>;
+
+			modem_pa: modem_pa {
+				qcom,qmi-dev-name = "pa";
+				#cooling-cells = <2>;
+			};
+
+			modem_proc: modem_proc {
+				qcom,qmi-dev-name = "modem";
+				#cooling-cells = <2>;
+			};
+
+			modem_current: modem_current {
+				qcom,qmi-dev-name = "modem_current";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin: modem_skin {
+				qcom,qmi-dev-name = "modem_skin";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin0: modem_skin0 {
+				qcom,qmi-dev-name = "modem_skin0";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin1: modem_skin1 {
+				qcom,qmi-dev-name = "modem_skin1";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin2: modem_skin2 {
+				qcom,qmi-dev-name = "modem_skin2";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin3: modem_skin3 {
+				qcom,qmi-dev-name = "modem_skin3";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw0: modem_mmw0 {
+				qcom,qmi-dev-name = "mmw0";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw1: modem_mmw1 {
+				qcom,qmi-dev-name = "mmw1";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw2: modem_mmw2 {
+				qcom,qmi-dev-name = "mmw2";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw3: modem_mmw3 {
+				qcom,qmi-dev-name = "mmw3";
+				#cooling-cells = <2>;
+			};
+
+			modem_bcl: modem_bcl {
+				qcom,qmi-dev-name = "vbatt_low";
+				#cooling-cells = <2>;
+			};
+		};
+	};
+
+	qmi_sensor: qmi-ts-sensors {
+		compatible = "qcom,qmi-sensors";
+		#thermal-sensor-cells = <1>;
+
+		modem {
+			qcom,instance-id = <100>;
+			qcom,qmi-sensor-names = "pa",
+						"pa_1",
+						"pa_2",
+						"qfe_pa0",
+						"qfe_wtr0",
+						"modem_tsens",
+						"qfe_mmw0",
+						"qfe_mmw1",
+						"qfe_mmw2",
+						"qfe_mmw3",
+						"xo_therm",
+						"qfe_pa_mdm",
+						"qfe_pa_wtr";
+		};
+	};
+};
+
 &thermal_zones {
 	aoss0-usr {
 		polling-delay-passive = <0>;
@@ -17,6 +159,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -31,6 +178,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -45,6 +197,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -59,6 +216,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -73,6 +235,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -87,6 +254,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -101,6 +273,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -115,6 +292,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -129,6 +311,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -143,6 +330,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -157,6 +349,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -171,6 +368,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -185,6 +387,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -199,6 +406,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -213,6 +425,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -227,6 +444,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -241,6 +463,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -255,6 +482,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -269,6 +501,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -283,6 +520,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -297,6 +539,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -311,6 +558,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -325,6 +577,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -339,6 +596,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -353,6 +615,396 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	gpuss-max-step {
+		polling-delay-passive = <10>;
+		polling-delay = <100>;
+		thermal-governor = "step_wise";
+		trips {
+			gpu_trip0: gpu-trip0 {
+				temperature = <95000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			gpu_cdev {
+				trip = <&gpu_trip0>;
+				cooling-device = <&msm_gpu THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+			};
+		};
+	};
+
+	apc-0-max-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		trips {
+			silver-trip {
+				temperature = <120000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc-1-max-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		trips {
+			gold-trip {
+				temperature = <120000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+	};
+
+	pop-mem-step {
+		polling-delay-passive = <10>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens1 3>;
+		thermal-governor = "step_wise";
+		trips {
+			pop_trip: pop-trip {
+				temperature = <95000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			pop_cdev4 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU4 THERMAL_NO_LIMIT
+						THERMAL_NO_LIMIT>;
+			};
+
+			pop_cdev7 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU7 THERMAL_NO_LIMIT
+						THERMAL_NO_LIMIT>;
+			};
+		};
+	};
+
+	cpu-0-0-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&tsens0 1>;
+		trips {
+			cpu00_config: cpu00-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu00_cdev {
+				trip = <&cpu00_config>;
+				cooling-device = <&cpu0_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-0-1-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&tsens0 2>;
+		trips {
+			cpu01_config: cpu01-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu01_cdev {
+				trip = <&cpu01_config>;
+				cooling-device = <&cpu1_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-0-2-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&tsens0 3>;
+		trips {
+			cpu02_config: cpu02-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu02_cdev {
+				trip = <&cpu02_config>;
+				cooling-device = <&cpu2_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-0-3-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 4>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu03_config: cpu03-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu03_cdev {
+				trip = <&cpu03_config>;
+				cooling-device = <&cpu3_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-0-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 7>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu10_config: cpu10-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu10_cdev {
+				trip = <&cpu10_config>;
+				cooling-device = <&cpu4_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-1-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 8>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu11_config: cpu11-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu11_cdev {
+				trip = <&cpu11_config>;
+				cooling-device = <&cpu5_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-2-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 9>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu12_config: cpu12-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu12_cdev {
+				trip = <&cpu12_config>;
+				cooling-device = <&cpu6_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-3-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 10>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu13_config: cpu13-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu13_cdev {
+				trip = <&cpu13_config>;
+				cooling-device = <&cpu7_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-4-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 11>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu14_config: cpu14-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu14_cdev {
+				trip = <&cpu14_config>;
+				cooling-device = <&cpu4_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-5-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 12>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu15_config: cpu15-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu15_cdev {
+				trip = <&cpu15_config>;
+				cooling-device = <&cpu5_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-6-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 13>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu16_config: cpu16-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu16_cdev {
+				trip = <&cpu16_config>;
+				cooling-device = <&cpu6_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-7-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 14>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu17_config: cpu17-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu17_cdev {
+				trip = <&cpu17_config>;
+				cooling-device = <&cpu7_isolate 1 1>;
+			};
+		};
+	};
+
+	modem-pa0-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 100>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	modem-pa1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 101>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	modem-modem-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 105>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	modem-skin-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 110>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-usb.dtsi b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
index df962e9..9b39f65 100644
--- a/arch/arm64/boot/dts/qcom/kona-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
@@ -14,20 +14,22 @@
 		reg-names = "core_base";
 
 		iommus = <&apps_smmu 0x0 0x0>;
-		qcom,iommu-dma = "disabled";
+		qcom,iommu-dma = "atomic";
+		qcom,iommu-dma-addr-pool = <0x90000000 0x60000000>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
 
-		interrupts = <GIC_SPI 494 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 497 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 495 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts-extended = <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+			     <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+			     <&pdc 17 IRQ_TYPE_LEVEL_HIGH>,
+			     <&pdc 15 IRQ_TYPE_EDGE_BOTH>;
 		interrupt-names = "dp_hs_phy_irq", "pwr_event_irq",
 				"ss_phy_irq", "dm_hs_phy_irq";
 		qcom,use-pdc-interrupts;
 
 		USB3_GDSC-supply = <&usb30_prim_gdsc>;
+		dpdm-supply = <&usb2_phy0>;
 		clocks = <&clock_gcc GCC_USB30_PRIM_MASTER_CLK>,
 			<&clock_gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
 			<&clock_gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
@@ -86,7 +88,7 @@
 			compatible = "snps,dwc3";
 			reg = <0x0a600000 0xcd00>;
 			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
-			usb-phy = <&usb2_phy0>, <&usb_nop_phy>;
+			usb-phy = <&usb2_phy0>, <&usb_qmp_dp_phy>;
 			linux,sysdev_is_parent;
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
@@ -95,7 +97,7 @@
 			snps,usb3-u1u2-disable;
 			usb-core-id = <0>;
 			tx-fifo-resize;
-			maximum-speed = "high-speed";
+			maximum-speed = "super-speed-plus";
 			dr_mode = "drd";
 		};
 
@@ -333,13 +335,12 @@
 		resets = <&clock_gcc GCC_USB3_DP_PHY_PRIM_BCR>,
 			<&clock_gcc GCC_USB3_PHY_PRIM_BCR>;
 		reset-names = "global_phy_reset", "phy_reset";
-
-		status = "disabled";
 	};
 
 	usb_audio_qmi_dev {
 		compatible = "qcom,usb-audio-qmi-dev";
 		iommus = <&apps_smmu 0x180f 0x0>;
+		qcom,iommu-dma = "disabled";
 		qcom,usb-audio-stream-id = <0xf>;
 		qcom,usb-audio-intr-num = <2>;
 	};
@@ -347,4 +348,242 @@
 	usb_nop_phy: usb_nop_phy {
 		compatible = "usb-nop-xceiv";
 	};
+
+	/* Secondary USB port related controller */
+	usb1: ssusb@a800000 {
+		compatible = "qcom,dwc-usb3-msm";
+		reg = <0xa800000 0x100000>;
+		reg-names = "core_base";
+
+		iommus = <&apps_smmu 0x20 0x0>;
+		qcom,iommu-dma = "atomic";
+		qcom,iommu-dma-addr-pool = <0x90000000 0x60000000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		interrupts-extended = <&pdc 12 IRQ_TYPE_LEVEL_HIGH>,
+			     <&intc GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+			     <&pdc 16 IRQ_TYPE_LEVEL_HIGH>,
+			     <&pdc 13 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "dp_hs_phy_irq", "pwr_event_irq",
+				"ss_phy_irq", "dm_hs_phy_irq";
+		qcom,use-pdc-interrupts;
+
+		USB3_GDSC-supply = <&usb30_sec_gdsc>;
+		clocks = <&clock_gcc GCC_USB30_SEC_MASTER_CLK>,
+		       <&clock_gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
+		       <&clock_gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
+		       <&clock_gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
+		       <&clock_gcc GCC_USB30_SEC_SLEEP_CLK>,
+		       <&clock_gcc GCC_USB3_SEC_CLKREF_EN>;
+
+		clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+				"utmi_clk", "sleep_clk", "xo";
+
+		resets = <&clock_gcc GCC_USB30_SEC_BCR>;
+		reset-names = "core_reset";
+
+		qcom,core-clk-rate = <200000000>;
+		qcom,core-clk-rate-hs = <66666667>;
+		qcom,num-gsi-evt-buffs = <0x3>;
+		qcom,gsi-reg-offset =
+			<0x0fc /* GSI_GENERAL_CFG */
+			 0x110 /* GSI_DBL_ADDR_L */
+			 0x120 /* GSI_DBL_ADDR_H */
+			 0x130 /* GSI_RING_BASE_ADDR_L */
+			 0x144 /* GSI_RING_BASE_ADDR_H */
+			 0x1a4>; /* GSI_IF_STS */
+		qcom,dwc-usb3-msm-tx-fifo-size = <27696>;
+		qcom,charging-disabled;
+
+		qcom,msm-bus,name = "usb1";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <3>;
+		qcom,msm-bus,vectors-KBps =
+			/*  suspend vote */
+			<MSM_BUS_MASTER_USB3_1 MSM_BUS_SLAVE_EBI_CH0 0 0>,
+			<MSM_BUS_MASTER_USB3_1 MSM_BUS_SLAVE_IPA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 0>,
+
+			/*  nominal vote */
+			<MSM_BUS_MASTER_USB3_1
+				MSM_BUS_SLAVE_EBI_CH0 1000000 2500000>,
+			<MSM_BUS_MASTER_USB3_1 MSM_BUS_SLAVE_IPA_CFG 0 2400>,
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 40000>,
+
+			/*  svs vote */
+			<MSM_BUS_MASTER_USB3_1
+				MSM_BUS_SLAVE_EBI_CH0 240000 700000>,
+			<MSM_BUS_MASTER_USB3_1 MSM_BUS_SLAVE_IPA_CFG 0 2400>,
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 40000>;
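+			/*
+			 * Illustrative note: each <master slave ab ib> tuple
+			 * is one path vote in KBps; with num-cases = <3> and
+			 * num-paths = <3>, the nine tuples above are consumed
+			 * three at a time per use case (suspend, nominal, svs).
+			 */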
+
+		dwc3@a800000 {
+			compatible = "snps,dwc3";
+			reg = <0xa800000 0xcd00>;
+			interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+			usb-phy = <&usb2_phy1>, <&usb_qmp_phy>;
+			linux,sysdev_is_parent;
+			snps,disable-clk-gating;
+			snps,has-lpm-erratum;
+			snps,hird-threshold = /bits/ 8 <0x10>;
+			snps,usb3_lpm_capable;
+			usb-core-id = <1>;
+			tx-fifo-resize;
+			maximum-speed = "super-speed";
+			dr_mode = "drd";
+		};
+	};
+
+	/* Secondary USB port related High Speed PHY */
+	usb2_phy1: hsphy@88e4000 {
+		compatible = "qcom,usb-hsphy-snps-femto";
+		reg = <0x88e4000 0x110>;
+		reg-names = "hsusb_phy_base";
+
+		vdd-supply = <&pm8150_l5>;
+		vdda18-supply = <&pm8150_l12>;
+		vdda33-supply = <&pm8150_l2>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+
+		clocks = <&clock_rpmh RPMH_CXO_CLK>;
+		clock-names = "ref_clk_src";
+
+		resets = <&clock_gcc GCC_QUSB2PHY_SEC_BCR>;
+		reset-names = "phy_reset";
+	};
+
+	/* Secondary USB port related QMP PHY */
+	usb_qmp_phy: ssphy@88eb000 {
+		compatible = "qcom,usb-ssphy-qmp-v2";
+		reg = <0x88eb000 0x1000>,
+		    <0x088eb88c 0x4>;
+		reg-names = "qmp_phy_base",
+			"pcs_clamp_enable_reg";
+
+		vdd-supply = <&pm8150_l18>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,vdd-max-load-uA = <47000>;
+		core-supply = <&pm8150_l9>;
+		qcom,vbus-valid-override;
+		qcom,qmp-phy-init-seq =
+		    /* <reg_offset, value, delay> */
+		    <USB3_UNI_QSERDES_COM_SYSCLK_EN_SEL 0x1a 0
+		     USB3_UNI_QSERDES_COM_BIN_VCOCAL_HSCLK_SEL 0x11 0
+		     USB3_UNI_QSERDES_COM_HSCLK_SEL 0x01 0
+		     USB3_UNI_QSERDES_COM_DEC_START_MODE0 0x82 0
+		     USB3_UNI_QSERDES_COM_DIV_FRAC_START1_MODE0 0xab 0
+		     USB3_UNI_QSERDES_COM_DIV_FRAC_START2_MODE0 0xea 0
+		     USB3_UNI_QSERDES_COM_DIV_FRAC_START3_MODE0 0x02 0
+		     USB3_UNI_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0xca 0
+		     USB3_UNI_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1e 0
+		     USB3_UNI_QSERDES_COM_CP_CTRL_MODE0 0x06 0
+		     USB3_UNI_QSERDES_COM_PLL_RCTRL_MODE0 0x16 0
+		     USB3_UNI_QSERDES_COM_PLL_CCTRL_MODE0 0x36 0
+		     USB3_UNI_QSERDES_COM_VCO_TUNE1_MODE0 0x24 0
+		     USB3_UNI_QSERDES_COM_LOCK_CMP2_MODE0 0x34 0
+		     USB3_UNI_QSERDES_COM_LOCK_CMP1_MODE0 0x14 0
+		     USB3_UNI_QSERDES_COM_LOCK_CMP_EN 0x04 0
+		     USB3_UNI_QSERDES_COM_SYSCLK_BUF_ENABLE 0x0a 0
+		     USB3_UNI_QSERDES_COM_VCO_TUNE2_MODE1 0x02 0
+		     USB3_UNI_QSERDES_COM_VCO_TUNE1_MODE1 0x24 0
+		     USB3_UNI_QSERDES_COM_CORECLK_DIV_MODE1 0x08 0
+		     USB3_UNI_QSERDES_COM_DEC_START_MODE1 0x82 0
+		     USB3_UNI_QSERDES_COM_DIV_FRAC_START1_MODE1 0xab 0
+		     USB3_UNI_QSERDES_COM_DIV_FRAC_START2_MODE1 0xea 0
+		     USB3_UNI_QSERDES_COM_DIV_FRAC_START3_MODE1 0x02 0
+		     USB3_UNI_QSERDES_COM_LOCK_CMP2_MODE1 0x82 0
+		     USB3_UNI_QSERDES_COM_LOCK_CMP1_MODE1 0x34 0
+		     USB3_UNI_QSERDES_COM_CP_CTRL_MODE1 0x06 0
+		     USB3_UNI_QSERDES_COM_PLL_RCTRL_MODE1 0x16 0
+		     USB3_UNI_QSERDES_COM_PLL_CCTRL_MODE1 0x36 0
+		     USB3_UNI_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0xca 0
+		     USB3_UNI_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1e 0
+		     USB3_UNI_QSERDES_COM_CMN_IPTRIM 0x20 0
+		     USB3_UNI_QSERDES_COM_SSC_EN_CENTER 0x01 0
+		     USB3_UNI_QSERDES_COM_SSC_PER1 0x31 0
+		     USB3_UNI_QSERDES_COM_SSC_PER2 0x01 0
+		     USB3_UNI_QSERDES_COM_SSC_STEP_SIZE1_MODE1 0xde 0
+		     USB3_UNI_QSERDES_COM_SSC_STEP_SIZE2_MODE1 0x07 0
+		     USB3_UNI_QSERDES_COM_SSC_STEP_SIZE1_MODE0 0xde 0
+		     USB3_UNI_QSERDES_COM_SSC_STEP_SIZE2_MODE0 0x07 0
+		     USB3_UNI_QSERDES_COM_VCO_TUNE_MAP 0x02 0
+		     USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH4 0xb8 0
+		     USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH3 0xff 0
+		     USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH2 0xb7 0
+		     USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH 0x7f 0
+		     USB3_UNI_QSERDES_RX_RX_MODE_00_LOW 0x7f 0
+		     USB3_UNI_QSERDES_RX_RX_MODE_01_HIGH4 0xb4 0
+		     USB3_UNI_QSERDES_RX_RX_MODE_01_HIGH3 0x7b 0
+		     USB3_UNI_QSERDES_RX_RX_MODE_01_HIGH2 0x5c 0
+		     USB3_UNI_QSERDES_RX_RX_MODE_01_HIGH 0xdc 0
+		     USB3_UNI_QSERDES_RX_RX_MODE_01_LOW 0xdc 0
+		     USB3_UNI_QSERDES_RX_UCDR_PI_CONTROLS 0x99 0
+		     USB3_UNI_QSERDES_RX_UCDR_SB2_THRESH1 0x04 0
+		     USB3_UNI_QSERDES_RX_UCDR_SB2_THRESH2 0x08 0
+		     USB3_UNI_QSERDES_RX_UCDR_SB2_GAIN1 0x05 0
+		     USB3_UNI_QSERDES_RX_UCDR_SB2_GAIN2 0x05 0
+		     USB3_UNI_QSERDES_RX_UCDR_FASTLOCK_FO_GAIN 0x2f 0
+		     USB3_UNI_QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW 0xff 0
+		     USB3_UNI_QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH 0x0f 0
+		     USB3_UNI_QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x7f 0
+		     USB3_UNI_QSERDES_RX_UCDR_FO_GAIN 0x0a 0
+		     USB3_UNI_QSERDES_RX_VGA_CAL_CNTRL1 0x54 0
+		     USB3_UNI_QSERDES_RX_VGA_CAL_CNTRL2 0x0c 0
+		     USB3_UNI_QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 0x0f 0
+		     USB3_UNI_QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 0x4a 0
+		     USB3_UNI_QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 0x0a 0
+		     USB3_UNI_QSERDES_RX_DFE_EN_TIMER 0x04 0
+		     USB3_UNI_QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x47 0
+		     USB3_UNI_QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x80 0
+		     USB3_UNI_QSERDES_RX_SIGDET_CNTRL 0x04 0
+		     USB3_UNI_QSERDES_RX_SIGDET_DEGLITCH_CNTRL 0x0e 0
+		     USB3_UNI_QSERDES_RX_RX_IDAC_TSETTLE_HIGH 0x00 0
+		     USB3_UNI_QSERDES_RX_RX_IDAC_TSETTLE_LOW 0xc0 0
+		     USB3_UNI_QSERDES_RX_DFE_CTLE_POST_CAL_OFFSET 0x38 0
+		     USB3_UNI_QSERDES_RX_UCDR_SO_GAIN 0x06 0
+		     USB3_UNI_QSERDES_RX_DCC_CTRL1 0x0c 0
+		     USB3_UNI_QSERDES_RX_GM_CAL 0x1f 0
+		     USB3_UNI_QSERDES_TX_RCV_DETECT_LVL_2 0x12 0
+		     USB3_UNI_QSERDES_TX_LANE_MODE_1 0xd5 0
+		     USB3_UNI_QSERDES_TX_PI_QEC_CTRL 0x54 0
+		     USB3_UNI_QSERDES_TX_RES_CODE_LANE_OFFSET_TX 0x08 0
+		     USB3_UNI_PCS_LOCK_DETECT_CONFIG1 0xd0 0
+		     USB3_UNI_PCS_LOCK_DETECT_CONFIG2 0x07 0
+		     USB3_UNI_PCS_LOCK_DETECT_CONFIG3 0x20 0
+		     USB3_UNI_PCS_LOCK_DETECT_CONFIG6 0x13 0
+		     USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_L 0xe7 0
+		     USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_H 0x03 0
+		     USB3_UNI_PCS_RX_SIGDET_LVL 0xaa 0
+		     USB3_UNI_PCS_PCS_TX_RX_CONFIG 0x0c 0
+		     USB3_UNI_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x07 0
+		     USB3_UNI_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0xf8 0
+		     USB3_UNI_PCS_CDR_RESET_TIME 0x0a 0
+		     USB3_UNI_PCS_ALIGN_DETECT_CONFIG1 0x88 0
+		     USB3_UNI_PCS_ALIGN_DETECT_CONFIG2 0x13 0
+		     USB3_UNI_PCS_EQ_CONFIG1 0x4b 0
+		     USB3_UNI_PCS_EQ_CONFIG5 0x10 0
+		     USB3_UNI_PCS_REFGEN_REQ_CONFIG1 0x21 0
+		     0xffffffff 0xffffffff 0x00>;
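+		/*
+		 * Illustrative note: the init sequence is a flat list of
+		 * <reg_offset value delay> triplets (see the format comment
+		 * above); the trailing 0xffffffff entry appears to serve as
+		 * an end-of-table marker.
+		 */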
+
+		qcom,qmp-phy-reg-offset =
+				<USB3_UNI_PCS_PCS_STATUS1
+				 USB3_UNI_PCS_USB3_AUTONOMOUS_MODE_CTRL
+				 USB3_UNI_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR
+				 USB3_UNI_PCS_POWER_DOWN_CONTROL
+				 USB3_UNI_PCS_SW_RESET
+				 USB3_UNI_PCS_START_CONTROL>;
+
+		clocks = <&clock_gcc GCC_USB3_SEC_PHY_AUX_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_PHY_PIPE_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_CLKREF_EN>,
+			 <&clock_gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
+		clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
+				"ref_clk", "com_aux_clk";
+
+		resets = <&clock_gcc GCC_USB3_PHY_SEC_BCR>,
+			<&clock_gcc GCC_USB3PHY_PHY_SEC_BCR>;
+		reset-names = "phy_reset", "phy_phy_reset";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-v2-cdp.dts b/arch/arm64/boot/dts/qcom/kona-v2-cdp.dts
new file mode 100644
index 0000000..5716b11
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2-cdp.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "kona-v2.dtsi"
+#include "kona-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. kona CDP";
+	compatible = "qcom,kona-cdp", "qcom,kona", "qcom,cdp";
+	qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2-mtp.dts b/arch/arm64/boot/dts/qcom/kona-v2-mtp.dts
new file mode 100644
index 0000000..eaf41c5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2-mtp.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "kona-v2.dtsi"
+#include "kona-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. kona MTP";
+	compatible = "qcom,kona-mtp", "qcom,kona", "qcom,mtp";
+	qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2-qrd.dts b/arch/arm64/boot/dts/qcom/kona-v2-qrd.dts
new file mode 100644
index 0000000..cbd380f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2-qrd.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "kona-v2.dtsi"
+#include "kona-qrd.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. kona QRD";
+	compatible = "qcom,kona-qrd", "qcom,kona", "qcom,qrd";
+	qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2-rumi.dts b/arch/arm64/boot/dts/qcom/kona-v2-rumi.dts
new file mode 100644
index 0000000..32efd28
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2-rumi.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "kona-v2.dtsi"
+#include "kona-rumi.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. kona RUMI";
+	compatible = "qcom,kona-rumi", "qcom,kona", "qcom,rumi";
+	qcom,board-id = <15 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2.dts b/arch/arm64/boot/dts/qcom/kona-v2.dts
new file mode 100644
index 0000000..fa0a032
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "kona-v2.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. kona v2 SoC";
+	compatible = "qcom,kona";
+	qcom,board-id = <0 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2.dtsi b/arch/arm64/boot/dts/qcom/kona-v2.dtsi
new file mode 100644
index 0000000..e39bc9c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2.dtsi
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "kona.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. kona v2";
+	compatible = "qcom,kona";
+	qcom,msm-id = <356 0x20000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi b/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
index 3ddc27e..11c09eb 100644
--- a/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
@@ -8,23 +8,14 @@
 		compatible = "qcom,va-macro";
 		reg = <0x3370000 0x0>;
 		clock-names = "va_core_clk";
-		clocks = <&clock_audio_va 0>;
+		clocks = <&clock_audio_va_1 0>;
 		va-vdd-micb-supply = <&S4A>;
 		qcom,va-vdd-micb-voltage = <1800000 1800000>;
 		qcom,va-vdd-micb-current = <11200>;
 		qcom,va-dmic-sample-rate = <4800000>;
 		qcom,va-clk-mux-select = <1>;
 		qcom,va-island-mode-muxsel = <0x033A0000>;
-	};
-};
-
-&soc {
-	clock_audio_va: va_core_clk  {
-		compatible = "qcom,audio-ref-clk";
-		qcom,codec-ext-clk-src = <AUDIO_LPASS_MCLK>;
-		qcom,codec-lpass-ext-clk-freq = <19200000>;
-		qcom,codec-lpass-clk-id = <0x30B>;
-		#clock-cells = <1>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
index b10c5f7..674f53a 100644
--- a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
@@ -26,16 +26,15 @@
 		vcodec-supply = <&mvs0_gdsc>;
 
 		/* Clocks */
-		clock-names = "gcc_video_axi0", "ahb_clk",
+		clock-names = "gcc_video_axi0",
 			"core_clk", "vcodec_clk";
 		clocks = <&clock_gcc GCC_VIDEO_AXI0_CLK>,
-			<&clock_videocc VIDEO_CC_AHB_CLK>,
 			<&clock_videocc VIDEO_CC_MVS0C_CLK>,
 			<&clock_videocc VIDEO_CC_MVS0_CLK>;
-		qcom,proxy-clock-names = "gcc_video_axi0", "ahb_clk",
+		qcom,proxy-clock-names = "gcc_video_axi0",
 					"core_clk", "vcodec_clk";
 		/* Mask: Bit0: Clock Scaling, Bit1: Mem Retention*/
-		qcom,clock-configs = <0x0 0x0 0x1 0x1>;
+		qcom,clock-configs = <0x0 0x1 0x1>;
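+		/*
+		 * Illustrative reading: one mask cell per clock above, so
+		 * <0x0 0x1 0x1> requests clock scaling (Bit0) for core_clk
+		 * and vcodec_clk only; a value of 0x3 would additionally
+		 * request mem retention (Bit1).
+		 */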
 		qcom,allowed-clock-rates = <239999999 338000000
 						366000000 444000000>;
 		resets = <&clock_gcc GCC_VIDEO_AXI0_CLK_ARES>,
@@ -50,19 +49,17 @@
 			label = "cnoc";
 			qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
-			qcom,bus-governor = "performance";
-			qcom,bus-range-kbps = <1000 1000>;
-			operating-points-v2 = <&venus_bus_cnoc_bw_table>;
+			qcom,mode = "performance";
+			qcom,bus-range-kbps = <762 762>;
 		};
 
 		venus_bus_ddr {
 			compatible = "qcom,msm-vidc,bus";
 			label = "venus-ddr";
-			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-master = <MSM_BUS_MASTER_LLCC>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
-			qcom,bus-governor = "msm-vidc-ddr";
-			qcom,bus-range-kbps = <1000 6533000>;
-			operating-points-v2 = <&ddr_bw_opp_table>;
+			qcom,mode = "venus-ddr";
+			qcom,bus-range-kbps = <762 6533000>;
 		};
 
 		venus_bus_llcc {
@@ -70,9 +67,8 @@
 			label = "venus-llcc";
 			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_LLCC>;
-			qcom,bus-governor = "msm-vidc-llcc";
-			qcom,bus-range-kbps = <1000 6533000>;
-			operating-points-v2 = <&llcc_bw_opp_table>;
+			qcom,mode = "venuc-llcc";
+			qcom,bus-range-kbps = <2288 6533000>;
 		};
 
 		/* MMUs */
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 2c625d4..952bd10 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -31,6 +31,14 @@
 	qcom,msm-id = <356 0x10000>;
 	interrupt-parent = <&intc>;
 
+	mem-offline {
+		compatible = "qcom,mem-offline";
+		offline-sizes = <0x1 0x40000000 0x0 0x40000000>,
+				<0x1 0xc0000000 0x0 0x80000000>;
+		granule = <512>;
+		mboxes = <&qmp_aop 0>;
+	};
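+
+	/*
+	 * Illustrative reading of mem-offline above: each offline-sizes entry
+	 * is two 64-bit numbers, each split across two cells, e.g. the first
+	 * pairs 0x1_40000000 (5 GiB) with 0x40000000 (1 GiB) and the second
+	 * pairs 0x1_c0000000 (7 GiB) with 0x80000000 (2 GiB) -- presumably
+	 * total DDR size versus the corresponding offlinable amount.
+	 */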
+
 	aliases {
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
 		sdhc2 = &sdhc_2; /* SDC2 SD card slot */
@@ -41,6 +49,7 @@
 		swr0 = &swr0;
 		swr1 = &swr1;
 		swr2 = &swr2;
+		mhi-netdev0 = &mhi_netdev_0;
 	};
 
 	cpus {
@@ -58,6 +67,7 @@
 			qcom,freq-domain = <&cpufreq_hw 0 4>;
 			capacity-dmips-mhz = <1024>;
 			dynamic-power-coefficient = <100>;
+			#cooling-cells = <2>;
 			L2_0: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -196,6 +206,7 @@
 			qcom,freq-domain = <&cpufreq_hw 1 4>;
 			capacity-dmips-mhz = <1894>;
 			dynamic-power-coefficient = <514>;
+			#cooling-cells = <2>;
 			L2_4: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
@@ -322,6 +333,7 @@
 			qcom,freq-domain = <&cpufreq_hw 2 4>;
 			capacity-dmips-mhz = <1894>;
 			dynamic-power-coefficient = <598>;
+			#cooling-cells = <2>;
 			L2_7: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x80000>;
@@ -464,11 +476,11 @@
 
 		xbl_aop_mem: xbl_aop_region@80700000 {
 			no-map;
-			reg = <0x0 0x80700000 0x0 0x120000>;
+			reg = <0x0 0x80700000 0x0 0x160000>;
 		};
 
-		cmd_db: reserved-memory@80820000 {
-			reg = <0x0 0x80820000 0x0 0x20000>;
+		cmd_db: reserved-memory@80860000 {
+			reg = <0x0 0x80860000 0x0 0x20000>;
 			compatible = "qcom,cmd-db";
 			no-map;
 		};
@@ -478,86 +490,96 @@
 			reg = <0x0 0x80900000 0x0 0x200000>;
 		};
 
-		removed_mem: removed_region@80b00000 {
+		lpass_pcie_mem: lpass_pcie_region@80b00000 {
 			no-map;
-			reg = <0x0 0x80b00000 0x0 0x1300000>;
+			reg = <0x0 0x80b00000 0x0 0x100000>;
 		};
 
-		qtee_apps_mem: qtee_apps_region@81e00000 {
+		ssc_pcie_mem: ssc_pcie_region@80c00000 {
 			no-map;
-			reg = <0x0 0x81e00000 0x0 0x2600000>;
+			reg = <0x0 0x80c00000 0x0 0x100000>;
 		};
 
-		pil_camera_mem: pil_camera_region@86000000 {
+		removed_mem: removed_region@80d00000 {
+			no-map;
+			reg = <0x0 0x80d00000 0x0 0x1300000>;
+		};
+
+		qtee_apps_mem: qtee_apps_region@82000000 {
+			no-map;
+			reg = <0x0 0x82000000 0x0 0x2600000>;
+		};
+
+		pil_camera_mem: pil_camera_region@86200000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86000000 0x0 0x500000>;
+			reg = <0x0 0x86200000 0x0 0x500000>;
 		};
 
-		pil_wlan_fw_mem: pil_wlan_fw_region@86500000 {
+		pil_wlan_fw_mem: pil_wlan_fw_region@86700000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86500000 0x0 0x100000>;
+			reg = <0x0 0x86700000 0x0 0x100000>;
 		};
 
-		pil_ipa_fw_mem: pil_ipa_fw_region@86600000 {
+		pil_ipa_fw_mem: pil_ipa_fw_region@86800000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86600000 0x0 0x10000>;
+			reg = <0x0 0x86800000 0x0 0x10000>;
 		};
 
-		pil_ipa_gsi_mem: pil_ipa_gsi_region@86610000 {
+		pil_ipa_gsi_mem: pil_ipa_gsi_region@86810000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86610000 0x0 0xa000>;
+			reg = <0x0 0x86810000 0x0 0xa000>;
 		};
 
-		pil_gpu_mem: pil_gpu_region@8661a000 {
+		pil_gpu_mem: pil_gpu_region@8681a000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x8661a000 0x0 0x2000>;
+			reg = <0x0 0x8681a000 0x0 0x2000>;
 		};
 
-		pil_npu_mem: pil_npu_region@86700000 {
+		pil_npu_mem: pil_npu_region@86900000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86700000 0x0 0x500000>;
+			reg = <0x0 0x86900000 0x0 0x500000>;
 		};
 
-		pil_video_mem: pil_video_region@86c00000 {
+		pil_video_mem: pil_video_region@86e00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86c00000 0x0 0x500000>;
+			reg = <0x0 0x86e00000 0x0 0x500000>;
 		};
 
-		pil_cvp_mem: pil_cvp_region@87100000 {
+		pil_cvp_mem: pil_cvp_region@87300000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x87100000 0x0 0x500000>;
+			reg = <0x0 0x87300000 0x0 0x500000>;
 		};
 
-		pil_cdsp_mem: pil_cdsp_region@87600000 {
+		pil_cdsp_mem: pil_cdsp_region@87800000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x87600000 0x0 0x800000>;
+			reg = <0x0 0x87800000 0x0 0x800000>;
 		};
 
-		pil_slpi_mem: pil_slpi_region@87e00000 {
+		pil_slpi_mem: pil_slpi_region@88000000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x87e00000 0x0 0x1500000>;
+			reg = <0x0 0x88000000 0x0 0x1500000>;
 		};
 
-		pil_adsp_mem: pil_adsp_region@89300000 {
+		pil_adsp_mem: pil_adsp_region@89500000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x89300000 0x0 0x1a00000>;
+			reg = <0x0 0x89500000 0x0 0x1c00000>;
 		};
 
-		pil_spss_mem: pil_spss_region@8ad00000 {
+		pil_spss_mem: pil_spss_region@8b100000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x8ad00000 0x0 0x100000>;
+			reg = <0x0 0x8b100000 0x0 0x100000>;
 		};
 
 		adsp_mem: adsp_region {
@@ -584,6 +606,16 @@
 			size = <0x0 0x400000>;
 		};
 
+		cont_splash_memory: cont_splash_region@9c000000 {
+			reg = <0x0 0x9c000000 0x0 0x02400000>;
+			label = "cont_splash_region";
+		};
+
+		disp_rdump_memory: disp_rdump_region@9c000000 {
+			reg = <0x0 0x9c000000 0x0 0x00800000>;
+			label = "disp_rdump_region";
+		};
+
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
@@ -595,7 +627,7 @@
 			alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
 			reusable;
 			alignment = <0x0 0x400000>;
-			size = <0x0 0x800000>;
+			size = <0x0 0xc00000>;
 		};
 
 		qseecom_mem: qseecom_region {
@@ -614,6 +646,14 @@
 			size = <0x0 0x1000000>;
 		};
 
+		secure_display_memory: secure_display_region { /* Secure UI */
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
+			reusable;
+			alignment = <0x0 0x400000>;
+			size = <0x0 0xA000000>;
+		};
+
 		/* global autoconfigured region for contiguous allocations */
 		linux,cma {
 			compatible = "shared-dma-pool";
@@ -707,7 +747,7 @@
 	};
 
 	cache-controller@9200000 {
-		compatible = "qcom,kona-llcc";
+		compatible = "qcom,llcc-v2";
 		reg = <0x9200000 0x1d0000> , <0x9600000 0x50000>;
 		reg-names = "llcc_base", "llcc_broadcast_base";
 		cap-based-alloc-and-pwr-collapse;
@@ -905,6 +945,18 @@
 		};
 	};
 
+	bus_proxy_client: qcom,bus_proxy_client {
+		compatible = "qcom,bus-proxy-client";
+		qcom,msm-bus,name = "bus-proxy-client";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>, <23 512 0 0>,
+			<22 512 1500000 1500000>, <23 512 1500000 1500000>;
+		qcom,msm-bus,active-only;
+		status = "ok";
+	};
+
 	keepalive_opp_table: keepalive-opp-table {
 		compatible = "operating-points-v2";
 		opp-1 {
@@ -922,11 +974,6 @@
 		operating-points-v2 = <&keepalive_opp_table>;
 	};
 
-	venus_bus_cnoc_bw_table: bus-cnoc-bw-table {
-		compatible = "operating-points-v2";
-		BW_OPP_ENTRY( 200, 4);
-	};
-
 	llcc_bw_opp_table: llcc-bw-opp-table {
 		compatible = "operating-points-v2";
 		BW_OPP_ENTRY(  150, 16); /*  2288 MB/s */
@@ -938,6 +985,18 @@
 		BW_OPP_ENTRY( 1000, 16); /* 15258 MB/s */
 	};
 
+	suspendable_llcc_bw_opp_table: suspendable-llcc-bw-opp-table {
+		compatible = "operating-points-v2";
+		BW_OPP_ENTRY(    0, 16); /*     0 MB/s */
+		BW_OPP_ENTRY(  150, 16); /*  2288 MB/s */
+		BW_OPP_ENTRY(  300, 16); /*  4577 MB/s */
+		BW_OPP_ENTRY(  466, 16); /*  7110 MB/s */
+		BW_OPP_ENTRY(  600, 16); /*  9155 MB/s */
+		BW_OPP_ENTRY(  806, 16); /* 12298 MB/s */
+		BW_OPP_ENTRY(  933, 16); /* 14236 MB/s */
+		BW_OPP_ENTRY( 1000, 16); /* 15258 MB/s */
+	};
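+
+	/*
+	 * Illustrative check: the MB/s annotations follow from the macro
+	 * arguments, e.g. BW_OPP_ENTRY(150, 16) = 150 MHz x 16 bytes =
+	 * 2400000 KB/s ~= 2288 MiB/s, matching the comment above.
+	 */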
+
 	ddr_bw_opp_table: ddr-bw-opp-table {
 		compatible = "operating-points-v2";
 		BW_OPP_ENTRY(  200, 4); /*   762 MB/s */
@@ -1025,23 +1084,30 @@
 
 	npu_npu_ddr_bwmon: qcom,npu-npu-ddr-bwmon@60300 {
 		compatible = "qcom,bimc-bwmon4";
-		reg = <0x00060300 0x300>, <0x00060400 0x200>;
+		reg = <0x00060400 0x300>, <0x00060300 0x200>;
 		reg-names = "base", "global_base";
-		interrupts = <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,mport = <0>;
 		qcom,hw-timer-hz = <19200000>;
 		qcom,target-dev = <&npu_npu_ddr_bw>;
 		qcom,count-unit = <0x10000>;
 	};
 
-	npu_npu_ddr_bwmon_dsp: qcom,npu-npu-ddr-bwmoni_dsp@70200 {
+	npudsp_npu_ddr_bw: qcom,npudsp-npu-ddr-bw {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports = <MSM_BUS_MASTER_NPU MSM_BUS_SLAVE_EBI_CH0>;
+		operating-points-v2 = <&suspendable_ddr_bw_opp_table>;
+	};
+
+	npudsp_npu_ddr_bwmon: qcom,npudsp-npu-ddr-bwmon@70200 {
 		compatible = "qcom,bimc-bwmon4";
-		reg = <0x00070200 0x300>, <0x00070300 0x200>;
+		reg = <0x00070300 0x300>, <0x00070200 0x200>;
 		reg-names = "base", "global_base";
 		interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,mport = <0>;
 		qcom,hw-timer-hz = <19200000>;
-		qcom,target-dev = <&npu_npu_ddr_bw>;
+		qcom,target-dev = <&npudsp_npu_ddr_bw>;
 		qcom,count-unit = <0x10000>;
 	};
 
@@ -1153,7 +1219,7 @@
 		compatible = "qcom,arm-memlat-mon";
 		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
 		qcom,target-dev = <&cpu0_llcc_ddr_lat>;
-		qcom,cachemiss-ev = <0x1000>;
+		qcom,cachemiss-ev = <0x2A>;
 		qcom,core-dev-table =
 			<  300000 MHZ_TO_MBPS(  200, 4) >,
 			<  729600 MHZ_TO_MBPS(  451, 4) >,
@@ -1175,7 +1241,7 @@
 		compatible = "qcom,arm-memlat-mon";
 		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
 		qcom,target-dev = <&cpu4_llcc_ddr_lat>;
-		qcom,cachemiss-ev = <0x1000>;
+		qcom,cachemiss-ev = <0x2A>;
 		qcom,core-dev-table =
 			<  300000 MHZ_TO_MBPS( 200, 4) >,
 			<  691200 MHZ_TO_MBPS( 451, 4) >,
@@ -1309,6 +1375,21 @@
 		qcom,qsee-reentrancy-support = <2>;
 	};
 
+	qcom_rng: qrng@793000 {
+		compatible = "qcom,msm-rng";
+		reg = <0x793000 0x1000>;
+		qcom,msm-rng-iface-clk;
+		qcom,no-qrng-config;
+		qcom,msm-bus,name = "msm-rng-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<1 618 0 0>,    /* No vote */
+			<1 618 0 300000>;  /* 75 MHz */
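+			/*
+			 * Illustrative check: 300000 KBps ~= 300 MB/s, which
+			 * at 4 bytes per clock matches the 75 MHz noted above.
+			 */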
+		clocks = <&clock_gcc GCC_PRNG_AHB_CLK>;
+		clock-names = "iface_clk";
+	};
+
 	mdm0: qcom,mdm0 {
 		compatible = "qcom,ext-sdx55m";
 		cell-index = <0>;
@@ -1470,42 +1551,49 @@
 		compatible = "qcom,gdsc";
 		reg = <0x16b004 0x4>;
 		regulator-name = "pcie_0_gdsc";
+		qcom,retain-regs;
 	};
 
 	pcie_1_gdsc: qcom,gdsc@18d004 {
 		compatible = "qcom,gdsc";
 		reg = <0x18d004 0x4>;
 		regulator-name = "pcie_1_gdsc";
+		qcom,retain-regs;
 	};
 
 	pcie_2_gdsc: qcom,gdsc@106004 {
 		compatible = "qcom,gdsc";
 		reg = <0x106004 0x4>;
 		regulator-name = "pcie_2_gdsc";
+		qcom,retain-regs;
 	};
 
 	ufs_card_gdsc: qcom,gdsc@175004 {
 		compatible = "qcom,gdsc";
 		reg = <0x175004 0x4>;
 		regulator-name = "ufs_card_gdsc";
+		qcom,retain-regs;
 	};
 
 	ufs_phy_gdsc: qcom,gdsc@177004 {
 		compatible = "qcom,gdsc";
 		reg = <0x177004 0x4>;
 		regulator-name = "ufs_phy_gdsc";
+		qcom,retain-regs;
 	};
 
 	usb30_prim_gdsc: qcom,gdsc@10f004 {
 		compatible = "qcom,gdsc";
 		reg = <0x10f004 0x4>;
 		regulator-name = "usb30_prim_gdsc";
+		qcom,retain-regs;
 	};
 
 	usb30_sec_gdsc: qcom,gdsc@110004 {
 		compatible = "qcom,gdsc";
 		reg = <0x110004 0x4>;
 		regulator-name = "usb30_sec_gdsc";
+		qcom,retain-regs;
 	};
 
 	hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc: qcom,gdsc@17d050 {
@@ -1550,6 +1638,7 @@
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
 		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	ife_0_gdsc: qcom,gdsc@ad0a004 {
@@ -1560,6 +1649,7 @@
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,retain-regs;
 	};
 
 	ife_1_gdsc: qcom,gdsc@ad0b004 {
@@ -1570,6 +1660,7 @@
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,retain-regs;
 	};
 
 	ipe_0_gdsc: qcom,gdsc@ad08004 {
@@ -1581,6 +1672,7 @@
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
 		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	sbi_gdsc: qcom,gdsc@ad09004 {
@@ -1591,6 +1683,7 @@
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,retain-regs;
 	};
 
 	titan_top_gdsc: qcom,gdsc@ad0c144 {
@@ -1601,6 +1694,7 @@
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,retain-regs;
 	};
 
 	/* DISP_CC GDSC */
@@ -1613,6 +1707,7 @@
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
 		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	/* GPU_CC GDSCs */
@@ -1630,6 +1725,7 @@
 		qcom,no-status-check-on-disable;
 		qcom,clk-dis-wait-val = <8>;
 		qcom,gds-timeout = <500>;
+		qcom,retain-regs;
 	};
 
 	gpu_gx_domain_addr: syscon@3d91508 {
@@ -1651,6 +1747,7 @@
 		parent-supply = <&VDD_GFX_LEVEL>;
 		vdd_parent-supply = <&VDD_GFX_LEVEL>;
 		qcom,reset-aon-logic;
+		qcom,retain-regs;
 	};
 
 	/* NPU GDSC */
@@ -1660,6 +1757,7 @@
 		regulator-name = "npu_core_gdsc";
 		clock-names = "ahb_clk";
 		clocks = <&clock_gcc GCC_NPU_CFG_AHB_CLK>;
+		qcom,retain-regs;
 	};
 
 	qcom,sps {
@@ -1676,7 +1774,7 @@
 		clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
-		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	mvs0c_gdsc: qcom,gdsc@abf0bf8 {
@@ -1687,7 +1785,7 @@
 		clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
-		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	mvs1_gdsc: qcom,gdsc@abf0d98 {
@@ -1699,6 +1797,7 @@
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
 		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	mvs1c_gdsc: qcom,gdsc@abf0c98 {
@@ -1709,6 +1808,7 @@
 		clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,retain-regs;
 	};
 
 	spmi_bus: qcom,spmi@c440000 {
@@ -1720,7 +1820,7 @@
 		      <0xc40a000 0x26000>;
 		reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
 		interrupt-names = "periph_irq";
-		interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,ee = <0>;
 		qcom,channel = <0>;
 		#address-cells = <2>;
@@ -1730,10 +1830,34 @@
 		cell-index = <0>;
 	};
 
+	ufs_ice: ufsice@1d90000 {
+		compatible = "qcom,ice";
+		reg = <0x1d90000 0x8000>;
+		qcom,enable-ice-clk;
+		clock-names = "ufs_core_clk", "bus_clk",
+				"iface_clk", "ice_core_clk";
+		clocks = <&clock_gcc GCC_UFS_PHY_AXI_CLK>,
+			<&clock_gcc GCC_UFS_1X_CLKREF_EN>,
+			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
+			<&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+		qcom,op-freq-hz = <0>, <0>, <0>, <300000000>;
+		vdd-hba-supply = <&ufs_phy_gdsc>;
+		qcom,msm-bus,name = "ufs_ice_noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<1 650 0 0>,    /* No vote */
+				<1 650 1000 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN",
+					"MAX";
+		qcom,instance-type = "ufs";
+	};
+
 	ufsphy_mem: ufsphy_mem@1d87000 {
 		reg = <0x1d87000 0xe00>; /* PHY regs */
 		reg-names = "phy_mem";
 		#phy-cells = <0>;
+		ufs-qcom-crypto = <&ufs_ice>;
 
 		lanes-per-direction = <2>;
 
@@ -1753,6 +1877,7 @@
 		interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
 		phys = <&ufsphy_mem>;
 		phy-names = "ufsphy";
+		ufs-qcom-crypto = <&ufs_ice>;
 
 		lanes-per-direction = <2>;
 		dev-ref-clk-freq = <0>; /* 19.2 MHz */
@@ -2062,9 +2187,9 @@
 	spss_utils: qcom,spss_utils {
 		compatible = "qcom,spss-utils";
 		/* spss fuses physical address */
-		qcom,spss-fuse1-addr = <0x007841c4>;
+		qcom,spss-fuse1-addr = <0x00780234>;
 		qcom,spss-fuse1-bit = <27>;
-		qcom,spss-fuse2-addr = <0x007841c4>;
+		qcom,spss-fuse2-addr = <0x00780234>;
 		qcom,spss-fuse2-bit = <26>;
 		qcom,spss-dev-firmware-name  = "spss1d";  /* 8 chars max */
 		qcom,spss-test-firmware-name = "spss1t";  /* 8 chars max */
@@ -2111,6 +2236,7 @@
 
 	ipa_hw: qcom,ipa@1e00000 {
 		compatible = "qcom,ipa";
+		mboxes = <&qmp_aop 0>;
 		reg =
 			<0x1e00000 0x84000>,
 			<0x1e04000 0x23000>;
@@ -2176,6 +2302,12 @@
 		qcom,throughput-threshold = <600 2500 5000>;
 		qcom,scaling-exceptions = <>;
 
+		qcom,entire-ipa-block-size = <0x100000>;
+		qcom,register-collection-on-crash;
+		qcom,testbus-collection-on-crash;
+		qcom,non-tn-collection-on-crash;
+		qcom,secure-debug-check-action = <0>;
+
 		ipa_smmu_ap: ipa_smmu_ap {
 			compatible = "qcom,ipa-smmu-ap-cb";
 			iommus = <&apps_smmu 0x5C0 0x0>;
@@ -2387,7 +2519,8 @@
 	qcom,msm-eud@ff0000 {
 		compatible = "qcom,msm-eud";
 		interrupt-names = "eud_irq";
-		interrupts = <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-parent = <&pdc>;
+		interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
 		reg = <0x088E0000 0x2000>,
 			<0x088E2000 0x1000>;
 		reg-names = "eud_base", "eud_mode_mgr2";
@@ -2420,7 +2553,7 @@
 		qcom,complete-ramdump;
 
 		/* Inputs from lpass */
-		interrupts-extended = <&pdc 96 IRQ_TYPE_LEVEL_HIGH>,
+		interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
 				<&adsp_smp2p_in 0 0>,
 				<&adsp_smp2p_in 2 0>,
 				<&adsp_smp2p_in 1 0>,
@@ -2791,13 +2924,11 @@
 		compatible = "qcom,pil-tz-generic";
 		reg = <0x5c00000 0x4000>;
 
-		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		vdd_cx-supply = <&L11A_LEVEL>;
 		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 0>;
-		vdd_mx-supply = <&VDD_MX_LEVEL>;
+		vdd_mx-supply = <&L4A_LEVEL>;
 		qcom,vdd_mx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 0>;
-
 		qcom,proxy-reg-names = "vdd_cx", "vdd_mx";
-		qcom,keep-proxy-regs-on;
 
 		clocks = <&clock_rpmh RPMH_CXO_CLK>;
 		clock-names = "xo";
@@ -2812,6 +2943,7 @@
 		status = "ok";
 		memory-region = <&pil_slpi_mem>;
 		qcom,complete-ramdump;
+		qcom,signal-aop;
 
 		/* Inputs from ssc */
 		interrupts-extended = <&pdc 9 IRQ_TYPE_LEVEL_HIGH>,
@@ -2830,6 +2962,7 @@
 		qcom,smem-states = <&dsps_smp2p_out 0>;
 		qcom,smem-state-names = "qcom,force-stop";
 
+		mboxes = <&qmp_aop 0>;
 		mbox-names = "slpi-pil";
 	};
 
@@ -3141,9 +3274,8 @@
 
 	qcom,cnss-qca6390@a0000000 {
 		compatible = "qcom,cnss-qca6390";
-		reg = <0xa0000000 0x10000000>,
-		      <0xb0000000 0x10000>;
-		reg-names = "smmu_iova_base", "smmu_iova_ipa";
+		reg = <0xb0000000 0x10000>;
+		reg-names = "smmu_iova_ipa";
 		wlan-en-gpio = <&tlmm 20 0>;
 		pinctrl-names = "wlan_en_active", "wlan_en_sleep";
 		pinctrl-0 = <&cnss_wlan_en_active>;
@@ -3254,6 +3386,13 @@
 				mhi,brstmode = <2>;
 			};
 		};
+
+		mhi_devices {
+			mhi_qrtr {
+				mhi,chan = "IPCR";
+				qcom,net-id = <0>;
+			};
+		};
 	};
 };
 
@@ -3269,7 +3408,16 @@
 
 	cnss_pci: cnss_pci {
 		reg = <0 0 0 0 0>;
-		qcom,iommu-dma = "disabled";
+		qcom,iommu-group = <&cnss_pci_iommu_group>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		cnss_pci_iommu_group: cnss_pci_iommu_group {
+			qcom,iommu-dma-addr-pool = <0xa0000000 0x10000000>;
+			qcom,iommu-dma = "fastmap";
+			qcom,iommu-pagetable = "coherent";
+		};
 	};
 };
 
@@ -3298,7 +3446,7 @@
 		reg = <0x64>;
 		fmint-gpio = <&tlmm 51 0>;
 		vdd-supply = <&pm8150a_bob>;
-		rtc6226,vdd-supply-voltage = <3300000 3300000>;
+		rtc6226,vdd-supply-voltage = <3296000 3296000>;
 		vio-supply = <&pm8150_s4>;
 		rtc6226,vio-supply-voltage = <1800000 1800000 >;
 	};
diff --git a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
index 8c2dc63..dfb2644 100644
--- a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
@@ -9,12 +9,14 @@
 		compatible = "qcom,gdsc";
 		reg = <0x177004 0x4>;
 		regulator-name = "ufs_phy_gdsc";
+		status = "disabled";
 	};
 
 	usb30_prim_gdsc: qcom,gdsc@10f004 {
 		compatible = "qcom,gdsc";
 		reg = <0x10f004 0x4>;
 		regulator-name = "usb30_prim_gdsc";
+		status = "disabled";
 	};
 
 	hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc: qcom,gdsc@17d050 {
@@ -23,6 +25,7 @@
 		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc";
 		qcom,no-status-check-on-disable;
 		qcom,gds-timeout = <500>;
+		status = "disabled";
 	};
 
 	hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc: qcom,gdsc@17d058 {
@@ -31,6 +34,7 @@
 		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc";
 		qcom,no-status-check-on-disable;
 		qcom,gds-timeout = <500>;
+		status = "disabled";
 	};
 
 	hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc: qcom,gdsc@17d054 {
@@ -39,50 +43,61 @@
 		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc";
 		qcom,no-status-check-on-disable;
 		qcom,gds-timeout = <500>;
+		status = "disabled";
 	};
 
 	/* CAM_CC GDSCs */
 	bps_gdsc: qcom,gdsc@ad07004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad07004 0x4>;
 		regulator-name = "bps_gdsc";
+		status = "disabled";
 	};
 
 	ipe_0_gdsc: qcom,gdsc@ad08004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad08004 0x4>;
 		regulator-name = "ipe_0_gdsc";
+		status = "disabled";
 	};
 
 	ipe_1_gdsc: qcom,gdsc@ad09004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad09004 0x4>;
 		regulator-name = "ipe_1_gdsc";
+		status = "disabled";
 	};
 
 	ife_0_gdsc: qcom,gdsc@ad0a004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad0a004 0x4>;
 		regulator-name = "ife_0_gdsc";
+		status = "disabled";
 	};
 
 	ife_1_gdsc: qcom,gdsc@ad0b004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad0b004 0x4>;
 		regulator-name = "ife_1_gdsc";
+		status = "disabled";
 	};
 
 	titan_top_gdsc: qcom,gdsc@ad0c1c4 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad0c1c4 0x4>;
 		regulator-name = "titan_top_gdsc";
+		status = "disabled";
 	};
 
 	/* DISP_CC GDSC */
 	mdss_core_gdsc: qcom,gdsc@af03000 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xaf03000 0x4>;
 		regulator-name = "mdss_core_gdsc";
+		qcom,support-hw-trigger;
+		proxy-supply = <&mdss_core_gdsc>;
+		qcom,proxy-consumer-enable;
+		status = "disabled";
 	};
 
 	/* GPU_CC GDSCs */
@@ -92,13 +107,14 @@
 	};
 
 	gpu_cx_gdsc: qcom,gdsc@3d9106c {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0x3d9106c 0x4>;
 		regulator-name = "gpu_cx_gdsc";
 		hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
 		qcom,no-status-check-on-disable;
 		qcom,clk-dis-wait-val = <8>;
 		qcom,gds-timeout = <500>;
+		status = "disabled";
 	};
 
 	gpu_gx_domain_addr: syscon@3d91508 {
@@ -112,12 +128,13 @@
 	};
 
 	gpu_gx_gdsc: qcom,gdsc@3d9100c {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0x3d9100c 0x4>;
 		regulator-name = "gpu_gx_gdsc";
 		domain-addr = <&gpu_gx_domain_addr>;
 		sw-reset = <&gpu_gx_sw_reset>;
 		qcom,reset-aon-logic;
+		status = "disabled";
 	};
 
 	/* NPU GDSC */
@@ -125,24 +142,28 @@
 		compatible = "regulator-fixed";
 		reg = <0x9981004 0x4>;
 		regulator-name = "npu_core_gdsc";
+		status = "disabled";
 	};
 
 	/* VIDEO_CC GDSCs */
 	mvsc_gdsc: qcom,gdsc@ab00814 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xab00814 0x4>;
 		regulator-name = "mvsc_gdsc";
+		status = "disabled";
 	};
 
 	mvs0_gdsc: qcom,gdsc@ab00874 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xab00874 0x4>;
 		regulator-name = "mvs0_gdsc";
+		status = "disabled";
 	};
 
 	mvs1_gdsc: qcom,gdsc@ab008b4 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xab008b4 0x4>;
 		regulator-name = "mvs1_gdsc";
+		status = "disabled";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/lito-ion.dtsi b/arch/arm64/boot/dts/qcom/lito-ion.dtsi
new file mode 100644
index 0000000..e68f421
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/lito-ion.dtsi
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+&soc {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		system_heap: qcom,ion-heap@25 {
+			reg = <25>;
+			qcom,ion-heap-type = "SYSTEM";
+		};
+
+		system_secure_heap: qcom,ion-heap@9 {
+			reg = <9>;
+			qcom,ion-heap-type = "SYSTEM_SECURE";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/lito-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/lito-pinctrl.dtsi
index 53d10bc..203e7a6 100644
--- a/arch/arm64/boot/dts/qcom/lito-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-pinctrl.dtsi
@@ -13,6 +13,103 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 
+		qupv3_se2_2uart_pins: qupv3_se2_2uart_pins {
+			qupv3_se2_2uart_active: qupv3_se2_2uart_active {
+				mux {
+					pins = "gpio36", "gpio37";
+					function = "qup02";
+				};
+
+				config {
+					pins = "gpio36", "gpio37";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se2_2uart_sleep: qupv3_se2_2uart_sleep {
+				mux {
+					pins = "gpio36", "gpio37";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio36", "gpio37";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		qupv3_se5_4uart_pins: qupv3_se5_4uart_pins {
+			qupv3_se5_ctsrx: qupv3_se5_ctsrx {
+				mux {
+					pins = "gpio38", "gpio41";
+					function = "qup05";
+				};
+
+				config {
+					pins = "gpio38", "gpio41";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se5_rts: qupv3_se5_rts {
+				mux {
+					pins = "gpio39";
+					function = "qup05";
+				};
+
+				config {
+					pins = "gpio39";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+
+			qupv3_se5_tx: qupv3_se5_tx {
+				mux {
+					pins = "gpio40";
+					function = "qup05";
+				};
+
+				config {
+					pins = "gpio40";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se8_2uart_pins: qupv3_se8_2uart_pins {
+			qupv3_se8_2uart_active: qupv3_se8_2uart_active {
+				mux {
+					pins = "gpio51", "gpio52";
+					function = "qup12";
+				};
+
+				config {
+					pins = "gpio51", "gpio52";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se8_2uart_sleep: qupv3_se8_2uart_sleep {
+				mux {
+					pins = "gpio51", "gpio52";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio51", "gpio52";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
 		ufs_dev_reset_assert: ufs_dev_reset_assert {
 			config {
 				pins = "ufs_reset";
diff --git a/arch/arm64/boot/dts/qcom/lito-qupv3.dtsi b/arch/arm64/boot/dts/qcom/lito-qupv3.dtsi
new file mode 100644
index 0000000..e8eb2f4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/lito-qupv3.dtsi
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+&soc {
+	/* QUPv3_0 */
+	qupv3_0: qcom,qupv3_0_geni_se@8c0000 {
+		compatible = "qcom,qupv3-geni-se";
+		reg = <0x8c0000 0x2000>;
+		qcom,bus-mas-id = <MSM_BUS_MASTER_QUP_0>;
+		qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH0>;
+		iommus = <&apps_smmu 0x4e3 0x0>;
+		qcom,iommu-dma-addr-pool = <0x40000000 0xc0000000>;
+		qcom,iommu-dma = "bypass";
+	};
+
+	/* Debug UART Instance for RUMI platform */
+	qupv3_se2_2uart: qcom,qup_uart@888000 {
+		compatible = "qcom,msm-geni-console";
+		reg = <0x888000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>,
+			<&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se2_2uart_active>;
+		pinctrl-1 = <&qupv3_se2_2uart_sleep>;
+		interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	/* 4-wire UART */
+	qupv3_se5_4uart: qcom,qup_uart@894000 {
+		compatible = "qcom,msm-geni-serial-hs";
+		reg = <0x894000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>,
+			<&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se5_ctsrx>, <&qupv3_se5_rts>,
+						<&qupv3_se5_tx>;
+		pinctrl-1 = <&qupv3_se5_ctsrx>, <&qupv3_se5_rts>,
+						<&qupv3_se5_tx>;
+		interrupts-extended = <&intc GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>,
+				<&tlmm 41 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,wrapper-core = <&qupv3_0>;
+		qcom,wakeup-byte = <0xFD>;
+		status = "disabled";
+	};
+
+	/* QUPv3_1 */
+	qupv3_1: qcom,qupv3_1_geni_se@9c0000 {
+		compatible = "qcom,qupv3-geni-se";
+		reg = <0x9c0000 0x2000>;
+		qcom,bus-mas-id = <MSM_BUS_MASTER_QUP_1>;
+		qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH1>;
+		iommus = <&apps_smmu 0x023 0x0>;
+		qcom,iommu-dma-addr-pool = <0x40000000 0xc0000000>;
+		qcom,iommu-dma = "bypass";
+	};
+
+	/* 2-wire UART */
+	qupv3_se8_2uart: qcom,qup_uart@988000 {
+		compatible = "qcom,msm-geni-console";
+		reg = <0x988000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>,
+			<&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se8_2uart_active>;
+		pinctrl-1 = <&qupv3_se8_2uart_sleep>;
+		interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/lito-regulators.dtsi b/arch/arm64/boot/dts/qcom/lito-regulators.dtsi
index 8d547e0..637a5a9 100644
--- a/arch/arm64/boot/dts/qcom/lito-regulators.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-regulators.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright  (c) 2018 , The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
@@ -212,7 +212,7 @@
 		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
 	};
 
-	S7C: pm8150a_s7: regulator-pm8150a-s7 {
+	VDD_MSS_LEVEL: S7C: pm8150a_s7: regulator-pm8150a-s7 {
 		compatible = "qcom,stub-regulator";
 		regulator-name = "pm8150a_s7";
 		regulator-min-microvolt = <1128000>;
diff --git a/arch/arm64/boot/dts/qcom/lito-rumi.dtsi b/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
index ce13c7f..9cb6c48 100644
--- a/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
@@ -35,6 +35,27 @@
 	usb_nop_phy: usb_nop_phy {
 		compatible = "usb-nop-xceiv";
 	};
+
+	cxo: bi_tcxo {
+		compatible = "fixed-factor-clock";
+		clocks = <&xo_board>;
+		clock-mult = <1>;
+		clock-div = <2>;
+		#clock-cells = <0>;
+	};
+
+	cxo_a: bi_tcxo_ao {
+		compatible = "fixed-factor-clock";
+		clocks = <&xo_board>;
+		clock-mult = <1>;
+		clock-div = <2>;
+		#clock-cells = <0>;
+	};
+};
+
+&rpmhcc {
+	compatible = "qcom,dummycc";
+	clock-output-names = "rpmh_clocks";
 };
 
 &usb0 {
@@ -44,6 +65,15 @@
 	};
 };
 
+&qupv3_se8_2uart {
+	status = "disabled";
+};
+
+/* RUMI UART console */
+&qupv3_se2_2uart {
+	status = "ok";
+};
+
 &wdog {
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/lito-usb.dtsi b/arch/arm64/boot/dts/qcom/lito-usb.dtsi
index 7cc8b61..6cc33c0 100644
--- a/arch/arm64/boot/dts/qcom/lito-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-usb.dtsi
@@ -12,7 +12,9 @@
 		reg = <0x0a600000 0x100000>;
 		reg-names = "core_base";
 
-		qcom,iommu-dma = "disabled";
+		iommus = <&apps_smmu 0xE0 0x0>;
+		qcom,iommu-dma = "atomic";
+		qcom,iommu-dma-addr-pool = <0x90000000 0x60000000>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
diff --git a/arch/arm64/boot/dts/qcom/lito.dtsi b/arch/arm64/boot/dts/qcom/lito.dtsi
index e7063af..609691a 100644
--- a/arch/arm64/boot/dts/qcom/lito.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito.dtsi
@@ -14,6 +14,7 @@
 #include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/clock/qcom,videocc-lito.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
 #include <dt-bindings/soc/qcom,ipcc.h>
 #include <dt-bindings/soc/qcom,rpmh-rsc.h>
 
@@ -24,6 +25,7 @@
 	interrupt-parent = <&intc>;
 
 	aliases {
+		serial0 = &qupv3_se2_2uart;	/* RUMI */
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
 	};
 
@@ -470,6 +472,13 @@
 			no-map;
 			reg = <0x0 0x9f400000 0x0 0xc00000>;
 		};
+		cdsp_mem: cdsp_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
+			reusable;
+			alignment = <0x0 0x400000>;
+			size = <0x0 0x400000>;
+		};
 
 		/* global autoconfigured region for contiguous allocations */
 		linux,cma {
@@ -836,40 +845,18 @@
 		sleep_clk: sleep-clk {
 			compatible = "fixed-clock";
 			clock-output-names = "chip_sleep_clk";
-			clock-frequency = <32764>;
+			clock-frequency = <32000>;
 			#clock-cells = <0>;
 		};
 	};
 
-	cxo: bi_tcxo {
-		compatible = "fixed-factor-clock";
-		clocks = <&xo_board>;
-		clock-mult = <1>;
-		clock-div = <2>;
-		#clock-cells = <0>;
-	};
-
-	cxo_a: bi_tcxo_ao {
-		compatible = "fixed-factor-clock";
-		clocks = <&xo_board>;
-		clock-mult = <1>;
-		clock-div = <2>;
-		#clock-cells = <0>;
-	};
-
-	rpmhcc: qcom,rpmhclk {
-		compatible = "qcom,dummycc";
-		clock-output-names = "rpmh_clocks";
-		#clock-cells = <1>;
-	};
-
 	aopcc: qcom,aopclk {
 		compatible = "qcom,dummycc";
 		clock-output-names = "qdss_clocks";
 		#clock-cells = <1>;
 	};
 
-	gcc: qcom,gcc {
+	gcc: qcom,gcc@100000 {
 		compatible = "qcom,gcc-lito", "syscon";
 		reg = <0x100000 0x1f0000>;
 		reg-names = "cc_base";
@@ -879,6 +866,50 @@
 		#reset-cells = <1>;
 	};
 
+	camcc: qcom,camcc@ad00000 {
+		compatible = "qcom,lito-camcc", "syscon";
+		reg = <0xad00000 0x10000>;
+		reg-names = "cc_base";
+		vdd_mx-supply = <&VDD_MX_LEVEL>;
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		clock-names = "cfg_ahb_clk";
+		clocks = <&gcc GCC_CAMERA_AHB_CLK>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	videocc: qcom,videocc {
+		compatible = "qcom,lito-videocc", "syscon";
+		reg = <0x0ab00000 0x10000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		clock-names = "cfg_ahb_clk";
+		clocks = <&gcc GCC_VIDEO_AHB_CLK>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	dispcc: qcom,dispcc {
+		compatible = "qcom,lito-dispcc", "syscon";
+		reg = <0xaf00000 0x20000>;
+		reg-names = "cc_base";
+		clock-names = "cfg_ahb_clk";
+		clocks = <&gcc GCC_DISP_AHB_CLK>;
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	gpucc: qcom,gpucc {
+		compatible = "qcom,gpucc-lito", "syscon";
+		reg = <0x3d90000 0x9000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		vdd_mx-supply = <&VDD_MX_LEVEL>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
 	ufsphy_mem: ufsphy_mem@1d87000 {
 		reg = <0x1d87000 0xe00>; /* PHY regs */
 		reg-names = "phy_mem";
@@ -1012,34 +1043,6 @@
 		#reset-cells = <1>;
 	};
 
-	videocc: qcom,videocc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "videocc_clocks";
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-	};
-
-	camcc: qcom,camcc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "camcc_clocks";
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-	};
-
-	dispcc: qcom,dispcc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "dispcc_clocks";
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-	};
-
-	gpucc: qcom,gpucc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "gpucc_clocks";
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-	};
-
 	apps_rsc: rsc@18200000 {
 		label = "apps_rsc";
 		compatible = "qcom,rpmh-rsc";
@@ -1060,6 +1063,11 @@
 		system_pm {
 			compatible = "qcom,system-pm";
 		};
+
+		rpmhcc: qcom,rpmhclk {
+			compatible = "qcom,lito-rpmh-clk";
+			#clock-cells = <1>;
+		};
 	};
 
 	disp_rsc: rsc@af20000 {
@@ -1100,7 +1108,7 @@
 	};
 
 	cache-controller@9200000 {
-		compatible = "qcom,lito-llcc";
+		compatible = "qcom,llcc-v1";
 		reg = <0x9200000 0xd0000> , <0x9600000 0x50000>;
 		reg-names = "llcc_base", "llcc_broadcast_base";
 		interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
@@ -1309,6 +1317,135 @@
 		mbox-names = "aop";
 	};
 
+	pil_modem: qcom,mss@4080000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x4080000 0x100>;
+
+		clocks = <&rpmhcc RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
+		vdd_mss-supply = <&VDD_MSS_LEVEL>;
+		qcom,vdd_mss-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
+		qcom,proxy-reg-names = "vdd_cx", "vdd_mss";
+
+		qcom,firmware-name = "modem";
+		memory-region = <&modem_wlan_mem>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,sysmon-id = <0>;
+		qcom,ssctl-instance-id = <0x12>;
+		qcom,pas-id = <4>;
+		qcom,smem-id = <421>;
+		qcom,minidump-id = <3>;
+		qcom,complete-ramdump;
+
+		/* Inputs from mss */
+		interrupts-extended = <&intc GIC_SPI 266 IRQ_TYPE_EDGE_RISING>,
+				      <&mpss_smp2p_in 0 IRQ_TYPE_NONE>,
+				      <&mpss_smp2p_in 1 IRQ_TYPE_NONE>,
+				      <&mpss_smp2p_in 2 IRQ_TYPE_NONE>,
+				      <&mpss_smp2p_in 3 IRQ_TYPE_NONE>,
+				      <&mpss_smp2p_in 7 IRQ_TYPE_NONE>;
+
+		interrupt-names = "qcom,wdog",
+				  "qcom,err-fatal",
+				  "qcom,err-ready",
+				  "qcom,proxy-unvote",
+				  "qcom,stop-ack",
+				  "qcom,shutdown-ack";
+
+		/* Outputs to mss */
+		qcom,smem-states = <&mpss_smp2p_out 0>;
+		qcom,smem-state-names = "qcom,force-stop";
+
+		mbox-names = "mss-pil";
+	};
+
+	qcom,lpass@3000000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x3000000 0x00100>;
+
+		vdd_lpi_cx-supply = <&L8A_LEVEL>;
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 0>;
+		vdd_lpi_mx-supply = <&L4A_LEVEL>;
+		qcom,vdd_mx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 0>;
+		qcom,proxy-reg-names = "vdd_lpi_cx", "vdd_lpi_mx";
+
+		clocks = <&rpmhcc RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <1>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <423>;
+		qcom,sysmon-id = <1>;
+		qcom,ssctl-instance-id = <0x14>;
+		qcom,firmware-name = "adsp";
+		memory-region = <&pil_adsp_mem>;
+		qcom,complete-ramdump;
+
+		/* Inputs from lpass */
+		interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+				      <&adsp_smp2p_in 0 0>,
+				      <&adsp_smp2p_in 1 0>,
+				      <&adsp_smp2p_in 2 0>,
+				      <&adsp_smp2p_in 3 0>;
+
+		interrupt-names = "qcom,wdog",
+				  "qcom,err-fatal",
+				  "qcom,err-ready",
+				  "qcom,proxy-unvote",
+				  "qcom,stop-ack";
+
+		/* Outputs to lpass */
+		qcom,smem-states = <&adsp_smp2p_out 0>;
+		qcom,smem-state-names = "qcom,force-stop";
+
+		mbox-names = "adsp-pil";
+	};
+
+	qcom,turing@8300000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x8300000 0x100000>;
+
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
+
+		clocks = <&rpmhcc RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <18>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <601>;
+		qcom,sysmon-id = <7>;
+		qcom,ssctl-instance-id = <0x17>;
+		qcom,firmware-name = "cdsp";
+		memory-region = <&pil_cdsp_mem>;
+		qcom,complete-ramdump;
+
+		/* Inputs from turing */
+		interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+				<&cdsp_smp2p_in 0 0>,
+				<&cdsp_smp2p_in 2 0>,
+				<&cdsp_smp2p_in 1 0>,
+				<&cdsp_smp2p_in 3 0>;
+
+		interrupt-names = "qcom,wdog",
+				"qcom,err-fatal",
+				"qcom,proxy-unvote",
+				"qcom,err-ready",
+				"qcom,stop-ack";
+
+		/* Outputs to turing */
+		qcom,smem-states = <&cdsp_smp2p_out 0>;
+		qcom,smem-state-names = "qcom,force-stop";
+
+		mbox-names = "cdsp-pil";
+	};
 };
 
 #include "lito-pinctrl.dtsi"
@@ -1318,6 +1455,7 @@
 #include "lito-regulators.dtsi"
 #include "lito-smp2p.dtsi"
 #include "lito-usb.dtsi"
+#include "lito-ion.dtsi"
 
 &ufs_phy_gdsc {
 	status = "ok";
@@ -1340,38 +1478,58 @@
 };
 
 &bps_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
 &ipe_0_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
 &ipe_1_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
 &ife_0_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
 	status = "ok";
 };
 
 &ife_1_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
 	status = "ok";
 };
 
 &titan_top_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
 	status = "ok";
 };
 
 &mdss_core_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_DISP_AHB_CLK>;
 	status = "ok";
 };
 
 &gpu_cx_gdsc {
+	parent-supply = <&VDD_CX_LEVEL>;
 	status = "ok";
 };
 
 &gpu_gx_gdsc {
+	parent-supply = <&VDD_GFX_LEVEL>;
+	vdd_parent-supply = <&VDD_GFX_LEVEL>;
 	status = "ok";
 };
 
@@ -1380,13 +1538,23 @@
 };
 
 &mvsc_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_VIDEO_AHB_CLK>;
 	status = "ok";
 };
 
 &mvs0_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_VIDEO_AHB_CLK>;
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
 &mvs1_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_VIDEO_AHB_CLK>;
+	qcom,support-hw-trigger;
 	status = "ok";
 };
+
+#include "lito-qupv3.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
index 4137130..20f1a38 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
@@ -15,6 +15,7 @@
 		qcom,dynamic;
 		qcom,skip-init;
 		qcom,use-3-lvl-tables;
+		qcom,no-dynamic-asid;
 		#global-interrupts = <2>;
 		#size-cells = <1>;
 		#address-cells = <1>;
@@ -40,6 +41,22 @@
 				<GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>;
 
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_GPU_TCU>,
+			<MSM_BUS_SLAVE_EBI_CH0>,
+			<0 0>,
+			<MSM_BUS_MASTER_GPU_TCU>,
+			<MSM_BUS_SLAVE_EBI_CH0>,
+			<0 1000>;
+
+		qcom,actlr =
+			/* All CBs of GFX: +15 deep PF */
+			<0x2 0x400 0x303>,
+			<0x4 0x400 0x303>,
+			<0x5 0x400 0x303>,
+			<0x7 0x400 0x303>,
+			<0x0 0x401 0x303>;
+
 		gfx_0_tbu: gfx_0_tbu@3dc5000 {
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x3DC5000 0x1000>,
@@ -179,6 +196,23 @@
 			<MSM_BUS_SLAVE_IMEM_CFG>,
 			<0 1000>;
 
+		qcom,actlr =
+			/* For HF-0 TBU +3 deep PF */
+			<0x800 0x3ff 0x103>,
+			/* For HF-1 TBU +3 deep PF */
+			<0xC00 0x3ff 0x103>,
+			/* For SF-0 TBU +3 deep PF */
+			<0x2000 0x3ff 0x103>,
+			/* For SF-1 TBU +3 deep PF */
+			<0x2400 0x3ff 0x103>,
+			/* For NPU +3 deep PF */
+			<0x1081 0x400 0x103>,
+			<0x1082 0x400 0x103>,
+			<0x1085 0x400 0x103>,
+			<0x10a1 0x400 0x103>,
+			<0x10a2 0x400 0x103>,
+			<0x10a5 0x400 0x103>;
+
 		anoc_1_tbu: anoc_1_tbu@15185000 {
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x15185000 0x1000>,
@@ -385,22 +419,27 @@
 	kgsl_iommu_test_device {
 		compatible = "iommu-debug-test";
 		iommus = <&kgsl_smmu 0x7 0>;
+		qcom,iommu-dma = "disabled";
 	};
 
 	kgsl_iommu_coherent_test_device {
+		status = "disabled";
 		compatible = "iommu-debug-test";
 		iommus = <&kgsl_smmu 0x9 0>;
+		qcom,iommu-dma = "disabled";
 		dma-coherent;
 	};
 
 	apps_iommu_test_device {
 		compatible = "iommu-debug-test";
 		iommus = <&apps_smmu 0x21 0>;
+		qcom,iommu-dma = "disabled";
 	};
 
 	apps_iommu_coherent_test_device {
 		compatible = "iommu-debug-test";
 		iommus = <&apps_smmu 0x23 0>;
+		qcom,iommu-dma = "disabled";
 		dma-coherent;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-lito.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-lito.dtsi
index 582bac8..8127b96 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-lito.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-lito.dtsi
@@ -7,7 +7,6 @@
 
 &soc {
 	kgsl_smmu: kgsl-smmu@3da0000 {
-		status = "disabled";
 		compatible = "qcom,qsmmu-v500";
 		reg = <0x3da0000 0x10000>,
 		      <0x3dc2000 0x20>;
@@ -20,15 +19,15 @@
 		#size-cells = <1>;
 		#address-cells = <1>;
 		ranges;
-		interrupts = <GIC_SPI 674 IRQ_TYPE_LEVEL_HIGH>,
+		interrupts = <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 686 IRQ_TYPE_LEVEL_HIGH>;
+				<GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>;
 		gfx_0_tbu: gfx_0_tbu@3dc5000 {
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x3dc5000 0x1000>,
@@ -205,6 +204,11 @@
 		};
 	};
 
+	kgsl_iommu_test_device {
+		compatible = "iommu-debug-test";
+		iommus = <&kgsl_smmu 0x7 0x400>;
+	};
+
 	apps_iommu_test_device {
 		compatible = "iommu-debug-test";
 		iommus = <&apps_smmu 0x1 0>;
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index ee7dd91..5234049 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -256,11 +256,13 @@
 		sb_7_rx: qcom,msm-dai-q6-sb-7-rx {
 			compatible = "qcom,msm-dai-q6-dev";
 			qcom,msm-dai-q6-dev-id = <16398>;
+			qcom,msm-dai-q6-slim-dev-id = <0>;
 		};
 
 		sb_7_tx: qcom,msm-dai-q6-sb-7-tx {
 			compatible = "qcom,msm-dai-q6-dev";
 			qcom,msm-dai-q6-dev-id = <16399>;
+			qcom,msm-dai-q6-slim-dev-id = <0>;
 		};
 
 		bt_sco_rx: qcom,msm-dai-q6-bt-sco-rx {
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index cd3865e..8c86c41 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -399,7 +399,7 @@
 		};
 
 		intc: interrupt-controller@9bc0000 {
-			compatible = "arm,gic-v3";
+			compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
 			#interrupt-cells = <3>;
 			interrupt-controller;
 			#redistributor-regions = <1>;
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index e81f65f..29495c1 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -48,6 +48,8 @@
 			compatible = "qcom,spmi-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+			io-channels = <&pm8150_vadc ADC_DIE_TEMP>;
+			io-channel-names = "thermal";
 			#thermal-sensor-cells = <0>;
 			qcom,temperature-threshold-set = <1>;
 		};
diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
index e4a9753..c427d85 100644
--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
@@ -31,6 +31,8 @@
 			compatible = "qcom,spmi-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+			io-channels = <&pm8150b_vadc ADC_DIE_TEMP>;
+			io-channel-names = "thermal";
 			#thermal-sensor-cells = <0>;
 			qcom,temperature-threshold-set = <1>;
 		};
@@ -256,7 +258,27 @@
 			qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */
 						 <9000 3000>, /* 9V @ 3A */
 						 <12000 2250>; /* 12V @ 2.25A */
-			status = "disabled";
+		};
+
+		pm8150b_bcl: bcl@1d00 {
+			compatible = "qcom,bcl-v5";
+			reg = <0x1d00 0x100>;
+			interrupts = <0x2 0x1d 0x0 IRQ_TYPE_NONE>,
+					<0x2 0x1d 0x1 IRQ_TYPE_NONE>,
+					<0x2 0x1d 0x2 IRQ_TYPE_NONE>,
+					<0x2 0x1d 0x3 IRQ_TYPE_NONE>,
+					<0x2 0x1d 0x4 IRQ_TYPE_NONE>;
+			interrupt-names = "bcl-ibat-lvl0",
+						"bcl-ibat-lvl1",
+						"bcl-vbat-lvl0",
+						"bcl-vbat-lvl1",
+						"bcl-vbat-lvl2";
+			#thermal-sensor-cells = <1>;
+		};
+
+		bcl_soc: bcl-soc {
+			compatible = "qcom,msm-bcl-soc";
+			#thermal-sensor-cells = <0>;
 		};
 
 		pm8150b_fg: qpnp,fg {
@@ -440,7 +462,7 @@
 				     <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
 			interrupt-names = "hap-sc-irq", "hap-play-irq";
 			qcom,actuator-type = "lra";
-			qcom,vmax-mv = <3400>;
+			qcom,vmax-mv = <3600>;
 			qcom,play-rate-us = <6667>;
 			qcom,lra-resonance-sig-shape = "sine";
 			qcom,lra-auto-resonance-mode = "qwd";
@@ -450,9 +472,11 @@
 				/* CLICK */
 				qcom,effect-id = <0>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [3e 3e 3e];
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
 				qcom,wf-play-rate-us = <6667>;
-				qcom,wf-brake-pattern = [01 00 00 00];
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
+				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
 
@@ -460,9 +484,10 @@
 				/* DOUBLE CLICK */
 				qcom,effect-id = <1>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [7e 7e 02 02 02 02 02 02];
-				qcom,wf-play-rate-us = <7143>;
-				qcom,wf-repeat-count = <2>;
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
+				qcom,wf-play-rate-us = <6667>;
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
 				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
@@ -471,8 +496,11 @@
 				/* TICK */
 				qcom,effect-id = <2>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [7e 7e];
-				qcom,wf-play-rate-us = <4000>;
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
+				qcom,wf-play-rate-us = <6667>;
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
+				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
 
@@ -480,8 +508,11 @@
 				/* THUD */
 				qcom,effect-id = <3>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [7e 7e 7e];
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
 				qcom,wf-play-rate-us = <6667>;
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
+				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
 
@@ -489,8 +520,11 @@
 				/* POP */
 				qcom,effect-id = <4>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [7e 7e];
-				qcom,wf-play-rate-us = <5000>;
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
+				qcom,wf-play-rate-us = <6667>;
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
+				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
 
@@ -498,9 +532,11 @@
 				/* HEAVY CLICK */
 				qcom,effect-id = <5>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [7e 7e 7e];
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
 				qcom,wf-play-rate-us = <6667>;
-				qcom,wf-brake-pattern = [03 00 00 00];
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
+				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
 		};
@@ -534,4 +570,98 @@
 			};
 		};
 	};
+
+	pm8150b-ibat-lvl0 {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&pm8150b_bcl 0>;
+
+		trips {
+			ibat_lvl0: ibat-lvl0 {
+				temperature = <4500>;
+				hysteresis = <200>;
+				type = "passive";
+			};
+		};
+	};
+
+	pm8150b-ibat-lvl1 {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&pm8150b_bcl 1>;
+
+		trips {
+			ibat_lvl1: ibat-lvl1 {
+				temperature = <5000>;
+				hysteresis = <200>;
+				type = "passive";
+			};
+		};
+	};
+
+	pm8150b-vbat-lvl0 {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&pm8150b_bcl 2>;
+		tracks-low;
+
+		trips {
+			vbat_lvl0: vbat-lvl0 {
+				temperature = <3000>;
+				hysteresis = <200>;
+				type = "passive";
+			};
+		};
+	};
+
+	pm8150b-vbat-lvl1 {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&pm8150b_bcl 3>;
+		tracks-low;
+
+		trips {
+			vbat_lvl1: vbat-lvl1 {
+				temperature = <2800>;
+				hysteresis = <200>;
+				type = "passive";
+			};
+		};
+	};
+
+	pm8150b-vbat-lvl2 {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&pm8150b_bcl 4>;
+		tracks-low;
+
+		trips {
+			vbat_lvl2: vbat-lvl2 {
+				temperature = <2600>;
+				hysteresis = <200>;
+				type = "passive";
+			};
+		};
+	};
+
+	soc {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&bcl_soc>;
+		tracks-low;
+
+		trips {
+			soc_trip: soc-trip {
+				temperature = <10>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
index 6d8ca09..da4e4e5 100644
--- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
@@ -31,6 +31,8 @@
 			compatible = "qcom,spmi-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x4 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+			io-channels = <&pm8150l_vadc ADC_DIE_TEMP>;
+			io-channel-names = "thermal";
 			#thermal-sensor-cells = <0>;
 			qcom,temperature-threshold-set = <1>;
 		};
@@ -98,6 +100,18 @@
 			};
 		};
 
+		pm8150l_bcl: bcl@3d00 {
+			compatible = "qcom,bcl-v5";
+			reg = <0x3d00 0x100>;
+			interrupts = <0x4 0x3d 0x0 IRQ_TYPE_NONE>,
+					<0x4 0x3d 0x1 IRQ_TYPE_NONE>,
+					<0x4 0x3d 0x2 IRQ_TYPE_NONE>;
+			interrupt-names = "bcl-vbat-lvl0",
+						"bcl-vbat-lvl1",
+						"bcl-vbat-lvl2";
+			#thermal-sensor-cells = <1>;
+		};
+
 		pm8150l_adc_tm: adc_tm@3500 {
 			compatible = "qcom,adc-tm5";
 			reg = <0x3500 0x100>;
@@ -348,6 +362,14 @@
 			};
 		};
 
+		pm8150l_pwm: qcom,pwms@bc00 {
+			compatible = "qcom,pwm-lpg";
+			reg = <0xbc00 0x200>;
+			reg-names = "lpg-base";
+			#pwm-cells = <2>;
+			qcom,num-lpg-channels = <2>;
+		};
+
 		pm8150l_rgb_led: qcom,leds@d000 {
 			compatible = "qcom,tri-led";
 			reg = <0xd000 0x100>;
@@ -436,4 +458,52 @@
 			};
 		};
 	};
+
+	pm8150l-vph-lvl0 {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&pm8150l_bcl 2>;
+		tracks-low;
+
+		trips {
+			vph_lvl0: vph-lvl0 {
+				temperature = <3000>;
+				hysteresis = <200>;
+				type = "passive";
+			};
+		};
+	};
+
+	pm8150l-vph-lvl1 {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&pm8150l_bcl 3>;
+		tracks-low;
+
+		trips {
+			vph_lvl1: vph-lvl1 {
+				temperature = <2750>;
+				hysteresis = <200>;
+				type = "passive";
+			};
+		};
+	};
+
+	pm8150l-vph-lvl2 {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&pm8150l_bcl 4>;
+		tracks-low;
+
+		trips {
+			vph_lvl2: vph-lvl2 {
+				temperature = <2500>;
+				hysteresis = <200>;
+				type = "passive";
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/smb1390.dtsi b/arch/arm64/boot/dts/qcom/smb1390.dtsi
new file mode 100644
index 0000000..0ba1e3a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/smb1390.dtsi
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+smb1390: qcom,smb1390@10 {
+	compatible = "qcom,i2c-pmic";
+	reg = <0x10>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupt-parent = <&spmi_bus>;
+	interrupts = <0x2 0xC5 0x0 IRQ_TYPE_LEVEL_LOW>;
+	interrupt-names = "smb1390";
+	interrupt-controller;
+	#interrupt-cells = <3>;
+	qcom,periph-map = <0x10>;
+	status = "disabled";
+
+	smb1390_revid: qcom,revid@100 {
+		compatible = "qcom,qpnp-revid";
+		reg = <0x100>;
+	};
+
+	smb1390_charger: qcom,charge_pump {
+		compatible = "qcom,smb1390-charger-psy";
+		qcom,pmic-revid = <&smb1390_revid>;
+		interrupt-parent = <&smb1390>;
+		status = "disabled";
+
+		qcom,core {
+			interrupts = <0x10 0x0 IRQ_TYPE_EDGE_BOTH>,
+				     <0x10 0x1 IRQ_TYPE_EDGE_BOTH>,
+				     <0x10 0x2 IRQ_TYPE_EDGE_BOTH>,
+				     <0x10 0x3 IRQ_TYPE_EDGE_BOTH>,
+				     <0x10 0x4 IRQ_TYPE_EDGE_BOTH>,
+				     <0x10 0x5 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x6 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x7 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "switcher-off-window",
+					  "switcher-off-fault",
+					  "tsd-fault",
+					  "irev-fault",
+					  "vph-ov-hard",
+					  "vph-ov-soft",
+					  "ilim",
+					  "temp-alarm";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index cbd35c0..33cb028 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -1161,6 +1161,9 @@
 				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
 				 <&scif_clk>;
 			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+			       <&dmac2 0x13>, <&dmac2 0x12>;
+			dma-names = "tx", "rx", "tx", "rx";
 			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
 			resets = <&cpg 310>;
 			status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 0cd4446..f60f08b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -951,6 +951,9 @@
 				 <&cpg CPG_CORE R8A77965_CLK_S3D1>,
 				 <&scif_clk>;
 			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+			       <&dmac2 0x13>, <&dmac2 0x12>;
+			dma-names = "tx", "rx", "tx", "rx";
 			power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
 			resets = <&cpg 310>;
 			status = "disabled";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
index eb5e8bd..8954c8c 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
@@ -101,6 +101,7 @@
 	sdio_pwrseq: sdio_pwrseq {
 		compatible = "mmc-pwrseq-simple";
 		reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
+		post-power-on-delay-ms = <10>;
 	};
 };
 
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index 636be7f..87a22fa 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -58,6 +58,7 @@
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
@@ -84,6 +85,7 @@
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -123,6 +125,7 @@
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -143,6 +146,7 @@
 CONFIG_NETFILTER_XT_MATCH_LIMIT=y
 CONFIG_NETFILTER_XT_MATCH_MAC=y
 CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
@@ -177,10 +181,14 @@
 CONFIG_L2TP=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_NETEM=y
 CONFIG_NET_CLS_U32=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_U32=y
 CONFIG_NET_CLS_ACT=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
 CONFIG_CFG80211=y
 # CONFIG_CFG80211_DEFAULT_PS is not set
 # CONFIG_CFG80211_CRDA_SUPPORT is not set
@@ -223,6 +231,7 @@
 CONFIG_PPP_MPPE=y
 CONFIG_PPTP=y
 CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8152=y
 CONFIG_USB_USBNET=y
 # CONFIG_USB_NET_AX8817X is not set
 # CONFIG_USB_NET_AX88179_178A is not set
@@ -293,6 +302,12 @@
 CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_SOUND=y
 CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+# CONFIG_SND_USB is not set
 CONFIG_HIDRAW=y
 CONFIG_UHID=y
 CONFIG_HID_A4TECH=y
@@ -366,12 +381,12 @@
 # CONFIG_PWRSEQ_SIMPLE is not set
 # CONFIG_MMC_BLOCK is not set
 CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
 # CONFIG_RTC_SYSTOHC is not set
 CONFIG_RTC_DRV_PL031=y
 CONFIG_VIRTIO_PCI=y
 # CONFIG_VIRTIO_PCI_LEGACY is not set
 CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
 CONFIG_VIRTIO_MMIO=y
 CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
 CONFIG_STAGING=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index ee9a471..2fdb64d 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -9,6 +9,7 @@
 CONFIG_TASKSTATS=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_NOCB_CPU=y
@@ -33,6 +34,7 @@
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
@@ -42,6 +44,7 @@
 CONFIG_SLAB_FREELIST_HARDENED=y
 CONFIG_PROFILING=y
 # CONFIG_ZONE_DMA32 is not set
+CONFIG_HOTPLUG_SIZE_BITS=29
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_KONA=y
 CONFIG_PCI=y
@@ -96,12 +99,18 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
+CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_CMA=y
+CONFIG_CMA_AREAS=16
 CONFIG_ZSMALLOC=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
 CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
@@ -111,6 +120,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
 CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
@@ -144,6 +154,7 @@
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -170,9 +181,9 @@
 CONFIG_NETFILTER_XT_MATCH_MAC=y
 CONFIG_NETFILTER_XT_MATCH_MARK=y
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
-CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
@@ -221,6 +232,7 @@
 CONFIG_NET_CLS_U32=y
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -234,10 +246,12 @@
 CONFIG_QRTR=y
 CONFIG_QRTR_SMD=y
 CONFIG_QRTR_MHI=y
+CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
 CONFIG_BT_SLIM_QCA6390=y
 CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
 CONFIG_NFC_NQ=y
 CONFIG_FW_LOADER_USER_HELPER=y
@@ -256,6 +270,7 @@
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -266,6 +281,7 @@
 CONFIG_SCSI_UFSHCD=y
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_UEVENT=y
@@ -282,6 +298,8 @@
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
 CONFIG_USB_LAN78XX=y
 CONFIG_USB_USBNET=y
 CONFIG_WIL6210=m
@@ -304,12 +322,14 @@
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVMEM is not set
 CONFIG_SERIAL_MSM_GENI=y
-CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
+CONFIG_I3C=y
+CONFIG_I3C_MASTER_QCOM_GENI=y
 CONFIG_SPI=y
 CONFIG_SPI_QCOM_GENI=y
 CONFIG_SPI_SPIDEV=y
@@ -329,6 +349,7 @@
 CONFIG_THERMAL_WRITABLE_TRIPS=y
 CONFIG_THERMAL_GOV_USER_SPACE=y
 CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
 CONFIG_DEVFREQ_THERMAL=y
 CONFIG_QCOM_SPMI_TEMP_ALARM=y
 CONFIG_THERMAL_TSENS=y
@@ -338,6 +359,7 @@
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
 CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -364,7 +386,9 @@
 CONFIG_MSM_GLOBAL_SYNX=y
 CONFIG_I2C_RTC6226_QCA=y
 CONFIG_DRM=y
+CONFIG_DRM_MSM_DP=y
 CONFIG_DRM_MSM_REGISTER_LOGGING=y
+CONFIG_DRM_SDE_RSC=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
 CONFIG_LOGO=y
@@ -401,7 +425,7 @@
 CONFIG_MSM_HSUSB_PHY=y
 CONFIG_USB_QCOM_EMU_PHY=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_VBUS_DRAW=900
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_CONFIGFS_NCM=y
@@ -418,6 +442,8 @@
 CONFIG_USB_CONFIGFS_F_CCID=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_TYPEC=y
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
@@ -454,10 +480,13 @@
 CONFIG_IPA_WDI_UNIFIED_API=y
 CONFIG_RMNET_IPA3=y
 CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
 CONFIG_MSM_11AD=m
 CONFIG_USB_BAM=y
 CONFIG_QCOM_GENI_SE=y
+CONFIG_IPA3_REGDUMP=y
 CONFIG_QCOM_MDSS_PLL=y
+CONFIG_QCOM_MDSS_DP_PLL=y
 CONFIG_QCOM_CLK_RPMH=y
 CONFIG_SPMI_PMIC_CLKDIV=y
 CONFIG_MSM_CLK_AOP_QMP=y
@@ -483,7 +512,11 @@
 CONFIG_RPMSG_QCOM_GLINK_SMEM=y
 CONFIG_RPMSG_QCOM_GLINK_SPSS=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_OVERRIDE_MEMORY_LIMIT=y
 CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_MSM_QBT_HANDLER=y
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_KONA_LLCC=y
@@ -505,6 +538,7 @@
 CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_QCOM_DCC_V2=y
@@ -554,8 +588,12 @@
 CONFIG_ESOC_MDM_4x=y
 CONFIG_ESOC_MDM_DRV=y
 CONFIG_SENSORS_SSC=y
+CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -568,21 +606,24 @@
 CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_TIMEOUT=-1
 CONFIG_SCHEDSTATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 8029589..fbc5f84 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -8,6 +8,7 @@
 CONFIG_TASKSTATS=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_NOCB_CPU=y
@@ -34,6 +35,7 @@
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
@@ -41,6 +43,7 @@
 CONFIG_SLAB_FREELIST_HARDENED=y
 CONFIG_PROFILING=y
 # CONFIG_ZONE_DMA32 is not set
+CONFIG_HOTPLUG_SIZE_BITS=29
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_KONA=y
 CONFIG_PCI=y
@@ -99,13 +102,20 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_CFQ_GROUP_IOSCHED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
+CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_CLEANCACHE=y
 CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_AREAS=16
 CONFIG_ZSMALLOC=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
 CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
@@ -115,6 +125,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
 CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
@@ -148,6 +159,7 @@
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -174,9 +186,9 @@
 CONFIG_NETFILTER_XT_MATCH_MAC=y
 CONFIG_NETFILTER_XT_MATCH_MARK=y
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
-CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
@@ -226,6 +238,7 @@
 CONFIG_NET_CLS_U32=y
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -240,11 +253,12 @@
 CONFIG_QRTR=y
 CONFIG_QRTR_SMD=y
 CONFIG_QRTR_MHI=y
+CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
 CONFIG_BT_SLIM_QCA6390=y
 CONFIG_CFG80211=y
-# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
 CONFIG_NFC_NQ=y
 CONFIG_FW_LOADER_USER_HELPER=y
@@ -264,6 +278,7 @@
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -274,6 +289,7 @@
 CONFIG_SCSI_UFSHCD=y
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_UEVENT=y
@@ -288,6 +304,8 @@
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
 CONFIG_USB_LAN78XX=y
 CONFIG_WIL6210=m
 CONFIG_CLD_LL_CORE=y
@@ -309,15 +327,19 @@
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_TTY_PRINTK=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
+CONFIG_I3C=y
+CONFIG_I3C_MASTER_QCOM_GENI=y
 CONFIG_SPI=y
 CONFIG_SPI_QCOM_GENI=y
 CONFIG_SPI_SPIDEV=y
@@ -337,6 +359,7 @@
 CONFIG_THERMAL_WRITABLE_TRIPS=y
 CONFIG_THERMAL_GOV_USER_SPACE=y
 CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
 CONFIG_DEVFREQ_THERMAL=y
 CONFIG_QCOM_SPMI_TEMP_ALARM=y
 CONFIG_THERMAL_TSENS=y
@@ -346,6 +369,7 @@
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
 CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -372,7 +396,9 @@
 CONFIG_MSM_GLOBAL_SYNX=y
 CONFIG_I2C_RTC6226_QCA=y
 CONFIG_DRM=y
+CONFIG_DRM_MSM_DP=y
 CONFIG_DRM_MSM_REGISTER_LOGGING=y
+CONFIG_DRM_SDE_RSC=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
@@ -410,7 +436,7 @@
 CONFIG_MSM_HSUSB_PHY=y
 CONFIG_USB_QCOM_EMU_PHY=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_VBUS_DRAW=900
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_CONFIGFS_NCM=y
@@ -427,6 +453,8 @@
 CONFIG_USB_CONFIGFS_F_CCID=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_TYPEC=y
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
@@ -467,10 +495,14 @@
 CONFIG_IPA_WDI_UNIFIED_API=y
 CONFIG_RMNET_IPA3=y
 CONFIG_RNDIS_IPA=y
+CONFIG_IPA3_MHI_PRIME_MANAGER=y
+CONFIG_IPA_UT=y
 CONFIG_MSM_11AD=m
 CONFIG_USB_BAM=y
 CONFIG_QCOM_GENI_SE=y
+CONFIG_IPA3_REGDUMP=y
 CONFIG_QCOM_MDSS_PLL=y
+CONFIG_QCOM_MDSS_DP_PLL=y
 CONFIG_QCOM_CLK_RPMH=y
 CONFIG_SPMI_PMIC_CLKDIV=y
 CONFIG_MSM_CLK_AOP_QMP=y
@@ -496,7 +528,11 @@
 CONFIG_RPMSG_QCOM_GLINK_SMEM=y
 CONFIG_RPMSG_QCOM_GLINK_SPSS=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_OVERRIDE_MEMORY_LIMIT=y
 CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_MSM_QBT_HANDLER=y
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_KONA_LLCC=y
@@ -518,6 +554,7 @@
 CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_QCOM_DCC_V2=y
@@ -576,6 +613,9 @@
 CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -590,6 +630,7 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
@@ -597,18 +638,22 @@
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_XZ_DEC=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
@@ -621,7 +666,7 @@
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_WQ_WATCHDOG=y
-CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_TIMEOUT=-1
 CONFIG_PANIC_ON_SCHED_BUG=y
 CONFIG_PANIC_ON_RT_THROTTLING=y
 CONFIG_SCHEDSTATS=y
@@ -646,6 +691,9 @@
 CONFIG_IRQSOFF_TRACER=y
 CONFIG_PREEMPT_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_TEST_USER_COPY=m
 CONFIG_MEMTEST=y
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_PANIC_ON_DATA_CORRUPTION=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
new file mode 100644
index 0000000..870c6ce
--- /dev/null
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -0,0 +1,473 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+# CONFIG_ZONE_DMA32 is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_LITO=y
+CONFIG_PCI=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_BT=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PHYLIB=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_TTY_PRINTK=y
+CONFIG_HW_RANDOM=y
+CONFIG_DIAG_CHAR=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_LITO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_DRM=y
+CONFIG_DRM_MSM_REGISTER_LOGGING=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_EDAC=y
+CONFIG_EDAC_KRYO_ARM64=y
+CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PM8XXX=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QCOM_GENI_SE=y
+# CONFIG_QCOM_A53PLL is not set
+CONFIG_QCOM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_SM_GCC_LITO=y
+CONFIG_SM_VIDEOCC_LITO=y
+CONFIG_SM_CAMCC_LITO=y
+CONFIG_SM_DISPCC_LITO=y
+CONFIG_SM_GPUCC_LITO=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_GLINK_SPSS=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_IPCC=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_LITO_LLCC=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_RPMH=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_IIO=y
+CONFIG_PWM=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_PDC=y
+CONFIG_PHY_XGENE=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SLIMBUS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_IPC_LOGGING=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 03e639a..4c43eb8 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -30,6 +30,7 @@
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
@@ -54,7 +55,6 @@
 # CONFIG_ARM64_VHE is not set
 CONFIG_RANDOMIZE_BASE=y
 CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
-CONFIG_KRYO_PMU_WORKAROUND=y
 CONFIG_COMPAT=y
 CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
@@ -77,6 +77,7 @@
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
 CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
@@ -96,6 +97,7 @@
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
 CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
@@ -104,6 +106,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -158,6 +161,7 @@
 CONFIG_NETFILTER_XT_MATCH_MAC=y
 CONFIG_NETFILTER_XT_MATCH_MARK=y
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
@@ -204,6 +208,7 @@
 CONFIG_NET_CLS_U32=y
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -219,7 +224,6 @@
 CONFIG_QRTR_SMD=y
 CONFIG_BT=y
 CONFIG_CFG80211=y
-# CONFIG_CFG80211_CRDA_SUPPORT is not set
 CONFIG_RFKILL=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
@@ -228,6 +232,7 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_SYS_STATS=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -253,6 +258,8 @@
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -264,6 +271,8 @@
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_TTY_PRINTK=y
 CONFIG_HW_RANDOM=y
@@ -320,15 +329,22 @@
 CONFIG_USB_DWC3_MSM=y
 CONFIG_USB_ISP1760=y
 CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_QCOM_EMU_PHY=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_VBUS_DRAW=900
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_HID=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_TEST=y
@@ -340,6 +356,8 @@
 CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y
 CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y
 CONFIG_EDAC_QCOM=y
+CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y
+CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PM8XXX=y
 CONFIG_DMADEVICES=y
@@ -350,9 +368,15 @@
 CONFIG_ION=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QCOM_GENI_SE=y
 # CONFIG_QCOM_A53PLL is not set
 CONFIG_QCOM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
 CONFIG_SM_GCC_LITO=y
+CONFIG_SM_VIDEOCC_LITO=y
+CONFIG_SM_CAMCC_LITO=y
+CONFIG_SM_DISPCC_LITO=y
+CONFIG_SM_GPUCC_LITO=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
@@ -368,6 +392,7 @@
 CONFIG_RPMSG_QCOM_GLINK_SPSS=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_LITO_LLCC=y
@@ -386,12 +411,14 @@
 CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_QCOM_EUD=y
 CONFIG_MSM_CORE_HANG_DETECT=y
 CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
 CONFIG_MSM_EVENT_TIMER=y
@@ -427,12 +454,14 @@
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
@@ -457,15 +486,17 @@
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_WQ_WATCHDOG=y
-CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_TIMEOUT=-1
 CONFIG_SCHEDSTATS=y
 CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_LOCK_TORTURE_TEST=m
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_RCU_TORTURE_TEST=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAIL_PAGE_ALLOC=y
 CONFIG_FAULT_INJECTION_DEBUG_FS=y
@@ -477,9 +508,14 @@
 CONFIG_IRQSOFF_TRACER=y
 CONFIG_PREEMPT_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_TEST_USER_COPY=m
 CONFIG_MEMTEST=y
 CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
 CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
 CONFIG_CORESIGHT_SOURCE_ETM4X=y
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index bb139e0..2870259 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -390,27 +390,33 @@
  * 	size:		size of the region
  * 	Corrupts:	kaddr, size, tmp1, tmp2
  */
+	.macro __dcache_op_workaround_clean_cache, op, kaddr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	\op, \kaddr
+alternative_else
+	dc	civac, \kaddr
+alternative_endif
+	.endm
+
 	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
 	dcache_line_size \tmp1, \tmp2
 	add	\size, \kaddr, \size
 	sub	\tmp2, \tmp1, #1
 	bic	\kaddr, \kaddr, \tmp2
 9998:
-	.if	(\op == cvau || \op == cvac)
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
-	dc	\op, \kaddr
-alternative_else
-	dc	civac, \kaddr
-alternative_endif
-	.elseif	(\op == cvap)
-alternative_if ARM64_HAS_DCPOP
-	sys 3, c7, c12, 1, \kaddr	// dc cvap
-alternative_else
-	dc	cvac, \kaddr
-alternative_endif
+	.ifc	\op, cvau
+	__dcache_op_workaround_clean_cache \op, \kaddr
+	.else
+	.ifc	\op, cvac
+	__dcache_op_workaround_clean_cache \op, \kaddr
+	.else
+	.ifc	\op, cvap
+	sys	3, c7, c12, 1, \kaddr	// dc cvap
 	.else
 	dc	\op, \kaddr
 	.endif
+	.endif
+	.endif
 	add	\kaddr, \kaddr, \tmp1
 	cmp	\kaddr, \size
 	b.lo	9998b
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 48d6061..3b21bcd 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -138,7 +138,23 @@
 #define __raw_readq(a)		__raw_read_logged((a), q, u64)
 
 /* IO barriers */
-#define __iormb()		rmb()
+#define __iormb(v)							\
+({									\
+	unsigned long tmp;						\
+									\
+	rmb();								\
+									\
+	/*								\
+	 * Create a dummy control dependency from the IO read to any	\
+	 * later instructions. This ensures that a subsequent call to	\
+	 * udelay() will be ordered due to the ISB in get_cycles().	\
+	 */								\
+	asm volatile("eor	%0, %1, %1\n"				\
+		     "cbnz	%0, ."					\
+		     : "=r" (tmp) : "r" ((unsigned long)(v))		\
+		     : "memory");					\
+})
+
 #define __iowmb()		wmb()
 
 #define mmiowb()		do { } while (0)
@@ -179,10 +195,10 @@
  * following Normal memory access. Writes are ordered relative to any prior
  * Normal memory access.
  */
-#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
-#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
-#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
-#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(); __v; })
+#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(__v); __v; })
+#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
+#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
+#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
 
 #define writeb(v,c)		({ __iowmb(); writeb_relaxed((v),(c)); })
 #define writew(v,c)		({ __iowmb(); writew_relaxed((v),(c)); })
@@ -190,13 +206,13 @@
 #define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c)); })
 
 #define readb_no_log(c) \
-		({ u8  __v = readb_relaxed_no_log(c); __iormb(); __v; })
+		({ u8  __v = readb_relaxed_no_log(c); __iormb(__v); __v; })
 #define readw_no_log(c) \
-		({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; })
+		({ u16 __v = readw_relaxed_no_log(c); __iormb(__v); __v; })
 #define readl_no_log(c) \
-		({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; })
+		({ u32 __v = readl_relaxed_no_log(c); __iormb(__v); __v; })
 #define readq_no_log(c) \
-		({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; })
+		({ u64 __v = readq_relaxed_no_log(c); __iormb(__v); __v; })
 
 #define writeb_no_log(v, c) \
 		({ __iowmb(); writeb_relaxed_no_log((v), (c)); })
@@ -251,9 +267,9 @@
 /*
  * io{read,write}{16,32,64}be() macros
  */
-#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw_no_log(p)); __iormb(); __v; })
-#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl_no_log(p)); __iormb(); __v; })
-#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq_no_log(p)); __iormb(); __v; })
+#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw_no_log(p)); __iormb(__v); __v; })
+#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl_no_log(p)); __iormb(__v); __v; })
+#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq_no_log(p)); __iormb(__v); __v; })
 
 #define iowrite16be(v,p)	({ __iowmb(); __raw_writew_no_log((__force __u16)cpu_to_be16(v), p); })
 #define iowrite32be(v,p)	({ __iowmb(); __raw_writel_no_log((__force __u32)cpu_to_be32(v), p); })
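
The reworked __iormb(v) above threads the value returned by the I/O read into an always-false eor/cbnz pair, so later instructions (in particular the ISB-backed delay loop inside udelay()) carry a control dependency on the completed device read. Below is a minimal, arm64-only sketch of the idea; the demo_* names are stand-ins for the real macros, not the kernel header itself.

#include <stdint.h>

static inline uint32_t demo_readl_relaxed(const volatile uint32_t *addr)
{
	return *addr;				/* the device load */
}

static inline void demo_iormb(unsigned long v)
{
	unsigned long tmp;

	__asm__ volatile("dsb ld" ::: "memory");	/* the rmb() part */
	__asm__ volatile("eor	%0, %1, %1\n"		/* tmp = v ^ v = 0 */
			 "cbnz	%0, ."			/* never taken, but it
							 * still consumes v */
			 : "=r" (tmp)
			 : "r" (v)
			 : "memory");
}

#define demo_readl(c) \
	({ uint32_t __v = demo_readl_relaxed(c); demo_iormb(__v); __v; })
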
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index aa45df7..8b284cb 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -24,6 +24,8 @@
 
 /* Hyp Configuration Register (HCR) bits */
 #define HCR_FWB		(UL(1) << 46)
+#define HCR_API		(UL(1) << 41)
+#define HCR_APK		(UL(1) << 40)
 #define HCR_TEA		(UL(1) << 37)
 #define HCR_TERR	(UL(1) << 36)
 #define HCR_TLOR	(UL(1) << 35)
@@ -87,6 +89,7 @@
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
 			 HCR_FMO | HCR_IMO)
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
 /* TCR_EL2 Registers bits */
@@ -104,7 +107,7 @@
 			 TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
 
 /* VTCR_EL2 Registers bits */
-#define VTCR_EL2_RES1		(1 << 31)
+#define VTCR_EL2_RES1		(1U << 31)
 #define VTCR_EL2_HD		(1 << 22)
 #define VTCR_EL2_HA		(1 << 21)
 #define VTCR_EL2_PS_MASK	TCR_EL2_PS_MASK
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3445e15..4189d86 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -76,12 +76,17 @@
 /*
  * KASAN requires 1/8th of the kernel virtual address space for the shadow
  * region. KASAN can bloat the stack significantly, so double the (minimum)
- * stack size when KASAN is in use.
+ * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
+ * on.
  */
 #ifdef CONFIG_KASAN
 #define KASAN_SHADOW_SCALE_SHIFT 3
 #define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_THREAD_SHIFT	2
+#else
 #define KASAN_THREAD_SHIFT	1
+#endif /* CONFIG_KASAN_EXTRA */
 #else
 #define KASAN_SHADOW_SIZE	(0)
 #define KASAN_THREAD_SHIFT	0
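
KASAN's instrumented stack frames are considerably larger than normal ones, so the thread stack is scaled by the extra shift above. A small sketch of the resulting sizes follows; the base shift of 14 (16 KiB) and the DEMO_* switches are illustrative assumptions, not the real config symbols (build with e.g. -DDEMO_KASAN or -DDEMO_KASAN_EXTRA).

#include <stdio.h>

#define MIN_THREAD_SHIFT	14	/* assumed 16 KiB base stack */

#if defined(DEMO_KASAN_EXTRA)
#define KASAN_THREAD_SHIFT	2	/* quadruple: 64 KiB */
#elif defined(DEMO_KASAN)
#define KASAN_THREAD_SHIFT	1	/* double: 32 KiB */
#else
#define KASAN_THREAD_SHIFT	0
#endif

int main(void)
{
	unsigned long size = 1UL << (MIN_THREAD_SHIFT + KASAN_THREAD_SHIFT);

	printf("kernel stack: %lu KiB\n", size / 1024);
	return 0;
}
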
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e0d0f5b..d520518 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -40,8 +40,9 @@
  * The following SVCs are ARM private.
  */
 #define __ARM_NR_COMPAT_BASE		0x0f0000
-#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
+#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE + 2)
+#define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE + 5)
+#define __ARM_NR_COMPAT_END		(__ARM_NR_COMPAT_BASE + 0x800)
 
 #define __NR_compat_syscalls		399
 #endif
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 98c4ce5..ad64d2c 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -130,7 +130,7 @@
 
 /* Offset from the start of struct user_sve_header to the register data */
 #define SVE_PT_REGS_OFFSET					\
-	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))	\
+	((sizeof(struct user_sve_header) + (SVE_VQ_BYTES - 1))	\
 		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
 
 /*
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 1175f58..295951f 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -79,7 +79,6 @@
 	.macro mcount_get_lr reg
 	ldr	\reg, [x29]
 	ldr	\reg, [\reg, #8]
-	mcount_adjust_addr	\reg, \reg
 	.endm
 
 	.macro mcount_get_lr_addr reg
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b085306..651a06b 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -494,10 +494,9 @@
 #endif
 
 	/* Hyp configuration. */
-	mov	x0, #HCR_RW			// 64-bit EL1
+	mov_q	x0, HCR_HOST_NVHE_FLAGS
 	cbz	x2, set_hcr
-	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
-	orr	x0, x0, #HCR_E2H
+	mov_q	x0, HCR_HOST_VHE_FLAGS
 set_hcr:
 	msr	hcr_el2, x0
 	isb
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 29cdc99..9859e11 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -299,8 +299,10 @@
 		dcache_clean_range(__idmap_text_start, __idmap_text_end);
 
 		/* Clean kvm setup code to PoC? */
-		if (el2_reset_needed())
+		if (el2_reset_needed()) {
 			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+			dcache_clean_range(__hyp_text_start, __hyp_text_end);
+		}
 
 		/* make the crash dump kernel image protected again */
 		crash_post_resume();
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e1261fb..17f325b 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
 #include <asm/virt.h>
 
 	.text
+	.pushsection	.hyp.text, "ax"
+
 	.align 11
 
 ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index a820ed0..8da289d 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -76,16 +76,6 @@
 __efistub_stext_offset = stext - _text;
 
 /*
- * Prevent the symbol aliases below from being emitted into the kallsyms
- * table, by forcing them to be absolute symbols (which are conveniently
- * ignored by scripts/kallsyms) rather than section relative symbols.
- * The distinction is only relevant for partial linking, and only for symbols
- * that are defined within a section declaration (which is not the case for
- * the definitions below) so the resulting values will be identical.
- */
-#define KALLSYMS_HIDE(sym)	ABSOLUTE(sym)
-
-/*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
  * isolate it from the kernel proper. The following symbols are legally
  * accessed by the stub, so provide some aliases to make them accessible.
@@ -94,28 +84,28 @@
  * linked at. The routines below are all implemented in assembler in a
  * position independent manner
  */
-__efistub_memcmp		= KALLSYMS_HIDE(__pi_memcmp);
-__efistub_memchr		= KALLSYMS_HIDE(__pi_memchr);
-__efistub_memcpy		= KALLSYMS_HIDE(__pi_memcpy);
-__efistub_memmove		= KALLSYMS_HIDE(__pi_memmove);
-__efistub_memset		= KALLSYMS_HIDE(__pi_memset);
-__efistub_strlen		= KALLSYMS_HIDE(__pi_strlen);
-__efistub_strnlen		= KALLSYMS_HIDE(__pi_strnlen);
-__efistub_strcmp		= KALLSYMS_HIDE(__pi_strcmp);
-__efistub_strncmp		= KALLSYMS_HIDE(__pi_strncmp);
-__efistub_strrchr		= KALLSYMS_HIDE(__pi_strrchr);
-__efistub___flush_dcache_area	= KALLSYMS_HIDE(__pi___flush_dcache_area);
+__efistub_memcmp		= __pi_memcmp;
+__efistub_memchr		= __pi_memchr;
+__efistub_memcpy		= __pi_memcpy;
+__efistub_memmove		= __pi_memmove;
+__efistub_memset		= __pi_memset;
+__efistub_strlen		= __pi_strlen;
+__efistub_strnlen		= __pi_strnlen;
+__efistub_strcmp		= __pi_strcmp;
+__efistub_strncmp		= __pi_strncmp;
+__efistub_strrchr		= __pi_strrchr;
+__efistub___flush_dcache_area	= __pi___flush_dcache_area;
 
 #ifdef CONFIG_KASAN
-__efistub___memcpy		= KALLSYMS_HIDE(__pi_memcpy);
-__efistub___memmove		= KALLSYMS_HIDE(__pi_memmove);
-__efistub___memset		= KALLSYMS_HIDE(__pi_memset);
+__efistub___memcpy		= __pi_memcpy;
+__efistub___memmove		= __pi_memmove;
+__efistub___memset		= __pi_memset;
 #endif
 
-__efistub__text			= KALLSYMS_HIDE(_text);
-__efistub__end			= KALLSYMS_HIDE(_end);
-__efistub__edata		= KALLSYMS_HIDE(_edata);
-__efistub_screen_info		= KALLSYMS_HIDE(screen_info);
+__efistub__text			= _text;
+__efistub__end			= _end;
+__efistub__edata		= _edata;
+__efistub_screen_info		= screen_info;
 
 #endif
 
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index f0e6ab8..b09b6f7 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/types.h>
 
+#include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
@@ -43,7 +44,7 @@
 	return ret;
 }
 
-static __init const u8 *get_cmdline(void *fdt)
+static __init const u8 *kaslr_get_cmdline(void *fdt)
 {
 	static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
 
@@ -87,6 +88,7 @@
 	 * we end up running with module randomization disabled.
 	 */
 	module_alloc_base = (u64)_etext - MODULES_VSIZE;
+	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
 
 	/*
 	 * Try to map the FDT early. If this fails, we simply bail,
@@ -109,7 +111,7 @@
 	 * Check if 'nokaslr' appears on the command line, and
 	 * return 0 if that is the case.
 	 */
-	cmdline = get_cmdline(fdt);
+	cmdline = kaslr_get_cmdline(fdt);
 	str = strstr(cmdline, "nokaslr");
 	if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
 		return 0;
@@ -169,5 +171,8 @@
 	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
 	module_alloc_base &= PAGE_MASK;
 
+	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+	__flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+
 	return offset;
 }
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index d89eb53..c1bb288 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -30,7 +30,7 @@
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
-static DEFINE_PER_CPU(bool, is_hotplugging);
+static DEFINE_PER_CPU(bool, perf_event_is_hotplugging);
 
 /*
  * ARMv8 PMUv3 Performance Events handling code.
@@ -1148,7 +1148,7 @@
 	if (!cpu_pmu)
 		return;
 
-	if (__this_cpu_read(is_hotplugging))
+	if (__this_cpu_read(perf_event_is_hotplugging))
 		return;
 
 	hw_events = this_cpu_ptr(cpu_pmu->hw_events);
@@ -1385,13 +1385,13 @@
 #ifdef CONFIG_HOTPLUG_CPU
 static int perf_event_hotplug_coming_up(unsigned int cpu)
 {
-	per_cpu(is_hotplugging, cpu) = false;
+	per_cpu(perf_event_is_hotplugging, cpu) = false;
 	return 0;
 }
 
 static int perf_event_hotplug_going_down(unsigned int cpu)
 {
-	per_cpu(is_hotplugging, cpu) = true;
+	per_cpu(perf_event_is_hotplugging, cpu) = true;
 	return 0;
 }
 
@@ -1428,7 +1428,7 @@
 	int ret, cpu;
 
 	for_each_possible_cpu(cpu)
-		per_cpu(is_hotplugging, cpu) = false;
+		per_cpu(perf_event_is_hotplugging, cpu) = false;
 
 	ret = perf_event_cpu_hp_init();
 	if (ret)
@@ -1447,6 +1447,7 @@
 	.driver		= {
 		.name	= ARMV8_PMU_PDEV_NAME,
 		.of_match_table = armv8_pmu_of_device_ids,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= armv8_pmu_device_probe,
 };
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index b5a367d..30bb137 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -478,13 +478,13 @@
 	    addr < (unsigned long)__entry_text_end) ||
 	    (addr >= (unsigned long)__idmap_text_start &&
 	    addr < (unsigned long)__idmap_text_end) ||
+	    (addr >= (unsigned long)__hyp_text_start &&
+	    addr < (unsigned long)__hyp_text_end) ||
 	    !!search_exception_tables(addr))
 		return true;
 
 	if (!is_kernel_in_hyp_mode()) {
-		if ((addr >= (unsigned long)__hyp_text_start &&
-		    addr < (unsigned long)__hyp_text_end) ||
-		    (addr >= (unsigned long)__hyp_idmap_text_start &&
+		if ((addr >= (unsigned long)__hyp_idmap_text_start &&
 		    addr < (unsigned long)__hyp_idmap_text_end))
 			return true;
 	}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index be938ea..dd1e817 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -250,12 +250,12 @@
 			u32	data;
 
 			if (probe_kernel_address(p, data))
-				printk(KERN_DEBUG " ********");
+				pr_cont(" ********");
 			else
-				printk(KERN_DEBUG " %08x", data);
+				pr_cont(" %08x", data);
 			++p;
 		}
-		printk(KERN_DEBUG "\n");
+		pr_cont("\n");
 	}
 }
 
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index a610982..010212d 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -66,12 +66,11 @@
 /*
  * Handle all unrecognised system calls.
  */
-long compat_arm_syscall(struct pt_regs *regs)
+long compat_arm_syscall(struct pt_regs *regs, int scno)
 {
 	siginfo_t info;
-	unsigned int no = regs->regs[7];
 
-	switch (no) {
+	switch (scno) {
 	/*
 	 * Flush a region from virtual address 'r0' to virtual address 'r1'
 	 * _exclusive_.  There is no alignment requirement on either address;
@@ -102,12 +101,12 @@
 
 	default:
 		/*
-		 * Calls 9f00xx..9f07ff are defined to return -ENOSYS
+		 * Calls 0x0f0000..0x0f07ff are defined to return -ENOSYS
 		 * if not implemented, rather than raising SIGILL. This
 		 * way the calling program can gracefully determine whether
 		 * a feature is supported.
 		 */
-		if ((no & 0xffff) <= 0x7ff)
+		if (scno < __ARM_NR_COMPAT_END)
 			return -ENOSYS;
 		break;
 	}
@@ -119,6 +118,6 @@
 	info.si_addr  = (void __user *)instruction_pointer(regs) -
 			 (compat_thumb_mode(regs) ? 2 : 4);
 
-	arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no);
+	arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, scno);
 	return 0;
 }
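
The old guard masked the syscall number down to its low 16 bits, so numbers far outside the ARM-private range could still take the -ENOSYS path; the bound against __ARM_NR_COMPAT_END is exact. A compilable sketch of the two predicates:

#include <stdio.h>

#define __ARM_NR_COMPAT_BASE	0x0f0000
#define __ARM_NR_COMPAT_END	(__ARM_NR_COMPAT_BASE + 0x800)

static const char *old_rule(unsigned int no)
{
	return (no & 0xffff) <= 0x7ff ? "-ENOSYS" : "SIGILL";
}

static const char *new_rule(int scno)
{
	return scno < __ARM_NR_COMPAT_END ? "-ENOSYS" : "SIGILL";
}

int main(void)
{
	unsigned int bogus = 0x1100123;	/* low 16 bits fall in 0x000..0x7ff */

	printf("old: %s\n", old_rule(bogus));	/* -ENOSYS (wrong) */
	printf("new: %s\n", new_rule(bogus));	/* SIGILL */
	return 0;
}
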
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 032d223..5610ac0 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -13,16 +13,15 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
-long compat_arm_syscall(struct pt_regs *regs);
-
+long compat_arm_syscall(struct pt_regs *regs, int scno);
 long sys_ni_syscall(void);
 
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
+static long do_ni_syscall(struct pt_regs *regs, int scno)
 {
 #ifdef CONFIG_COMPAT
 	long ret;
 	if (is_compat_task()) {
-		ret = compat_arm_syscall(regs);
+		ret = compat_arm_syscall(regs, scno);
 		if (ret != -ENOSYS)
 			return ret;
 	}
@@ -47,7 +46,7 @@
 		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
 		ret = __invoke_syscall(regs, syscall_fn);
 	} else {
-		ret = do_ni_syscall(regs);
+		ret = do_ni_syscall(regs, scno);
 	}
 
 	regs->regs[0] = ret;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 605d1b6..74e469f 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -99,7 +99,8 @@
 		*(.discard)
 		*(.discard.*)
 		*(.interp .dynamic)
-		*(.dynsym .dynstr .hash)
+		*(.dynsym .dynstr .hash .gnu.hash)
+		*(.eh_frame)
 	}
 
 	. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -176,12 +177,12 @@
 
 	PERCPU_SECTION(L1_CACHE_BYTES)
 
-	.rela : ALIGN(8) {
+	.rela.dyn : ALIGN(8) {
 		*(.rela .rela*)
 	}
 
-	__rela_offset	= ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
-	__rela_size	= SIZEOF(.rela);
+	__rela_offset	= ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
+	__rela_size	= SIZEOF(.rela.dyn);
 
 	. = ALIGN(SEGMENT_ALIGN);
 	__initdata_end = .;
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ca46153..a1c32c1 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -157,7 +157,7 @@
 	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
 
 	write_sysreg(mdcr_el2, mdcr_el2);
-	write_sysreg(HCR_RW, hcr_el2);
+	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
 	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 131c777..c041eab 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,14 +15,19 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/irqflags.h>
+
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/tlbflush.h>
 
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
+						 unsigned long *flags)
 {
 	u64 val;
 
+	local_irq_save(*flags);
+
 	/*
 	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
 	 * most TLB operations target EL2/EL0. In order to affect the
@@ -37,7 +42,8 @@
 	isb();
 }
 
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
+						  unsigned long *flags)
 {
 	write_sysreg(kvm->arch.vttbr, vttbr_el2);
 	isb();
@@ -48,7 +54,8 @@
 			    __tlb_switch_to_guest_vhe,
 			    ARM64_HAS_VIRT_HOST_EXTN);
 
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
+						unsigned long flags)
 {
 	/*
 	 * We're done with the TLB operation, let's restore the host's
@@ -56,9 +63,12 @@
 	 */
 	write_sysreg(0, vttbr_el2);
 	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+	isb();
+	local_irq_restore(flags);
 }
 
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
+						 unsigned long flags)
 {
 	write_sysreg(0, vttbr_el2);
 }
@@ -70,11 +80,13 @@
 
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
+	unsigned long flags;
+
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -117,36 +129,39 @@
 	if (!has_vhe() && icache_is_vpipt())
 		__flush_icache_all();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 {
+	unsigned long flags;
+
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+	unsigned long flags;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
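
The reworked helpers above mask interrupts for the whole window in which the guest's VMID is live on a VHE host: the switch-to-guest path saves the flags through a pointer, the switch-to-host path restores them by value, and the nVHE variants can simply ignore them. A runnable mock of that calling convention (the mock_* functions stand in for local_irq_save()/local_irq_restore(), and the register programming is elided):

#include <stdio.h>

static unsigned long fake_irqs_enabled = 1;

static void mock_irq_save(unsigned long *flags)
{
	*flags = fake_irqs_enabled;
	fake_irqs_enabled = 0;
}

static void mock_irq_restore(unsigned long flags)
{
	fake_irqs_enabled = flags;
}

static void switch_to_guest_vhe(unsigned long *flags)
{
	mock_irq_save(flags);
	/* ... program vttbr_el2, drop HCR_EL2.TGE, isb() ... */
}

static void switch_to_host_vhe(unsigned long flags)
{
	/* ... restore host vttbr/HCR_EL2, isb() ... */
	mock_irq_restore(flags);
}

int main(void)
{
	unsigned long flags;

	switch_to_guest_vhe(&flags);
	/* TLB invalidation would run here with interrupts masked */
	switch_to_host_vhe(flags);
	printf("interrupts enabled again: %lu\n", fake_irqs_enabled);
	return 0;
}
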
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 0369e84..000fb44 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -285,6 +285,9 @@
  *	- size    - size in question
  */
 ENTRY(__clean_dcache_area_pop)
+	alternative_if_not ARM64_HAS_DCPOP
+	b	__clean_dcache_area_poc
+	alternative_else_nop_endif
 	dcache_by_line_op cvap, sy, x0, x1, x2, x3
 	ret
 ENDPIPROC(__clean_dcache_area_pop)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 6aa21ed..09577b5 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1114,6 +1114,11 @@
 			naddr + nsize);
 		return;
 	}
+	if (naddr == 0 || nsize == 0) {
+		dev_err(dev, "Invalid #address-cells %d or #size-cells %d\n",
+			naddr, nsize);
+		return;
+	}
 
 	*dma_addr = of_read_number(ranges, naddr);
 	*dma_size = of_read_number(ranges + naddr, nsize);
@@ -1238,6 +1243,11 @@
 	struct iommu_domain *domain;
 	struct iommu_group *group = dev->iommu_group;
 
+	if (!dev || !mapping) {
+		pr_err("%s: Error input is NULL\n", __func__);
+		return -EINVAL;
+	}
+
 	if (!group) {
 		dev_err(dev, "No iommu associated with device\n");
 		return -EINVAL;
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index cc305b3..fb10aa0 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -33,7 +33,11 @@
 		__clean_dcache_area_pou(kaddr, len);
 		__flush_icache_all();
 	} else {
-		flush_icache_range(addr, addr + len);
+		/*
+		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
+		 * for user mappings.
+		 */
+		__flush_icache_range(addr, addr + len);
 	}
 }
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3bc6139..55d16c5 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -785,7 +785,8 @@
 __initcall(register_mem_limit_dumper);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
 	pg_data_t *pgdat;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
@@ -833,7 +834,7 @@
 
 	pgdat = NODE_DATA(nid);
 
-	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 
 	/*
 	 * Make the pages usable after they have been added.
@@ -874,7 +875,7 @@
 
 }
 
-int arch_remove_memory(u64 start, u64 size)
+int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -883,7 +884,7 @@
 	int ret = 0;
 
 	zone = page_zone(page);
-	ret = __remove_pages(zone, start_pfn, nr_pages);
+	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
 	WARN_ON_ONCE(ret);
 
 	kernel_physical_mapping_remove(start, start + size);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ca1feee..fdf213a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1165,7 +1165,7 @@
 			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
 			if (!p) {
 #ifdef CONFIG_MEMORY_HOTPLUG
-				vmemmap_free(start, end);
+				vmemmap_free(start, end, altmap);
 #endif
 				ret = -ENOMEM;
 				break;
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 6181e41..fe3ddd7 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -55,12 +55,12 @@
  */
 #ifdef CONFIG_SUN3
 #define PTRS_PER_PTE   16
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   2048
 #elif defined(CONFIG_COLDFIRE)
 #define PTRS_PER_PTE	512
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	1024
 #else
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 5d3596c..de44899 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -165,8 +165,6 @@
 					be32_to_cpu(m->addr);
 				m68k_memory[m68k_num_memory].size =
 					be32_to_cpu(m->size);
-				memblock_add(m68k_memory[m68k_num_memory].addr,
-					     m68k_memory[m68k_num_memory].size);
 				m68k_num_memory++;
 			} else
 				pr_warn("%s: too many memory chunks\n",
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 4e17ecb..2eb2b31 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -228,6 +228,7 @@
 
 	min_addr = m68k_memory[0].addr;
 	max_addr = min_addr + m68k_memory[0].size;
+	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
 	for (i = 1; i < m68k_num_memory;) {
 		if (m68k_memory[i].addr < min_addr) {
 			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
@@ -238,6 +239,7 @@
 				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
 			continue;
 		}
+		memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
 		addr = m68k_memory[i].addr + m68k_memory[i].size;
 		if (addr > max_addr)
 			max_addr = addr;
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 7b650ab..2ca5985 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -63,7 +63,7 @@
 
 #include <asm-generic/4level-fixup.h>
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3551199..201caf2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -794,6 +794,7 @@
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select ZONE_DMA32 if 64BIT
+	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SIBYTE_LITTLESUR
 	bool "Sibyte BCM91250C2-LittleSur"
@@ -814,6 +815,7 @@
 	select SYS_HAS_CPU_SB1
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SIBYTE_BIGSUR
 	bool "Sibyte BCM91480B-BigSur"
@@ -826,6 +828,7 @@
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select ZONE_DMA32 if 64BIT
+	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SNI_RM
 	bool "SNI RM200/300/400"
@@ -3149,6 +3152,7 @@
 config MIPS32_N32
 	bool "Kernel support for n32 binaries"
 	depends on 64BIT
+	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	select COMPAT
 	select MIPS32_COMPAT
 	select SYSVIPC_COMPAT if SYSVIPC
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49..fe37735 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -173,6 +173,31 @@
 	pm_power_off = bcm47xx_machine_halt;
 }
 
+#ifdef CONFIG_BCM47XX_BCMA
+static struct device * __init bcm47xx_setup_device(void)
+{
+	struct device *dev;
+	int err;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	err = dev_set_name(dev, "bcm47xx_soc");
+	if (err) {
+		pr_err("Failed to set SoC device name: %d\n", err);
+		kfree(dev);
+		return NULL;
+	}
+
+	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (err)
+		pr_err("Failed to set SoC DMA mask: %d\n", err);
+
+	return dev;
+}
+#endif
+
 /*
  * This finishes bus initialization doing things that were not possible without
  * kmalloc. Make sure to call it late enough (after mm_init).
@@ -183,6 +208,10 @@
 	if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
 		int err;
 
+		bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
+		if (!bcm47xx_bus.bcma.dev)
+			panic("Failed to setup SoC device\n");
+
 		err = bcma_host_soc_init(&bcm47xx_bus.bcma);
 		if (err)
 			panic("Failed to initialize BCMA bus (err %d)", err);
@@ -235,6 +264,8 @@
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
 	case BCM47XX_BUS_TYPE_BCMA:
+		if (device_register(bcm47xx_bus.bcma.dev))
+			pr_err("Failed to register SoC device\n");
 		bcma_bus_register(&bcm47xx_bus.bcma.bus);
 		break;
 #endif
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index 07b4c65..8e73d65 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -70,6 +70,8 @@
 
 static int shared_device_registered;
 
+static u64 enet_dmamask = DMA_BIT_MASK(32);
+
 static struct resource enet0_res[] = {
 	{
 		.start		= -1, /* filled at runtime */
@@ -99,6 +101,8 @@
 	.resource	= enet0_res,
 	.dev		= {
 		.platform_data = &enet0_pd,
+		.dma_mask = &enet_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -131,6 +135,8 @@
 	.resource	= enet1_res,
 	.dev		= {
 		.platform_data = &enet1_pd,
+		.dma_mask = &enet_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -157,6 +163,8 @@
 	.resource	= enetsw_res,
 	.dev		= {
 		.platform_data = &enetsw_pd,
+		.dma_mask = &enet_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 37fe58c..542c3ed 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -13,6 +13,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include "../../../../include/linux/sizes.h"
 
 int main(int argc, char *argv[])
 {
@@ -45,11 +46,11 @@
 	vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
 
 	/*
-	 * Align with 16 bytes: "greater than that used for any standard data
-	 * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
+	 * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
+	 * which may be as large as 64KB depending on the kernel configuration.
 	 */
 
-	vmlinuz_load_addr += (16 - vmlinux_size % 16);
+	vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
 
 	printf("0x%llx\n", vmlinuz_load_addr);
 
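
A standalone sketch of the new rounding, with made-up address and size values; note that, like the 16-byte version it replaces, it pads by a full 64 KiB when the size is already aligned, which is harmless.

#include <stdint.h>
#include <stdio.h>

#define SZ_64K	0x10000

int main(void)
{
	uint64_t vmlinux_load_addr = 0xffffffff80100000ULL;	/* example */
	uint64_t vmlinux_size = 0x5a1234;			/* example */
	uint64_t vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;

	/* round up to the next 64 KiB boundary so kexec's PAGE_SIZE-aligned
	 * load segments never overlap the decompressed image */
	vmlinuz_load_addr += SZ_64K - vmlinux_size % SZ_64K;

	printf("0x%llx\n", (unsigned long long)vmlinuz_load_addr);
	return 0;
}
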
diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
index 65af3f6..84328af 100644
--- a/arch/mips/boot/dts/img/boston.dts
+++ b/arch/mips/boot/dts/img/boston.dts
@@ -141,6 +141,12 @@
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
 
+				eg20t_phub@2,0,0 {
+					compatible = "pci8086,8801";
+					reg = <0x00020000 0 0 0 0>;
+					intel,eg20t-prefetch = <0>;
+				};
+
 				eg20t_mac@2,0,1 {
 					compatible = "pci8086,8802";
 					reg = <0x00020100 0 0 0 0>;
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 50cff3c..4f7b1fa 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -76,7 +76,7 @@
 	status = "okay";
 
 	pinctrl-names = "default";
-	pinctrl-0 = <&pins_uart2>;
+	pinctrl-0 = <&pins_uart3>;
 };
 
 &uart4 {
@@ -196,9 +196,9 @@
 		bias-disable;
 	};
 
-	pins_uart2: uart2 {
-		function = "uart2";
-		groups = "uart2-data", "uart2-hwflow";
+	pins_uart3: uart3 {
+		function = "uart3";
+		groups = "uart3-data", "uart3-hwflow";
 		bias-disable;
 	};
 
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 6c79e8a..3ddbb98 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -286,7 +286,8 @@
 	case 3:
 		return CVMX_HELPER_INTERFACE_MODE_LOOP;
 	case 4:
-		return CVMX_HELPER_INTERFACE_MODE_RGMII;
+		/* TODO: Implement support for AGL (RGMII). */
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
 	default:
 		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
 	}
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 951c423..4c47b3f 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -71,6 +71,7 @@
 # CONFIG_SERIAL_8250_PCI is not set
 CONFIG_SERIAL_8250_NR_UARTS=1
 CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AR933X=y
 CONFIG_SERIAL_AR933X_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index d4ea7a5..9e80531 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -306,7 +306,7 @@
 {									      \
 	long result;							      \
 									      \
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+	if (kernel_uses_llsc) {						      \
 		long temp;						      \
 									      \
 		__asm__ __volatile__(					      \
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index a41059d..ed7ffe4 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -50,7 +50,7 @@
 #define MIPS_CACHE_PINDEX	0x00000020	/* Physically indexed cache */
 
 struct cpuinfo_mips {
-	unsigned long		asid_cache;
+	u64			asid_cache;
 #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
 	unsigned long		asid_mask;
 #endif
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
index e9cc62c..ff50aeb 100644
--- a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
+++ b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
@@ -4,8 +4,6 @@
 
 struct jz4740_mmc_platform_data {
 	int gpio_power;
-	int gpio_card_detect;
-	int gpio_read_only;
 	unsigned card_detect_active_low:1;
 	unsigned read_only_active_low:1;
 	unsigned power_active_low:1;
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index c9f7e23..59c8b11 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -21,6 +21,7 @@
 #define NODE3_ADDRSPACE_OFFSET 0x300000000000UL
 
 #define pa_to_nid(addr)  (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
+#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
 
 #define LEVELS_PER_SLICE 128
 
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 0740be7..24d6b42 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -7,7 +7,7 @@
 #include <linux/wait.h>
 
 typedef struct {
-	unsigned long asid[NR_CPUS];
+	u64 asid[NR_CPUS];
 	void *vdso;
 	atomic_t fp_mode_switching;
 
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 9441456..a589585 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -76,14 +76,14 @@
  *  All unused by hardware upper bits will be considered
  *  as a software asid extension.
  */
-static unsigned long asid_version_mask(unsigned int cpu)
+static inline u64 asid_version_mask(unsigned int cpu)
 {
 	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 
-	return ~(asid_mask | (asid_mask - 1));
+	return ~(u64)(asid_mask | (asid_mask - 1));
 }
 
-static unsigned long asid_first_version(unsigned int cpu)
+static inline u64 asid_first_version(unsigned int cpu)
 {
 	return ~asid_version_mask(cpu) + 1;
 }
@@ -102,14 +102,12 @@
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
-	unsigned long asid = asid_cache(cpu);
+	u64 asid = asid_cache(cpu);
 
 	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 		local_flush_tlb_all();	/* start new asid cycle */
-		if (!asid)		/* fix version if needed */
-			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
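
With a 32-bit unsigned long and an 8-bit hardware ASID, the generation counter lives in the upper 24 bits and wraps back to zero after 2^24 rollovers, which is what the deleted "fix version if needed" branch papered over; widening to u64 makes the wrap unreachable in practice. A small demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t asid_mask = 0xff;	/* 8 hardware ASID bits */
	uint32_t v32 = 0xffffff00u;	/* last possible 32-bit generation */
	uint64_t v64 = 0xffffff00u;

	v32 += asid_mask + 1;	/* wraps to 0: needed the old fix-up */
	v64 += asid_mask + 1;	/* keeps counting monotonically */

	printf("32-bit: 0x%08x\n", (unsigned)v32);
	printf("64-bit: 0x%016llx\n", (unsigned long long)v64);
	return 0;
}
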
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
index f085fba..b826b84 100644
--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -7,7 +7,18 @@
 #define _ASM_MMZONE_H_
 
 #include <asm/page.h>
-#include <mmzone.h>
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+# include <mmzone.h>
+#endif
+
+#ifndef pa_to_nid
+#define pa_to_nid(addr) 0
+#endif
+
+#ifndef nid_to_addrbase
+#define nid_to_addrbase(nid) 0
+#endif
 
 #ifdef CONFIG_DISCONTIGMEM
 
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 0036ea0..93a9dce 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -265,6 +265,11 @@
 
 static inline int pmd_present(pmd_t pmd)
 {
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+		return pmd_val(pmd) & _PAGE_PRESENT;
+#endif
+
 	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
 }
 
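
A huge-page PMD stores pfn+flags rather than a pointer to a PTE table, so comparing against invalid_pte_table wrongly reports a non-present huge entry (e.g. one cleared for migration) as present. A sketch of the two checks; the _PAGE_* bit positions here are illustrative assumptions, not the real MIPS layout.

#include <stdio.h>

#define _PAGE_PRESENT	(1UL << 0)	/* illustrative bit layout */
#define _PAGE_HUGE	(1UL << 4)

static unsigned long invalid_pte_table = 0xdeadb000UL;

static int pmd_present_old(unsigned long v)
{
	return v != invalid_pte_table;
}

static int pmd_present_new(unsigned long v)
{
	if (v & _PAGE_HUGE)
		return !!(v & _PAGE_PRESENT);
	return v != invalid_pte_table;
}

int main(void)
{
	/* a huge mapping whose present bit was cleared for migration */
	unsigned long pmd = (0x1234UL << 12) | _PAGE_HUGE;

	printf("old: %d (claims present)\n", pmd_present_old(pmd));
	printf("new: %d\n", pmd_present_new(pmd));
	return 0;
}
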
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 7f12d7e..e519012 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -20,6 +20,7 @@
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
+#include <asm/mmzone.h>
 #include <linux/uaccess.h> /* for uaccess_kernel() */
 
 extern void (*r4k_blast_dcache)(void);
@@ -747,4 +748,25 @@
 __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
 __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
 
+/* Currently, this is very specific to Loongson-3 */
+#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
+static inline void blast_##pfx##cache##lsize##_node(long node)		\
+{									\
+	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
+	unsigned long end = start + current_cpu_data.desc.waysize;	\
+	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
+	unsigned long ws_end = current_cpu_data.desc.ways <<		\
+			       current_cpu_data.desc.waybit;		\
+	unsigned long ws, addr;						\
+									\
+	for (ws = 0; ws < ws_end; ws += ws_inc)				\
+		for (addr = start; addr < end; addr += lsize * 32)	\
+			cache##lsize##_unroll32(addr|ws, indexop);	\
+}
+
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
+
 #endif /* _ASM_R4KCACHE_H */
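
The generated blast_scache*_node() helpers walk every way and every line of one node's secondary cache through that node's cached address window. A runnable mock of the index walk with made-up Loongson-3-style geometry (LP64 assumed for the 64-bit base; the counter stands in for the unrolled batches of index cache ops):

#include <stdio.h>

int main(void)
{
	unsigned long waysize = 1UL << 20;	/* 1 MiB per way (example) */
	unsigned long waybit  = 20;		/* log2(waysize) */
	unsigned long ways    = 4;
	unsigned long lsize   = 64;		/* cache line size */
	unsigned long base    = 0x9800000000UL;	/* CAC_BASE | node offset */
	unsigned long ws, addr, ops = 0;

	for (ws = 0; ws < ways << waybit; ws += 1UL << waybit)
		for (addr = base; addr < base + waysize; addr += lsize * 32)
			ops++;	/* one unrolled batch of 32 index ops */

	printf("index-op batches: %lu\n", ops);
	return 0;
}
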
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index c05dcf5..273ef58 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -369,8 +369,8 @@
 	mm_ext_op = 0x02c,
 	mm_pool32axf_op = 0x03c,
 	mm_srl32_op = 0x040,
+	mm_srlv32_op = 0x050,
 	mm_sra_op = 0x080,
-	mm_srlv32_op = 0x090,
 	mm_rotr_op = 0x0c0,
 	mm_lwxs_op = 0x118,
 	mm_addu32_op = 0x150,
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index d31bc2f..fb2b6d0 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -74,14 +74,15 @@
 						    get_order(VDMA_PGTBL_SIZE));
 	BUG_ON(!pgtbl);
 	dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
-	pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
+	pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
 
 	/*
 	 * Clear the R4030 translation table
 	 */
 	vdma_pgtbl_init();
 
-	r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
+	r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
+			  CPHYSADDR((unsigned long)pgtbl));
 	r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
 	r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
 
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index af0c8ac..705593d 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -43,7 +43,6 @@
 #include "clock.h"
 
 /* GPIOs */
-#define QI_LB60_GPIO_SD_CD		JZ_GPIO_PORTD(0)
 #define QI_LB60_GPIO_SD_VCC_EN_N	JZ_GPIO_PORTD(2)
 
 #define QI_LB60_GPIO_KEYOUT(x)		(JZ_GPIO_PORTC(10) + (x))
@@ -386,12 +385,18 @@
 };
 
 static struct jz4740_mmc_platform_data qi_lb60_mmc_pdata = {
-	.gpio_card_detect	= QI_LB60_GPIO_SD_CD,
-	.gpio_read_only		= -1,
 	.gpio_power		= QI_LB60_GPIO_SD_VCC_EN_N,
 	.power_active_low	= 1,
 };
 
+static struct gpiod_lookup_table qi_lb60_mmc_gpio_table = {
+	.dev_id = "jz4740-mmc.0",
+	.table = {
+		GPIO_LOOKUP("GPIOD", 0, "cd", GPIO_ACTIVE_HIGH),
+		{ },
+	},
+};
+
 /* beeper */
 static struct pwm_lookup qi_lb60_pwm_lookup[] = {
 	PWM_LOOKUP("jz4740-pwm", 4, "pwm-beeper", NULL, 0,
@@ -500,6 +505,7 @@
 	gpiod_add_lookup_table(&qi_lb60_audio_gpio_table);
 	gpiod_add_lookup_table(&qi_lb60_nand_gpio_table);
 	gpiod_add_lookup_table(&qi_lb60_spigpio_gpio_table);
+	gpiod_add_lookup_table(&qi_lb60_mmc_gpio_table);
 
 	spi_register_board_info(qi_lb60_spi_board_info,
 				ARRAY_SIZE(qi_lb60_spi_board_info));
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 0b9535b..6b2a4a9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -54,10 +54,9 @@
 unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
 			      unsigned long new, unsigned int size)
 {
-	u32 mask, old32, new32, load32;
+	u32 mask, old32, new32, load32, load;
 	volatile u32 *ptr32;
 	unsigned int shift;
-	u8 load;
 
 	/* Check that ptr is naturally aligned */
 	WARN_ON((unsigned long)ptr & (size - 1));
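
Declaring load as u8 truncated the field extracted from the aligned word, so a 16-bit cmpxchg whose old value exceeded 0xff could never compare equal, and the truncated value was also returned as the "old" result. Widening to u32 fixes both; a demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t load32 = 0x12345678;	/* aligned word holding the u16 */
	uint32_t mask   = 0xffff0000;	/* 16-bit field in bits 16..31 */
	unsigned int shift = 16;
	unsigned long old = 0x1234;	/* caller's expected old value */

	uint8_t  bad  = (load32 & mask) >> shift;	/* truncated: 0x34 */
	uint32_t good = (load32 & mask) >> shift;	/* intact:    0x1234 */

	printf("u8  compare: %s\n", bad  == old ? "match" : "spurious mismatch");
	printf("u32 compare: %s\n", good == old ? "match" : "mismatch");
	return 0;
}
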
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index ba150c75..85b6c60 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -52,6 +52,7 @@
 void __init init_IRQ(void)
 {
 	int i;
+	unsigned int order = get_order(IRQ_STACK_SIZE);
 
 	for (i = 0; i < NR_IRQS; i++)
 		irq_set_noprobe(i);
@@ -62,8 +63,7 @@
 	arch_init_irq();
 
 	for_each_possible_cpu(i) {
-		int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
-		void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+		void *s = (void *)__get_free_pages(GFP_KERNEL, order);
 
 		irq_stack[i] = s;
 		pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 8f5bd04..7f3f136 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -457,5 +457,5 @@
 	}
 
 	/* reprime cause register */
-	write_gcr_error_cause(0);
+	write_gcr_error_cause(cm_error);
 }
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d4f7fd4..85522c1 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -371,7 +371,7 @@
 static int get_frame_info(struct mips_frame_info *info)
 {
 	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
-	union mips_instruction insn, *ip, *ip_end;
+	union mips_instruction insn, *ip;
 	const unsigned int max_insns = 128;
 	unsigned int last_insn_size = 0;
 	unsigned int i;
@@ -384,10 +384,9 @@
 	if (!ip)
 		goto err;
 
-	ip_end = (void *)ip + info->func_size;
-
-	for (i = 0; i < max_insns && ip < ip_end; i++) {
+	for (i = 0; i < max_insns; i++) {
 		ip = (void *)ip + last_insn_size;
+
 		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
 			insn.word = ip->halfword[0] << 16;
 			last_insn_size = 2;
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 48a9c6b..9df3ebd 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -126,8 +126,8 @@
 
 	/* Map delay slot emulation page */
 	base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
-			   VM_READ|VM_WRITE|VM_EXEC|
-			   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+			   VM_READ | VM_EXEC |
+			   VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
 			   0, NULL);
 	if (IS_ERR_VALUE(base)) {
 		ret = base;
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f0bc331..c4ef1c3 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -224,9 +224,11 @@
 	.irq_set_type = ltq_eiu_settype,
 };
 
-static void ltq_hw_irqdispatch(int module)
+static void ltq_hw_irq_handler(struct irq_desc *desc)
 {
+	int module = irq_desc_get_irq(desc) - 2;
 	u32 irq;
+	int hwirq;
 
 	irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
 	if (irq == 0)
@@ -237,7 +239,8 @@
 	 * other bits might be bogus
 	 */
 	irq = __fls(irq);
-	do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
+	hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
+	generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
 
 	/* if this is a EBU irq, we need to ack it or get a deadlock */
 	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@@ -245,49 +248,6 @@
 			LTQ_EBU_PCC_ISTAT);
 }
 
-#define DEFINE_HWx_IRQDISPATCH(x)					\
-	static void ltq_hw ## x ## _irqdispatch(void)			\
-	{								\
-		ltq_hw_irqdispatch(x);					\
-	}
-DEFINE_HWx_IRQDISPATCH(0)
-DEFINE_HWx_IRQDISPATCH(1)
-DEFINE_HWx_IRQDISPATCH(2)
-DEFINE_HWx_IRQDISPATCH(3)
-DEFINE_HWx_IRQDISPATCH(4)
-
-#if MIPS_CPU_TIMER_IRQ == 7
-static void ltq_hw5_irqdispatch(void)
-{
-	do_IRQ(MIPS_CPU_TIMER_IRQ);
-}
-#else
-DEFINE_HWx_IRQDISPATCH(5)
-#endif
-
-static void ltq_hw_irq_handler(struct irq_desc *desc)
-{
-	ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
-	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-	int irq;
-
-	if (!pending) {
-		spurious_interrupt();
-		return;
-	}
-
-	pending >>= CAUSEB_IP;
-	while (pending) {
-		irq = fls(pending) - 1;
-		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
-		pending &= ~BIT(irq);
-	}
-}
-
 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
 {
 	struct irq_chip *chip = &ltq_irq_type;
@@ -343,28 +303,10 @@
 	for (i = 0; i < MAX_IM; i++)
 		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
 
-	if (cpu_has_vint) {
-		pr_info("Setting up vectored interrupts\n");
-		set_vi_handler(2, ltq_hw0_irqdispatch);
-		set_vi_handler(3, ltq_hw1_irqdispatch);
-		set_vi_handler(4, ltq_hw2_irqdispatch);
-		set_vi_handler(5, ltq_hw3_irqdispatch);
-		set_vi_handler(6, ltq_hw4_irqdispatch);
-		set_vi_handler(7, ltq_hw5_irqdispatch);
-	}
-
 	ltq_domain = irq_domain_add_linear(node,
 		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
 		&irq_domain_ops, 0);
 
-#ifndef CONFIG_MIPS_MT_SMP
-	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
-		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#else
-	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
-		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#endif
-
 	/* tell oprofile which irq to use */
 	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
 
diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c
index a60715e..b26892c 100644
--- a/arch/mips/loongson64/common/reset.c
+++ b/arch/mips/loongson64/common/reset.c
@@ -59,7 +59,12 @@
 {
 #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
 	mach_prepare_shutdown();
-	unreachable();
+
+	/*
+	 * A wait loop is needed here, but mips/kernel/reset.c already falls
+	 * back to a generic delay loop, machine_hang(), so simply return.
+	 */
+	return;
 #else
 	void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
 
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 5450f4d..e2d46cb 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -214,8 +214,9 @@
 {
 	int isa16 = get_isa16_mode(regs->cp0_epc);
 	mips_instruction break_math;
-	struct emuframe __user *fr;
-	int err, fr_idx;
+	unsigned long fr_uaddr;
+	struct emuframe fr;
+	int fr_idx, ret;
 
 	/* NOP is easy */
 	if (ir == 0)
@@ -250,27 +251,31 @@
 		fr_idx = alloc_emuframe();
 	if (fr_idx == BD_EMUFRAME_NONE)
 		return SIGBUS;
-	fr = &dsemul_page()[fr_idx];
 
 	/* Retrieve the appropriately encoded break instruction */
 	break_math = BREAK_MATH(isa16);
 
 	/* Write the instructions to the frame */
 	if (isa16) {
-		err = __put_user(ir >> 16,
-				 (u16 __user *)(&fr->emul));
-		err |= __put_user(ir & 0xffff,
-				  (u16 __user *)((long)(&fr->emul) + 2));
-		err |= __put_user(break_math >> 16,
-				  (u16 __user *)(&fr->badinst));
-		err |= __put_user(break_math & 0xffff,
-				  (u16 __user *)((long)(&fr->badinst) + 2));
+		union mips_instruction _emul = {
+			.halfword = { ir >> 16, ir }
+		};
+		union mips_instruction _badinst = {
+			.halfword = { break_math >> 16, break_math }
+		};
+
+		fr.emul = _emul.word;
+		fr.badinst = _badinst.word;
 	} else {
-		err = __put_user(ir, &fr->emul);
-		err |= __put_user(break_math, &fr->badinst);
+		fr.emul = ir;
+		fr.badinst = break_math;
 	}
 
-	if (unlikely(err)) {
+	/* Write the frame to user memory */
+	fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
+	ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
+				FOLL_FORCE | FOLL_WRITE);
+	if (unlikely(ret != sizeof(fr))) {
 		MIPS_FPU_EMU_INC_STATS(errors);
 		free_emuframe(fr_idx, current->mm);
 		return SIGBUS;
@@ -282,10 +287,7 @@
 	atomic_set(&current->thread.bd_emu_frame, fr_idx);
 
 	/* Change user register context to execute the frame */
-	regs->cp0_epc = (unsigned long)&fr->emul | isa16;
-
-	/* Ensure the icache observes our newly written frame */
-	flush_cache_sigtramp((unsigned long)&fr->emul);
+	regs->cp0_epc = fr_uaddr | isa16;
 
 	return 0;
 }
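[Editorial sketch] The dsemul change stages the frame in kernel memory and writes it out in one access_process_vm() call, which resolves the user mapping through the page tables and performs the necessary cache maintenance, hence the removal of the explicit flush_cache_sigtramp(). Condensed, with frame_user_address() as a hypothetical helper:

	/* Stage the frame, then write it through the target's page tables. */
	struct emuframe fr = {
		.emul    = insn,	/* instruction being emulated */
		.badinst = break_insn,	/* trap back into the kernel */
	};
	unsigned long uaddr = frame_user_address(fr_idx);	/* hypothetical */

	if (access_process_vm(current, uaddr, &fr, sizeof(fr),
			      FOLL_FORCE | FOLL_WRITE) != sizeof(fr))
		return SIGBUS;	/* a partial write counts as failure */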
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 3466fcd..01848cd 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -245,7 +245,7 @@
 	pmd_t *pmdp;
 	pte_t *ptep;
 
-	pr_debug("cpage[%08lx,%08lx]\n",
+	pr_debug("cpage[%08llx,%08lx]\n",
 		 cpu_context(smp_processor_id(), mm), addr);
 
 	/* No ASID => no such page in the cache.  */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index a9ef057..05a539d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -459,11 +459,28 @@
 		r4k_blast_scache = blast_scache128;
 }
 
+static void (*r4k_blast_scache_node)(long node);
+
+static void r4k_blast_scache_node_setup(void)
+{
+	unsigned long sc_lsize = cpu_scache_line_size();
+
+	if (current_cpu_type() != CPU_LOONGSON3)
+		r4k_blast_scache_node = (void *)cache_noop;
+	else if (sc_lsize == 16)
+		r4k_blast_scache_node = blast_scache16_node;
+	else if (sc_lsize == 32)
+		r4k_blast_scache_node = blast_scache32_node;
+	else if (sc_lsize == 64)
+		r4k_blast_scache_node = blast_scache64_node;
+	else if (sc_lsize == 128)
+		r4k_blast_scache_node = blast_scache128_node;
+}
+
 static inline void local_r4k___flush_cache_all(void * args)
 {
 	switch (current_cpu_type()) {
 	case CPU_LOONGSON2:
-	case CPU_LOONGSON3:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
 	case CPU_R4400SC:
@@ -480,6 +497,11 @@
 		r4k_blast_scache();
 		break;
 
+	case CPU_LOONGSON3:
+		/* Use get_ebase_cpunum() for both NUMA=y/n */
+		r4k_blast_scache_node(get_ebase_cpunum() >> 2);
+		break;
+
 	case CPU_BMIPS5000:
 		r4k_blast_scache();
 		__sync();
@@ -840,10 +862,14 @@
 
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
-		if (size >= scache_size)
-			r4k_blast_scache();
-		else
+		if (size >= scache_size) {
+			if (current_cpu_type() != CPU_LOONGSON3)
+				r4k_blast_scache();
+			else
+				r4k_blast_scache_node(pa_to_nid(addr));
+		} else {
 			blast_scache_range(addr, addr + size);
+		}
 		preempt_enable();
 		__sync();
 		return;
@@ -877,9 +903,12 @@
 
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
-		if (size >= scache_size)
-			r4k_blast_scache();
-		else {
+		if (size >= scache_size) {
+			if (current_cpu_type() != CPU_LOONGSON3)
+				r4k_blast_scache();
+			else
+				r4k_blast_scache_node(pa_to_nid(addr));
+		} else {
 			/*
 			 * There is no clearly documented alignment requirement
 			 * for the cache instruction on MIPS processors and
@@ -1918,6 +1947,7 @@
 	r4k_blast_scache_page_setup();
 	r4k_blast_scache_page_indexed_setup();
 	r4k_blast_scache_setup();
+	r4k_blast_scache_node_setup();
 #ifdef CONFIG_EVA
 	r4k_blast_dcache_user_page_setup();
 	r4k_blast_icache_user_page_setup();
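[Editorial sketch] The node-scoped blast helper follows the usual r4k setup pattern: probe the secondary cache line size once at init, bind a matching implementation, and fall back to a no-op (here, for everything that is not CPU_LOONGSON3). Distilled with placeholder names:

	static void (*blast_op)(long node);	/* stands in for r4k_blast_scache_node */

	static void blast_op_setup(void)
	{
		switch (cpu_scache_line_size()) {
		case 16:  blast_op = blast_scache16_node;  break;
		case 32:  blast_op = blast_scache32_node;  break;
		case 64:  blast_op = blast_scache64_node;  break;
		case 128: blast_op = blast_scache128_node; break;
		default:  blast_op = (void *)cache_noop;   /* unsupported line size */
		}
	}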
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index aeb7b1b..9bda82ed 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -343,12 +343,15 @@
 	const struct bpf_prog *prog = ctx->skf;
 	int stack_adjust = ctx->stack_size;
 	int store_offset = stack_adjust - 8;
+	enum reg_val_type td;
 	int r0 = MIPS_R_V0;
 
-	if (dest_reg == MIPS_R_RA &&
-	    get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+	if (dest_reg == MIPS_R_RA) {
 		/* Don't let zero extended value escape. */
-		emit_instr(ctx, sll, r0, r0, 0);
+		td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
+		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+			emit_instr(ctx, sll, r0, r0, 0);
+	}
 
 	if (ctx->flags & EBPF_SAVE_RA) {
 		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
@@ -1815,7 +1818,7 @@
 
 	/* Update the icache */
 	flush_icache_range((unsigned long)ctx.target,
-			   (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
+			   (unsigned long)&ctx.target[ctx.idx]);
 
 	if (bpf_jit_enable > 1)
 		/* Dump JIT code */
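[Editorial note] The flush-range fix is a pointer-arithmetic correction: ctx.target is a u32 *, so adding ctx.idx * sizeof(u32) advances by four times as many elements as intended. A standalone illustration:

	u32 *buf = ctx.target;		/* one u32 per emitted instruction */
	size_t idx = ctx.idx;

	void *end_wrong = buf + idx * sizeof(u32);	/* buf + 4*idx elements, 16*idx bytes */
	void *end_right = &buf[idx];			/* buf + idx elements,    4*idx bytes */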
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb84..288b58b 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@
 	int irq;
 	struct irq_chip *msi;
 
-	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+		return 0;
+	} else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
 		msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
 		msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
 		msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 5017d58..fc29b85 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -568,6 +568,11 @@
 	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
 		return 0;
 
+	if (!octeon_is_pci_host()) {
+		pr_notice("Not in host mode, PCI Controller not initialized\n");
+		return 0;
+	}
+
 	/* Point pcibios_map_irq() to the PCI version of it */
 	octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
 
@@ -579,11 +584,6 @@
 	else
 		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
 
-	if (!octeon_is_pci_host()) {
-		pr_notice("Not in host mode, PCI Controller not initialized\n");
-		return 0;
-	}
-
 	/* PCI I/O and PCI MEM values */
 	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
 	ioport_resource.start = 0;
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index 1f9cb0e..613d617 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -38,6 +38,7 @@
 
 	config SOC_MT7620
 		bool "MT7620/8"
+		select CPU_MIPSR2_IRQ_VI
 		select HW_HAS_PCI
 
 	config SOC_MT7621
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
index b3d6bf2..3ef3fb6 100644
--- a/arch/mips/sibyte/common/Makefile
+++ b/arch/mips/sibyte/common/Makefile
@@ -1,4 +1,5 @@
 obj-y := cfe.o
+obj-$(CONFIG_SWIOTLB)			+= dma.o
 obj-$(CONFIG_SIBYTE_BUS_WATCHER)	+= bus_watcher.o
 obj-$(CONFIG_SIBYTE_CFE_CONSOLE)	+= cfe_console.o
 obj-$(CONFIG_SIBYTE_TBPROF)		+= sb_tbprof.o
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
new file mode 100644
index 0000000..eb47a94
--- /dev/null
+++ b/arch/mips/sibyte/common/dma.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *	DMA support for Broadcom SiByte platforms.
+ *
+ *	Copyright (c) 2018  Maciej W. Rozycki
+ */
+
+#include <linux/swiotlb.h>
+#include <asm/bootinfo.h>
+
+void __init plat_swiotlb_setup(void)
+{
+	swiotlb_init(1);
+}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 34605ca..6f10312 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -8,6 +8,7 @@
 	$(filter -E%,$(KBUILD_CFLAGS)) \
 	$(filter -mmicromips,$(KBUILD_CFLAGS)) \
 	$(filter -march=%,$(KBUILD_CFLAGS)) \
+	$(filter -m%-float,$(KBUILD_CFLAGS)) \
 	-D__VDSO__
 
 ifeq ($(cc-name),clang)
@@ -128,7 +129,7 @@
 	$(call cmd,force_checksrc)
 	$(call if_changed_rule,cc_o_c)
 
-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
 	$(call if_changed_dep,cpp_lds_S)
 
@@ -168,7 +169,7 @@
 	$(call cmd,force_checksrc)
 	$(call if_changed_rule,cc_o_c)
 
-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
 	$(call if_changed_dep,cpp_lds_S)
 
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index d3e19a5..9f52db9 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -4,7 +4,7 @@
 #ifndef _ASMNDS32_PGTABLE_H
 #define _ASMNDS32_PGTABLE_H
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #include <asm-generic/4level-fixup.h>
 #include <asm-generic/sizes.h>
 
diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile
index 6b68558..7c5c15a 100644
--- a/arch/nds32/mm/Makefile
+++ b/arch/nds32/mm/Makefile
@@ -4,4 +4,8 @@
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
 obj-$(CONFIG_HIGHMEM)           += highmem.o
-CFLAGS_proc-n13.o		+= -fomit-frame-pointer
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_proc.o     = $(CC_FLAGS_FTRACE)
+endif
+CFLAGS_proc.o              += -fomit-frame-pointer
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index fa6b7c78..ff0860b 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -117,7 +117,7 @@
 #if CONFIG_PGTABLE_LEVELS == 3
 #define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
 #else
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define BITS_PER_PMD	0
 #endif
 #define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 2582df1..0964c23 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -308,15 +308,29 @@
 
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
-	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-	    tracehook_report_syscall_entry(regs)) {
+	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+		int rc = tracehook_report_syscall_entry(regs);
+
 		/*
-		 * Tracing decided this syscall should not happen or the
-		 * debugger stored an invalid system call number. Skip
-		 * the system call and the system call restart handling.
+		 * As tracesys_next does not set %r28 to -ENOSYS
+		 * when %r20 is set to -1, initialize it here.
 		 */
-		regs->gr[20] = -1UL;
-		goto out;
+		regs->gr[28] = -ENOSYS;
+
+		if (rc) {
+			/*
+			 * A nonzero return code from
+			 * tracehook_report_syscall_entry() tells us
+			 * to prevent the syscall execution.  Skip
+			 * the syscall and the syscall restart handling.
+			 *
+			 * Note that the tracer may also just change
+			 * regs->gr[20] to an invalid syscall number,
+			 * that is handled by tracesys_next.
+			 */
+			regs->gr[20] = -1UL;
+			return -1;
+		}
 	}
 
 	/* Do the secure computing check after ptrace. */
@@ -340,7 +354,6 @@
 			regs->gr[24] & 0xffffffff,
 			regs->gr[23] & 0xffffffff);
 
-out:
 	/*
 	 * Sign extend the syscall number to 64bit since it may have been
 	 * modified by a compat ptrace call
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index d2824b0..c4c0399 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -160,8 +160,17 @@
 CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
 endif
 
+ifdef CONFIG_FUNCTION_TRACER
+CC_FLAGS_FTRACE := -pg
 ifdef CONFIG_MPROFILE_KERNEL
-	CC_FLAGS_FTRACE := -pg -mprofile-kernel
+CC_FLAGS_FTRACE += -mprofile-kernel
+endif
+# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828
+ifneq ($(cc-name),clang)
+CC_FLAGS_FTRACE	+= $(call cc-ifversion, -lt, 0409, -mno-sched-epilog)
+endif
 endif
 
 CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
@@ -229,11 +238,6 @@
 KBUILD_CFLAGS		+= -mcpu=powerpc
 endif
 
-# Work around a gcc code-gen bug with -fno-omit-frame-pointer.
-ifdef CONFIG_FUNCTION_TRACER
-KBUILD_CFLAGS		+= -mno-sched-epilog
-endif
-
 cpu-as-$(CONFIG_4xx)		+= -Wa,-m405
 cpu-as-$(CONFIG_ALTIVEC)	+= $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200)		+= -Wa,-me200
@@ -408,36 +412,9 @@
 # to stdout and these checks are run even on install targets.
 TOUT	:= .tmp_gas_check
 
-# Check gcc and binutils versions:
-# - gcc-3.4 and binutils-2.14 are a fatal combination
-# - Require gcc 4.0 or above on 64-bit
-# - gcc-4.2.0 has issues compiling modules on 64-bit
+# Check toolchain versions:
+# - gcc-4.6 is the minimum kernel-wide version, so nothing is required here.
 checkbin:
-	@if test "$(cc-name)" != "clang" \
-	    && test "$(cc-version)" = "0304" ; then \
-		if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
-			echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
-			echo 'correctly with gcc-3.4 and your version of binutils.'; \
-			echo '*** Please upgrade your binutils or downgrade your gcc'; \
-			false; \
-		fi ; \
-	fi
-	@if test "$(cc-name)" != "clang" \
-	    && test "$(cc-version)" -lt "0400" \
-	    && test "x${CONFIG_PPC64}" = "xy" ; then \
-                echo -n "Sorry, GCC v4.0 or above is required to build " ; \
-                echo "the 64-bit powerpc kernel." ; \
-                false ; \
-        fi
-	@if test "$(cc-name)" != "clang" \
-	    && test "$(cc-fullversion)" = "040200" \
-	    && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
-		echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
-		echo 'kernel with modules enabled.' ; \
-		echo -n '*** Please use a different GCC version or ' ; \
-		echo 'disable kernel modules' ; \
-		false ; \
-	fi
 	@if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \
 	    && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \
 		echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 0fb96c2..25e3184 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -55,6 +55,11 @@
 
 BOOTARFLAGS	:= -cr$(KBUILD_ARFLAGS)
 
+ifdef CONFIG_CC_IS_CLANG
+BOOTCFLAGS += $(CLANG_FLAGS)
+BOOTAFLAGS += $(CLANG_FLAGS)
+endif
+
 ifdef CONFIG_DEBUG_INFO
 BOOTCFLAGS	+= -g
 endif
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 32dfe6d..9b9d174 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -15,7 +15,7 @@
 RELA = 7
 RELACOUNT = 0x6ffffff9
 
-	.text
+	.data
 	/* A procedure descriptor used when booting this as a COFF file.
 	 * When making COFF, this comes first in the link and we're
 	 * linked at 0x500000.
@@ -23,6 +23,8 @@
 	.globl	_zimage_start_opd
 _zimage_start_opd:
 	.long	0x500000, 0, 0, 0
+	.text
+	b	_zimage_start
 
 #ifdef __powerpc64__
 .balign 8
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 2a24865..855dbae 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1234,21 +1234,13 @@
 
 #define pmd_move_must_withdraw pmd_move_must_withdraw
 struct spinlock;
-static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
-					 struct spinlock *old_pmd_ptl,
-					 struct vm_area_struct *vma)
-{
-	if (radix_enabled())
-		return false;
-	/*
-	 * Archs like ppc64 use pgtable to store per pmd
-	 * specific information. So when we switch the pmd,
-	 * we should also withdraw and deposit the pgtable
-	 */
-	return true;
-}
-
-
+extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+				  struct spinlock *old_pmd_ptl,
+				  struct vm_area_struct *vma);
+/*
+ * Hash translation mode uses the deposited table to store hash pte
+ * slot information.
+ */
 #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
 static inline bool arch_needs_pgtable_deposit(void)
 {
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 1e7a335..15bc07a 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -200,7 +200,7 @@
 	unsigned long long	size;
 };
 
-extern int is_fadump_boot_memory_area(u64 addr, ulong size);
+extern int is_fadump_memory_area(u64 addr, ulong size);
 extern int early_init_dt_scan_fw_dump(unsigned long node,
 		const char *uname, int depth, void *data);
 extern int fadump_reserve_mem(void);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index bac225b..23bea99 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -63,7 +63,7 @@
 #endif
 
 #define access_ok(type, addr, size)		\
-	(__chk_user_ptr(addr),			\
+	(__chk_user_ptr(addr), (void)(type),		\
 	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
 
 /*
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 3b66f2c..eac1879 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -5,6 +5,9 @@
 
 CFLAGS_ptrace.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
+# Disable clang warning for using setjmp without setjmp.h header
+CFLAGS_crash.o		+= $(call cc-disable-warning, builtin-requires-header)
+
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 
 ifdef CONFIG_PPC64
@@ -22,10 +25,10 @@
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
-CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_prom_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_btext.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_prom.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_cputable.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_prom_init.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_btext.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_prom.o = $(CC_FLAGS_FTRACE)
 endif
 
 obj-y				:= cputable.o ptrace.o syscalls.o \
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index a711d22..c02c952 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -118,13 +118,19 @@
 
 /*
  * If fadump is registered, check if the memory provided
- * falls within boot memory area.
+ * falls within the boot memory area or the reserved memory area.
  */
-int is_fadump_boot_memory_area(u64 addr, ulong size)
+int is_fadump_memory_area(u64 addr, ulong size)
 {
+	u64 d_start = fw_dump.reserve_dump_area_start;
+	u64 d_end = d_start + fw_dump.reserve_dump_area_size;
+
 	if (!fw_dump.dump_registered)
 		return 0;
 
+	if (((addr + size) > d_start) && (addr <= d_end))
+		return 1;
+
 	return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size;
 }
 
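[Editorial sketch] The new reserved-area test is a range-overlap check. In canonical half-open form it reads as below; the patch uses the slightly wider `addr <= d_end` bound, erring on the side of refusing a hot-remove:

	/* Does [addr, addr + size) overlap [start, end)? */
	static inline bool ranges_overlap(u64 addr, u64 size, u64 start, u64 end)
	{
		return addr + size > start && addr < end;
	}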
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 81d4574..9fd2ff2 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -919,11 +919,12 @@
 
 	/* set up the PTE pointers for the Abatron bdiGDB.
 	*/
-	tovirt(r6,r6)
 	lis	r5, abatron_pteptrs@h
 	ori	r5, r5, abatron_pteptrs@l
 	stw	r5, 0xf0(0)	/* Must match your Abatron config file */
 	tophys(r5,r5)
+	lis	r6, swapper_pg_dir@h
+	ori	r6, r6, swapper_pg_dir@l
 	stw	r6, 0(r5)
 
 /* Now turn on the MMU for real! */
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index f6f469f..1b395b8 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -22,7 +22,7 @@
 	COUNT_CACHE_FLUSH_SW	= 0x2,
 	COUNT_CACHE_FLUSH_HW	= 0x4,
 };
-static enum count_cache_flush_type count_cache_flush_type;
+static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
 
 bool barrier_nospec_enabled;
 static bool no_nospec;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index e6474a4..fd59fef 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -848,7 +848,23 @@
 	/* If TM bits are set to the reserved value, it's an invalid context */
 	if (MSR_TM_RESV(msr_hi))
 		return 1;
-	/* Pull in the MSR TM bits from the user context */
+
+	/*
+	 * Disabling preemption, since it is unsafe to be preempted
+	 * with MSR[TS] set without recheckpointing.
+	 */
+	preempt_disable();
+
+	/*
+	 * CAUTION:
+	 * After regs->MSR[TS] being updated, make sure that get_user(),
+	 * put_user() or similar functions are *not* called. These
+	 * functions can generate page faults which will cause the process
+	 * to be de-scheduled with MSR[TS] set but without calling
+	 * tm_recheckpoint(). This can cause a TM Bad Thing when we
+	 * return to userspace.
+	 *
+	 * Pull in the MSR TM bits from the user context
+	 */
 	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
 	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
 	 * registers, including FP and V[S]Rs.  After recheckpointing, the
@@ -873,6 +889,8 @@
 	}
 #endif
 
+	preempt_enable();
+
 	return 0;
 }
 #endif
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 83d51bf..bbd1c73 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -467,20 +467,6 @@
 	if (MSR_TM_RESV(msr))
 		return -EINVAL;
 
-	/* pull in MSR TS bits from user context */
-	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
-
-	/*
-	 * Ensure that TM is enabled in regs->msr before we leave the signal
-	 * handler. It could be the case that (a) user disabled the TM bit
-	 * through the manipulation of the MSR bits in uc_mcontext or (b) the
-	 * TM bit was disabled because a sufficient number of context switches
-	 * happened whilst in the signal handler and load_tm overflowed,
-	 * disabling the TM bit. In either case we can end up with an illegal
-	 * TM state leading to a TM Bad Thing when we return to userspace.
-	 */
-	regs->msr |= MSR_TM;
-
 	/* pull in MSR LE from user context */
 	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 
@@ -572,6 +558,34 @@
 	tm_enable();
 	/* Make sure the transaction is marked as failed */
 	tsk->thread.tm_texasr |= TEXASR_FS;
+
+	/*
+	 * Disabling preemption, since it is unsafe to be preempted
+	 * with MSR[TS] set without recheckpointing.
+	 */
+	preempt_disable();
+
+	/* pull in MSR TS bits from user context */
+	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+
+	/*
+	 * Ensure that TM is enabled in regs->msr before we leave the signal
+	 * handler. It could be the case that (a) user disabled the TM bit
+	 * through the manipulation of the MSR bits in uc_mcontext or (b) the
+	 * TM bit was disabled because a sufficient number of context switches
+	 * happened whilst in the signal handler and load_tm overflowed,
+	 * disabling the TM bit. In either case we can end up with an illegal
+	 * TM state leading to a TM Bad Thing when we return to userspace.
+	 *
+	 * CAUTION:
+	 * After regs->MSR[TS] being updated, make sure that get_user(),
+	 * put_user() or similar functions are *not* called. These
+	 * functions can generate page faults which will cause the process
+	 * to be de-scheduled with MSR[TS] set but without calling
+	 * tm_recheckpoint(). This can cause a TM Bad Thing when we
+	 * return to userspace.
+	 */
+	regs->msr |= MSR_TM;
+
 	/* This loads the checkpointed FP/VEC state, if used */
 	tm_recheckpoint(&tsk->thread);
 
@@ -585,6 +599,8 @@
 		regs->msr |= MSR_VEC;
 	}
 
+	preempt_enable();
+
 	return err;
 }
 #endif
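[Editorial sketch] Both signal paths now follow the same discipline: the window between setting MSR[TS] and tm_recheckpoint() must be preemption-free and free of user accesses. The shape of the critical section, condensed:

	preempt_disable();

	/* Restore the transactional-state bits from the user context. */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);

	/*
	 * No get_user()/put_user() from here on: a page fault could
	 * deschedule the task with MSR[TS] set but nothing recheckpointed.
	 */
	tm_recheckpoint(&tsk->thread);

	preempt_enable();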
diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile
index d22d8ba..d868ba4 100644
--- a/arch/powerpc/kernel/trace/Makefile
+++ b/arch/powerpc/kernel/trace/Makefile
@@ -7,7 +7,7 @@
 
 ifdef CONFIG_FUNCTION_TRACER
 # do not trace tracer code
-CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
 endif
 
 obj32-$(CONFIG_FUNCTION_TRACER)		+= ftrace_32.o
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 07ae018..53016c7 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -296,6 +296,10 @@
 #ifdef CONFIG_PPC32
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 		DATA_DATA
+#ifdef CONFIG_UBSAN
+		*(.data..Lubsan_data*)
+		*(.data..Lubsan_type*)
+#endif
 		*(.data.rel*)
 		*(SDATA_MAIN)
 		*(.sdata2)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index eba5756..79b7940 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -543,8 +543,11 @@
 #ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CAP_SPAPR_TCE:
 	case KVM_CAP_SPAPR_TCE_64:
-		/* fallthrough */
+		r = 1;
+		break;
 	case KVM_CAP_SPAPR_TCE_VFIO:
+		r = !!cpu_has_feature(CPU_FTR_HVMODE);
+		break;
 	case KVM_CAP_PPC_RTAS:
 	case KVM_CAP_PPC_FIXUP_HCALL:
 	case KVM_CAP_PPC_ENABLE_HCALL:
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index bdf33b9..8464c2c 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <asm/fixmap.h>
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d51cf5f..365526e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -221,7 +221,9 @@
 static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
 			     unsigned long address)
 {
-	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
+	/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
+	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
+				      DSISR_PROTFAULT))) {
 		printk_ratelimited(KERN_CRIT "kernel tried to execute"
 				   " exec-protected page (%lx) -"
 				   "exploit attempt? (uid: %d)\n",
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7a9886f..a5091c0 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -188,15 +188,20 @@
 	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
 
 	for (; start < end; start += page_size) {
-		void *p;
+		void *p = NULL;
 		int rc;
 
 		if (vmemmap_populated(start, page_size))
 			continue;
 
+		/*
+		 * Allocate from the altmap first if we have one. This may
+		 * fail due to alignment issues when using 16MB hugepages, so
+	 * fall back to system memory if the altmap allocation fails.
+		 */
 		if (altmap)
 			p = altmap_alloc_block_buf(page_size, altmap);
-		else
+		if (!p)
 			p = vmemmap_alloc_block_buf(page_size, node);
 		if (!p)
 			return -ENOMEM;
@@ -255,8 +260,15 @@
 {
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 	unsigned long page_order = get_order(page_size);
+	unsigned long alt_start = ~0, alt_end = ~0;
+	unsigned long base_pfn;
 
 	start = _ALIGN_DOWN(start, page_size);
+	if (altmap) {
+		alt_start = altmap->base_pfn;
+		alt_end = altmap->base_pfn + altmap->reserve +
+			  altmap->free + altmap->alloc + altmap->align;
+	}
 
 	pr_debug("vmemmap_free %lx...%lx\n", start, end);
 
@@ -280,8 +292,9 @@
 		page = pfn_to_page(addr >> PAGE_SHIFT);
 		section_base = pfn_to_page(vmemmap_section_start(start));
 		nr_pages = 1 << page_order;
+		base_pfn = PHYS_PFN(addr);
 
-		if (altmap) {
+		if (base_pfn >= alt_start && base_pfn < alt_end) {
 			vmem_altmap_free(altmap, nr_pages);
 		} else if (PageReserved(page)) {
 			/* allocated from bootmem */
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 01d7c0f..297db66 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -477,3 +477,25 @@
 		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
 }
 #endif /* CONFIG_PROC_FS */
+
+/*
+ * For hash translation mode, we use the deposited table to store hash slot
+ * information, which is stored at a PTRS_PER_PMD offset from the related
+ * pmd location. Hence a pmd move requires deposit and withdraw.
+ *
+ * For radix translation with split pmd ptl, we store the deposited table in the
+ * pmd page. Hence if we have different pmd page we need to withdraw during pmd
+ * move.
+ *
+ * With hash we use deposited table always irrespective of anon or not.
+ * With radix we use deposited table only for anonymous mapping.
+ */
+int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+			   struct spinlock *old_pmd_ptl,
+			   struct vm_area_struct *vma)
+{
+	if (radix_enabled())
+		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
+
+	return true;
+}
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 177de81..6a2f65d 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -226,8 +226,13 @@
 	u64 mmcra = mfspr(SPRN_MMCRA);
 	u64 exp = MMCRA_THR_CTR_EXP(mmcra);
 	u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
+	u64 sier = mfspr(SPRN_SIER);
+	u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
 
-	*weight = mantissa << (2 * exp);
+	if (val == 0 || val == 7)
+		*weight = 0;
+	else
+		*weight = mantissa << (2 * exp);
 }
 
 int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 882944c..5d8e8b6 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -49,7 +49,7 @@
 	cpu = info->policy->cpu;
 	busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
 
-	CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1);
+	info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1);
 	pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
 			cpu, busy_spus, info->busy_spus);
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index c9ef3c5..9fcccb4 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -987,9 +987,9 @@
 	unsigned long active_tasks; /* fixed-point */
 
 	active_tasks = count_active_contexts() * FIXED_1;
-	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
-	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
-	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
+	spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks);
+	spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks);
+	spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks);
 }
 
 static void spusched_wake(struct timer_list *unused)
@@ -1071,9 +1071,6 @@
 	}
 }
 
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-
 static int show_spu_loadavg(struct seq_file *s, void *private)
 {
 	int a, b, c;
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index f2839ee..561a67d 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -3,7 +3,7 @@
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
-CFLAGS_REMOVE_bootx_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_bootx_init.o = $(CC_FLAGS_FTRACE)
 endif
 
 obj-y				+= pic.o setup.o time.o feature.o pci.o \
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index fe96910..7639b21 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -299,7 +299,7 @@
 	if (alloc_userspace_copy) {
 		offset = 0;
 		uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-				levels, tce_table_size, &offset,
+				tmplevels, tce_table_size, &offset,
 				&total_allocated_uas);
 		if (!uas)
 			goto free_tces_exit;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index a0b20c0..e3010b1 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -272,6 +272,8 @@
 	if (rc)
 		return rc;
 
+	of_node_put(dn);
+
 	return 0;
 }
 
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index c1578f5..e4c658c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -389,8 +389,11 @@
 	phys_addr = lmb->base_addr;
 
 #ifdef CONFIG_FA_DUMP
-	/* Don't hot-remove memory that falls in fadump boot memory area */
-	if (is_fadump_boot_memory_area(phys_addr, block_sz))
+	/*
+	 * Don't hot-remove memory that falls in the fadump boot memory area
+	 * or in memory reserved for capturing the old kernel's memory.
+	 */
+	if (is_fadump_memory_area(phys_addr, block_sz))
 		return false;
 #endif
 
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 1bc3abb..9d7d8e6 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -1,14 +1,17 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for xmon
 
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+# Disable clang warning for using setjmp without setjmp.h header
+subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header)
+
+subdir-ccflags-$(CONFIG_PPC_WERROR) += -Werror
 
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 
 # Disable ftrace for the entire directory
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst -mno-sched-epilog,,$(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)))
+KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
 
 ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
 
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 4264aed..dd6badc 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -75,6 +75,9 @@
 #define xmon_owner 0
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_PPC_PSERIES
+static int set_indicator_token = RTAS_UNKNOWN_SERVICE;
+#endif
 static unsigned long in_xmon __read_mostly = 0;
 static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT);
 
@@ -358,7 +361,6 @@
 #ifdef CONFIG_PPC_PSERIES
 	/* Since this can't be a module, args should end up below 4GB. */
 	static struct rtas_args args;
-	int token;
 
 	/*
 	 * At this point we have got all the cpus we can into
@@ -367,11 +369,11 @@
 	 * If we did try to take rtas.lock there would be a
 	 * real possibility of deadlock.
 	 */
-	token = rtas_token("set-indicator");
-	if (token == RTAS_UNKNOWN_SERVICE)
+	if (set_indicator_token == RTAS_UNKNOWN_SERVICE)
 		return;
 
-	rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0);
+	rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL,
+			   SURVEILLANCE_TOKEN, 0, 0);
 
 #endif /* CONFIG_PPC_PSERIES */
 }
@@ -3672,6 +3674,14 @@
 		__debugger_iabr_match = xmon_iabr_match;
 		__debugger_break_match = xmon_break_match;
 		__debugger_fault_handler = xmon_fault_handler;
+
+#ifdef CONFIG_PPC_PSERIES
+		/*
+		 * Get the token here to avoid trying to get a lock
+		 * during the crash, causing a deadlock.
+		 */
+		set_indicator_token = rtas_token("set-indicator");
+#endif
 	} else {
 		__debugger = NULL;
 		__debugger_ipi = NULL;
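[Editorial sketch] The xmon change is a lock-avoidance idiom: resolve anything that may take a lock (per the patch comment, rtas_token() can) while the system is healthy, so the crash path is reduced to the pre-resolved, unlocked call. Distilled, with hypothetical hook names:

	static int cached_token = RTAS_UNKNOWN_SERVICE;

	static void my_debugger_init(void)	/* hypothetical init hook */
	{
		cached_token = rtas_token("set-indicator");	/* safe to lock here */
	}

	static void my_crash_quiesce(void)
	{
		static struct rtas_args args;

		if (cached_token == RTAS_UNKNOWN_SERVICE)
			return;
		/* Lock-free variant: never blocks inside the crashed kernel. */
		rtas_call_unlocked(&args, cached_token, 3, 1, NULL,
				   SURVEILLANCE_TOKEN, 0, 0);
	}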
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index 2fa2942..470755c 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -35,6 +35,12 @@
 #define _PAGE_SPECIAL   _PAGE_SOFT
 #define _PAGE_TABLE     _PAGE_PRESENT
 
+/*
+ * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
+ * distinguish them from swapped out pages
+ */
+#define _PAGE_PROT_NONE _PAGE_READ
+
 #define _PAGE_PFN_SHIFT 10
 
 /* Set of bits to preserve across pte_modify() */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 1630196..a8179a8 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -44,7 +44,7 @@
 /* Page protection bits */
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
 
-#define PAGE_NONE		__pgprot(0)
+#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
 #define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
 #define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
 #define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
@@ -98,7 +98,7 @@
 
 static inline int pmd_present(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _PAGE_PRESENT);
+	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
 }
 
 static inline int pmd_none(pmd_t pmd)
@@ -178,7 +178,7 @@
 
 static inline int pte_present(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_PRESENT);
+	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
 }
 
 static inline int pte_none(pte_t pte)
@@ -380,7 +380,7 @@
  *
  * Format of swap PTE:
  *	bit            0:	_PAGE_PRESENT (zero)
- *	bit            1:	reserved for future use (zero)
+ *	bit            1:	_PAGE_PROT_NONE (zero)
  *	bits      2 to 6:	swap type
  *	bits 7 to XLEN-1:	swap offset
  */
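[Editorial sketch] With _PAGE_PROT_NONE aliased to _PAGE_READ on a non-present entry, Linux treats a PROT_NONE mapping as present while the MMU still faults on it, and keeping bit 1 zero in swap entries keeps the two encodings disjoint. In effect:

	pte_t pte = __pte(_PAGE_PROT_NONE);	/* PROT_NONE: _PAGE_PRESENT clear */

	/* Present to the kernel, invalid to the hardware walker. */
	bool seen_by_linux = pte_present(pte);			/* true  */
	bool seen_by_mmu   = pte_val(pte) & _PAGE_PRESENT;	/* false */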
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3fe4af8..c23578a 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -22,7 +22,7 @@
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE >> 1)
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
 
 #define STACK_TOP		TASK_SIZE
 #define STACK_TOP_MAX		STACK_TOP
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 9f82a7e..9db7d00 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -120,6 +120,6 @@
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
-		trace_sys_exit(regs, regs->regs[0]);
+		trace_sys_exit(regs, regs_return_value(regs));
 #endif
 }
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index b2d26d9..9713d4e 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -186,7 +186,7 @@
 	BUG_ON(mem_size == 0);
 
 	set_max_mapnr(PFN_DOWN(mem_size));
-	max_low_pfn = memblock_end_of_DRAM();
+	max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	setup_initrd();
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 58a522f..200a4b3 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -29,7 +29,8 @@
 	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
 
 #ifdef CONFIG_ZONE_DMA32
-	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
+			(unsigned long) PFN_PHYS(max_low_pfn)));
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 433a994..54f3756 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -25,10 +25,6 @@
 
 #include "appldata.h"
 
-
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-
 /*
  * OS data
  *
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f1ab942..09b61d0 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -89,8 +89,6 @@
 {
 	int cpu = smp_processor_id();
 
-	if (prev == next)
-		return;
 	S390_lowcore.user_asce = next->context.asce;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
 	/* Clear previous user-ASCE from CR1 and CR7 */
@@ -102,7 +100,8 @@
 		__ctl_load(S390_lowcore.vdso_asce, 7, 7);
 		clear_cpu_flag(CIF_ASCE_SECONDARY);
 	}
-	cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+	if (prev != next)
+		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index 2bb1f3b..48c784f 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -147,8 +147,8 @@
  * @cprb_len:		CPRB header length [0x0020]
  * @cprb_ver_id:	CPRB version id.   [0x04]
  * @pad_000:		Alignment pad bytes
- * @flags:		Admin cmd [0x80] or functional cmd [0x00]
- * @func_id:		Function id / subtype [0x5434]
+ * @flags:		Admin bit [0x80], Special bit [0x20]
+ * @func_id:		Function id / subtype [0x5434] "T4"
  * @source_id:		Source id [originator id]
  * @target_id:		Target id [usage/ctrl domain id]
  * @ret_code:		Return code
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 5b28b43..e7e6608 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -64,10 +64,10 @@
 	if (stsi(vmms, 3, 2, 2) || !vmms->count)
 		return;
 
-	/* Running under KVM? If not we assume z/VM */
+	/* Detect known hypervisors */
 	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
-	else
+	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
 }
 
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c637c12..a0097f8 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -882,6 +882,8 @@
 		pr_info("Linux is running under KVM in 64-bit mode\n");
 	else if (MACHINE_IS_LPAR)
 		pr_info("Linux is running natively in 64-bit mode\n");
+	else
+		pr_info("Linux is running as a guest in 64-bit mode\n");
 
 	/* Have one command line that is parsed and saved in /proc/cmdline */
 	/* boot_command_line has been already set up in early.c */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2f8f7d7..da02f40 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -371,9 +371,13 @@
  */
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
+	struct lowcore *lc = pcpu_devices->lowcore;
+
+	if (pcpu_devices[0].address == stap())
+		lc = &S390_lowcore;
+
 	pcpu_delegate(&pcpu_devices[0], func, data,
-		      pcpu_devices->lowcore->panic_stack -
-		      PANIC_FRAME_OFFSET + PAGE_SIZE);
+		      lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
 }
 
 int smp_find_processor_id(u16 address)
@@ -1152,7 +1156,11 @@
 {
 	int rc;
 
+	rc = lock_device_hotplug_sysfs();
+	if (rc)
+		return rc;
 	rc = smp_rescan_cpus();
+	unlock_device_hotplug();
 	return rc ? rc : count;
 }
 static DEVICE_ATTR_WO(rescan);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 19b2d2a..eeb7450 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -436,7 +436,7 @@
 	struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
 	int rc;
 
-	rrb = clp_alloc_block(GFP_KERNEL);
+	rrb = clp_alloc_block(GFP_ATOMIC);
 	if (!rrb)
 		return -ENOMEM;
 
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index e59c577..c70bc78 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -25,7 +25,6 @@
 #include <linux/memblock.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
-#include <linux/mtd/onenand.h>
 #include <linux/mtd/physmap.h>
 #include <linux/platform_data/lv5207lp.h>
 #include <linux/platform_device.h>
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 7485398..9c04562 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -197,12 +197,17 @@
 
 static inline pte_t pte_wrprotect(pte_t pte)
 { 
-	pte_clear_bits(pte, _PAGE_RW);
+	if (likely(pte_get_bits(pte, _PAGE_RW)))
+		pte_clear_bits(pte, _PAGE_RW);
+	else
+		return pte;
 	return(pte_mknewprot(pte)); 
 }
 
 static inline pte_t pte_mkread(pte_t pte)
 { 
+	if (unlikely(pte_get_bits(pte, _PAGE_USER)))
+		return pte;
 	pte_set_bits(pte, _PAGE_USER);
 	return(pte_mknewprot(pte)); 
 }
@@ -221,6 +226,8 @@
 
 static inline pte_t pte_mkwrite(pte_t pte)	
 {
+	if (unlikely(pte_get_bits(pte,  _PAGE_RW)))
+		return pte;
 	pte_set_bits(pte, _PAGE_RW);
 	return(pte_mknewprot(pte)); 
 }
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 6403789..f105ae8 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -600,6 +600,14 @@
 	leal	TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
 	movl	%eax, %cr3
 3:
+	/* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
+	pushl	%ecx
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	btsl	$_EFER_LME, %eax
+	wrmsr
+	popl	%ecx
+
 	/* Enable PAE and LA57 (if required) paging modes */
 	movl	$X86_CR4_PAE, %eax
 	cmpl	$0, %edx
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index 91f7563..6ff7e81 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -6,7 +6,7 @@
 #define TRAMPOLINE_32BIT_PGTABLE_OFFSET	0
 
 #define TRAMPOLINE_32BIT_CODE_OFFSET	PAGE_SIZE
-#define TRAMPOLINE_32BIT_CODE_SIZE	0x60
+#define TRAMPOLINE_32BIT_CODE_SIZE	0x70
 
 #define TRAMPOLINE_32BIT_STACK_END	TRAMPOLINE_32BIT_SIZE
 
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 9e21573..f8debf7 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -1,5 +1,7 @@
+#include <linux/efi.h>
 #include <asm/e820/types.h>
 #include <asm/processor.h>
+#include <asm/efi.h>
 #include "pgtable.h"
 #include "../string.h"
 
@@ -37,9 +39,10 @@
 
 static unsigned long find_trampoline_placement(void)
 {
-	unsigned long bios_start, ebda_start;
+	unsigned long bios_start = 0, ebda_start = 0;
 	unsigned long trampoline_start;
 	struct boot_e820_entry *entry;
+	char *signature;
 	int i;
 
 	/*
@@ -47,8 +50,18 @@
 	 * This code is based on reserve_bios_regions().
 	 */
 
-	ebda_start = *(unsigned short *)0x40e << 4;
-	bios_start = *(unsigned short *)0x413 << 10;
+	/*
+	 * EFI systems may not provide legacy ROM. The memory may not be mapped
+	 * at all.
+	 *
+	 * Only look for values in the legacy ROM for non-EFI systems.
+	 */
+	signature = (char *)&boot_params->efi_info.efi_loader_signature;
+	if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
+	    strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
+		ebda_start = *(unsigned short *)0x40e << 4;
+		bios_start = *(unsigned short *)0x413 << 10;
+	}
 
 	if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
 		bios_start = BIOS_START_MAX;
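[Editorial sketch] The guard distills to: trust the BIOS Data Area words only when the boot did not come through EFI, and otherwise let the zero-initialized values fail the range check and be clamped. As a pattern, using boot_params and the signature macros as in the patch:

	unsigned long ebda_start = 0, bios_start = 0;
	const char *sig = (const char *)&boot_params->efi_info.efi_loader_signature;

	if (strncmp(sig, EFI32_LOADER_SIGNATURE, 4) &&
	    strncmp(sig, EFI64_LOADER_SIGNATURE, 4)) {
		/* Legacy BIOS boot: the BDA is mapped and meaningful. */
		ebda_start = *(unsigned short *)0x40e << 4;	/* EBDA segment */
		bios_start = *(unsigned short *)0x413 << 10;	/* base memory, KiB */
	}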
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index d873790..4b536d0 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -58,6 +58,7 @@
 # CONFIG_ACPI_FAN is not set
 # CONFIG_ACPI_THERMAL is not set
 # CONFIG_X86_PM_TIMER is not set
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_X86_ACPI_CPUFREQ=y
 CONFIG_PCI_MSI=y
@@ -81,6 +82,7 @@
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -95,6 +97,7 @@
 CONFIG_NET_IPVTI=y
 CONFIG_INET_ESP=y
 # CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_TCP_CONG_ADVANCED=y
 # CONFIG_TCP_CONG_BIC is not set
@@ -127,6 +130,7 @@
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -147,6 +151,7 @@
 CONFIG_NETFILTER_XT_MATCH_LIMIT=y
 CONFIG_NETFILTER_XT_MATCH_MAC=y
 CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
@@ -182,10 +187,14 @@
 CONFIG_IP6_NF_RAW=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_NETEM=y
 CONFIG_NET_CLS_U32=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_U32=y
 CONFIG_NET_CLS_ACT=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_RFKILL=y
@@ -228,6 +237,7 @@
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
+CONFIG_USB_RTL8152=y
 CONFIG_USB_USBNET=y
 # CONFIG_USB_NET_AX8817X is not set
 # CONFIG_USB_NET_AX88179_178A is not set
@@ -305,6 +315,12 @@
 CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_SOUND=y
 CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+# CONFIG_SND_USB is not set
 CONFIG_HIDRAW=y
 CONFIG_UHID=y
 CONFIG_HID_A4TECH=y
@@ -379,10 +395,10 @@
 # CONFIG_PWRSEQ_SIMPLE is not set
 # CONFIG_MMC_BLOCK is not set
 CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
 CONFIG_SW_SYNC=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
 CONFIG_VIRTIO_MMIO=y
 CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
 CONFIG_STAGING=y
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 7d0df78..40d2834 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -356,7 +356,8 @@
 
 	/* Need to switch before accessing the thread stack. */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
-	movq	%rsp, %rdi
+	/* In the Xen PV case we already run on the thread stack. */
+	ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
 	pushq	6*8(%rdi)		/* regs->ss */
@@ -365,8 +366,9 @@
 	pushq	3*8(%rdi)		/* regs->cs */
 	pushq	2*8(%rdi)		/* regs->ip */
 	pushq	1*8(%rdi)		/* regs->orig_ax */
-
 	pushq	(%rdi)			/* pt_regs->di */
+.Lint80_keep_stack:
+
 	pushq	%rsi			/* pt_regs->si */
 	xorl	%esi, %esi		/* nospec   si */
 	pushq	%rdx			/* pt_regs->dx */
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 141d415..c3d7ccd 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -171,7 +171,8 @@
 		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
-	$(call ld-option, --build-id) -Bsymbolic
+	$(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
+	-Bsymbolic
 GCOV_PROFILE := n
 
 #
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index c8d08da..a415543 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1970,7 +1970,7 @@
  */
 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
 {
-	kfree(cpuc->shared_regs);
+	intel_cpuc_finish(cpuc);
 	kfree(cpuc);
 }
 
@@ -1982,14 +1982,11 @@
 	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
 	if (!cpuc)
 		return ERR_PTR(-ENOMEM);
-
-	/* only needed, if we have extra_regs */
-	if (x86_pmu.extra_regs) {
-		cpuc->shared_regs = allocate_shared_regs(cpu);
-		if (!cpuc->shared_regs)
-			goto error;
-	}
 	cpuc->is_fake = 1;
+
+	if (intel_cpuc_prepare(cpuc, cpu))
+		goto error;
+
 	return cpuc;
 error:
 	free_fake_cpuc(cpuc);
@@ -2253,6 +2250,19 @@
 		x86_pmu.check_microcode();
 }
 
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
+{
+	if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+		return -EINVAL;
+
+	if (value && x86_pmu.limit_period) {
+		if (x86_pmu.limit_period(event, value) > value)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct pmu pmu = {
 	.pmu_enable		= x86_pmu_enable,
 	.pmu_disable		= x86_pmu_disable,
@@ -2277,6 +2287,7 @@
 	.event_idx		= x86_pmu_event_idx,
 	.sched_task		= x86_pmu_sched_task,
 	.task_ctx_size          = sizeof(struct x86_perf_task_context),
+	.check_period		= x86_pmu_check_period,
 };
 
 void arch_perf_update_userpage(struct perf_event *event,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 155fa4b..12453cf 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1995,6 +1995,39 @@
 	intel_pmu_enable_all(added);
 }
 
+static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
+{
+	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
+
+	if (cpuc->tfa_shadow != val) {
+		cpuc->tfa_shadow = val;
+		wrmsrl(MSR_TSX_FORCE_ABORT, val);
+	}
+}
+
+static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
+{
+	/*
+	 * We're going to use PMC3; make sure TFA is set before we touch it.
+	 */
+	if (cntr == 3 && !cpuc->is_fake)
+		intel_set_tfa(cpuc, true);
+}
+
+static void intel_tfa_pmu_enable_all(int added)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	/*
+	 * If we find PMC3 is no longer used when we enable the PMU, we can
+	 * clear TFA.
+	 */
+	if (!test_bit(3, cpuc->active_mask))
+		intel_set_tfa(cpuc, false);
+
+	intel_pmu_enable_all(added);
+}
+
 static inline u64 intel_pmu_get_status(void)
 {
 	u64 status;
@@ -2653,6 +2686,35 @@
 }
 
 static struct event_constraint *
+dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
+{
+	WARN_ON_ONCE(!cpuc->constraint_list);
+
+	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+		struct event_constraint *cx;
+
+		/*
+		 * grab pre-allocated constraint entry
+		 */
+		cx = &cpuc->constraint_list[idx];
+
+		/*
+		 * initialize dynamic constraint
+		 * with static constraint
+		 */
+		*cx = *c;
+
+		/*
+		 * mark constraint as dynamic
+		 */
+		cx->flags |= PERF_X86_EVENT_DYNAMIC;
+		c = cx;
+	}
+
+	return c;
+}
+
+static struct event_constraint *
 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
 			   int idx, struct event_constraint *c)
 {
@@ -2682,27 +2744,7 @@
 	 * only needed when constraint has not yet
 	 * been cloned (marked dynamic)
 	 */
-	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
-		struct event_constraint *cx;
-
-		/*
-		 * grab pre-allocated constraint entry
-		 */
-		cx = &cpuc->constraint_list[idx];
-
-		/*
-		 * initialize dynamic constraint
-		 * with static constraint
-		 */
-		*cx = *c;
-
-		/*
-		 * mark constraint as dynamic, so we
-		 * can free it later on
-		 */
-		cx->flags |= PERF_X86_EVENT_DYNAMIC;
-		c = cx;
-	}
+	c = dyn_constraint(cpuc, c, idx);
 
 	/*
 	 * From here on, the constraint is dynamic.
@@ -3229,6 +3271,26 @@
 	return c;
 }
 
+static bool allow_tsx_force_abort = true;
+
+static struct event_constraint *
+tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+			  struct perf_event *event)
+{
+	struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
+
+	/*
+	 * Without TFA we must not use PMC3.
+	 */
+	if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
+		c = dyn_constraint(cpuc, c, idx);
+		c->idxmsk64 &= ~(1ULL << 3);
+		c->weight--;
+	}
+
+	return c;
+}
+
 /*
  * Broadwell:
  *
@@ -3282,7 +3344,7 @@
 	return x86_event_sysfs_show(page, config, event);
 }
 
-struct intel_shared_regs *allocate_shared_regs(int cpu)
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
 	struct intel_shared_regs *regs;
 	int i;
@@ -3314,23 +3376,24 @@
 	return c;
 }
 
-static int intel_pmu_cpu_prepare(int cpu)
-{
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
+int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
 			goto err;
 	}
 
-	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
 		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
 
-		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
 		if (!cpuc->constraint_list)
 			goto err_shared_regs;
+	}
 
+	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
 		if (!cpuc->excl_cntrs)
 			goto err_constraint_list;
@@ -3352,6 +3415,11 @@
 	return -ENOMEM;
 }
 
+static int intel_pmu_cpu_prepare(int cpu)
+{
+	return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
+}
+
 static void flip_smm_bit(void *data)
 {
 	unsigned long set = *(unsigned long *)data;
@@ -3423,9 +3491,8 @@
 	}
 }
 
-static void free_excl_cntrs(int cpu)
+static void free_excl_cntrs(struct cpu_hw_events *cpuc)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	struct intel_excl_cntrs *c;
 
 	c = cpuc->excl_cntrs;
@@ -3433,14 +3500,19 @@
 		if (c->core_id == -1 || --c->refcnt == 0)
 			kfree(c);
 		cpuc->excl_cntrs = NULL;
-		kfree(cpuc->constraint_list);
-		cpuc->constraint_list = NULL;
 	}
+
+	kfree(cpuc->constraint_list);
+	cpuc->constraint_list = NULL;
 }
 
 static void intel_pmu_cpu_dying(int cpu)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	fini_debug_store_on_cpu(cpu);
+}
+
+void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+{
 	struct intel_shared_regs *pc;
 
 	pc = cpuc->shared_regs;
@@ -3450,9 +3522,12 @@
 		cpuc->shared_regs = NULL;
 	}
 
-	free_excl_cntrs(cpu);
+	free_excl_cntrs(cpuc);
+}
 
-	fini_debug_store_on_cpu(cpu);
+static void intel_pmu_cpu_dead(int cpu)
+{
+	intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3462,6 +3537,11 @@
 	intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3541,6 +3621,9 @@
 	.cpu_prepare		= intel_pmu_cpu_prepare,
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
+	.cpu_dead		= intel_pmu_cpu_dead,
+
+	.check_period		= intel_pmu_check_period,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3581,8 +3664,12 @@
 	.cpu_prepare		= intel_pmu_cpu_prepare,
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
+	.cpu_dead		= intel_pmu_cpu_dead,
+
 	.guest_get_msrs		= intel_guest_get_msrs,
 	.sched_task		= intel_pmu_sched_task,
+
+	.check_period		= intel_pmu_check_period,
 };
 
 static __init void intel_clovertown_quirk(void)
@@ -3902,8 +3989,11 @@
        NULL
 };
 
+static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
+
 static struct attribute *intel_pmu_attrs[] = {
 	&dev_attr_freeze_on_smi.attr,
+	NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
 	NULL,
 };
 
@@ -4359,6 +4449,15 @@
 		x86_pmu.cpu_events = get_hsw_events_attrs();
 		intel_pmu_pebs_data_source_skl(
 			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
+
+		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+			x86_pmu.flags |= PMU_FL_TFA;
+			x86_pmu.get_event_constraints = tfa_get_event_constraints;
+			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
+			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
+			intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
+		}
+
 		pr_cont("Skylake events, ");
 		name = "skylake";
 		break;
@@ -4500,7 +4599,7 @@
 	hardlockup_detector_perf_restart();
 
 	for_each_online_cpu(c)
-		free_excl_cntrs(c);
+		free_excl_cntrs(&per_cpu(cpu_hw_events, c));
 
 	cpus_read_unlock();
 	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index c07bee3..b10e043 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1222,6 +1222,8 @@
 	.id_table	= snbep_uncore_pci_ids,
 };
 
+#define NODE_ID_MASK	0x7
+
 /*
  * build pci bus to socket mapping
  */
@@ -1243,7 +1245,7 @@
 		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
 		if (err)
 			break;
-		nodeid = config;
+		nodeid = config & NODE_ID_MASK;
 		/* get the Node ID mapping */
 		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
 		if (err)
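
The masking fix above matters because the CPUNODEID config register carries more than the node id; only the low bits name the node. A standalone illustration (the register layout is simplified, the mask width is from the patch):

    #include <stdio.h>

    #define NODE_ID_MASK 0x7

    int main(void)
    {
        unsigned int config = 0xffffff42; /* hypothetical raw register value */
        unsigned int nodeid = config & NODE_ID_MASK;

        printf("nodeid=%u\n", nodeid); /* 2: only bits [2:0] are the node id */
        return 0;
    }
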
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index c5ad9cc..42a3628 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -243,6 +243,11 @@
 	int excl_thread_id; /* 0 or 1 */
 
 	/*
+	 * SKL TSX_FORCE_ABORT shadow
+	 */
+	u64				tfa_shadow;
+
+	/*
 	 * AMD specific bits
 	 */
 	struct amd_nb			*amd_nb;
@@ -644,6 +649,11 @@
 	 * Intel host/guest support (KVM)
 	 */
 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int (*check_period) (struct perf_event *event, u64 period);
 };
 
 struct x86_perf_task_context {
@@ -674,6 +684,7 @@
 #define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
 #define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
 #define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
+#define PMU_FL_TFA		0x20 /* deal with TSX force abort */
 
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -855,7 +866,7 @@
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
-static inline bool intel_pmu_has_bts(struct perf_event *event)
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned int hw_event, bts_event;
@@ -866,7 +877,14 @@
 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-	return hw_event == bts_event && hwc->sample_period == 1;
+	return hw_event == bts_event && period == 1;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	return intel_pmu_has_bts_period(event, hwc->sample_period);
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);
@@ -875,7 +893,8 @@
 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event);
 
-struct intel_shared_regs *allocate_shared_regs(int cpu);
+extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
+extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
 
 int intel_pmu_init(void);
 
@@ -1011,9 +1030,13 @@
 	return 0;
 }
 
-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
+static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
 {
-	return NULL;
+	return 0;
+}
+
+static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+{
 }
 
 static inline int is_ht_workaround_enabled(void)
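
The new ->check_period() hook is driven from the PERF_EVENT_IOC_PERIOD ioctl. A hedged userspace sketch (error handling trimmed) of the case it guards: re-arming a branch-instructions event with period 1, which implies BTS and can now be rejected with EINVAL where BTS must not be used:

    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        unsigned long long period = 1; /* period 1 on a branch event implies BTS */
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
        attr.sample_period = 100000;

        fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }
        /* With ->check_period() wired up, the PMU can veto this change. */
        if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period) < 0)
            perror("PERF_EVENT_IOC_PERIOD"); /* e.g. EINVAL */
        close(fd);
        return 0;
    }
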
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 8e02b30..3ebd777 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -51,7 +51,7 @@
 /*
  * fill in the user structure for a core dump..
  */
-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
+static void fill_dump(struct pt_regs *regs, struct user32 *dump)
 {
 	u32 fs, gs;
 	memset(dump, 0, sizeof(*dump));
@@ -157,10 +157,12 @@
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 	has_dumped = 1;
+
+	fill_dump(cprm->regs, &dump);
+
 	strncpy(dump.u_comm, current->comm, sizeof(current->comm));
 	dump.u_ar0 = offsetof(struct user32, regs);
 	dump.signal = cprm->siginfo->si_signo;
-	dump_thread32(cprm->regs, &dump);
 
 	/*
 	 * If the size of the dump file exceeds the rlimit, then see
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 89a048c..7b31ee5 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -340,6 +340,7 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_TSX_FORCE_ABORT	(18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
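
The feature number encodes a (word, bit) pair as a single index; word 18 is the CPUID.(EAX=7,ECX=0):EDX leaf named in the comment block above. A quick check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int feature = 18 * 32 + 13; /* X86_FEATURE_TSX_FORCE_ABORT */

        printf("word=%u bit=%u (feature #%u)\n",
               feature / 32, feature % 32, feature);
        return 0;
    }
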
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 69dcdf1..fa2c93c 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -106,6 +106,9 @@
 #define user_insn(insn, output, input...)				\
 ({									\
 	int err;							\
+									\
+	might_fault();							\
+									\
 	asm volatile(ASM_STAC "\n"					\
 		     "1:" #insn "\n\t"					\
 		     "2: " ASM_CLAC "\n"				\
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 022845e..728dc66 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1441,7 +1441,7 @@
 	"cmpb $0, kvm_rebooting \n\t"	      \
 	"jne 668b \n\t"      		      \
 	__ASM_SIZE(push) " $666b \n\t"	      \
-	"call kvm_spurious_fault \n\t"	      \
+	"jmp kvm_spurious_fault \n\t"	      \
 	".popsection \n\t" \
 	_ASM_EXTABLE(666b, 667b)
 
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index eeeb928..2252b63 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -178,6 +178,10 @@
 
 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
+/*
+ * Init a new mm.  Used on mm copies, like at fork()
+ * and on mm's that are brand-new, like at execve().
+ */
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
@@ -228,8 +232,22 @@
 } while (0)
 #endif
 
+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
+				  struct mm_struct *mm)
+{
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+		return;
+
+	/* Duplicate the oldmm pkey state in mm: */
+	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
+	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
+#endif
+}
+
 static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
+	arch_dup_pkeys(oldmm, mm);
 	paravirt_arch_dup_mmap(oldmm, mm);
 	return ldt_dup_context(oldmm, mm);
 }
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b3486c8..f14ca0b 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -389,6 +389,7 @@
 #define MSR_F15H_NB_PERF_CTR		0xc0010241
 #define MSR_F15H_PTSC			0xc0010280
 #define MSR_F15H_IC_CFG			0xc0011021
+#define MSR_F15H_EX_CFG			0xc001102c
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
@@ -628,6 +629,12 @@
 
 #define MSR_IA32_TSC_DEADLINE		0x000006E0
 
+
+#define MSR_TSX_FORCE_ABORT		0x0000010F
+
+#define MSR_TFA_RTM_FORCE_ABORT_BIT	0
+#define MSR_TFA_RTM_FORCE_ABORT		BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+
 /* P4/Xeon+ specific */
 #define MSR_IA32_MCG_EAX		0x00000180
 #define MSR_IA32_MCG_EBX		0x00000181
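
A hedged userspace sketch of inspecting the new MSR through the msr driver (requires root and /dev/cpu/<n>/msr; the pread offset selects the MSR number). This only reads the bit the perf workaround shadows, it does not change it:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_TSX_FORCE_ABORT 0x10f
    #define RTM_FORCE_ABORT     (1ULL << 0)

    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0 ||
            pread(fd, &val, sizeof(val), MSR_TSX_FORCE_ABORT) != sizeof(val)) {
            perror("msr read");
            return 1;
        }
        printf("RTM_FORCE_ABORT=%d\n", !!(val & RTM_FORCE_ABORT));
        close(fd);
        return 0;
    }
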
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index b99d497..0b6352a 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -7,7 +7,11 @@
 #endif
 
 #ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_STACK_ORDER 2
+#else
 #define KASAN_STACK_ORDER 1
+#endif
 #else
 #define KASAN_STACK_ORDER 0
 #endif
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 84bd9bd..88bca45 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -111,6 +111,11 @@
  */
 #define MAXMEM			(1UL << MAX_PHYSMEM_BITS)
 
+#define GUARD_HOLE_PGD_ENTRY	-256UL
+#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
+#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
+#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
+
 #define LDT_PGD_ENTRY		-240UL
 #define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
 #define LDT_END_ADDR		(LDT_BASE_ADDR + PGDIR_SIZE)
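
Assuming 4-level paging (PGDIR_SHIFT = 39), the new constants describe an 8 TiB hypervisor hole starting at 0xffff800000000000. The arithmetic, checked standalone:

    #include <stdio.h>

    int main(void)
    {
        /* 4-level paging: each PGD entry covers 1 << 39 bytes (512 GiB). */
        unsigned long pgdir_shift = 39;
        unsigned long base = -256UL << pgdir_shift; /* GUARD_HOLE_BASE_ADDR */
        unsigned long size = 16UL << pgdir_shift;   /* GUARD_HOLE_SIZE */

        printf("hole: %#lx - %#lx (%lu GiB)\n",
               base, base + size, size >> 30);
        /* hole: 0xffff800000000000 - 0xffff880000000000 (8192 GiB) */
        return 0;
    }
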
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 3de6933..afbc872 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -104,9 +104,9 @@
 
 void math_emulate(struct math_emu_info *);
 #ifndef CONFIG_X86_32
-asmlinkage void smp_thermal_interrupt(void);
-asmlinkage void smp_threshold_interrupt(void);
-asmlinkage void smp_deferred_error_interrupt(void);
+asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
+asmlinkage void smp_threshold_interrupt(struct pt_regs *regs);
+asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
 #endif
 
 extern void ist_enter(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index aae77eb..4111edb 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -293,8 +293,7 @@
 		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
 		break;							\
 	case 8:								\
-		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
-				   errret);				\
+		__put_user_asm_u64(x, ptr, retval, errret);		\
 		break;							\
 	default:							\
 		__put_user_bad();					\
@@ -440,8 +439,10 @@
 #define __put_user_nocheck(x, ptr, size)			\
 ({								\
 	int __pu_err;						\
+	__typeof__(*(ptr)) __pu_val;				\
+	__pu_val = x;						\
 	__uaccess_begin();					\
-	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
+	__put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);\
 	__uaccess_end();					\
 	__builtin_expect(__pu_err, 0);				\
 })
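
The macro change captures x in a correctly-typed local before the user-access window opens, so the argument is evaluated exactly once and the conversion to the destination type happens outside the STAC/CLAC region. A userspace illustration of the evaluate-once half (macro names here are made up):

    #include <stdio.h>

    /* Evaluates x twice: side effects are duplicated. */
    #define BAD_STORE(x, p)  do { *(p) = (x); (void)(x); } while (0)

    /* Evaluates x exactly once, into a local of the destination type. */
    #define GOOD_STORE(x, p)                 \
    do {                                     \
        __typeof__(*(p)) __val = (x);        \
        *(p) = __val;                        \
    } while (0)

    int main(void)
    {
        int n = 0, dst;

        BAD_STORE(n++, &dst);  /* n++ ran twice */
        printf("after BAD_STORE:  n=%d\n", n);  /* n=2 */

        n = 0;
        GOOD_STORE(n++, &dst); /* n++ ran once */
        printf("after GOOD_STORE: n=%d\n", n);  /* n=1 */
        return 0;
    }
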
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index e652a7c..3f697a9 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -48,7 +48,8 @@
 	BIOS_STATUS_SUCCESS		=  0,
 	BIOS_STATUS_UNIMPLEMENTED	= -ENOSYS,
 	BIOS_STATUS_EINVAL		= -EINVAL,
-	BIOS_STATUS_UNAVAIL		= -EBUSY
+	BIOS_STATUS_UNAVAIL		= -EBUSY,
+	BIOS_STATUS_ABORT		= -EINTR,
 };
 
 /* Address map parameters */
@@ -167,4 +168,9 @@
 
 extern struct kobject *sgi_uv_kobj;	/* /sys/firmware/sgi_uv */
 
+/*
+ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
+ */
+extern struct semaphore __efi_uv_runtime_lock;
+
 #endif /* _ASM_X86_UV_BIOS_H */
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 7654feb..652e7ff 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -313,14 +313,13 @@
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	int vector, cpu;
 
-	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
-	cpu = cpumask_first(vector_searchmask);
-	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
+	cpumask_and(vector_searchmask, dest, affmsk);
+
 	/* set_affinity might call here for nothing */
 	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
 		return 0;
-	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
+					  &cpu);
 	trace_vector_alloc_managed(irqd->irq, vector, vector);
 	if (vector < 0)
 		return vector;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index eeea634..6a25278 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -818,11 +818,9 @@
 static void init_amd_zn(struct cpuinfo_x86 *c)
 {
 	set_cpu_cap(c, X86_FEATURE_ZEN);
-	/*
-	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
-	 * all up to and including B1.
-	 */
-	if (c->x86_model <= 1 && c->x86_stepping <= 1)
+
+	/* Fix erratum 1076: CPB feature bit not being set in CPUID. */
+	if (!cpu_has(c, X86_FEATURE_CPB))
 		set_cpu_cap(c, X86_FEATURE_CPB);
 }
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 78928f5..1e0c4c7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -69,7 +69,7 @@
 	 * identify_boot_cpu() initialized SMT support information, let the
 	 * core code know.
 	 */
-	cpu_smt_check_topology_early();
+	cpu_smt_check_topology();
 
 	if (!IS_ENABLED(CONFIG_SMP)) {
 		pr_info("CPU: ");
@@ -213,7 +213,7 @@
 static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
 	SPECTRE_V2_USER_NONE;
 
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
 static bool spectre_v2_bad_module;
 
 bool retpoline_module_ok(bool has_retpoline)
@@ -1000,7 +1000,8 @@
 #endif
 
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
-	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
+			e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
 		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
 				half_pa);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index 0f53049..627e5c8 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -23,6 +23,7 @@
 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/kernfs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -310,9 +311,11 @@
 		return -EINVAL;
 	buf[nbytes - 1] = '\0';
 
+	cpus_read_lock();
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 	if (!rdtgrp) {
 		rdtgroup_kn_unlock(of->kn);
+		cpus_read_unlock();
 		return -ENOENT;
 	}
 	rdt_last_cmd_clear();
@@ -367,6 +370,7 @@
 
 out:
 	rdtgroup_kn_unlock(of->kn);
+	cpus_read_unlock();
 	return ret ?: nbytes;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index cdbedeb..f9e7096 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -783,6 +783,7 @@
 			quirk_no_way_out(i, m, regs);
 
 		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+			m->bank = i;
 			mce_read_aux(m, i);
 			*msg = tmp;
 			return 1;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index e12454e..9f915a8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -23,6 +23,7 @@
 #include <linux/string.h>
 
 #include <asm/amd_nb.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
@@ -99,7 +100,7 @@
 	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
 };
 
-const char *smca_get_name(enum smca_bank_types t)
+static const char *smca_get_name(enum smca_bank_types t)
 {
 	if (t >= N_SMCA_BANK_TYPES)
 		return NULL;
@@ -824,7 +825,7 @@
 	mce_log(&m);
 }
 
-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
+asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 2da67b7..ee229ce 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -25,6 +25,7 @@
 #include <linux/cpu.h>
 
 #include <asm/processor.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
@@ -390,7 +391,7 @@
 
 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
 
-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
+asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index 2b584b3..c21e0a1 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 
 #include <asm/irq_vectors.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/trace/irq_vectors.h>
@@ -18,7 +19,7 @@
 
 void (*mce_threshold_vector)(void) = default_threshold_interrupt;
 
-asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
+asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 07b5fc0..a4e7e10 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -707,7 +707,7 @@
 	if (!p) {
 		return ret;
 	} else {
-		if (boot_cpu_data.microcode == p->patch_id)
+		if (boot_cpu_data.microcode >= p->patch_id)
 			return ret;
 
 		ret = UCODE_NEW;
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 40eee6c..254683b 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -165,6 +165,8 @@
 	struct mtrr_gentry gentry;
 	void __user *arg = (void __user *) __arg;
 
+	memset(&gentry, 0, sizeof(gentry));
+
 	switch (cmd) {
 	case MTRRIOC_ADD_ENTRY:
 	case MTRRIOC_SET_ENTRY:
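
The memset() closes a classic stack infoleak: ioctl paths that fill only some gentry fields would otherwise copy stale stack bytes (including compiler-inserted padding) out to userspace. The pattern, in a standalone sketch where a plain assignment stands in for copy_to_user():

    #include <stdio.h>
    #include <string.h>

    struct gentry_like {
        unsigned int base;
        unsigned int size; /* not written on every ioctl path */
        unsigned int type; /* the compiler may also add padding */
    };

    static void handler(struct gentry_like *out)
    {
        struct gentry_like g;

        memset(&g, 0, sizeof(g)); /* no stale stack bytes can escape */
        g.base = 0x1000;          /* only some fields set on this path */
        *out = g;                 /* stands in for copy_to_user() */
    }

    int main(void)
    {
        struct gentry_like g;

        handler(&g);
        printf("base=%#x size=%u type=%u\n", g.base, g.size, g.type);
        return 0;
    }
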
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 278cd07..9490a28 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -167,6 +167,9 @@
 	struct efi_info *current_ei = &boot_params.efi_info;
 	struct efi_info *ei = &params->efi_info;
 
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return 0;
+
 	if (!current_ei->efi_memmap_size)
 		return 0;
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d9b7192..7f89d60 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -457,6 +457,7 @@
 #else
 	u64 ipi_bitmap = 0;
 #endif
+	long ret;
 
 	if (cpumask_empty(mask))
 		return;
@@ -482,8 +483,9 @@
 		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
 			max = apic_id < max ? max : apic_id;
 		} else {
-			kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
 			min = max = apic_id;
 			ipi_bitmap = 0;
 		}
@@ -491,8 +493,9 @@
 	}
 
 	if (ipi_bitmap) {
-		kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
 	}
 
 	local_irq_restore(flags);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f02ecaf..6489067 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1346,7 +1346,7 @@
 	 * extrapolate the boot cpu's data to all packages.
 	 */
 	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
-	__max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
+	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
 	pr_info("Max logical packages: %u\n", __max_logical_packages);
 }
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7bcfa61..98d13c6 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -337,6 +337,7 @@
 	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
 	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
 	unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
+	unsigned f_la57 = 0;
 
 	/* cpuid 1.edx */
 	const u32 kvm_cpuid_1_edx_x86_features =
@@ -491,7 +492,10 @@
 			// TSC_ADJUST is emulated
 			entry->ebx |= F(TSC_ADJUST);
 			entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+			f_la57 = entry->ecx & F(LA57);
 			cpuid_mask(&entry->ecx, CPUID_7_ECX);
+			/* Set LA57 based on hardware capability. */
+			entry->ecx |= f_la57;
 			entry->ecx |= f_umip;
 			/* PKU is not yet implemented for shadow paging. */
 			if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f1d3fe5..b475419 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3399,6 +3399,14 @@
 	kvm_mmu_reset_context(&svm->vcpu);
 	kvm_mmu_load(&svm->vcpu);
 
+	/*
+	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
+	 * doesn't end up in L1.
+	 */
+	svm->vcpu.arch.nmi_injected = false;
+	kvm_clear_exception_queue(&svm->vcpu);
+	kvm_clear_interrupt_queue(&svm->vcpu);
+
 	return 0;
 }
 
@@ -4485,25 +4493,14 @@
 		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
-		int i;
-		struct kvm_vcpu *vcpu;
-		struct kvm *kvm = svm->vcpu.kvm;
 		struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
 		/*
-		 * At this point, we expect that the AVIC HW has already
-		 * set the appropriate IRR bits on the valid target
-		 * vcpus. So, we just need to kick the appropriate vcpu.
+		 * Update ICR high and low, then emulate sending IPI,
+		 * which is handled when writing APIC_ICR.
 		 */
-		kvm_for_each_vcpu(i, vcpu, kvm) {
-			bool m = kvm_apic_match_dest(vcpu, apic,
-						     icrl & KVM_APIC_SHORT_MASK,
-						     GET_APIC_DEST_FIELD(icrh),
-						     icrl & KVM_APIC_DEST_MASK);
-
-			if (m && !avic_vcpu_is_running(vcpu))
-				kvm_vcpu_wake_up(vcpu);
-		}
+		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	}
 	case AVIC_IPI_FAILURE_INVALID_TARGET:
@@ -5837,6 +5834,13 @@
 
 static bool svm_has_emulated_msr(int index)
 {
+	switch (index) {
+	case MSR_IA32_MCG_EXT_CTL:
+		return false;
+	default:
+		break;
+	}
+
 	return true;
 }
 
@@ -6249,6 +6253,9 @@
 	int asid, ret;
 
 	ret = -EBUSY;
+	if (unlikely(sev->active))
+		return ret;
+
 	asid = sev_asid_new();
 	if (asid < 0)
 		return ret;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c97a9d6..f6da5c3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -27,6 +27,7 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/sched.h>
+#include <linux/sched/smt.h>
 #include <linux/moduleparam.h>
 #include <linux/mod_devicetable.h>
 #include <linux/trace_events.h>
@@ -2756,7 +2757,8 @@
 	if (!entry_only)
 		j = find_msr(&m->host, msr);
 
-	if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+	if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
+		(j < 0 &&  m->host.nr == NR_AUTOLOAD_MSRS)) {
 		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
 		return;
@@ -3600,9 +3602,11 @@
 	 * secondary cpu-based controls.  Do not include those that
 	 * depend on CPUID bits, they are added later by vmx_cpuid_update.
 	 */
-	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
-		msrs->secondary_ctls_low,
-		msrs->secondary_ctls_high);
+	if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
+		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+		      msrs->secondary_ctls_low,
+		      msrs->secondary_ctls_high);
+
 	msrs->secondary_ctls_low = 0;
 	msrs->secondary_ctls_high &=
 		SECONDARY_EXEC_DESC |
@@ -8011,13 +8015,16 @@
 
 	kvm_mce_cap_supported |= MCG_LMCE_P;
 
-	return alloc_kvm_area();
+	r = alloc_kvm_area();
+	if (r)
+		goto out;
+	return 0;
 
 out:
 	for (i = 0; i < VMX_BITMAP_NR; i++)
 		free_page((unsigned long)vmx_bitmap[i]);
 
-    return r;
+	return r;
 }
 
 static __exit void hardware_unsetup(void)
@@ -8287,11 +8294,11 @@
 	if (r < 0)
 		goto out_vmcs02;
 
-	vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+	vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
 	if (!vmx->nested.cached_vmcs12)
 		goto out_cached_vmcs12;
 
-	vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+	vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
 	if (!vmx->nested.cached_shadow_vmcs12)
 		goto out_cached_shadow_vmcs12;
 
@@ -8466,6 +8473,7 @@
 	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
 		return;
 
+	hrtimer_cancel(&vmx->nested.preemption_timer);
 	vmx->nested.vmxon = false;
 	vmx->nested.smm.vmxon = false;
 	free_vpid(vmx->nested.vpid02);
@@ -11125,7 +11133,7 @@
 			 * Warn upon starting the first VM in a potentially
 			 * insecure environment.
 			 */
-			if (cpu_smt_control == CPU_SMT_ENABLED)
+			if (sched_smt_active())
 				pr_warn_once(L1TF_MSG_SMT);
 			if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
 				pr_warn_once(L1TF_MSG_L1D);
@@ -11471,6 +11479,8 @@
 			kunmap(vmx->nested.pi_desc_page);
 			kvm_release_page_dirty(vmx->nested.pi_desc_page);
 			vmx->nested.pi_desc_page = NULL;
+			vmx->nested.pi_desc = NULL;
+			vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
 		}
 		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
 		if (is_error_page(page))
@@ -11728,7 +11738,7 @@
 	    !nested_exit_intr_ack_set(vcpu) ||
 	    (vmcs12->posted_intr_nv & 0xff00) ||
 	    (vmcs12->posted_intr_desc_addr & 0x3f) ||
-	    (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
+	    (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
 		return -EINVAL;
 
 	/* tpr shadow is needed by all apicv features. */
@@ -13979,13 +13989,17 @@
 	else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
 		copy_shadow_to_vmcs12(vmx);
 
-	if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+	/*
+	 * Copy over the full allocated size of vmcs12 rather than just the size
+	 * of the struct.
+	 */
+	if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
 		return -EFAULT;
 
 	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
 	    vmcs12->vmcs_link_pointer != -1ull) {
 		if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
-				 get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
+				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
 			return -EFAULT;
 	}
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 68b53f0..3a7cf7c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2343,6 +2343,7 @@
 	case MSR_AMD64_PATCH_LOADER:
 	case MSR_AMD64_BU_CFG2:
 	case MSR_AMD64_DC_CFG:
+	case MSR_F15H_EX_CFG:
 		break;
 
 	case MSR_IA32_UCODE_REV:
@@ -2638,6 +2639,7 @@
 	case MSR_AMD64_BU_CFG2:
 	case MSR_IA32_PERF_CTL:
 	case MSR_AMD64_DC_CFG:
+	case MSR_F15H_EX_CFG:
 		msr_info->data = 0;
 		break;
 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
@@ -4902,6 +4904,13 @@
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
+	/*
+	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+	 * is returned, but our callers are not ready for that and they blindly
+	 * call kvm_inject_page_fault.  Ensure that they at least do not leak
+	 * uninitialized kernel stack memory into cr2 and error code.
+	 */
+	memset(exception, 0, sizeof(*exception));
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
 					  exception);
 }
@@ -6275,8 +6284,7 @@
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE &&
-		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+		if (r == EMULATE_DONE && ctxt->tf)
 			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -6866,10 +6874,10 @@
 	case KVM_HC_CLOCK_PAIRING:
 		ret = kvm_pv_clock_pairing(vcpu, a0, a1);
 		break;
+#endif
 	case KVM_HC_SEND_IPI:
 		ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
 		break;
-#endif
 	default:
 		ret = -KVM_ENOSYS;
 		break;
@@ -7304,7 +7312,7 @@
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
-	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
+	if (!kvm_apic_present(vcpu))
 		return;
 
 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 79778ab..a536651 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -36,8 +36,8 @@
 	u16 status, timer;
 
 	do {
-		outb(I8254_PORT_CONTROL,
-		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+		outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+		     I8254_PORT_CONTROL);
 		status = inb(I8254_PORT_COUNTER0);
 		timer  = inb(I8254_PORT_COUNTER0);
 		timer |= inb(I8254_PORT_COUNTER0) << 8;
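
The swap above is the whole bug: the x86 port-I/O helpers take the value first and the port second, so the original code sent the port number as the data byte. A hedged userspace sketch of the correct convention (needs root for ioperm(); glibc's outb() matches the kernel helper's argument order):

    #include <stdio.h>
    #include <sys/io.h>

    #define I8254_PORT_CONTROL    0x43
    #define I8254_CMD_READBACK    0xc0
    #define I8254_SELECT_COUNTER0 0x02

    int main(void)
    {
        if (ioperm(0x40, 4, 1)) { /* grant access to ports 0x40-0x43 */
            perror("ioperm");
            return 1;
        }
        /* value first, port second */
        outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);
        printf("counter0 latched\n");
        return 0;
    }
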
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a12afff..c05a818 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -53,10 +53,10 @@
 enum address_markers_idx {
 	USER_SPACE_NR = 0,
 	KERNEL_SPACE_NR,
-	LOW_KERNEL_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 	LDT_NR,
 #endif
+	LOW_KERNEL_NR,
 	VMALLOC_START_NR,
 	VMEMMAP_START_NR,
 #ifdef CONFIG_KASAN
@@ -64,9 +64,6 @@
 	KASAN_SHADOW_END_NR,
 #endif
 	CPU_ENTRY_AREA_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
-	LDT_NR,
-#endif
 #ifdef CONFIG_X86_ESPFIX64
 	ESPFIX_START_NR,
 #endif
@@ -493,11 +490,11 @@
 {
 #ifdef CONFIG_X86_64
 	/*
-	 * ffff800000000000 - ffff87ffffffffff is reserved for
-	 * the hypervisor.
+	 * A hole at the beginning of the kernel address space,
+	 * reserved for a hypervisor.
 	 */
-	return	(idx >= pgd_index(__PAGE_OFFSET) - 16) &&
-		(idx <  pgd_index(__PAGE_OFFSET));
+	return	(idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
+		(idx <  pgd_index(GUARD_HOLE_END_ADDR));
 #else
 	return false;
 #endif
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index faca978..d883869 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -932,7 +932,7 @@
 
 	pages = generic_max_swapfile_size();
 
-	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
 		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
 		unsigned long long l1tf_limit = l1tf_pfn_limit();
 		/*
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dd519f3..a3e9c6e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -585,7 +585,6 @@
 							   paddr_end,
 							   page_size_mask,
 							   prot);
-				__flush_tlb_all();
 				continue;
 			}
 			/*
@@ -628,7 +627,6 @@
 		pud_populate(&init_mm, pud, pmd);
 		spin_unlock(&init_mm.page_table_lock);
 	}
-	__flush_tlb_all();
 
 	update_page_count(PG_LEVEL_1G, pages);
 
@@ -669,7 +667,6 @@
 			paddr_last = phys_pud_init(pud, paddr,
 					paddr_end,
 					page_size_mask);
-			__flush_tlb_all();
 			continue;
 		}
 
@@ -681,7 +678,6 @@
 		p4d_populate(&init_mm, p4d, pud);
 		spin_unlock(&init_mm.page_table_lock);
 	}
-	__flush_tlb_all();
 
 	return paddr_last;
 }
@@ -734,8 +730,6 @@
 	if (pgd_changed)
 		sync_global_pgds(vaddr_start, vaddr_end - 1);
 
-	__flush_tlb_all();
-
 	return paddr_last;
 }
 
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 7ae3686..c9faf34 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -157,8 +157,8 @@
 	pmd = pmd_offset(pud, ppd->vaddr);
 	if (pmd_none(*pmd)) {
 		pte = ppd->pgtable_area;
-		memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
-		ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
+		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
+		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
 		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
 	}
 
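The fix above swaps sizeof(pte), the size of a pointer, for sizeof(*pte), the size of one page-table entry. On 64-bit x86 the two happen to be equal, which is why the bug was latent; the distinction, demonstrated standalone:

    #include <stdio.h>

    typedef struct { unsigned long val; } pte_t;

    int main(void)
    {
        pte_t *pte = 0;

        printf("sizeof(pte)  = %zu (size of the pointer)\n", sizeof(pte));
        printf("sizeof(*pte) = %zu (size of one entry)\n", sizeof(*pte));
        return 0;
    }
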
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 3d0c83e..a3c9ea2 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -519,8 +519,13 @@
 	 * for a "decoy" virtual address (bit 63 clear) passed to
 	 * set_memory_X(). __pa() on a "decoy" address results in a
 	 * physical address with bit 63 set.
+	 *
+	 * Decoy addresses are not present for 32-bit builds, see
+	 * set_mce_nospec().
 	 */
-	return address & __PHYSICAL_MASK;
+	if (IS_ENABLED(CONFIG_X86_64))
+		return address & __PHYSICAL_MASK;
+	return address;
 }
 
 /*
@@ -546,7 +551,11 @@
 
 	start = sanitize_phys(start);
 	end = sanitize_phys(end);
-	BUG_ON(start >= end); /* end is exclusive */
+	if (start >= end) {
+		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
+				start, end - 1, cattr_name(req_type));
+		return -EINVAL;
+	}
 
 	if (!pat_enabled()) {
 		/* This is identical to page table setting without PAT */
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index 526536c..ca1e8e6 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -50,8 +50,8 @@
 	word1 = read_pci_config_16(bus, slot, func, 0xc0);
 	word2 = read_pci_config_16(bus, slot, func, 0xc2);
 	if (word1 != word2) {
-		res.start = (word1 << 16) | 0x0000;
-		res.end   = (word2 << 16) | 0xffff;
+		res.start = ((resource_size_t) word1 << 16) | 0x0000;
+		res.end   = ((resource_size_t) word2 << 16) | 0xffff;
 		res.flags = IORESOURCE_MEM;
 		update_res(info, res.start, res.end, res.flags, 0);
 	}
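
The cast matters because word1/word2 are 16-bit values: word << 16 is computed in 32-bit int arithmetic, so the result can overflow (formally undefined once bit 15 is set) and gets truncated or sign-extended before landing in a 64-bit resource_size_t. A demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t word2 = 0x8fff;
        /* promoted to int: overflows into the sign bit, then sign-extends */
        uint64_t bad  = (word2 << 16) | 0xffff;
        /* widened first: the full 64-bit value survives */
        uint64_t good = ((uint64_t)word2 << 16) | 0xffff;

        printf("bad  = %#llx\n", (unsigned long long)bad);
        printf("good = %#llx\n", (unsigned long long)good);
        return 0;
    }
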
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 13f4485..bd372e8 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -641,6 +641,22 @@
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
 
+static void quirk_intel_th_dnv(struct pci_dev *dev)
+{
+	struct resource *r = &dev->resource[4];
+
+	/*
+	 * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
+	 * appears to be 4 MB in reality.
+	 */
+	if (r->end == r->start + 0x7ff) {
+		r->start = 0;
+		r->end   = 0x3fffff;
+		r->flags |= IORESOURCE_UNSET;
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
+
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 
 #define AMD_141b_MMIO_BASE(x)	(0x80 + (x) * 0x8)
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 4a6a5a2..eb33432 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -29,7 +29,8 @@
 
 struct uv_systab *uv_systab;
 
-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+			u64 a4, u64 a5)
 {
 	struct uv_systab *tab = uv_systab;
 	s64 ret;
@@ -51,6 +52,19 @@
 
 	return ret;
 }
+
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+{
+	s64 ret;
+
+	if (down_interruptible(&__efi_uv_runtime_lock))
+		return BIOS_STATUS_ABORT;
+
+	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+	up(&__efi_uv_runtime_lock);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(uv_bios_call);
 
 s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
@@ -59,10 +73,15 @@
 	unsigned long bios_flags;
 	s64 ret;
 
+	if (down_interruptible(&__efi_uv_runtime_lock))
+		return BIOS_STATUS_ABORT;
+
 	local_irq_save(bios_flags);
-	ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
 	local_irq_restore(bios_flags);
 
+	up(&__efi_uv_runtime_lock);
+
 	return ret;
 }
 
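The wrapper pattern introduced above — the raw call split out, a sleeping lock taken interruptibly around it, and a failed acquisition mapped to an abort status — in a minimal POSIX sketch (names and the placeholder call are illustrative; build with -pthread):

    #include <semaphore.h>
    #include <stdio.h>

    #define STATUS_ABORT (-4) /* stands in for BIOS_STATUS_ABORT (-EINTR) */

    static sem_t efi_runtime_lock;

    static long raw_bios_call(int which) { return which; /* placeholder */ }

    static long bios_call(int which)
    {
        long ret;

        if (sem_wait(&efi_runtime_lock)) /* kernel: down_interruptible() */
            return STATUS_ABORT;
        ret = raw_bios_call(which);
        sem_post(&efi_runtime_lock);     /* kernel: up() */
        return ret;
    }

    int main(void)
    {
        sem_init(&efi_runtime_lock, 0, 1);
        printf("ret=%ld\n", bios_call(42));
        sem_destroy(&efi_runtime_lock);
        return 0;
    }
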
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 52a7c3f..782f98b 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -899,10 +899,7 @@
 	val = native_read_msr_safe(msr, err);
 	switch (msr) {
 	case MSR_IA32_APICBASE:
-#ifdef CONFIG_X86_X2APIC
-		if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
-#endif
-			val &= ~X2APIC_ENABLE;
+		val &= ~X2APIC_ENABLE;
 		break;
 	}
 	return val;
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2c84c6a..c8f011e 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -640,19 +640,20 @@
 			  unsigned long limit)
 {
 	int i, nr, flush = 0;
-	unsigned hole_low, hole_high;
+	unsigned hole_low = 0, hole_high = 0;
 
 	/* The limit is the last byte to be touched */
 	limit--;
 	BUG_ON(limit >= FIXADDR_TOP);
 
+#ifdef CONFIG_X86_64
 	/*
 	 * 64-bit has a great big hole in the middle of the address
-	 * space, which contains the Xen mappings.  On 32-bit these
-	 * will end up making a zero-sized hole and so is a no-op.
+	 * space, which contains the Xen mappings.
 	 */
-	hole_low = pgd_index(USER_LIMIT);
-	hole_high = pgd_index(PAGE_OFFSET);
+	hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
+	hole_high = pgd_index(GUARD_HOLE_END_ADDR);
+#endif
 
 	nr = pgd_index(limit) + 1;
 	for (i = 0; i < nr; i++) {
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c84f1e0..01dcccf 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -361,8 +361,6 @@
 {
 	int cpu;
 
-	pvclock_resume();
-
 	if (xen_clockevent != &xen_vcpuop_clockevent)
 		return;
 
@@ -379,12 +377,15 @@
 };
 
 static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
+static u64 xen_clock_value_saved;
 
 void xen_save_time_memory_area(void)
 {
 	struct vcpu_register_time_memory_area t;
 	int ret;
 
+	xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
+
 	if (!xen_clock)
 		return;
 
@@ -404,7 +405,7 @@
 	int ret;
 
 	if (!xen_clock)
-		return;
+		goto out;
 
 	t.addr.v = &xen_clock->pvti;
 
@@ -421,6 +422,11 @@
 	if (ret != 0)
 		pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
 			  ret);
+
+out:
+	/* Need pvclock_resume() before using xen_clocksource_read(). */
+	pvclock_resume();
+	xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
 }
 
 static void xen_setup_vsyscall_time_info(void)
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
index 1090528..e46ae07 100644
--- a/arch/xtensa/boot/dts/xtfpga.dtsi
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -103,7 +103,7 @@
 			};
 		};
 
-		spi0: spi-master@0d0a0000 {
+		spi0: spi@0d0a0000 {
 			compatible = "cdns,xtfpga-spi";
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 11fed6c..b593816 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -33,6 +33,7 @@
 CONFIG_HOTPLUG_CPU=y
 # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
 # CONFIG_PCI is not set
+CONFIG_VECTORS_OFFSET=0x00002000
 CONFIG_XTENSA_PLATFORM_XTFPGA=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 9053a56..5bd38ea 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -280,12 +280,13 @@
 
 	movi	a2, cpu_start_ccount
 1:
+	memw
 	l32i	a3, a2, 0
 	beqi	a3, 0, 1b
 	movi	a3, 0
 	s32i	a3, a2, 0
-	memw
 1:
+	memw
 	l32i	a3, a2, 0
 	beqi	a3, 0, 1b
 	wsr	a3, ccount
@@ -321,11 +322,13 @@
 	rsr	a0, prid
 	neg	a2, a0
 	movi	a3, cpu_start_id
+	memw
 	s32i	a2, a3, 0
 #if XCHAL_DCACHE_IS_WRITEBACK
 	dhwbi	a3, 0
 #endif
 1:
+	memw
 	l32i	a2, a3, 0
 	dhi	a3, 0
 	bne	a2, a0, 1b
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 4bb6813..5a0e0bd 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -320,8 +320,8 @@
 
 		/* Stack layout: sp-4: ra, sp-3: sp' */
 
-		pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
-		sp = *(unsigned long *)sp - 3;
+		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
+		sp = SPILL_SLOT(sp, 1);
 	} while (count++ < 16);
 	return 0;
 }
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 932d646..be1f280 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -83,7 +83,7 @@
 {
 	unsigned i;
 
-	for (i = 0; i < max_cpus; ++i)
+	for_each_possible_cpu(i)
 		set_cpu_present(i, true);
 }
 
@@ -96,6 +96,11 @@
 	pr_info("%s: Core Count = %d\n", __func__, ncpus);
 	pr_info("%s: Core Id = %d\n", __func__, core_id);
 
+	if (ncpus > NR_CPUS) {
+		ncpus = NR_CPUS;
+		pr_info("%s: limiting core count by %d\n", __func__, ncpus);
+	}
+
 	for (i = 0; i < ncpus; ++i)
 		set_cpu_possible(i, true);
 }
@@ -195,9 +200,11 @@
 	int i;
 
 #ifdef CONFIG_HOTPLUG_CPU
-	cpu_start_id = cpu;
-	system_flush_invalidate_dcache_range(
-			(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+	WRITE_ONCE(cpu_start_id, cpu);
+	/* Pairs with the third memw in the cpu_restart */
+	mb();
+	system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
+					     sizeof(cpu_start_id));
 #endif
 	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
 
@@ -206,18 +213,21 @@
 			ccount = get_ccount();
 		while (!ccount);
 
-		cpu_start_ccount = ccount;
+		WRITE_ONCE(cpu_start_ccount, ccount);
 
-		while (time_before(jiffies, timeout)) {
+		do {
+			/*
+			 * Pairs with the first two memws in the
+			 * .Lboot_secondary.
+			 */
 			mb();
-			if (!cpu_start_ccount)
-				break;
-		}
+			ccount = READ_ONCE(cpu_start_ccount);
+		} while (ccount && time_before(jiffies, timeout));
 
-		if (cpu_start_ccount) {
+		if (ccount) {
 			smp_call_function_single(0, mx_cpu_stop,
-					(void *)cpu, 1);
-			cpu_start_ccount = 0;
+						 (void *)cpu, 1);
+			WRITE_ONCE(cpu_start_ccount, 0);
 			return -EIO;
 		}
 	}
@@ -237,6 +247,7 @@
 	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
 			__func__, cpu, idle, start_info.stack);
 
+	init_completion(&cpu_running);
 	ret = boot_secondary(cpu, idle);
 	if (ret == 0) {
 		wait_for_completion_timeout(&cpu_running,
@@ -298,8 +309,10 @@
 	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 	while (time_before(jiffies, timeout)) {
 		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
-				sizeof(cpu_start_id));
-		if (cpu_start_id == -cpu) {
+					       sizeof(cpu_start_id));
+		/* Pairs with the second memw in the cpu_restart */
+		mb();
+		if (READ_ONCE(cpu_start_id) == -cpu) {
 			platform_cpu_kill(cpu);
 			return;
 		}
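
The xtensa handshake now pairs explicit barriers with READ_ONCE()/WRITE_ONCE() so neither the compiler nor the hardware can reorder the publish and consume sides. A rough C11-atomics analogue of the protocol (single-file sketch; the real code must also flush and invalidate caches):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long cpu_start_ccount;

    static void boot_cpu(unsigned long ccount)
    {
        /* kernel: WRITE_ONCE(cpu_start_ccount, ccount) paired with mb() */
        atomic_store_explicit(&cpu_start_ccount, ccount,
                              memory_order_release);
    }

    static unsigned long secondary_cpu(void)
    {
        unsigned long v;

        /* kernel: memw; l32i loop until nonzero */
        while (!(v = atomic_load_explicit(&cpu_start_ccount,
                                          memory_order_acquire)))
            ;
        atomic_store_explicit(&cpu_start_ccount, 0, memory_order_relaxed);
        return v;
    }

    int main(void)
    {
        boot_cpu(12345);
        printf("ccount=%lu\n", secondary_cpu());
        return 0;
    }
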
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index fd524a5..378186b 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -89,7 +89,7 @@
 		container_of(evt, struct ccount_timer, evt);
 
 	if (timer->irq_enabled) {
-		disable_irq(evt->irq);
+		disable_irq_nosync(evt->irq);
 		timer->irq_enabled = 0;
 	}
 	return 0;
diff --git a/block/bio.c b/block/bio.c
index 55a5386..04f5c14 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -580,6 +580,14 @@
 }
 EXPORT_SYMBOL(bio_phys_segments);
 
+static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src)
+{
+#ifdef CONFIG_PFK
+	dst->bi_crypt_key = src->bi_crypt_key;
+	dst->bi_iter.bi_dun = src->bi_iter.bi_dun;
+#endif
+}
+
 /**
  * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
  * 	@bio: destination bio
@@ -609,7 +617,7 @@
 	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
-
+	bio_clone_crypt_key(bio, bio_src);
 	bio_clone_blkcg_association(bio, bio_src);
 }
 EXPORT_SYMBOL(__bio_clone_fast);
diff --git a/block/blk-core.c b/block/blk-core.c
index eb8b522..643b6e4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1832,6 +1832,9 @@
 	bio->bi_next = req->bio;
 	req->bio = bio;
 
+#ifdef CONFIG_PFK
+	WARN_ON(req->__dun || bio->bi_iter.bi_dun);
+#endif
 	req->__sector = bio->bi_iter.bi_sector;
 	req->__data_len += bio->bi_iter.bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
@@ -1981,6 +1984,9 @@
 	else
 		req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
 	req->write_hint = bio->bi_write_hint;
+#ifdef CONFIG_PFK
+	req->__dun = bio->bi_iter.bi_dun;
+#endif
 	blk_rq_bio_prep(req->q, req, bio);
 }
 EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
@@ -3123,8 +3129,13 @@
 	req->__data_len -= total_bytes;
 
 	/* update sector only for requests with clear definition of sector */
-	if (!blk_rq_is_passthrough(req))
+	if (!blk_rq_is_passthrough(req)) {
 		req->__sector += total_bytes >> 9;
+#ifdef CONFIG_PFK
+		if (req->__dun)
+			req->__dun += total_bytes >> 12;
+#endif
+	}
 
 	/* mixed attributes always follow the first bio */
 	if (req->rq_flags & RQF_MIXED_MERGE) {
@@ -3488,6 +3499,9 @@
 {
 	dst->cpu = src->cpu;
 	dst->__sector = blk_rq_pos(src);
+#ifdef CONFIG_PFK
+	dst->__dun = blk_rq_dun(src);
+#endif
 	dst->__data_len = blk_rq_bytes(src);
 	if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
 		dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ce41f66..7648794 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -424,7 +424,7 @@
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
-	blk_mq_run_hw_queue(hctx, true);
+	blk_mq_sched_restart(hctx);
 }
 
 /**
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 19923f8..b154e05 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -72,6 +72,7 @@
 #include <linux/sched/loadavg.h>
 #include <linux/sched/signal.h>
 #include <trace/events/block.h>
+#include <linux/blk-mq.h>
 #include "blk-rq-qos.h"
 #include "blk-stat.h"
 
@@ -568,6 +569,9 @@
 		return;
 
 	enabled = blk_iolatency_enabled(iolat->blkiolat);
+	if (!enabled)
+		return;
+
 	while (blkg && blkg->parent) {
 		iolat = blkg_to_lat(blkg);
 		if (!iolat) {
@@ -577,7 +581,7 @@
 		rqw = &iolat->rq_wait;
 
 		atomic_dec(&rqw->inflight);
-		if (!enabled || iolat->min_lat_nsec == 0)
+		if (iolat->min_lat_nsec == 0)
 			goto next;
 		iolatency_record_time(iolat, &bio->bi_issue, now,
 				      issue_as_root);
@@ -721,10 +725,13 @@
 	return 0;
 }
 
-static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+/*
+ * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
+ * return 0.
+ */
+static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
 {
 	struct iolatency_grp *iolat = blkg_to_lat(blkg);
-	struct blk_iolatency *blkiolat = iolat->blkiolat;
 	u64 oldval = iolat->min_lat_nsec;
 
 	iolat->min_lat_nsec = val;
@@ -733,9 +740,10 @@
 				    BLKIOLATENCY_MAX_WIN_SIZE);
 
 	if (!oldval && val)
-		atomic_inc(&blkiolat->enabled);
+		return 1;
 	if (oldval && !val)
-		atomic_dec(&blkiolat->enabled);
+		return -1;
+	return 0;
 }
 
 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -768,6 +776,7 @@
 	u64 lat_val = 0;
 	u64 oldval;
 	int ret;
+	int enable = 0;
 
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
 	if (ret)
@@ -803,7 +812,12 @@
 	blkg = ctx.blkg;
 	oldval = iolat->min_lat_nsec;
 
-	iolatency_set_min_lat_nsec(blkg, lat_val);
+	enable = iolatency_set_min_lat_nsec(blkg, lat_val);
+	if (enable) {
+		WARN_ON_ONCE(!blk_get_queue(blkg->q));
+		blkg_get(blkg);
+	}
+
 	if (oldval != iolat->min_lat_nsec) {
 		iolatency_clear_scaling(blkg);
 	}
@@ -811,6 +825,24 @@
 	ret = 0;
 out:
 	blkg_conf_finish(&ctx);
+	if (ret == 0 && enable) {
+		struct iolatency_grp *tmp = blkg_to_lat(blkg);
+		struct blk_iolatency *blkiolat = tmp->blkiolat;
+
+		blk_mq_freeze_queue(blkg->q);
+
+		if (enable == 1)
+			atomic_inc(&blkiolat->enabled);
+		else if (enable == -1)
+			atomic_dec(&blkiolat->enabled);
+		else
+			WARN_ON_ONCE(1);
+
+		blk_mq_unfreeze_queue(blkg->q);
+
+		blkg_put(blkg);
+		blk_put_queue(blkg->q);
+	}
 	return ret ?: nbytes;
 }
 
@@ -910,8 +942,14 @@
 {
 	struct iolatency_grp *iolat = pd_to_lat(pd);
 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
+	struct blk_iolatency *blkiolat = iolat->blkiolat;
+	int ret;
 
-	iolatency_set_min_lat_nsec(blkg, 0);
+	ret = iolatency_set_min_lat_nsec(blkg, 0);
+	if (ret == 1)
+		atomic_inc(&blkiolat->enabled);
+	if (ret == -1)
+		atomic_dec(&blkiolat->enabled);
 	iolatency_clear_scaling(blkg);
 }
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2e04219..4981eda 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,7 +9,7 @@
 #include <linux/scatterlist.h>
 
 #include <trace/events/block.h>
-
+#include <linux/pfk.h>
 #include "blk.h"
 
 static struct bio *blk_bio_discard_split(struct request_queue *q,
@@ -670,6 +670,11 @@
 	}
 }
 
+static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
+{
+	return !pfk_allow_merge_bio(bio, nxt);
+}
+
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
@@ -708,6 +713,9 @@
 	if (req->write_hint != next->write_hint)
 		return NULL;
 
+	if (crypto_not_mergeable(req->bio, next->bio))
+		return NULL;
+
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
@@ -838,11 +846,18 @@
 	if (rq->write_hint != bio->bi_write_hint)
 		return false;
 
+	if (crypto_not_mergeable(rq->bio, bio))
+		return false;
+
 	return true;
 }
 
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
+#ifdef CONFIG_PFK
+	if (blk_rq_dun(rq) || bio_dun(bio))
+		return ELEVATOR_NO_MERGE;
+#endif
 	if (req_op(rq) == REQ_OP_DISCARD &&
 	    queue_max_discard_segments(rq->q) > 1)
 		return ELEVATOR_DISCARD_MERGE;
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 29bfe80..da1de19 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -54,13 +54,14 @@
  * Mark a hardware queue as needing a restart. For shared queues, maintain
  * a count of how many hardware queues are marked for restart.
  */
-static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
 	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
 		return;
 
 	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 }
+EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
 
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
 {
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 4e028ee..fe66076 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -15,6 +15,7 @@
 				struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
+void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
diff --git a/block/blk-stat.h b/block/blk-stat.h
index f4a1568..17b47a8 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -145,6 +145,11 @@
 	mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs));
 }
 
+static inline void blk_stat_deactivate(struct blk_stat_callback *cb)
+{
+	del_timer_sync(&cb->timer);
+}
+
 /**
  * blk_stat_activate_msecs() - Gather block statistics during a time window in
  * milliseconds.
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 8ac93fc..0c62bf4 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -760,8 +760,10 @@
 	if (!rqos)
 		return;
 	rwb = RQWB(rqos);
-	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
+		blk_stat_deactivate(rwb->cb);
 		rwb->wb_normal = 0;
+	}
 }
 EXPORT_SYMBOL_GPL(wbt_disable_default);
 
diff --git a/block/blk.h b/block/blk.h
index 977d4b5..8c2a9cb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -54,15 +54,6 @@
 		lockdep_assert_held(q->queue_lock);
 }
 
-static inline void queue_flag_set_unlocked(unsigned int flag,
-					   struct request_queue *q)
-{
-	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
-	    kref_read(&q->kobj.kref))
-		lockdep_assert_held(q->queue_lock);
-	__set_bit(flag, &q->queue_flags);
-}
-
 static inline void queue_flag_clear_unlocked(unsigned int flag,
 					     struct request_queue *q)
 {
diff --git a/block/elevator.c b/block/elevator.c
index fae58b2..a54870f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -422,7 +422,7 @@
 {
 	struct elevator_queue *e = q->elevator;
 	struct request *__rq;
-
+	enum elv_merge ret;
 	/*
 	 * Levels of merges:
 	 * 	nomerges:  No merges at all attempted
@@ -435,9 +435,11 @@
 	/*
 	 * First try one-hit cache.
 	 */
-	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
-		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
+	if (q->last_merge) {
+		if (!elv_bio_merge_ok(q->last_merge, bio))
+			return ELEVATOR_NO_MERGE;
 
+		ret = blk_try_merge(q->last_merge, bio);
 		if (ret != ELEVATOR_NO_MERGE) {
 			*req = q->last_merge;
 			return ret;
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 099a9e05..d5e21ce 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -373,9 +373,16 @@
 
 /*
  * One confusing aspect here is that we get called for a specific
- * hardware queue, but we return a request that may not be for a
+ * hardware queue, but we may return a request that is for a
  * different hardware queue. This is because mq-deadline has shared
  * state for all hardware queues, in terms of sorting, FIFOs, etc.
+ *
+ * For a zoned block device, __dd_dispatch_request() may return NULL
+ * if all the queued write requests are directed at zones that are already
+ * locked due to ongoing write requests. In this case, make sure to mark
+ * the queue as needing a restart to ensure that the queue is run again
+ * and the pending writes dispatched once the target zones for the ongoing
+ * write requests are unlocked in dd_finish_request().
  */
 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 {
@@ -384,6 +391,9 @@
 
 	spin_lock(&dd->lock);
 	rq = __dd_dispatch_request(dd);
+	if (!rq && blk_queue_is_zoned(hctx->queue) &&
+	    !list_empty(&dd->fifo_list[WRITE]))
+		blk_mq_sched_mark_restart_hctx(hctx);
 	spin_unlock(&dd->lock);
 
 	return rq;
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d3d14e8..5f8db5c5 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -249,9 +249,10 @@
 	.uevent		= part_uevent,
 };
 
-static void delete_partition_rcu_cb(struct rcu_head *head)
+static void delete_partition_work_fn(struct work_struct *work)
 {
-	struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
+	struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct,
+					rcu_work);
 
 	part->start_sect = 0;
 	part->nr_sects = 0;
@@ -262,7 +263,8 @@
 void __delete_partition(struct percpu_ref *ref)
 {
 	struct hd_struct *part = container_of(ref, struct hd_struct, ref);
-	call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+	INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn);
+	queue_rcu_work(system_wq, &part->rcu_work);
 }
 
 /*
diff --git a/build.config.cuttlefish.aarch64 b/build.config.cuttlefish.aarch64
index 5fb372d..fe921b4 100644
--- a/build.config.cuttlefish.aarch64
+++ b/build.config.cuttlefish.aarch64
@@ -6,7 +6,7 @@
 EXTRA_CMDS=''
 KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r346389b/bin
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r349610/bin
 LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
 FILES="
 arch/arm64/boot/Image.gz
diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64
index a81cb54..31e4057 100644
--- a/build.config.cuttlefish.x86_64
+++ b/build.config.cuttlefish.x86_64
@@ -6,7 +6,7 @@
 EXTRA_CMDS=''
 KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r346389b/bin
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r349610/bin
 LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
 FILES="
 arch/x86/boot/bzImage
diff --git a/crypto/Kconfig b/crypto/Kconfig
index dd2f67b..fdaa0dc 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1084,7 +1084,8 @@
 	  8 for decryption), this implementation only uses just two S-boxes of
 	  256 bytes each, and attempts to eliminate data dependent latencies by
 	  prefetching the entire table into the cache at the start of each
-	  block.
+	  block. Interrupts are also disabled to avoid races where cachelines
+	  are evicted when the CPU is interrupted to do something else.
 
 config CRYPTO_AES_586
 	tristate "AES cipher algorithms (i586)"
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 2dfcf12..5564e73 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -9,7 +9,7 @@
  * Adiantum is a tweakable, length-preserving encryption mode designed for fast
  * and secure disk encryption, especially on CPUs without dedicated crypto
  * instructions.  Adiantum encrypts each sector using the XChaCha12 stream
- * cipher, two passes of an ε-almost-∆-universal (εA∆U) hash function based on
+ * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
  * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
  * 16-byte block.  See the paper for details:
  *
@@ -21,12 +21,12 @@
  *	- Stream cipher: XChaCha12 or XChaCha20
  *	- Block cipher: any with a 128-bit block size and 256-bit key
  *
- * This implementation doesn't currently allow other εA∆U hash functions, i.e.
+ * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
  * HPolyC is not supported.  This is because Adiantum is ~20% faster than HPolyC
- * but still provably as secure, and also the εA∆U hash function of HBSH is
+ * but still provably as secure, and also the ε-∆U hash function of HBSH is
  * formally defined to take two inputs (tweak, message) which makes it difficult
  * to wrap with the crypto_shash API.  Rather, some details need to be handled
- * here.  Nevertheless, if needed in the future, support for other εA∆U hash
+ * here.  Nevertheless, if needed in the future, support for other ε-∆U hash
  * functions could be added here.
  */
 
@@ -41,7 +41,7 @@
 #include "internal.h"
 
 /*
- * Size of right-hand block of input data, in bytes; also the size of the block
+ * Size of right-hand part of input data, in bytes; also the size of the block
  * cipher's block size and the hash function's output.
  */
 #define BLOCKCIPHER_BLOCK_SIZE		16
@@ -77,7 +77,7 @@
 struct adiantum_request_ctx {
 
 	/*
-	 * Buffer for right-hand block of data, i.e.
+	 * Buffer for right-hand part of data, i.e.
 	 *
 	 *    P_L => P_M => C_M => C_R when encrypting, or
 	 *    C_R => C_M => P_M => P_L when decrypting.
@@ -93,8 +93,8 @@
 	bool enc; /* true if encrypting, false if decrypting */
 
 	/*
-	 * The result of the Poly1305 εA∆U hash function applied to
-	 * (message length, tweak).
+	 * The result of the Poly1305 ε-∆U hash function applied to
+	 * (bulk length, tweak)
 	 */
 	le128 header_hash;
 
@@ -213,13 +213,16 @@
 }
 
 /*
- * Apply the Poly1305 εA∆U hash function to (message length, tweak) and save the
- * result to rctx->header_hash.
+ * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
+ * result to rctx->header_hash.  This is the calculation
  *
- * This value is reused in both the first and second hash steps.  Specifically,
- * it's added to the result of an independently keyed εA∆U hash function (for
- * equal length inputs only) taken over the message.  This gives the overall
- * Adiantum hash of the (tweak, message) pair.
+ *	H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
+ *
+ * from the procedure in section 6.4 of the Adiantum paper.  The resulting value
+ * is reused in both the first and second hash steps.  Specifically, it's added
+ * to the result of an independently keyed ε-∆U hash function (for equal length
+ * inputs only) taken over the left-hand part (the "bulk") of the message, to
+ * give the overall Adiantum hash of the (tweak, left-hand part) pair.
  */
 static void adiantum_hash_header(struct skcipher_request *req)
 {
@@ -248,7 +251,7 @@
 	poly1305_core_emit(&state, &rctx->header_hash);
 }
 
-/* Hash the left-hand block (the "bulk") of the message using NHPoly1305 */
+/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
 static int adiantum_hash_message(struct skcipher_request *req,
 				 struct scatterlist *sgl, le128 *digest)
 {
@@ -536,6 +539,8 @@
 	ictx = skcipher_instance_ctx(inst);
 
 	/* Stream cipher, e.g. "xchacha12" */
+	crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
+				  skcipher_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
 				   0, crypto_requires_sync(algt->type,
 							   algt->mask));
@@ -544,13 +549,15 @@
 	streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
 
 	/* Block cipher, e.g. "aes" */
+	crypto_set_spawn(&ictx->blockcipher_spawn,
+			 skcipher_crypto_instance(inst));
 	err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
 				CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
 	if (err)
 		goto out_drop_streamcipher;
 	blockcipher_alg = ictx->blockcipher_spawn.alg;
 
-	/* NHPoly1305 εA∆U hash function */
+	/* NHPoly1305 ε-∆U hash function */
 	_hash_alg = crypto_alg_mod_lookup(nhpoly1305_name,
 					  CRYPTO_ALG_TYPE_SHASH,
 					  CRYPTO_ALG_TYPE_MASK);
@@ -561,10 +568,8 @@
 	hash_alg = __crypto_shash_alg(_hash_alg);
 	err = crypto_init_shash_spawn(&ictx->hash_spawn, hash_alg,
 				      skcipher_crypto_instance(inst));
-	if (err) {
-		crypto_mod_put(_hash_alg);
-		goto out_drop_blockcipher;
-	}
+	if (err)
+		goto out_put_hash;
 
 	/* Check the set of algorithms */
 	if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
@@ -590,6 +595,8 @@
 		     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_hash;
 
+	inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags &
+				   CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
 	inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
 	inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
@@ -619,10 +626,13 @@
 	if (err)
 		goto out_drop_hash;
 
+	crypto_mod_put(_hash_alg);
 	return 0;
 
 out_drop_hash:
 	crypto_drop_shash(&ictx->hash_spawn);
+out_put_hash:
+	crypto_mod_put(_hash_alg);
 out_drop_blockcipher:
 	crypto_drop_spawn(&ictx->blockcipher_spawn);
 out_drop_streamcipher:
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
index 03023b2..1ff9785 100644
--- a/crypto/aes_ti.c
+++ b/crypto/aes_ti.c
@@ -269,6 +269,7 @@
 	const u32 *rkp = ctx->key_enc + 4;
 	int rounds = 6 + ctx->key_length / 4;
 	u32 st0[4], st1[4];
+	unsigned long flags;
 	int round;
 
 	st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
@@ -276,6 +277,12 @@
 	st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
 	st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
 
+	/*
+	 * Temporarily disable interrupts to avoid races where cachelines are
+	 * evicted when the CPU is interrupted to do something else.
+	 */
+	local_irq_save(flags);
+
 	st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
 	st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
 	st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
@@ -300,6 +307,8 @@
 	put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
 	put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
 	put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
+
+	local_irq_restore(flags);
 }
 
 static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -308,6 +317,7 @@
 	const u32 *rkp = ctx->key_dec + 4;
 	int rounds = 6 + ctx->key_length / 4;
 	u32 st0[4], st1[4];
+	unsigned long flags;
 	int round;
 
 	st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
@@ -315,6 +325,12 @@
 	st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
 	st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
 
+	/*
+	 * Temporarily disable interrupts to avoid races where cachelines are
+	 * evicted when the CPU is interrupted to do something else.
+	 */
+	local_irq_save(flags);
+
 	st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
 	st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
 	st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
@@ -339,6 +355,8 @@
 	put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
 	put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
 	put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
+
+	local_irq_restore(flags);
 }
 
 static struct crypto_alg aes_alg = {
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 17eb09d..ec78a04 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -122,8 +122,10 @@
 
 int af_alg_release(struct socket *sock)
 {
-	if (sock->sk)
+	if (sock->sk) {
 		sock_put(sock->sk);
+		sock->sk = NULL;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(af_alg_release);
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 37f54d1..4be293a 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -58,14 +58,22 @@
 		return -EINVAL;
 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
 		return -EINVAL;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+	/*
+	 * RTA_OK() didn't align the rtattr's payload when validating that it
+	 * fits in the buffer.  Yet, the keys should start on the next 4-byte
+	 * aligned boundary.  To avoid confusion, require that the rtattr
+	 * payload be exactly the param struct, which has a 4-byte aligned size.
+	 */
+	if (RTA_PAYLOAD(rta) != sizeof(*param))
 		return -EINVAL;
+	BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
 
 	param = RTA_DATA(rta);
 	keys->enckeylen = be32_to_cpu(param->enckeylen);
 
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
+	key += rta->rta_len;
+	keylen -= rta->rta_len;
 
 	if (keylen < keys->enckeylen)
 		return -EINVAL;
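To see why the explicit RTA_ALIGN() on rta->rta_len could be dropped: struct crypto_authenc_key_param holds a single __be32, so once the payload is required to be exactly sizeof(*param) == 4 bytes, rta->rta_len is RTA_LENGTH(4) == 8, which is already a multiple of RTA_ALIGNTO (4). A small illustrative helper pinning down those invariants (illustration only, not part of the patch):

	static inline void authenc_key_layout_checks(void)
	{
		/* The param struct is a single __be32. */
		BUILD_BUG_ON(sizeof(struct crypto_authenc_key_param) != 4);
		/*
		 * Hence rta_len == RTA_LENGTH(4) == 8 is 4-byte aligned and
		 * RTA_ALIGN(rta->rta_len) == rta->rta_len.
		 */
		BUILD_BUG_ON(RTA_LENGTH(sizeof(struct crypto_authenc_key_param)) %
			     RTA_ALIGNTO);
	}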
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 80a25cc..4741fe8 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -279,7 +279,7 @@
 	struct aead_request *req = areq->data;
 
 	err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
-	aead_request_complete(req, err);
+	authenc_esn_request_complete(req, err);
 }
 
 static int crypto_authenc_esn_decrypt(struct aead_request *req)
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 20987d0..e81e456 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -144,7 +144,7 @@
 
 	do {
 		crypto_cfb_encrypt_one(tfm, iv, dst);
-		crypto_xor(dst, iv, bsize);
+		crypto_xor(dst, src, bsize);
 		iv = src;
 
 		src += bsize;
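For reference, CFB runs the block cipher in the encrypt direction for both operations:

	C_i = E_K(C_{i-1}) ⊕ P_i,    P_i = E_K(C_{i-1}) ⊕ C_i,    C_0 = IV

so after crypto_cfb_encrypt_one() writes the keystream block E_K(C_{i-1}) into dst, decryption must XOR it with the current ciphertext block (src), not with the previous one still held in iv; that is what the one-word fix above restores.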
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 8facafd..adcce31 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -842,15 +842,23 @@
 
 static void ecc_point_mult(struct ecc_point *result,
 			   const struct ecc_point *point, const u64 *scalar,
-			   u64 *initial_z, u64 *curve_prime,
+			   u64 *initial_z, const struct ecc_curve *curve,
 			   unsigned int ndigits)
 {
 	/* R0 and R1 */
 	u64 rx[2][ECC_MAX_DIGITS];
 	u64 ry[2][ECC_MAX_DIGITS];
 	u64 z[ECC_MAX_DIGITS];
+	u64 sk[2][ECC_MAX_DIGITS];
+	u64 *curve_prime = curve->p;
 	int i, nb;
-	int num_bits = vli_num_bits(scalar, ndigits);
+	int num_bits;
+	int carry;
+
+	carry = vli_add(sk[0], scalar, curve->n, ndigits);
+	vli_add(sk[1], sk[0], curve->n, ndigits);
+	scalar = sk[!carry];
+	num_bits = sizeof(u64) * ndigits * 8 + 1;
 
 	vli_set(rx[1], point->x, ndigits);
 	vli_set(ry[1], point->y, ndigits);
@@ -1004,7 +1012,7 @@
 		goto out;
 	}
 
-	ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
+	ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
 	if (ecc_point_is_zero(pk)) {
 		ret = -EAGAIN;
 		goto err_free_point;
@@ -1090,7 +1098,7 @@
 		goto err_alloc_product;
 	}
 
-	ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
+	ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
 
 	ecc_swap_digits(product->x, secret, ndigits);
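The new prologue in ecc_point_mult() is a timing hardening of the Montgomery ladder: since n·P = O on a curve of order n, adding n to the scalar does not change the resulting point, and the code substitutes

	k' = k + n   (when the first vli_add() carries out), or
	k' = k + 2n  (otherwise),

then always iterates over num_bits = 64·ndigits + 1 bits. With k' ≡ k (mod n) and a fixed iteration count, the ladder's running time no longer depends on the bit length of the secret scalar.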
 
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index c838585..ec831a5 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -9,15 +9,15 @@
  * "NHPoly1305" is the main component of Adiantum hashing.
  * Specifically, it is the calculation
  *
- *	H_M ← Poly1305_{K_M}(NH_{K_N}(pad_{128}(M)))
+ *	H_L ← Poly1305_{K_L}(NH_{K_N}(pad_{128}(L)))
  *
- * from the procedure in section A.5 of the Adiantum paper [1].  It is an
- * ε-almost-∆-universal (εA∆U) hash function for equal-length inputs over
+ * from the procedure in section 6.4 of the Adiantum paper [1].  It is an
+ * ε-almost-∆-universal (ε-∆U) hash function for equal-length inputs over
  * Z/(2^{128}Z), where the "∆" operation is addition.  It hashes 1024-byte
  * chunks of the input with the NH hash function [2], reducing the input length
  * by 32x.  The resulting NH digests are evaluated as a polynomial in
  * GF(2^{130}-5), like in the Poly1305 MAC [3].  Note that the polynomial
- * evaluation by itself would suffice to achieve the εA∆U property; NH is used
+ * evaluation by itself would suffice to achieve the ε-∆U property; NH is used
  * for performance since it's over twice as fast as Poly1305.
  *
  * This is *not* a cryptographic hash function; do not use it as such!
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9a5c60f..c0cf87a 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -100,7 +100,7 @@
 
 	for (i = 0; i <= 63; i++) {
 
-		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
 
 		ss2 = ss1 ^ rol32(a, 12);
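The sm3 change masks the rotate count because i runs up to 63 while rotation of a 32-bit word is periodic mod 32; shifting a 32-bit value by 32 or more is undefined behaviour in C. A standalone count-safe rotate, for illustration:

	#include <stdint.h>

	static inline uint32_t rol32_safe(uint32_t word, unsigned int shift)
	{
		/*
		 * Reduce first: rotating by 32 is the identity, and a raw
		 * shift of a 32-bit value by >= 32 is undefined behaviour.
		 */
		shift &= 31;
		if (!shift)
			return word;
		return (word << shift) | (word >> (32 - shift));
	}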
 
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a129c12..740ef57 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1736,6 +1736,7 @@
 		ret += tcrypt_test("xts(aes)");
 		ret += tcrypt_test("ctr(aes)");
 		ret += tcrypt_test("rfc3686(ctr(aes))");
+		ret += tcrypt_test("cfb(aes)");
 		break;
 
 	case 11:
@@ -2062,6 +2063,10 @@
 				speed_template_16_24_32);
 		test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
 				speed_template_16_24_32);
+		test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
+				speed_template_16_24_32);
+		test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
+				speed_template_16_24_32);
 		break;
 
 	case 201:
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index a843ae2..1ffa4b3 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2697,6 +2697,13 @@
 			}
 		}
 	}, {
+		.alg = "cfb(aes)",
+		.test = alg_test_skcipher,
+		.fips_allowed = 1,
+		.suite = {
+			.cipher = __VECS(aes_cfb_tv_template)
+		},
+	}, {
 		.alg = "chacha20",
 		.test = alg_test_skcipher,
 		.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index e2c259f..dce4bca 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -12575,6 +12575,82 @@
 	},
 };
 
+static const struct cipher_testvec aes_cfb_tv_template[] = {
+	{ /* From NIST SP800-38A */
+		.key	= "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+			  "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen	= 16,
+		.iv	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ptext	= "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+			  "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+			  "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+			  "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+			  "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+			  "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.ctext	= "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+			  "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+			  "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
+			  "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
+			  "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
+			  "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
+			  "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
+			  "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
+		.len	= 64,
+	}, {
+		.key	= "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+			  "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+			  "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+		.klen	= 24,
+		.iv	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ptext	= "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+			  "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+			  "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+			  "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+			  "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+			  "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.ctext	= "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
+			  "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
+			  "\x67\xce\x7f\x7f\x81\x17\x36\x21"
+			  "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
+			  "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
+			  "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
+			  "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
+			  "\x42\xae\x8f\xba\x58\x4b\x09\xff",
+		.len	= 64,
+	}, {
+		.key	= "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+			  "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+			  "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+			  "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen	= 32,
+		.iv	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ptext	= "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+			  "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+			  "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+			  "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+			  "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+			  "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.ctext	= "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
+			  "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
+			  "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
+			  "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
+			  "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
+			  "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
+			  "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
+			  "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
+		.len	= 64,
+	},
+};
+
 static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
 	{ /* Input data from RFC 2410 Case 1 */
 #ifdef __LITTLE_ENDIAN
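With the template registration, tcrypt case, and test vectors above wired up, cfb(aes) also becomes reachable from userspace via AF_ALG. A minimal sketch (error handling elided; key, IV, and plaintext are the first NIST SP800-38A vector from the table above, so the printed ciphertext should start 3b3fd92e...):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "skcipher",
			.salg_name   = "cfb(aes)",
		};
		unsigned char key[16] = {
			0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
			0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c,
		};
		unsigned char ivb[16] = {
			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		};
		unsigned char pt[16] = {
			0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
			0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
		};
		unsigned char ct[16];
		union {
			struct cmsghdr hdr;
			char buf[CMSG_SPACE(sizeof(__u32)) +
				 CMSG_SPACE(sizeof(struct af_alg_iv) + 16)];
		} cbuf = { 0 };
		struct af_alg_iv *alg_iv;
		struct msghdr msg = { 0 };
		struct cmsghdr *cmsg;
		struct iovec iov;
		int tfm, op, i;

		tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
		setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
		op = accept(tfm, NULL, 0);

		msg.msg_control = cbuf.buf;
		msg.msg_controllen = sizeof(cbuf.buf);

		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_ALG;
		cmsg->cmsg_type = ALG_SET_OP;
		cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

		cmsg = CMSG_NXTHDR(&msg, cmsg);
		cmsg->cmsg_level = SOL_ALG;
		cmsg->cmsg_type = ALG_SET_IV;
		cmsg->cmsg_len = CMSG_LEN(sizeof(*alg_iv) + sizeof(ivb));
		alg_iv = (void *)CMSG_DATA(cmsg);
		alg_iv->ivlen = sizeof(ivb);
		memcpy(alg_iv->iv, ivb, sizeof(ivb));

		iov.iov_base = pt;
		iov.iov_len = sizeof(pt);
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;

		sendmsg(op, &msg, 0);
		read(op, ct, sizeof(ct));

		for (i = 0; i < 16; i++)
			printf("%02x", ct[i]);	/* expect 3b3fd92e... */
		printf("\n");
		return 0;
	}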
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 7e8029c..b37e4b5 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -57,6 +57,8 @@
 
 source "drivers/i2c/Kconfig"
 
+source "drivers/i3c/Kconfig"
+
 source "drivers/spi/Kconfig"
 
 source "drivers/spmi/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index abf600a..120f0a0 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -114,7 +114,7 @@
 obj-$(CONFIG_GAMEPORT)		+= input/gameport/
 obj-$(CONFIG_INPUT)		+= input/
 obj-$(CONFIG_RTC_LIB)		+= rtc/
-obj-y				+= i2c/ media/
+obj-y				+= i2c/ i3c/ media/
 obj-$(CONFIG_PPS)		+= pps/
 obj-y				+= ptp/
 obj-$(CONFIG_W1)		+= w1/
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 02c6fd9..f008ba7 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -691,6 +691,8 @@
 {
 	__ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);
 
+	ghes_clear_estatus(ghes);
+
 	/* reboot to log the error! */
 	if (!panic_timeout)
 		panic_timeout = ghes_panic_timeout;
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index e938576..e48eebc 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -951,9 +951,10 @@
 {
 	struct acpi_iort_node *node;
 	struct acpi_iort_root_complex *rc;
+	struct pci_bus *pbus = to_pci_dev(dev)->bus;
 
 	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
-			      iort_match_node_callback, dev);
+			      iort_match_node_callback, &pbus->dev);
 	if (!node || node->revision < 1)
 		return -ENODEV;
 
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 75b331f..f530d35 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -391,6 +391,32 @@
 	return id;
 }
 
+static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
+		struct nd_cmd_pkg *call_pkg)
+{
+	if (call_pkg) {
+		int i;
+
+		if (nfit_mem->family != call_pkg->nd_family)
+			return -ENOTTY;
+
+		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
+			if (call_pkg->nd_reserved2[i])
+				return -EINVAL;
+		return call_pkg->nd_command;
+	}
+
+	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
+	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+		return cmd;
+
+	/*
+	 * Force function number validation to fail since 0 is never
+	 * published as a valid function in dsm_mask.
+	 */
+	return 0;
+}
+
 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
 {
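For reference, the passthrough path that cmd_to_func() now validates is driven from userspace with an ND_CMD_CALL envelope. A hedged sketch, assuming an already-open DIMM device fd; nd_call() and its arguments are illustrative, not an existing helper:

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/ndctl.h>

	/*
	 * Illustration only: issue DSM function `func` with no input payload
	 * via the ND_CMD_CALL envelope; cmd_to_func() above turns this back
	 * into the function number and checks it against dsm_mask.
	 */
	static int nd_call(int dimm_fd, unsigned int family, unsigned int func,
			   size_t out_size)
	{
		struct nd_cmd_pkg *pkg;
		int rc;

		pkg = calloc(1, sizeof(*pkg) + out_size);
		if (!pkg)
			return -1;
		pkg->nd_family = family;	/* e.g. NVDIMM_FAMILY_INTEL */
		pkg->nd_command = func;		/* function 0 is now rejected */
		pkg->nd_size_out = out_size;	/* nd_reserved2[] must stay 0 */

		rc = ioctl(dimm_fd, ND_IOCTL_CALL, pkg);
		free(pkg);
		return rc;
	}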
@@ -404,30 +430,23 @@
 	unsigned long cmd_mask, dsm_mask;
 	u32 offset, fw_status = 0;
 	acpi_handle handle;
-	unsigned int func;
 	const guid_t *guid;
-	int rc, i;
+	int func, rc, i;
 
 	if (cmd_rc)
 		*cmd_rc = -EINVAL;
-	func = cmd;
-	if (cmd == ND_CMD_CALL) {
-		call_pkg = buf;
-		func = call_pkg->nd_command;
-
-		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
-			if (call_pkg->nd_reserved2[i])
-				return -EINVAL;
-	}
 
 	if (nvdimm) {
 		struct acpi_device *adev = nfit_mem->adev;
 
 		if (!adev)
 			return -ENOTTY;
-		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
-			return -ENOTTY;
 
+		if (cmd == ND_CMD_CALL)
+			call_pkg = buf;
+		func = cmd_to_func(nfit_mem, cmd, call_pkg);
+		if (func < 0)
+			return func;
 		dimm_name = nvdimm_name(nvdimm);
 		cmd_name = nvdimm_cmd_name(cmd);
 		cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -438,6 +457,7 @@
 	} else {
 		struct acpi_device *adev = to_acpi_dev(acpi_desc);
 
+		func = cmd;
 		cmd_name = nvdimm_bus_cmd_name(cmd);
 		cmd_mask = nd_desc->cmd_mask;
 		dsm_mask = cmd_mask;
@@ -452,7 +472,13 @@
 	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
 		return -ENOTTY;
 
-	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
+	/*
+	 * Check for a valid command.  For ND_CMD_CALL, we also have to
+	 * make sure that the DSM function is supported.
+	 */
+	if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
+		return -ENOTTY;
+	else if (!test_bit(cmd, &cmd_mask))
 		return -ENOTTY;
 
 	in_obj.type = ACPI_TYPE_PACKAGE;
@@ -693,6 +719,7 @@
 	struct acpi_nfit_memory_map *memdev;
 	struct acpi_nfit_desc *acpi_desc;
 	struct nfit_mem *nfit_mem;
+	u16 physical_id;
 
 	mutex_lock(&acpi_desc_lock);
 	list_for_each_entry(acpi_desc, &acpi_descs, list) {
@@ -700,10 +727,11 @@
 		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
 			memdev = __to_nfit_memdev(nfit_mem);
 			if (memdev->device_handle == device_handle) {
+				*flags = memdev->flags;
+				physical_id = memdev->physical_id;
 				mutex_unlock(&acpi_desc->init_mutex);
 				mutex_unlock(&acpi_desc_lock);
-				*flags = memdev->flags;
-				return memdev->physical_id;
+				return physical_id;
 			}
 		}
 		mutex_unlock(&acpi_desc->init_mutex);
@@ -1764,6 +1792,13 @@
 		return 0;
 	}
 
+	/*
+	 * Function 0 is the command interrogation function; don't
+	 * export it to potential userspace use, so that it can be
+	 * used as an error value in acpi_nfit_ctl().
+	 */
+	dsm_mask &= ~1UL;
+
 	guid = to_nfit_uuid(nfit_mem->family);
 	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
 		if (acpi_check_dsm(adev_dimm->handle, guid,
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 8516760..0da58f0 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -147,9 +147,9 @@
 		{
 			struct acpi_srat_mem_affinity *p =
 			    (struct acpi_srat_mem_affinity *)header;
-			pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
-				 (unsigned long)p->base_address,
-				 (unsigned long)p->length,
+			pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
+				 (unsigned long long)p->base_address,
+				 (unsigned long long)p->length,
 				 p->proximity_domain,
 				 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
 				 "enabled" : "disabled",
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 316e551..bb5391f 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -27,8 +27,11 @@
 #define GPI1_LDO_ON		(3 << 0)
 #define GPI1_LDO_OFF		(4 << 0)
 
-#define AXP288_ADC_TS_PIN_GPADC	0xf2
-#define AXP288_ADC_TS_PIN_ON	0xf3
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK		GENMASK(1, 0)
+#define AXP288_ADC_TS_CURRENT_OFF			(0 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING		(1 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND		(2 << 0)
+#define AXP288_ADC_TS_CURRENT_ON			(3 << 0)
 
 static struct pmic_table power_table[] = {
 	{
@@ -211,22 +214,44 @@
  */
 static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
 {
+	int ret, adc_ts_pin_ctrl;
 	u8 buf[2];
-	int ret;
 
-	ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
-			   AXP288_ADC_TS_PIN_GPADC);
+	/*
+	 * The current-source used for the battery temp-sensor (TS) is shared
+	 * with the GPADC. For proper fuel-gauge and charger operation the TS
+	 * current-source needs to be permanently on. But to read the GPADC we
+	 * need to temporarily switch the TS current-source to ondemand, so
+	 * that the GPADC can use it; otherwise we always read an all-0 value.
+	 *
+	 * Note that the switch from on to ondemand is not necessary
+	 * when the TS current-source is off (this happens on devices which
+	 * do not use the TS-pin).
+	 */
+	ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl);
 	if (ret)
 		return ret;
 
-	/* After switching to the GPADC pin give things some time to settle */
-	usleep_range(6000, 10000);
+	if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+		ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
+					 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+					 AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
+		if (ret)
+			return ret;
+
+		/* Wait a bit after switching the current-source */
+		usleep_range(6000, 10000);
+	}
 
 	ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
 	if (ret == 0)
 		ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
 
-	regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
+	if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+		regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
+				   AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+				   AXP288_ADC_TS_CURRENT_ON);
+	}
 
 	return ret;
 }
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 1b475bc..665e93c 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -131,6 +131,23 @@
 	}
 }
 
+static bool acpi_power_resource_is_dup(union acpi_object *package,
+				       unsigned int start, unsigned int i)
+{
+	acpi_handle rhandle, dup;
+	unsigned int j;
+
+	/* The caller is expected to check the package element types */
+	rhandle = package->package.elements[i].reference.handle;
+	for (j = start; j < i; j++) {
+		dup = package->package.elements[j].reference.handle;
+		if (dup == rhandle)
+			return true;
+	}
+
+	return false;
+}
+
 int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
 				 struct list_head *list)
 {
@@ -150,6 +167,11 @@
 			err = -ENODEV;
 			break;
 		}
+
+		/* Some ACPI tables contain duplicate power resource references */
+		if (acpi_power_resource_is_dup(package, start, i))
+			continue;
+
 		err = acpi_add_power_resource(rhandle);
 		if (err)
 			break;
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 9d52743..c336784 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -148,6 +148,13 @@
 	}
 
 	switch (table->baud_rate) {
+	case 0:
+		/*
+		 * SPCR 1.04 defines 0 as a preconfigured state of UART.
+		 * Assume firmware or bootloader configures console correctly.
+		 */
+		baud_rate = 0;
+		break;
 	case 3:
 		baud_rate = 9600;
 		break;
@@ -196,6 +203,10 @@
 		 * UART so don't attempt to change to the baud rate state
 		 * in the table because driver cannot calculate the dividers
 		 */
+		baud_rate = 0;
+	}
+
+	if (!baud_rate) {
 		snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype,
 			 table->serial_port.address);
 	} else {
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 7690884..68fcda4 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -358,6 +358,7 @@
  * @min_priority:         minimum scheduling priority
  *                        (invariant after initialized)
  * @inherit_rt:           inherit RT scheduling policy from caller
+ * @txn_security_ctx:     require sender's security context
  *                        (invariant after initialized)
  * @async_todo:           list of async work items
  *                        (protected by @proc->inner_lock)
@@ -397,6 +398,7 @@
 		u8 sched_policy:2;
 		u8 inherit_rt:1;
 		u8 accept_fds:1;
+		u8 txn_security_ctx:1;
 		u8 min_priority;
 	};
 	bool has_async_transaction;
@@ -654,6 +656,7 @@
 	struct binder_priority	saved_priority;
 	bool    set_priority_called;
 	kuid_t	sender_euid;
+	binder_uintptr_t security_ctx;
 	/**
 	 * @lock:  protects @from, @to_proc, and @to_thread
 	 *
@@ -1363,6 +1366,7 @@
 	node->min_priority = to_kernel_prio(node->sched_policy, priority);
 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
 	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
+	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
 	spin_lock_init(&node->lock);
 	INIT_LIST_HEAD(&node->work.entry);
 	INIT_LIST_HEAD(&node->async_todo);
@@ -2900,6 +2904,8 @@
 	binder_size_t last_fixup_min_off = 0;
 	struct binder_context *context = proc->context;
 	int t_debug_id = atomic_inc_return(&binder_last_id);
+	char *secctx = NULL;
+	u32 secctx_sz = 0;
 
 	e = binder_transaction_log_add(&binder_transaction_log);
 	e->debug_id = t_debug_id;
@@ -3123,6 +3129,20 @@
 		t->priority = target_proc->default_priority;
 	}
 
+	if (target_node && target_node->txn_security_ctx) {
+		u32 secid;
+
+		security_task_getsecid(proc->tsk, &secid);
+		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
+		if (ret) {
+			return_error = BR_FAILED_REPLY;
+			return_error_param = ret;
+			return_error_line = __LINE__;
+			goto err_get_secctx_failed;
+		}
+		extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+	}
+
 	trace_binder_transaction(reply, t, target_node);
 
 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@@ -3139,6 +3159,19 @@
 		t->buffer = NULL;
 		goto err_binder_alloc_buf_failed;
 	}
+	if (secctx) {
+		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
+				    ALIGN(tr->offsets_size, sizeof(void *)) +
+				    ALIGN(extra_buffers_size, sizeof(void *)) -
+				    ALIGN(secctx_sz, sizeof(u64));
+		char *kptr = t->buffer->data + buf_offset;
+
+		t->security_ctx = (uintptr_t)kptr +
+		    binder_alloc_get_user_buffer_offset(&target_proc->alloc);
+		memcpy(kptr, secctx, secctx_sz);
+		security_release_secctx(secctx, secctx_sz);
+		secctx = NULL;
+	}
 	t->buffer->debug_id = t->debug_id;
 	t->buffer->transaction = t;
 	t->buffer->target_node = target_node;
@@ -3409,6 +3442,9 @@
 	t->buffer->transaction = NULL;
 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
 err_binder_alloc_buf_failed:
+	if (secctx)
+		security_release_secctx(secctx, secctx_sz);
+err_get_secctx_failed:
 	kfree(tcomplete);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 err_alloc_tcomplete_failed:
@@ -4055,11 +4091,13 @@
 
 	while (1) {
 		uint32_t cmd;
-		struct binder_transaction_data tr;
+		struct binder_transaction_data_secctx tr;
+		struct binder_transaction_data *trd = &tr.transaction_data;
 		struct binder_work *w = NULL;
 		struct list_head *list = NULL;
 		struct binder_transaction *t = NULL;
 		struct binder_thread *t_from;
+		size_t trsize = sizeof(*trd);
 
 		binder_inner_proc_lock(proc);
 		if (!binder_worklist_empty_ilocked(&thread->todo))
@@ -4255,41 +4293,47 @@
 			struct binder_node *target_node = t->buffer->target_node;
 			struct binder_priority node_prio;
 
-			tr.target.ptr = target_node->ptr;
-			tr.cookie =  target_node->cookie;
+			trd->target.ptr = target_node->ptr;
+			trd->cookie =  target_node->cookie;
 			node_prio.sched_policy = target_node->sched_policy;
 			node_prio.prio = target_node->min_priority;
 			binder_transaction_priority(current, t, node_prio,
 						    target_node->inherit_rt);
 			cmd = BR_TRANSACTION;
 		} else {
-			tr.target.ptr = 0;
-			tr.cookie = 0;
+			trd->target.ptr = 0;
+			trd->cookie = 0;
 			cmd = BR_REPLY;
 		}
-		tr.code = t->code;
-		tr.flags = t->flags;
-		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+		trd->code = t->code;
+		trd->flags = t->flags;
+		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
 
 		t_from = binder_get_txn_from(t);
 		if (t_from) {
 			struct task_struct *sender = t_from->proc->tsk;
 
-			tr.sender_pid = task_tgid_nr_ns(sender,
-							task_active_pid_ns(current));
+			trd->sender_pid =
+				task_tgid_nr_ns(sender,
+						task_active_pid_ns(current));
 		} else {
-			tr.sender_pid = 0;
+			trd->sender_pid = 0;
 		}
 
-		tr.data_size = t->buffer->data_size;
-		tr.offsets_size = t->buffer->offsets_size;
-		tr.data.ptr.buffer = (binder_uintptr_t)
+		trd->data_size = t->buffer->data_size;
+		trd->offsets_size = t->buffer->offsets_size;
+		trd->data.ptr.buffer = (binder_uintptr_t)
 			((uintptr_t)t->buffer->data +
 			binder_alloc_get_user_buffer_offset(&proc->alloc));
-		tr.data.ptr.offsets = tr.data.ptr.buffer +
+		trd->data.ptr.offsets = trd->data.ptr.buffer +
 					ALIGN(t->buffer->data_size,
 					    sizeof(void *));
 
+		tr.secctx = t->security_ctx;
+		if (t->security_ctx) {
+			cmd = BR_TRANSACTION_SEC_CTX;
+			trsize = sizeof(tr);
+		}
 		if (put_user(cmd, (uint32_t __user *)ptr)) {
 			if (t_from)
 				binder_thread_dec_tmpref(t_from);
@@ -4300,7 +4344,7 @@
 			return -EFAULT;
 		}
 		ptr += sizeof(uint32_t);
-		if (copy_to_user(ptr, &tr, sizeof(tr))) {
+		if (copy_to_user(ptr, &tr, trsize)) {
 			if (t_from)
 				binder_thread_dec_tmpref(t_from);
 
@@ -4309,7 +4353,7 @@
 
 			return -EFAULT;
 		}
-		ptr += sizeof(tr);
+		ptr += trsize;
 
 		trace_binder_transaction_received(t);
 		binder_stat_br(proc, thread, cmd);
@@ -4317,16 +4361,18 @@
 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
 			     proc->pid, thread->pid,
 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
-			     "BR_REPLY",
+				(cmd == BR_TRANSACTION_SEC_CTX) ?
+				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
 			     t->debug_id, t_from ? t_from->proc->pid : 0,
 			     t_from ? t_from->pid : 0, cmd,
 			     t->buffer->data_size, t->buffer->offsets_size,
-			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
+			     (u64)trd->data.ptr.buffer,
+			     (u64)trd->data.ptr.offsets);
 
 		if (t_from)
 			binder_thread_dec_tmpref(t_from);
 		t->buffer->allow_user_free = 1;
-		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
 			binder_inner_proc_lock(thread->proc);
 			t->to_parent = thread->transaction_stack;
 			t->to_thread = thread;
@@ -4671,7 +4717,8 @@
 	return ret;
 }
 
-static int binder_ioctl_set_ctx_mgr(struct file *filp)
+static int binder_ioctl_set_ctx_mgr(struct file *filp,
+				    struct flat_binder_object *fbo)
 {
 	int ret = 0;
 	struct binder_proc *proc = filp->private_data;
@@ -4700,7 +4747,7 @@
 	} else {
 		context->binder_context_mgr_uid = curr_euid;
 	}
-	new_node = binder_new_node(proc, NULL);
+	new_node = binder_new_node(proc, fbo);
 	if (!new_node) {
 		ret = -ENOMEM;
 		goto out;
@@ -4823,8 +4870,20 @@
 		binder_inner_proc_unlock(proc);
 		break;
 	}
+	case BINDER_SET_CONTEXT_MGR_EXT: {
+		struct flat_binder_object fbo;
+
+		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
+			ret = -EINVAL;
+			goto err;
+		}
+		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
+		if (ret)
+			goto err;
+		break;
+	}
 	case BINDER_SET_CONTEXT_MGR:
-		ret = binder_ioctl_set_ctx_mgr(filp);
+		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
 		if (ret)
 			goto err;
 		break;
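On the userspace side, the new ioctl is how a context manager opts in to receiving sender security contexts, assuming the matching uapi additions for BINDER_SET_CONTEXT_MGR_EXT and FLAT_BINDER_FLAG_TXN_SECURITY_CTX. A hedged sketch of the registration call:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/android/binder.h>

	/*
	 * Illustration: register as context manager and ask the driver to
	 * attach the sender's security context to incoming transactions,
	 * which then arrive as BR_TRANSACTION_SEC_CTX with
	 * binder_transaction_data_secctx.secctx pointing at the context
	 * string appended to the transaction buffer.
	 */
	static int become_context_manager(int binder_fd)
	{
		struct flat_binder_object obj;

		memset(&obj, 0, sizeof(obj));
		obj.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
		return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &obj);
	}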
@@ -5474,6 +5533,9 @@
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
 		struct binder_node *node = rb_entry(n, struct binder_node,
 						    rb_node);
+		if (!print_all && !node->has_async_transaction)
+			continue;
+
 		/*
 		 * take a temporary reference on the node so it
 		 * survives and isn't removed from the tree
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b8c3f9e..adf2878 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4554,6 +4554,7 @@
 	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
 	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
 	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
+	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
 
 	/* devices that don't properly handle queued TRIM commands */
 	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 10ecb232..03867f5 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -895,7 +895,9 @@
 	int ret = 0;
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0)
+	if (irq < 0)
+		return irq;
+	if (!irq)
 		return -EINVAL;
 
 	priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL);
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 29f102d..329ce90 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -717,7 +717,7 @@
 			instead of '/ 512', use '>> 9' to prevent a call
 			to divdu3 on x86 platforms
 		*/
-		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
+		rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
 
 		if (rate_cps < 10)
 			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 81c22d2..60e0b77 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -538,6 +538,9 @@
 	}
 	case 'x':	/* gotoxy : LxXXX[yYYY]; */
 	case 'y':	/* gotoxy : LyYYY[xXXX]; */
+		if (priv->esc_seq.buf[priv->esc_seq.len - 1] != ';')
+			break;
+
 		/* If the command is valid, move to the new address */
 		if (parse_xy(esc, &priv->addr.x, &priv->addr.y))
 			charlcd_gotoxy(lcd);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 8bfd27e..e06a579 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -31,6 +31,9 @@
 
 #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
 
+#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
+	struct driver_attribute driver_attr_##_name =		\
+		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
 
 static int __must_check bus_rescan_devices_helper(struct device *dev,
 						void *data);
@@ -195,7 +198,7 @@
 	bus_put(bus);
 	return err;
 }
-static DRIVER_ATTR_WO(unbind);
+static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
 
 /*
  * Manually attach a device to a driver.
@@ -231,7 +234,7 @@
 	bus_put(bus);
 	return err;
 }
-static DRIVER_ATTR_WO(bind);
+static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
 
 static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
 {
@@ -611,8 +614,10 @@
 static ssize_t uevent_store(struct device_driver *drv, const char *buf,
 			    size_t count)
 {
-	kobject_synth_uevent(&drv->p->kobj, buf, count);
-	return count;
+	int rc;
+
+	rc = kobject_synth_uevent(&drv->p->kobj, buf, count);
+	return rc ? rc : count;
 }
 static DRIVER_ATTR_WO(uevent);
 
@@ -828,8 +833,10 @@
 static ssize_t bus_uevent_store(struct bus_type *bus,
 				const char *buf, size_t count)
 {
-	kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
-	return count;
+	int rc;
+
+	rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
+	return rc ? rc : count;
 }
 static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
 
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 5d5b598..dd6a685 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -79,8 +79,7 @@
 	ct_idx = get_cacheinfo_idx(this_leaf->type);
 	propname = cache_type_info[ct_idx].size_prop;
 
-	if (of_property_read_u32(np, propname, &this_leaf->size))
-		this_leaf->size = 0;
+	of_property_read_u32(np, propname, &this_leaf->size);
 }
 
 /* not cache_line_size() because that's a macro in include/linux/cache.h */
@@ -114,8 +113,7 @@
 	ct_idx = get_cacheinfo_idx(this_leaf->type);
 	propname = cache_type_info[ct_idx].nr_sets_prop;
 
-	if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
-		this_leaf->number_of_sets = 0;
+	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
 }
 
 static void cache_associativity(struct cacheinfo *this_leaf)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index e7662d1..40c4d48 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1072,8 +1072,14 @@
 static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	if (kobject_synth_uevent(&dev->kobj, buf, count))
+	int rc;
+
+	rc = kobject_synth_uevent(&dev->kobj, buf, count);
+
+	if (rc) {
 		dev_err(dev, "uevent: failed to send synthetic uevent\n");
+		return rc;
+	}
 
 	return count;
 }
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index c7aed86..0ef8197 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -211,6 +211,59 @@
 
 #endif
 
+static ssize_t show_sched_load_boost(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	ssize_t rc;
+	unsigned int boost;
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int cpuid = cpu->dev.id;
+
+	boost = per_cpu(sched_load_boost, cpuid);
+	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", boost);
+
+	return rc;
+}
+
+static ssize_t __ref store_sched_load_boost(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int err;
+	int boost;
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int cpuid = cpu->dev.id;
+
+	err = kstrtoint(strstrip((char *)buf), 0, &boost);
+	if (err)
+		return err;
+
+	/*
+	 * -100 is low enough to cancel out the CPU's load and make it near zero.
+	 * 1000 is close to the maximum value that cpu_util_freq_{walt,pelt}
+	 * can take without overflow.
+	 */
+	if (boost < -100 || boost > 1000)
+		return -EINVAL;
+
+	per_cpu(sched_load_boost, cpuid) = boost;
+
+	return count;
+}
+
+static DEVICE_ATTR(sched_load_boost, 0644,
+		   show_sched_load_boost,
+		   store_sched_load_boost);
+
+static struct attribute *sched_cpu_attrs[] = {
+	&dev_attr_sched_load_boost.attr,
+	NULL
+};
+
+static struct attribute_group sched_cpu_attr_group = {
+	.attrs = sched_cpu_attrs,
+};
+
 static const struct attribute_group *common_cpu_attr_groups[] = {
 #ifdef CONFIG_KEXEC
 	&crash_note_cpu_attr_group,
@@ -218,6 +271,7 @@
 #ifdef CONFIG_HOTPLUG_CPU
 	&cpu_isolated_attr_group,
 #endif
+	&sched_cpu_attr_group,
 	NULL
 };
 
@@ -228,6 +282,7 @@
 #ifdef CONFIG_HOTPLUG_CPU
 	&cpu_isolated_attr_group,
 #endif
+	&sched_cpu_attr_group,
 	NULL
 };
 
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 0bf3f75..e378af5 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -947,16 +947,13 @@
 
 	drv = dev->driver;
 	if (drv) {
-		if (driver_allows_async_probing(drv))
-			async_synchronize_full();
-
 		while (device_links_busy(dev)) {
 			device_unlock(dev);
-			if (parent)
+			if (parent && dev->bus->need_parent_lock)
 				device_unlock(parent);
 
 			device_links_unbind_consumers(dev);
-			if (parent)
+			if (parent && dev->bus->need_parent_lock)
 				device_lock(parent);
 
 			device_lock(dev);
@@ -987,9 +984,9 @@
 			drv->remove(dev);
 
 		device_links_driver_cleanup(dev);
-		dma_deconfigure(dev);
 
 		devres_release_all(dev);
+		dma_deconfigure(dev);
 		dev->driver = NULL;
 		dev_set_drvdata(dev, NULL);
 		if (dev->pm_domain && dev->pm_domain->dismiss)
@@ -1055,6 +1052,9 @@
 	struct device_private *dev_prv;
 	struct device *dev;
 
+	if (driver_allows_async_probing(drv))
+		async_synchronize_full();
+
 	for (;;) {
 		spin_lock(&drv->p->klist_devices.k_lock);
 		if (list_empty(&drv->p->klist_devices.k_list)) {
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index f98a097..d68b52c 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -24,8 +24,14 @@
 
 struct devres {
 	struct devres_node		node;
-	/* -- 3 pointers */
-	unsigned long long		data[];	/* guarantee ull alignment */
+	/*
+	 * Some archs want to perform DMA into kmalloc caches
+	 * and need a guaranteed alignment larger than
+	 * the alignment of a 64-bit integer.
+	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
+	 * buffer alignment as if it were allocated by plain kmalloc().
+	 */
+	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
 };
 
 struct devres_group {
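The same trick generalizes to any container that hands out its trailing flexible array as if it came straight from kmalloc(): align the array member explicitly instead of relying on a u64 member for alignment. A minimal sketch with a hypothetical wrapper:

	#include <linux/slab.h>
	#include <linux/list.h>

	/*
	 * Hypothetical wrapper: `data` gets the same minimum alignment a bare
	 * kmalloc() would provide, so callers may DMA into it on archs where
	 * ARCH_KMALLOC_MINALIGN exceeds the alignment of a 64-bit integer.
	 */
	struct my_res {
		struct list_head node;
		u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
	};

	static void *my_res_alloc(size_t size, gfp_t gfp)
	{
		struct my_res *r = kzalloc(sizeof(*r) + size, gfp);

		return r ? r->data : NULL;
	}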
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index a2e59a9..b4a1e88 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -498,12 +498,12 @@
 }
 #endif
 
-static DEVICE_ATTR_RO(phys_index, 0444, phys_index_show, NULL);
+static DEVICE_ATTR(phys_index, 0444, phys_index_show, NULL);
 static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
 static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
 static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
 #ifdef CONFIG_MEMORY_HOTPLUG
-static DEVICE_ATTR_RO(allocated_bytes, 0444, allocated_bytes_show, NULL);
+static DEVICE_ATTR(allocated_bytes, 0444, allocated_bytes_show, NULL);
 #endif
 
 /*
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 60d6cc6..6d54905 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -366,14 +366,16 @@
 			      unsigned int nvec)
 {
 	struct platform_msi_priv_data *data = domain->host_data;
-	struct msi_desc *desc;
-	for_each_msi_entry(desc, data->dev) {
+	struct msi_desc *desc, *tmp;
+	for_each_msi_entry_safe(desc, tmp, data->dev) {
 		if (WARN_ON(!desc->irq || desc->nvec_used != 1))
 			return;
 		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
 			continue;
 
 		irq_domain_free_irqs_common(domain, desc->irq, 1);
+		list_del(&desc->list);
+		free_msi_entry(desc);
 	}
 }
 
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index d15703b1f..7145031 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -668,14 +668,15 @@
 		if (rv == SS_TWO_PRIMARIES) {
 			/* Maybe the peer is detected as dead very soon...
 			   retry at most once more in this case. */
-			int timeo;
-			rcu_read_lock();
-			nc = rcu_dereference(connection->net_conf);
-			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
-			rcu_read_unlock();
-			schedule_timeout_interruptible(timeo);
-			if (try < max_tries)
+			if (try < max_tries) {
+				int timeo;
 				try = max_tries - 1;
+				rcu_read_lock();
+				nc = rcu_dereference(connection->net_conf);
+				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+				rcu_read_unlock();
+				schedule_timeout_interruptible(timeo);
+			}
 			continue;
 		}
 		if (rv < SS_SUCCESS) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index fc67fd8..81f9bd6 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3364,7 +3364,7 @@
 	enum drbd_conns rv = C_MASK;
 	enum drbd_disk_state mydisk;
 	struct net_conf *nc;
-	int hg, rule_nr, rr_conflict, tentative;
+	int hg, rule_nr, rr_conflict, tentative, always_asbp;
 
 	mydisk = device->state.disk;
 	if (mydisk == D_NEGOTIATING)
@@ -3415,8 +3415,12 @@
 
 	rcu_read_lock();
 	nc = rcu_dereference(peer_device->connection->net_conf);
+	always_asbp = nc->always_asbp;
+	rr_conflict = nc->rr_conflict;
+	tentative = nc->tentative;
+	rcu_read_unlock();
 
-	if (hg == 100 || (hg == -100 && nc->always_asbp)) {
+	if (hg == 100 || (hg == -100 && always_asbp)) {
 		int pcount = (device->state.role == R_PRIMARY)
 			   + (peer_role == R_PRIMARY);
 		int forced = (hg == -100);
@@ -3455,9 +3459,6 @@
 			     "Sync from %s node\n",
 			     (hg < 0) ? "peer" : "this");
 	}
-	rr_conflict = nc->rr_conflict;
-	tentative = nc->tentative;
-	rcu_read_unlock();
 
 	if (hg == -100) {
 		/* FIXME this log message is not correct if we end up here
@@ -4142,7 +4143,7 @@
 	kfree(device->p_uuid);
 	device->p_uuid = p_uuid;
 
-	if (device->state.conn < C_CONNECTED &&
+	if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
 	    device->state.disk < D_INCONSISTENT &&
 	    device->state.role == R_PRIMARY &&
 	    (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ea9debf..c9c2bcc 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -83,7 +83,7 @@
 #include <linux/uaccess.h>
 
 static DEFINE_IDR(loop_index_idr);
-static DEFINE_MUTEX(loop_index_mutex);
+static DEFINE_MUTEX(loop_ctl_mutex);
 
 static int max_part;
 static int part_shift;
@@ -631,18 +631,7 @@
 {
 	int rc;
 
-	/*
-	 * bd_mutex has been held already in release path, so don't
-	 * acquire it if this function is called in such case.
-	 *
-	 * If the reread partition isn't from release path, lo_refcnt
-	 * must be at least one and it can only become zero when the
-	 * current holder is released.
-	 */
-	if (!atomic_read(&lo->lo_refcnt))
-		rc = __blkdev_reread_part(bdev);
-	else
-		rc = blkdev_reread_part(bdev);
+	rc = blkdev_reread_part(bdev);
 	if (rc)
 		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
 			__func__, lo->lo_number, lo->lo_file_name, rc);
@@ -689,26 +678,30 @@
 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 			  unsigned int arg)
 {
-	struct file	*file, *old_file;
+	struct file	*file = NULL, *old_file;
 	int		error;
+	bool		partscan;
 
+	error = mutex_lock_killable(&loop_ctl_mutex);
+	if (error)
+		return error;
 	error = -ENXIO;
 	if (lo->lo_state != Lo_bound)
-		goto out;
+		goto out_err;
 
 	/* the loop device has to be read-only */
 	error = -EINVAL;
 	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
-		goto out;
+		goto out_err;
 
 	error = -EBADF;
 	file = fget(arg);
 	if (!file)
-		goto out;
+		goto out_err;
 
 	error = loop_validate_file(file, bdev);
 	if (error)
-		goto out_putf;
+		goto out_err;
 
 	old_file = lo->lo_backing_file;
 
@@ -716,7 +709,7 @@
 
 	/* size of the new backing store needs to be the same */
 	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
-		goto out_putf;
+		goto out_err;
 
 	/* and ... switch */
 	blk_mq_freeze_queue(lo->lo_queue);
@@ -727,15 +720,22 @@
 			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 	loop_update_dio(lo);
 	blk_mq_unfreeze_queue(lo->lo_queue);
-
+	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
+	mutex_unlock(&loop_ctl_mutex);
+	/*
+	 * We must drop the file reference outside of loop_ctl_mutex, as
+	 * dropping it can take bd_mutex, which creates a circular locking
+	 * dependency.
+	 */
 	fput(old_file);
-	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
+	if (partscan)
 		loop_reread_partitions(lo, bdev);
 	return 0;
 
- out_putf:
-	fput(file);
- out:
+out_err:
+	mutex_unlock(&loop_ctl_mutex);
+	if (file)
+		fput(file);
 	return error;
 }
 
@@ -910,6 +910,7 @@
 	int		lo_flags = 0;
 	int		error;
 	loff_t		size;
+	bool		partscan;
 
 	/* This is safe, since we have a reference from open(). */
 	__module_get(THIS_MODULE);
@@ -919,13 +920,17 @@
 	if (!file)
 		goto out;
 
+	error = mutex_lock_killable(&loop_ctl_mutex);
+	if (error)
+		goto out_putf;
+
 	error = -EBUSY;
 	if (lo->lo_state != Lo_unbound)
-		goto out_putf;
+		goto out_unlock;
 
 	error = loop_validate_file(file, bdev);
 	if (error)
-		goto out_putf;
+		goto out_unlock;
 
 	mapping = file->f_mapping;
 	inode = mapping->host;
@@ -937,10 +942,10 @@
 	error = -EFBIG;
 	size = get_loop_size(lo, file);
 	if ((loff_t)(sector_t)size != size)
-		goto out_putf;
+		goto out_unlock;
 	error = loop_prepare_queue(lo);
 	if (error)
-		goto out_putf;
+		goto out_unlock;
 
 	error = 0;
 
@@ -972,18 +977,22 @@
 	lo->lo_state = Lo_bound;
 	if (part_shift)
 		lo->lo_flags |= LO_FLAGS_PARTSCAN;
-	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
-		loop_reread_partitions(lo, bdev);
+	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
 
 	/* Grab the block_device to prevent its destruction after we
-	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
+	 * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
 	 */
 	bdgrab(bdev);
+	mutex_unlock(&loop_ctl_mutex);
+	if (partscan)
+		loop_reread_partitions(lo, bdev);
 	return 0;
 
- out_putf:
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
+out_putf:
 	fput(file);
- out:
+out:
 	/* This is safe: open() is still holding a reference. */
 	module_put(THIS_MODULE);
 	return error;
@@ -1026,39 +1035,31 @@
 	return err;
 }
 
-static int loop_clr_fd(struct loop_device *lo)
+static int __loop_clr_fd(struct loop_device *lo, bool release)
 {
-	struct file *filp = lo->lo_backing_file;
+	struct file *filp = NULL;
 	gfp_t gfp = lo->old_gfp_mask;
 	struct block_device *bdev = lo->lo_device;
+	int err = 0;
+	bool partscan = false;
+	int lo_number;
 
-	if (lo->lo_state != Lo_bound)
-		return -ENXIO;
-
-	/*
-	 * If we've explicitly asked to tear down the loop device,
-	 * and it has an elevated reference count, set it for auto-teardown when
-	 * the last reference goes away. This stops $!~#$@ udev from
-	 * preventing teardown because it decided that it needs to run blkid on
-	 * the loopback device whenever they appear. xfstests is notorious for
-	 * failing tests because blkid via udev races with a losetup
-	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
-	 * command to fail with EBUSY.
-	 */
-	if (atomic_read(&lo->lo_refcnt) > 1) {
-		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
-		mutex_unlock(&lo->lo_ctl_mutex);
-		return 0;
+	mutex_lock(&loop_ctl_mutex);
+	if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
+		err = -ENXIO;
+		goto out_unlock;
 	}
 
-	if (filp == NULL)
-		return -EINVAL;
+	filp = lo->lo_backing_file;
+	if (filp == NULL) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
 
 	/* freeze request queue during the transition */
 	blk_mq_freeze_queue(lo->lo_queue);
 
 	spin_lock_irq(&lo->lo_lock);
-	lo->lo_state = Lo_rundown;
 	lo->lo_backing_file = NULL;
 	spin_unlock_irq(&lo->lo_lock);
 
@@ -1094,21 +1095,73 @@
 	module_put(THIS_MODULE);
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
-	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
-		loop_reread_partitions(lo, bdev);
+	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
+	lo_number = lo->lo_number;
 	lo->lo_flags = 0;
 	if (!part_shift)
 		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
 	loop_unprepare_queue(lo);
-	mutex_unlock(&lo->lo_ctl_mutex);
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
+	if (partscan) {
+		/*
+		 * bd_mutex has been held already in release path, so don't
+		 * acquire it if this function is called in such case.
+		 *
+		 * If the reread partition isn't from release path, lo_refcnt
+		 * must be at least one and it can only become zero when the
+		 * current holder is released.
+		 */
+		if (release)
+			err = __blkdev_reread_part(bdev);
+		else
+			err = blkdev_reread_part(bdev);
+		pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
+			__func__, lo_number, err);
+		/* Device is gone, no point in returning error */
+		err = 0;
+	}
 	/*
-	 * Need not hold lo_ctl_mutex to fput backing file.
-	 * Calling fput holding lo_ctl_mutex triggers a circular
+	 * Need not hold loop_ctl_mutex to fput backing file.
+	 * Calling fput holding loop_ctl_mutex triggers a circular
 	 * lock dependency possibility warning as fput can take
-	 * bd_mutex which is usually taken before lo_ctl_mutex.
+	 * bd_mutex which is usually taken before loop_ctl_mutex.
 	 */
-	fput(filp);
-	return 0;
+	if (filp)
+		fput(filp);
+	return err;
+}
+
+static int loop_clr_fd(struct loop_device *lo)
+{
+	int err;
+
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
+	if (lo->lo_state != Lo_bound) {
+		mutex_unlock(&loop_ctl_mutex);
+		return -ENXIO;
+	}
+	/*
+	 * If we've explicitly asked to tear down the loop device,
+	 * and it has an elevated reference count, set it for auto-teardown when
+	 * the last reference goes away. This stops $!~#$@ udev from
+	 * preventing teardown because it decided that it needs to run blkid on
+	 * the loopback device whenever they appear. xfstests is notorious for
+	 * failing tests because blkid via udev races with a losetup
+	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
+	 * command to fail with EBUSY.
+	 */
+	if (atomic_read(&lo->lo_refcnt) > 1) {
+		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+		mutex_unlock(&loop_ctl_mutex);
+		return 0;
+	}
+	lo->lo_state = Lo_rundown;
+	mutex_unlock(&loop_ctl_mutex);
+
+	return __loop_clr_fd(lo, false);
 }
 
 static int
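
For illustration of the new teardown flow — loop_clr_fd() now only flips the state to Lo_rundown under loop_ctl_mutex, while __loop_clr_fd() performs the actual teardown without holding the mutex across fput() and the partition rescan — here is a minimal userspace sketch (not part of this patch; it assumes /dev/loop0 is already bound to a backing file):

#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int a = open("/dev/loop0", O_RDWR);
	int b = open("/dev/loop0", O_RDWR);	/* second reference */

	if (a < 0 || b < 0) {
		perror("open");
		return 1;
	}
	/*
	 * With two open fds, lo_refcnt > 1: the kernel only sets
	 * LO_FLAGS_AUTOCLEAR and returns 0 here; the real teardown
	 * runs from lo_release() when the last reference goes away.
	 */
	if (ioctl(a, LOOP_CLR_FD) < 0)
		perror("LOOP_CLR_FD");
	close(a);
	close(b);	/* last close: __loop_clr_fd(lo, true) */
	return 0;
}

The second descriptor keeps lo_refcnt above one, so the ioctl takes the LO_FLAGS_AUTOCLEAR early-return path above rather than tearing the device down inline.
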
@@ -1117,47 +1170,72 @@
 	int err;
 	struct loop_func_table *xfer;
 	kuid_t uid = current_uid();
+	struct block_device *bdev;
+	bool partscan = false;
 
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
 	if (lo->lo_encrypt_key_size &&
 	    !uid_eq(lo->lo_key_owner, uid) &&
-	    !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	if (lo->lo_state != Lo_bound)
-		return -ENXIO;
-	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
-		return -EINVAL;
+	    !capable(CAP_SYS_ADMIN)) {
+		err = -EPERM;
+		goto out_unlock;
+	}
+	if (lo->lo_state != Lo_bound) {
+		err = -ENXIO;
+		goto out_unlock;
+	}
+	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (lo->lo_offset != info->lo_offset ||
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		sync_blockdev(lo->lo_device);
+		kill_bdev(lo->lo_device);
+	}
 
 	/* I/O need to be drained during transfer transition */
 	blk_mq_freeze_queue(lo->lo_queue);
 
 	err = loop_release_xfer(lo);
 	if (err)
-		goto exit;
+		goto out_unfreeze;
 
 	if (info->lo_encrypt_type) {
 		unsigned int type = info->lo_encrypt_type;
 
 		if (type >= MAX_LO_CRYPT) {
 			err = -EINVAL;
-			goto exit;
+			goto out_unfreeze;
 		}
 		xfer = xfer_funcs[type];
 		if (xfer == NULL) {
 			err = -EINVAL;
-			goto exit;
+			goto out_unfreeze;
 		}
 	} else
 		xfer = NULL;
 
 	err = loop_init_xfer(lo, xfer, info);
 	if (err)
-		goto exit;
+		goto out_unfreeze;
 
 	if (lo->lo_offset != info->lo_offset ||
 	    lo->lo_sizelimit != info->lo_sizelimit) {
+		/* kill_bdev should have truncated all the pages */
+		if (lo->lo_device->bd_inode->i_mapping->nrpages) {
+			err = -EAGAIN;
+			pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+				__func__, lo->lo_number, lo->lo_file_name,
+				lo->lo_device->bd_inode->i_mapping->nrpages);
+			goto out_unfreeze;
+		}
 		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
 			err = -EFBIG;
-			goto exit;
+			goto out_unfreeze;
 		}
 	}
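
Because loop_set_status() now syncs and kills the block device's page cache before freezing the queue, and fails with -EAGAIN if pages were redirtied in between, callers may want a retry loop. A hedged userspace sketch (not part of this patch; the 1 MiB offset is a placeholder value):

#include <errno.h>
#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct loop_info64 info;
	int fd = open("/dev/loop0", O_RDWR);
	int tries = 5;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&info, 0, sizeof(info));
	info.lo_offset = 1 << 20;	/* illustrative: move data start by 1 MiB */
	while (ioctl(fd, LOOP_SET_STATUS64, &info) < 0) {
		if (errno != EAGAIN || --tries == 0) {
			perror("LOOP_SET_STATUS64");
			return 1;
		}
		fsync(fd);	/* push dirty pages, then retry */
	}
	close(fd);
	return 0;
}
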
 
@@ -1189,15 +1267,20 @@
 	/* update dio if lo_offset or transfer is changed */
 	__loop_update_dio(lo, lo->use_dio);
 
- exit:
+out_unfreeze:
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
 	if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
 	     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
 		lo->lo_flags |= LO_FLAGS_PARTSCAN;
 		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
-		loop_reread_partitions(lo, lo->lo_device);
+		bdev = lo->lo_device;
+		partscan = true;
 	}
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
+	if (partscan)
+		loop_reread_partitions(lo, bdev);
 
 	return err;
 }
@@ -1205,12 +1288,15 @@
 static int
 loop_get_status(struct loop_device *lo, struct loop_info64 *info)
 {
-	struct file *file;
+	struct path path;
 	struct kstat stat;
 	int ret;
 
+	ret = mutex_lock_killable(&loop_ctl_mutex);
+	if (ret)
+		return ret;
 	if (lo->lo_state != Lo_bound) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+		mutex_unlock(&loop_ctl_mutex);
 		return -ENXIO;
 	}
 
@@ -1229,17 +1315,17 @@
 		       lo->lo_encrypt_key_size);
 	}
 
-	/* Drop lo_ctl_mutex while we call into the filesystem. */
-	file = get_file(lo->lo_backing_file);
-	mutex_unlock(&lo->lo_ctl_mutex);
-	ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
-			  AT_STATX_SYNC_AS_STAT);
+	/* Drop loop_ctl_mutex while we call into the filesystem. */
+	path = lo->lo_backing_file->f_path;
+	path_get(&path);
+	mutex_unlock(&loop_ctl_mutex);
+	ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
 	if (!ret) {
 		info->lo_device = huge_encode_dev(stat.dev);
 		info->lo_inode = stat.ino;
 		info->lo_rdevice = huge_encode_dev(stat.rdev);
 	}
-	fput(file);
+	path_put(&path);
 	return ret;
 }
 
@@ -1323,10 +1409,8 @@
 	struct loop_info64 info64;
 	int err;
 
-	if (!arg) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+	if (!arg)
 		return -EINVAL;
-	}
 	err = loop_get_status(lo, &info64);
 	if (!err)
 		err = loop_info64_to_old(&info64, &info);
@@ -1341,10 +1425,8 @@
 	struct loop_info64 info64;
 	int err;
 
-	if (!arg) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+	if (!arg)
 		return -EINVAL;
-	}
 	err = loop_get_status(lo, &info64);
 	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
 		err = -EFAULT;
@@ -1376,22 +1458,64 @@
 
 static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
 {
+	int err = 0;
+
 	if (lo->lo_state != Lo_bound)
 		return -ENXIO;
 
 	if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
 		return -EINVAL;
 
+	if (lo->lo_queue->limits.logical_block_size != arg) {
+		sync_blockdev(lo->lo_device);
+		kill_bdev(lo->lo_device);
+	}
+
 	blk_mq_freeze_queue(lo->lo_queue);
 
+	/* kill_bdev should have truncated all the pages */
+	if (lo->lo_queue->limits.logical_block_size != arg &&
+			lo->lo_device->bd_inode->i_mapping->nrpages) {
+		err = -EAGAIN;
+		pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+			__func__, lo->lo_number, lo->lo_file_name,
+			lo->lo_device->bd_inode->i_mapping->nrpages);
+		goto out_unfreeze;
+	}
+
 	blk_queue_logical_block_size(lo->lo_queue, arg);
 	blk_queue_physical_block_size(lo->lo_queue, arg);
 	blk_queue_io_min(lo->lo_queue, arg);
 	loop_update_dio(lo);
-
+out_unfreeze:
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
-	return 0;
+	return err;
+}
+
+static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
+			   unsigned long arg)
+{
+	int err;
+
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
+	switch (cmd) {
+	case LOOP_SET_CAPACITY:
+		err = loop_set_capacity(lo);
+		break;
+	case LOOP_SET_DIRECT_IO:
+		err = loop_set_dio(lo, arg);
+		break;
+	case LOOP_SET_BLOCK_SIZE:
+		err = loop_set_block_size(lo, arg);
+		break;
+	default:
+		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+	}
+	mutex_unlock(&loop_ctl_mutex);
+	return err;
 }
 
 static int lo_ioctl(struct block_device *bdev, fmode_t mode,
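
lo_simple_ioctl() above now takes loop_ctl_mutex itself, so loop_set_block_size() runs fully serialized against the other loop ioctls. A small caller sketch (not part of this patch; the fallback define is only for older UAPI headers, and -EAGAIN indicates the post-freeze page-cache recheck found dirty pages):

#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef LOOP_SET_BLOCK_SIZE
#define LOOP_SET_BLOCK_SIZE 0x4C09	/* older UAPI headers may lack it */
#endif

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Must be a power of two in [512, PAGE_SIZE], else -EINVAL. */
	if (ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096L) < 0)
		perror("LOOP_SET_BLOCK_SIZE");	/* EAGAIN: sync and retry */
	close(fd);
	return 0;
}
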
@@ -1400,64 +1524,42 @@
 	struct loop_device *lo = bdev->bd_disk->private_data;
 	int err;
 
-	err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1);
-	if (err)
-		goto out_unlocked;
-
 	switch (cmd) {
 	case LOOP_SET_FD:
-		err = loop_set_fd(lo, mode, bdev, arg);
-		break;
+		return loop_set_fd(lo, mode, bdev, arg);
 	case LOOP_CHANGE_FD:
-		err = loop_change_fd(lo, bdev, arg);
-		break;
+		return loop_change_fd(lo, bdev, arg);
 	case LOOP_CLR_FD:
-		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
-		err = loop_clr_fd(lo);
-		if (!err)
-			goto out_unlocked;
-		break;
+		return loop_clr_fd(lo);
 	case LOOP_SET_STATUS:
 		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
 			err = loop_set_status_old(lo,
 					(struct loop_info __user *)arg);
+		}
 		break;
 	case LOOP_GET_STATUS:
-		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
-		/* loop_get_status() unlocks lo_ctl_mutex */
-		goto out_unlocked;
+		return loop_get_status_old(lo, (struct loop_info __user *) arg);
 	case LOOP_SET_STATUS64:
 		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
 			err = loop_set_status64(lo,
 					(struct loop_info64 __user *) arg);
+		}
 		break;
 	case LOOP_GET_STATUS64:
-		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
-		/* loop_get_status() unlocks lo_ctl_mutex */
-		goto out_unlocked;
+		return loop_get_status64(lo, (struct loop_info64 __user *) arg);
 	case LOOP_SET_CAPACITY:
-		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_capacity(lo);
-		break;
 	case LOOP_SET_DIRECT_IO:
-		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_dio(lo, arg);
-		break;
 	case LOOP_SET_BLOCK_SIZE:
-		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_block_size(lo, arg);
-		break;
+		if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		/* Fall through */
 	default:
-		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+		err = lo_simple_ioctl(lo, cmd, arg);
+		break;
 	}
-	mutex_unlock(&lo->lo_ctl_mutex);
 
-out_unlocked:
 	return err;
 }
 
@@ -1571,10 +1673,8 @@
 	struct loop_info64 info64;
 	int err;
 
-	if (!arg) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+	if (!arg)
 		return -EINVAL;
-	}
 	err = loop_get_status(lo, &info64);
 	if (!err)
 		err = loop_info64_to_compat(&info64, arg);
@@ -1589,20 +1689,12 @@
 
 	switch(cmd) {
 	case LOOP_SET_STATUS:
-		err = mutex_lock_killable(&lo->lo_ctl_mutex);
-		if (!err) {
-			err = loop_set_status_compat(lo,
-						     (const struct compat_loop_info __user *)arg);
-			mutex_unlock(&lo->lo_ctl_mutex);
-		}
+		err = loop_set_status_compat(lo,
+			     (const struct compat_loop_info __user *)arg);
 		break;
 	case LOOP_GET_STATUS:
-		err = mutex_lock_killable(&lo->lo_ctl_mutex);
-		if (!err) {
-			err = loop_get_status_compat(lo,
-						     (struct compat_loop_info __user *)arg);
-			/* loop_get_status() unlocks lo_ctl_mutex */
-		}
+		err = loop_get_status_compat(lo,
+				     (struct compat_loop_info __user *)arg);
 		break;
 	case LOOP_SET_CAPACITY:
 	case LOOP_CLR_FD:
@@ -1626,9 +1718,11 @@
 static int lo_open(struct block_device *bdev, fmode_t mode)
 {
 	struct loop_device *lo;
-	int err = 0;
+	int err;
 
-	mutex_lock(&loop_index_mutex);
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
 	lo = bdev->bd_disk->private_data;
 	if (!lo) {
 		err = -ENXIO;
@@ -1637,26 +1731,30 @@
 
 	atomic_inc(&lo->lo_refcnt);
 out:
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 	return err;
 }
 
-static void __lo_release(struct loop_device *lo)
+static void lo_release(struct gendisk *disk, fmode_t mode)
 {
-	int err;
+	struct loop_device *lo;
 
+	mutex_lock(&loop_ctl_mutex);
+	lo = disk->private_data;
 	if (atomic_dec_return(&lo->lo_refcnt))
-		return;
+		goto out_unlock;
 
-	mutex_lock(&lo->lo_ctl_mutex);
 	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+		if (lo->lo_state != Lo_bound)
+			goto out_unlock;
+		lo->lo_state = Lo_rundown;
+		mutex_unlock(&loop_ctl_mutex);
 		/*
 		 * In autoclear mode, stop the loop thread
 		 * and remove configuration after last close.
 		 */
-		err = loop_clr_fd(lo);
-		if (!err)
-			return;
+		__loop_clr_fd(lo, true);
+		return;
 	} else if (lo->lo_state == Lo_bound) {
 		/*
 		 * Otherwise keep thread (if running) and config,
@@ -1666,14 +1764,8 @@
 		blk_mq_unfreeze_queue(lo->lo_queue);
 	}
 
-	mutex_unlock(&lo->lo_ctl_mutex);
-}
-
-static void lo_release(struct gendisk *disk, fmode_t mode)
-{
-	mutex_lock(&loop_index_mutex);
-	__lo_release(disk->private_data);
-	mutex_unlock(&loop_index_mutex);
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
 }
 
 static const struct block_device_operations lo_fops = {
@@ -1712,10 +1804,10 @@
 	struct loop_device *lo = ptr;
 	struct loop_func_table *xfer = data;
 
-	mutex_lock(&lo->lo_ctl_mutex);
+	mutex_lock(&loop_ctl_mutex);
 	if (lo->lo_encryption == xfer)
 		loop_release_xfer(lo);
-	mutex_unlock(&lo->lo_ctl_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 	return 0;
 }
 
@@ -1896,7 +1988,6 @@
 	if (!part_shift)
 		disk->flags |= GENHD_FL_NO_PART_SCAN;
 	disk->flags |= GENHD_FL_EXT_DEVT;
-	mutex_init(&lo->lo_ctl_mutex);
 	atomic_set(&lo->lo_refcnt, 0);
 	lo->lo_number		= i;
 	spin_lock_init(&lo->lo_lock);
@@ -1975,7 +2066,7 @@
 	struct kobject *kobj;
 	int err;
 
-	mutex_lock(&loop_index_mutex);
+	mutex_lock(&loop_ctl_mutex);
 	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
 	if (err < 0)
 		err = loop_add(&lo, MINOR(dev) >> part_shift);
@@ -1983,7 +2074,7 @@
 		kobj = NULL;
 	else
 		kobj = get_disk_and_module(lo->lo_disk);
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 
 	*part = 0;
 	return kobj;
@@ -1993,9 +2084,13 @@
 			       unsigned long parm)
 {
 	struct loop_device *lo;
-	int ret = -ENOSYS;
+	int ret;
 
-	mutex_lock(&loop_index_mutex);
+	ret = mutex_lock_killable(&loop_ctl_mutex);
+	if (ret)
+		return ret;
+
+	ret = -ENOSYS;
 	switch (cmd) {
 	case LOOP_CTL_ADD:
 		ret = loop_lookup(&lo, parm);
@@ -2009,21 +2104,15 @@
 		ret = loop_lookup(&lo, parm);
 		if (ret < 0)
 			break;
-		ret = mutex_lock_killable(&lo->lo_ctl_mutex);
-		if (ret)
-			break;
 		if (lo->lo_state != Lo_unbound) {
 			ret = -EBUSY;
-			mutex_unlock(&lo->lo_ctl_mutex);
 			break;
 		}
 		if (atomic_read(&lo->lo_refcnt) > 0) {
 			ret = -EBUSY;
-			mutex_unlock(&lo->lo_ctl_mutex);
 			break;
 		}
 		lo->lo_disk->private_data = NULL;
-		mutex_unlock(&lo->lo_ctl_mutex);
 		idr_remove(&loop_index_idr, lo->lo_number);
 		loop_remove(lo);
 		break;
@@ -2033,7 +2122,7 @@
 			break;
 		ret = loop_add(&lo, -1);
 	}
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 
 	return ret;
 }
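
With loop_ctl_mutex now also covering the /dev/loop-control ioctls (taken killable, so a stuck waiter can be interrupted by a fatal signal), callers see the same -EBUSY rules as before. A minimal sketch, not part of this patch:

#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int ctl = open("/dev/loop-control", O_RDWR);
	int n;

	if (ctl < 0) {
		perror("open");
		return 1;
	}
	n = ioctl(ctl, LOOP_CTL_GET_FREE);	/* first unbound device */
	if (n < 0) {
		perror("LOOP_CTL_GET_FREE");
		return 1;
	}
	printf("free device: /dev/loop%d\n", n);
	/* -EBUSY if the device is bound (Lo_unbound check) or has openers. */
	if (ioctl(ctl, LOOP_CTL_REMOVE, n) < 0)
		perror("LOOP_CTL_REMOVE");
	close(ctl);
	return 0;
}
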
@@ -2117,10 +2206,10 @@
 				  THIS_MODULE, loop_probe, NULL, NULL);
 
 	/* pre-create number of devices given by config or max_loop */
-	mutex_lock(&loop_index_mutex);
+	mutex_lock(&loop_ctl_mutex);
 	for (i = 0; i < nr; i++)
 		loop_add(&lo, i);
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 
 	printk(KERN_INFO "loop: module loaded\n");
 	return 0;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 4d42c7a..af75a5e 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -54,7 +54,6 @@
 
 	spinlock_t		lo_lock;
 	int			lo_state;
-	struct mutex		lo_ctl_mutex;
 	struct kthread_worker	worker;
 	struct task_struct	*worker_task;
 	bool			use_dio;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 14a5125..c13a6d1 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -288,9 +288,10 @@
 	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
 	set_capacity(nbd->disk, config->bytesize >> 9);
 	if (bdev) {
-		if (bdev->bd_disk)
+		if (bdev->bd_disk) {
 			bd_set_size(bdev, config->bytesize);
-		else
+			set_blocksize(bdev, config->blksize);
+		} else
 			bdev->bd_invalidated = 1;
 		bdput(bdev);
 	}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 73ed5f3..585378b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5982,7 +5982,6 @@
 	struct list_head *tmp;
 	int dev_id;
 	char opt_buf[6];
-	bool already = false;
 	bool force = false;
 	int ret;
 
@@ -6015,13 +6014,13 @@
 		spin_lock_irq(&rbd_dev->lock);
 		if (rbd_dev->open_count && !force)
 			ret = -EBUSY;
-		else
-			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
-							&rbd_dev->flags);
+		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
+					  &rbd_dev->flags))
+			ret = -EINPROGRESS;
 		spin_unlock_irq(&rbd_dev->lock);
 	}
 	spin_unlock(&rbd_dev_list_lock);
-	if (ret < 0 || already)
+	if (ret)
 		return ret;
 
 	if (force) {
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index f68e9ba..5d70240 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -45,6 +45,8 @@
 #define WAITING_FOR_GEN_CMD	0x04
 #define WAITING_FOR_ANY		-1
 
+#define	VDC_MAX_RETRIES	10
+
 static struct workqueue_struct *sunvdc_wq;
 
 struct vdc_req_entry {
@@ -431,6 +433,7 @@
 		.end_idx		= dr->prod,
 	};
 	int err, delay;
+	int retries = 0;
 
 	hdr.seq = dr->snd_nxt;
 	delay = 1;
@@ -443,6 +446,8 @@
 		udelay(delay);
 		if ((delay <<= 1) > 128)
 			delay = 128;
+		if (retries++ > VDC_MAX_RETRIES)
+			break;
 	} while (err == -EAGAIN);
 
 	if (err == -ENOTCONN)
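
The new VDC_MAX_RETRIES bound turns what used to be a potentially unbounded -EAGAIN spin into a capped exponential backoff. A userspace analog of the loop's shape (flaky_send() is a stand-in for the descriptor-ring trigger; the delay cap mirrors the 128us clamp above):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define VDC_MAX_RETRIES 10

static int flaky_send(void)
{
	return -EAGAIN;	/* stand-in for the descriptor-ring trigger */
}

int main(void)
{
	int err, delay = 1, retries = 0;

	do {
		err = flaky_send();
		if (err == -EAGAIN) {
			usleep(delay);
			if ((delay <<= 1) > 128)
				delay = 128;	/* same 128us clamp as above */
			if (retries++ > VDC_MAX_RETRIES)
				break;	/* the new bound: stop spinning */
		}
	} while (err == -EAGAIN);
	printf("stopped after %d attempts (err=%d)\n", retries, err);
	return 0;
}
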
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 469541c..20907a0 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1026,7 +1026,11 @@
 	struct swim3 __iomem *sw = fs->swim3;
 
 	mutex_lock(&swim3_mutex);
-	if (fs->ref_count > 0 && --fs->ref_count == 0) {
+	if (fs->ref_count > 0)
+		--fs->ref_count;
+	else if (fs->ref_count == -1)
+		fs->ref_count = 0;
+	if (fs->ref_count == 0) {
 		swim3_action(fs, MOTOR_OFF);
 		out_8(&sw->control_bic, 0xff);
 		swim3_select(fs, RELAX);
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 6352357..99a2c60 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -16,7 +16,7 @@
 	  See Documentation/blockdev/zram.txt for more information.
 
 config ZRAM_WRITEBACK
-       bool "Write back incompressible page to backing device"
+       bool "Write back incompressible or idle page to backing device"
        depends on ZRAM
        default n
        help
@@ -25,6 +25,9 @@
 	 For this feature, admin should set up backing device via
 	 /sys/block/zramX/backing_dev.
 
+	 With /sys/block/zramX/{idle,writeback}, applications can request
+	 writeback of idle pages to the backing device to save memory.
+
 	 See Documentation/blockdev/zram.txt for more information.
 
 config ZRAM_MEMORY_TRACKING
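
The knobs this help text refers to are driven purely through sysfs. A sketch of the intended admin flow (not part of this patch; paths assume zram0 with backing_dev already configured and CONFIG_ZRAM_WRITEBACK enabled — note that this version of idle_store only accepts "all"):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Mark every allocated slot idle ... */
	if (write_str("/sys/block/zram0/idle", "all"))
		perror("idle");
	/* ... then write the still-idle slots to the backing device. */
	if (write_str("/sys/block/zram0/writeback", "idle"))
		perror("writeback");
	return 0;
}
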
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 586992f..50045f0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,15 +52,23 @@
 static size_t huge_class_size;
 
 static void zram_free_page(struct zram *zram, size_t index);
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+				u32 index, int offset, struct bio *bio);
+
+
+static int zram_slot_trylock(struct zram *zram, u32 index)
+{
+	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+}
 
 static void zram_slot_lock(struct zram *zram, u32 index)
 {
-	bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
+	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
 }
 
 static void zram_slot_unlock(struct zram *zram, u32 index)
 {
-	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value);
+	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
 }
 
 static inline bool init_done(struct zram *zram)
@@ -68,13 +76,6 @@
 	return zram->disksize;
 }
 
-static inline bool zram_allocated(struct zram *zram, u32 index)
-{
-
-	return (zram->table[index].value >> (ZRAM_FLAG_SHIFT + 1)) ||
-					zram->table[index].handle;
-}
-
 static inline struct zram *dev_to_zram(struct device *dev)
 {
 	return (struct zram *)dev_to_disk(dev)->private_data;
@@ -94,19 +95,19 @@
 static bool zram_test_flag(struct zram *zram, u32 index,
 			enum zram_pageflags flag)
 {
-	return zram->table[index].value & BIT(flag);
+	return zram->table[index].flags & BIT(flag);
 }
 
 static void zram_set_flag(struct zram *zram, u32 index,
 			enum zram_pageflags flag)
 {
-	zram->table[index].value |= BIT(flag);
+	zram->table[index].flags |= BIT(flag);
 }
 
 static void zram_clear_flag(struct zram *zram, u32 index,
 			enum zram_pageflags flag)
 {
-	zram->table[index].value &= ~BIT(flag);
+	zram->table[index].flags &= ~BIT(flag);
 }
 
 static inline void zram_set_element(struct zram *zram, u32 index,
@@ -122,15 +123,22 @@
 
 static size_t zram_get_obj_size(struct zram *zram, u32 index)
 {
-	return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
 }
 
 static void zram_set_obj_size(struct zram *zram,
 					u32 index, size_t size)
 {
-	unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;
+	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
 
-	zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
+	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
+}
+
+static inline bool zram_allocated(struct zram *zram, u32 index)
+{
+	return zram_get_obj_size(zram, index) ||
+			zram_test_flag(zram, index, ZRAM_SAME) ||
+			zram_test_flag(zram, index, ZRAM_WB);
 }
 
 #if PAGE_SIZE != 4096
@@ -276,17 +284,125 @@
 	return len;
 }
 
-#ifdef CONFIG_ZRAM_WRITEBACK
-static bool zram_wb_enabled(struct zram *zram)
+static ssize_t idle_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
 {
-	return zram->backing_dev;
+	struct zram *zram = dev_to_zram(dev);
+	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+	int index;
+	char mode_buf[8];
+	ssize_t sz;
+
+	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
+	if (sz <= 0)
+		return -EINVAL;
+
+	/* ignore trailing newline */
+	if (mode_buf[sz - 1] == '\n')
+		mode_buf[sz - 1] = 0x00;
+
+	if (strcmp(mode_buf, "all"))
+		return -EINVAL;
+
+	down_read(&zram->init_lock);
+	if (!init_done(zram)) {
+		up_read(&zram->init_lock);
+		return -EINVAL;
+	}
+
+	for (index = 0; index < nr_pages; index++) {
+		/*
+		 * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
+		 * See the comment in writeback_store.
+		 */
+		zram_slot_lock(zram, index);
+		if (zram_allocated(zram, index) &&
+				!zram_test_flag(zram, index, ZRAM_UNDER_WB))
+			zram_set_flag(zram, index, ZRAM_IDLE);
+		zram_slot_unlock(zram, index);
+	}
+
+	up_read(&zram->init_lock);
+
+	return len;
+}
+
+#ifdef CONFIG_ZRAM_WRITEBACK
+static ssize_t writeback_limit_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct zram *zram = dev_to_zram(dev);
+	u64 val;
+	ssize_t ret = -EINVAL;
+
+	if (kstrtoull(buf, 10, &val))
+		return ret;
+
+	down_read(&zram->init_lock);
+	spin_lock(&zram->wb_limit_lock);
+	zram->wb_limit_enable = val;
+	spin_unlock(&zram->wb_limit_lock);
+	up_read(&zram->init_lock);
+	ret = len;
+
+	return ret;
+}
+
+static ssize_t writeback_limit_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	bool val;
+	struct zram *zram = dev_to_zram(dev);
+
+	down_read(&zram->init_lock);
+	spin_lock(&zram->wb_limit_lock);
+	val = zram->wb_limit_enable;
+	spin_unlock(&zram->wb_limit_lock);
+	up_read(&zram->init_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t writeback_limit_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct zram *zram = dev_to_zram(dev);
+	u64 val;
+	ssize_t ret = -EINVAL;
+
+	if (kstrtoull(buf, 10, &val))
+		return ret;
+
+	down_read(&zram->init_lock);
+	spin_lock(&zram->wb_limit_lock);
+	zram->bd_wb_limit = val;
+	spin_unlock(&zram->wb_limit_lock);
+	up_read(&zram->init_lock);
+	ret = len;
+
+	return ret;
+}
+
+static ssize_t writeback_limit_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u64 val;
+	struct zram *zram = dev_to_zram(dev);
+
+	down_read(&zram->init_lock);
+	spin_lock(&zram->wb_limit_lock);
+	val = zram->bd_wb_limit;
+	spin_unlock(&zram->wb_limit_lock);
+	up_read(&zram->init_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
 }
 
 static void reset_bdev(struct zram *zram)
 {
 	struct block_device *bdev;
 
-	if (!zram_wb_enabled(zram))
+	if (!zram->backing_dev)
 		return;
 
 	bdev = zram->bdev;
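
Both limit knobs above are guarded by wb_limit_lock, and the budget is accounted in 4K units: writeback_store decrements bd_wb_limit by 1UL << (PAGE_SHIFT - 12) per page written, i.e. one unit per page on 4K-page systems. So capping writeback at roughly 100 MiB means writing 100 MiB / 4 KiB = 25600. A sketch (not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0)
		perror(path);
	if (fd >= 0)
		close(fd);
}

int main(void)
{
	write_str("/sys/block/zram0/writeback_limit_enable", "1");
	/* 100 MiB / 4 KiB = 25600 units of 4K */
	write_str("/sys/block/zram0/writeback_limit", "25600");
	return 0;
}
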
@@ -313,7 +429,7 @@
 	ssize_t ret;
 
 	down_read(&zram->init_lock);
-	if (!zram_wb_enabled(zram)) {
+	if (!zram->backing_dev) {
 		memcpy(buf, "none\n", 5);
 		up_read(&zram->init_lock);
 		return 5;
@@ -382,8 +498,10 @@
 
 	bdev = bdgrab(I_BDEV(inode));
 	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
-	if (err < 0)
+	if (err < 0) {
+		bdev = NULL;
 		goto out;
+	}
 
 	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
 	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
@@ -399,7 +517,6 @@
 		goto out;
 
 	reset_bdev(zram);
-	spin_lock_init(&zram->bitmap_lock);
 
 	zram->old_block_size = old_block_size;
 	zram->bdev = bdev;
@@ -441,32 +558,29 @@
 	return err;
 }
 
-static unsigned long get_entry_bdev(struct zram *zram)
+static unsigned long alloc_block_bdev(struct zram *zram)
 {
-	unsigned long entry;
-
-	spin_lock(&zram->bitmap_lock);
+	unsigned long blk_idx = 1;
+retry:
 	/* skip 0 bit to confuse zram.handle = 0 */
-	entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
-	if (entry == zram->nr_pages) {
-		spin_unlock(&zram->bitmap_lock);
+	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
+	if (blk_idx == zram->nr_pages)
 		return 0;
-	}
 
-	set_bit(entry, zram->bitmap);
-	spin_unlock(&zram->bitmap_lock);
+	if (test_and_set_bit(blk_idx, zram->bitmap))
+		goto retry;
 
-	return entry;
+	atomic64_inc(&zram->stats.bd_count);
+	return blk_idx;
 }
 
-static void put_entry_bdev(struct zram *zram, unsigned long entry)
+static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
 {
 	int was_set;
 
-	spin_lock(&zram->bitmap_lock);
-	was_set = test_and_clear_bit(entry, zram->bitmap);
-	spin_unlock(&zram->bitmap_lock);
+	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
 	WARN_ON_ONCE(!was_set);
+	atomic64_dec(&zram->stats.bd_count);
 }
 
 static void zram_page_end_io(struct bio *bio)
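
alloc_block_bdev() above replaces the bitmap_lock'd scan with a lock-free find-then-test_and_set_bit retry. A self-contained userspace analog of the same claim-or-rescan pattern using C11 atomics (sizes and names are illustrative, not taken from the driver):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_BLOCKS 1024
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

static _Atomic unsigned long bitmap[NR_BLOCKS / BITS_PER_WORD];

static unsigned long alloc_block(void)
{
	unsigned long i;

retry:
	for (i = 1; i < NR_BLOCKS; i++) {	/* bit 0 reserved, as in zram */
		unsigned long word = atomic_load(&bitmap[i / BITS_PER_WORD]);
		unsigned long mask = 1UL << (i % BITS_PER_WORD);

		if (word & mask)
			continue;
		/* test_and_set_bit(): if someone beat us to it, rescan */
		if (atomic_fetch_or(&bitmap[i / BITS_PER_WORD], mask) & mask)
			goto retry;
		return i;
	}
	return 0;	/* no space */
}

int main(void)
{
	printf("first block: %lu\n", alloc_block());
	printf("second block: %lu\n", alloc_block());
	return 0;
}
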
@@ -509,6 +623,172 @@
 	return 1;
 }
 
+#define HUGE_WRITEBACK 1
+#define IDLE_WRITEBACK 2
+
+static ssize_t writeback_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct zram *zram = dev_to_zram(dev);
+	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+	unsigned long index;
+	struct bio bio;
+	struct bio_vec bio_vec;
+	struct page *page;
+	ssize_t ret, sz;
+	char mode_buf[8];
+	int mode = -1;
+	unsigned long blk_idx = 0;
+
+	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
+	if (sz <= 0)
+		return -EINVAL;
+
+	/* ignore trailing newline */
+	if (mode_buf[sz - 1] == '\n')
+		mode_buf[sz - 1] = 0x00;
+
+	if (!strcmp(mode_buf, "idle"))
+		mode = IDLE_WRITEBACK;
+	else if (!strcmp(mode_buf, "huge"))
+		mode = HUGE_WRITEBACK;
+
+	if (mode == -1)
+		return -EINVAL;
+
+	down_read(&zram->init_lock);
+	if (!init_done(zram)) {
+		ret = -EINVAL;
+		goto release_init_lock;
+	}
+
+	if (!zram->backing_dev) {
+		ret = -ENODEV;
+		goto release_init_lock;
+	}
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page) {
+		ret = -ENOMEM;
+		goto release_init_lock;
+	}
+
+	for (index = 0; index < nr_pages; index++) {
+		struct bio_vec bvec;
+
+		bvec.bv_page = page;
+		bvec.bv_len = PAGE_SIZE;
+		bvec.bv_offset = 0;
+
+		spin_lock(&zram->wb_limit_lock);
+		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
+			spin_unlock(&zram->wb_limit_lock);
+			ret = -EIO;
+			break;
+		}
+		spin_unlock(&zram->wb_limit_lock);
+
+		if (!blk_idx) {
+			blk_idx = alloc_block_bdev(zram);
+			if (!blk_idx) {
+				ret = -ENOSPC;
+				break;
+			}
+		}
+
+		zram_slot_lock(zram, index);
+		if (!zram_allocated(zram, index))
+			goto next;
+
+		if (zram_test_flag(zram, index, ZRAM_WB) ||
+				zram_test_flag(zram, index, ZRAM_SAME) ||
+				zram_test_flag(zram, index, ZRAM_UNDER_WB))
+			goto next;
+
+		if (mode == IDLE_WRITEBACK &&
+			  !zram_test_flag(zram, index, ZRAM_IDLE))
+			goto next;
+		if (mode == HUGE_WRITEBACK &&
+			  !zram_test_flag(zram, index, ZRAM_HUGE))
+			goto next;
+		/*
+		 * Clearing ZRAM_UNDER_WB is the caller's duty;
+		 * IOW, zram_free_page() never clears it.
+		 */
+		zram_set_flag(zram, index, ZRAM_UNDER_WB);
+		/* Needed to detect races with hugepage writeback */
+		zram_set_flag(zram, index, ZRAM_IDLE);
+		zram_slot_unlock(zram, index);
+		if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
+			zram_slot_lock(zram, index);
+			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+			zram_clear_flag(zram, index, ZRAM_IDLE);
+			zram_slot_unlock(zram, index);
+			continue;
+		}
+
+		bio_init(&bio, &bio_vec, 1);
+		bio_set_dev(&bio, zram->bdev);
+		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
+		bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
+
+		bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
+				bvec.bv_offset);
+		/*
+		 * XXX: A single-page IO is inefficient for writes,
+		 * but it is acceptable as a starting point.
+		 */
+		ret = submit_bio_wait(&bio);
+		if (ret) {
+			zram_slot_lock(zram, index);
+			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+			zram_clear_flag(zram, index, ZRAM_IDLE);
+			zram_slot_unlock(zram, index);
+			continue;
+		}
+
+		atomic64_inc(&zram->stats.bd_writes);
+		/*
+		 * We released zram_slot_lock, so we need to check whether the
+		 * slot changed. If the slot was freed, we can catch that
+		 * easily via zram_allocated.
+		 * A subtle case is when the slot is freed/reallocated/marked
+		 * as ZRAM_IDLE again. To close that race, idle_store doesn't
+		 * mark a slot ZRAM_IDLE once it finds it is ZRAM_UNDER_WB.
+		 * Thus, we can close the race by checking the ZRAM_IDLE bit.
+		 */
+		zram_slot_lock(zram, index);
+		if (!zram_allocated(zram, index) ||
+			  !zram_test_flag(zram, index, ZRAM_IDLE)) {
+			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+			zram_clear_flag(zram, index, ZRAM_IDLE);
+			goto next;
+		}
+
+		zram_free_page(zram, index);
+		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+		zram_set_flag(zram, index, ZRAM_WB);
+		zram_set_element(zram, index, blk_idx);
+		blk_idx = 0;
+		atomic64_inc(&zram->stats.pages_stored);
+		spin_lock(&zram->wb_limit_lock);
+		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
+			zram->bd_wb_limit -=  1UL << (PAGE_SHIFT - 12);
+		spin_unlock(&zram->wb_limit_lock);
+next:
+		zram_slot_unlock(zram, index);
+	}
+
+	if (blk_idx)
+		free_block_bdev(zram, blk_idx);
+	ret = len;
+	__free_page(page);
+release_init_lock:
+	up_read(&zram->init_lock);
+
+	return ret;
+}
+
 struct zram_work {
 	struct work_struct work;
 	struct zram *zram;
@@ -561,79 +841,21 @@
 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
 			unsigned long entry, struct bio *parent, bool sync)
 {
+	atomic64_inc(&zram->stats.bd_reads);
 	if (sync)
 		return read_from_bdev_sync(zram, bvec, entry, parent);
 	else
 		return read_from_bdev_async(zram, bvec, entry, parent);
 }
-
-static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
-					u32 index, struct bio *parent,
-					unsigned long *pentry)
-{
-	struct bio *bio;
-	unsigned long entry;
-
-	bio = bio_alloc(GFP_ATOMIC, 1);
-	if (!bio)
-		return -ENOMEM;
-
-	entry = get_entry_bdev(zram);
-	if (!entry) {
-		bio_put(bio);
-		return -ENOSPC;
-	}
-
-	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
-	bio_set_dev(bio, zram->bdev);
-	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
-					bvec->bv_offset)) {
-		bio_put(bio);
-		put_entry_bdev(zram, entry);
-		return -EIO;
-	}
-
-	if (!parent) {
-		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
-		bio->bi_end_io = zram_page_end_io;
-	} else {
-		bio->bi_opf = parent->bi_opf;
-		bio_chain(bio, parent);
-	}
-
-	submit_bio(bio);
-	*pentry = entry;
-
-	return 0;
-}
-
-static void zram_wb_clear(struct zram *zram, u32 index)
-{
-	unsigned long entry;
-
-	zram_clear_flag(zram, index, ZRAM_WB);
-	entry = zram_get_element(zram, index);
-	zram_set_element(zram, index, 0);
-	put_entry_bdev(zram, entry);
-}
-
 #else
-static bool zram_wb_enabled(struct zram *zram) { return false; }
 static inline void reset_bdev(struct zram *zram) {};
-static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
-					u32 index, struct bio *parent,
-					unsigned long *pentry)
-
-{
-	return -EIO;
-}
-
 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
 			unsigned long entry, struct bio *parent, bool sync)
 {
 	return -EIO;
 }
-static void zram_wb_clear(struct zram *zram, u32 index) {}
+
+static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
 #endif
 
 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
@@ -652,14 +874,10 @@
 
 static void zram_accessed(struct zram *zram, u32 index)
 {
+	zram_clear_flag(zram, index, ZRAM_IDLE);
 	zram->table[index].ac_time = ktime_get_boottime();
 }
 
-static void zram_reset_access(struct zram *zram, u32 index)
-{
-	zram->table[index].ac_time = 0;
-}
-
 static ssize_t read_block_state(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos)
 {
@@ -689,12 +907,13 @@
 
 		ts = ktime_to_timespec64(zram->table[index].ac_time);
 		copied = snprintf(kbuf + written, count,
-			"%12zd %12lld.%06lu %c%c%c\n",
+			"%12zd %12lld.%06lu %c%c%c%c\n",
 			index, (s64)ts.tv_sec,
 			ts.tv_nsec / NSEC_PER_USEC,
 			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
 			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
-			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.');
+			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
+			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');
 
 		if (count < copied) {
 			zram_slot_unlock(zram, index);
@@ -739,8 +958,10 @@
 #else
 static void zram_debugfs_create(void) {};
 static void zram_debugfs_destroy(void) {};
-static void zram_accessed(struct zram *zram, u32 index) {};
-static void zram_reset_access(struct zram *zram, u32 index) {};
+static void zram_accessed(struct zram *zram, u32 index)
+{
+	zram_clear_flag(zram, index, ZRAM_IDLE);
+};
 static void zram_debugfs_register(struct zram *zram) {};
 static void zram_debugfs_unregister(struct zram *zram) {};
 #endif
@@ -877,6 +1098,26 @@
 	return ret;
 }
 
+#ifdef CONFIG_ZRAM_WRITEBACK
+#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
+static ssize_t bd_stat_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct zram *zram = dev_to_zram(dev);
+	ssize_t ret;
+
+	down_read(&zram->init_lock);
+	ret = scnprintf(buf, PAGE_SIZE,
+		"%8llu %8llu %8llu\n",
+			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
+			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
+			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
+	up_read(&zram->init_lock);
+
+	return ret;
+}
+#endif
+
 static ssize_t debug_stat_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -886,9 +1127,10 @@
 
 	down_read(&zram->init_lock);
 	ret = scnprintf(buf, PAGE_SIZE,
-			"version: %d\n%8llu\n",
+			"version: %d\n%8llu %8llu\n",
 			version,
-			(u64)atomic64_read(&zram->stats.writestall));
+			(u64)atomic64_read(&zram->stats.writestall),
+			(u64)atomic64_read(&zram->stats.miss_free));
 	up_read(&zram->init_lock);
 
 	return ret;
@@ -896,6 +1138,9 @@
 
 static DEVICE_ATTR_RO(io_stat);
 static DEVICE_ATTR_RO(mm_stat);
+#ifdef CONFIG_ZRAM_WRITEBACK
+static DEVICE_ATTR_RO(bd_stat);
+#endif
 static DEVICE_ATTR_RO(debug_stat);
 
 static void zram_meta_free(struct zram *zram, u64 disksize)
@@ -940,17 +1185,21 @@
 {
 	unsigned long handle;
 
-	zram_reset_access(zram, index);
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+	zram->table[index].ac_time = 0;
+#endif
+	if (zram_test_flag(zram, index, ZRAM_IDLE))
+		zram_clear_flag(zram, index, ZRAM_IDLE);
 
 	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
 		zram_clear_flag(zram, index, ZRAM_HUGE);
 		atomic64_dec(&zram->stats.huge_pages);
 	}
 
-	if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
-		zram_wb_clear(zram, index);
-		atomic64_dec(&zram->stats.pages_stored);
-		return;
+	if (zram_test_flag(zram, index, ZRAM_WB)) {
+		zram_clear_flag(zram, index, ZRAM_WB);
+		free_block_bdev(zram, zram_get_element(zram, index));
+		goto out;
 	}
 
 	/*
@@ -959,10 +1208,8 @@
 	 */
 	if (zram_test_flag(zram, index, ZRAM_SAME)) {
 		zram_clear_flag(zram, index, ZRAM_SAME);
-		zram_set_element(zram, index, 0);
 		atomic64_dec(&zram->stats.same_pages);
-		atomic64_dec(&zram->stats.pages_stored);
-		return;
+		goto out;
 	}
 
 	handle = zram_get_handle(zram, index);
@@ -973,10 +1220,12 @@
 
 	atomic64_sub(zram_get_obj_size(zram, index),
 			&zram->stats.compr_data_size);
+out:
 	atomic64_dec(&zram->stats.pages_stored);
-
 	zram_set_handle(zram, index, 0);
 	zram_set_obj_size(zram, index, 0);
+	WARN_ON_ONCE(zram->table[index].flags &
+		~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
 }
 
 static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
@@ -987,24 +1236,20 @@
 	unsigned int size;
 	void *src, *dst;
 
-	if (zram_wb_enabled(zram)) {
-		zram_slot_lock(zram, index);
-		if (zram_test_flag(zram, index, ZRAM_WB)) {
-			struct bio_vec bvec;
+	zram_slot_lock(zram, index);
+	if (zram_test_flag(zram, index, ZRAM_WB)) {
+		struct bio_vec bvec;
 
-			zram_slot_unlock(zram, index);
-
-			bvec.bv_page = page;
-			bvec.bv_len = PAGE_SIZE;
-			bvec.bv_offset = 0;
-			return read_from_bdev(zram, &bvec,
-					zram_get_element(zram, index),
-					bio, partial_io);
-		}
 		zram_slot_unlock(zram, index);
+
+		bvec.bv_page = page;
+		bvec.bv_len = PAGE_SIZE;
+		bvec.bv_offset = 0;
+		return read_from_bdev(zram, &bvec,
+				zram_get_element(zram, index),
+				bio, partial_io);
 	}
 
-	zram_slot_lock(zram, index);
 	handle = zram_get_handle(zram, index);
 	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
 		unsigned long value;
@@ -1089,7 +1334,6 @@
 	struct page *page = bvec->bv_page;
 	unsigned long element = 0;
 	enum zram_pageflags flags = 0;
-	bool allow_wb = true;
 
 	mem = kmap_atomic(page);
 	if (page_same_filled(mem, &element)) {
@@ -1114,21 +1358,8 @@
 		return ret;
 	}
 
-	if (unlikely(comp_len >= huge_class_size)) {
+	if (comp_len >= huge_class_size)
 		comp_len = PAGE_SIZE;
-		if (zram_wb_enabled(zram) && allow_wb) {
-			zcomp_stream_put(zram->comp);
-			ret = write_to_bdev(zram, bvec, index, bio, &element);
-			if (!ret) {
-				flags = ZRAM_WB;
-				ret = 1;
-				goto out;
-			}
-			allow_wb = false;
-			goto compress_again;
-		}
-	}
-
 	/*
 	 * handle allocation has 2 paths:
 	 * a) fast path is executed with preemption disabled (for
@@ -1401,10 +1632,14 @@
 
 	zram = bdev->bd_disk->private_data;
 
-	zram_slot_lock(zram, index);
+	atomic64_inc(&zram->stats.notify_free);
+	if (!zram_slot_trylock(zram, index)) {
+		atomic64_inc(&zram->stats.miss_free);
+		return;
+	}
+
 	zram_free_page(zram, index);
 	zram_slot_unlock(zram, index);
-	atomic64_inc(&zram->stats.notify_free);
 }
 
 static int zram_rw_page(struct block_device *bdev, sector_t sector,
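
zram_slot_free_notify() is called from a context where blocking on the slot lock is undesirable, so it now uses zram_slot_trylock() and simply counts a miss_free when the slot is contended; the stale slot is reclaimed later when it is overwritten. A userspace analog of the trylock-or-count-a-miss pattern (not part of this patch; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_long miss_free;

static void slot_free_notify(void)
{
	if (pthread_mutex_trylock(&slot_lock)) {
		atomic_fetch_add(&miss_free, 1);	/* contended: skip */
		return;
	}
	/* ... free the slot here ... */
	pthread_mutex_unlock(&slot_lock);
}

int main(void)
{
	slot_free_notify();
	printf("missed frees: %ld\n", atomic_load(&miss_free));
	return 0;
}
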
@@ -1609,10 +1844,14 @@
 static DEVICE_ATTR_WO(reset);
 static DEVICE_ATTR_WO(mem_limit);
 static DEVICE_ATTR_WO(mem_used_max);
+static DEVICE_ATTR_WO(idle);
 static DEVICE_ATTR_RW(max_comp_streams);
 static DEVICE_ATTR_RW(comp_algorithm);
 #ifdef CONFIG_ZRAM_WRITEBACK
 static DEVICE_ATTR_RW(backing_dev);
+static DEVICE_ATTR_WO(writeback);
+static DEVICE_ATTR_RW(writeback_limit);
+static DEVICE_ATTR_RW(writeback_limit_enable);
 #endif
 
 static struct attribute *zram_disk_attrs[] = {
@@ -1622,13 +1861,20 @@
 	&dev_attr_compact.attr,
 	&dev_attr_mem_limit.attr,
 	&dev_attr_mem_used_max.attr,
+	&dev_attr_idle.attr,
 	&dev_attr_max_comp_streams.attr,
 	&dev_attr_comp_algorithm.attr,
 #ifdef CONFIG_ZRAM_WRITEBACK
 	&dev_attr_backing_dev.attr,
+	&dev_attr_writeback.attr,
+	&dev_attr_writeback_limit.attr,
+	&dev_attr_writeback_limit_enable.attr,
 #endif
 	&dev_attr_io_stat.attr,
 	&dev_attr_mm_stat.attr,
+#ifdef CONFIG_ZRAM_WRITEBACK
+	&dev_attr_bd_stat.attr,
+#endif
 	&dev_attr_debug_stat.attr,
 	NULL,
 };
@@ -1662,7 +1908,9 @@
 	device_id = ret;
 
 	init_rwsem(&zram->init_lock);
-
+#ifdef CONFIG_ZRAM_WRITEBACK
+	spin_lock_init(&zram->wb_limit_lock);
+#endif
 	queue = blk_alloc_queue(GFP_KERNEL);
 	if (!queue) {
 		pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 72c8584..f2fd46d 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -30,7 +30,7 @@
 
 
 /*
- * The lower ZRAM_FLAG_SHIFT bits of table.value is for
+ * The lower ZRAM_FLAG_SHIFT bits of table.flags is for
  * object size (excluding header), the higher bits is for
  * zram_pageflags.
  *
@@ -41,13 +41,15 @@
  */
 #define ZRAM_FLAG_SHIFT 24
 
-/* Flags for zram pages (table[page_no].value) */
+/* Flags for zram pages (table[page_no].flags) */
 enum zram_pageflags {
 	/* zram slot is locked */
 	ZRAM_LOCK = ZRAM_FLAG_SHIFT,
 	ZRAM_SAME,	/* Page consists the same element */
 	ZRAM_WB,	/* page is stored on backing_device */
+	ZRAM_UNDER_WB,	/* page is under writeback */
 	ZRAM_HUGE,	/* Incompressible page */
+	ZRAM_IDLE,	/* page not accessed since the last idle marking */
 
 	__NR_ZRAM_PAGEFLAGS,
 };
@@ -60,7 +62,7 @@
 		unsigned long handle;
 		unsigned long element;
 	};
-	unsigned long value;
+	unsigned long flags;
 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
 	ktime_t ac_time;
 #endif
@@ -79,6 +81,12 @@
 	atomic64_t pages_stored;	/* no. of pages currently stored */
 	atomic_long_t max_used_pages;	/* no. of maximum pages stored */
 	atomic64_t writestall;		/* no. of write slow paths */
+	atomic64_t miss_free;		/* no. of missed free */
+#ifdef	CONFIG_ZRAM_WRITEBACK
+	atomic64_t bd_count;		/* no. of pages in backing device */
+	atomic64_t bd_reads;		/* no. of reads from backing device */
+	atomic64_t bd_writes;		/* no. of writes to backing device */
+#endif
 };
 
 struct zram {
@@ -104,13 +112,15 @@
 	 * zram is claimed so open request will be failed
 	 */
 	bool claim; /* Protected by bdev->bd_mutex */
-#ifdef CONFIG_ZRAM_WRITEBACK
 	struct file *backing_dev;
+#ifdef CONFIG_ZRAM_WRITEBACK
+	spinlock_t wb_limit_lock;
+	bool wb_limit_enable;
+	u64 bd_wb_limit;
 	struct block_device *bdev;
 	unsigned int old_block_size;
 	unsigned long *bitmap;
 	unsigned long nr_pages;
-	spinlock_t bitmap_lock;
 #endif
 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
 	struct dentry *debugfs_dir;
diff --git a/drivers/bluetooth/btfm_slim_slave.c b/drivers/bluetooth/btfm_slim_slave.c
index d0e19ca..e70b394 100644
--- a/drivers/bluetooth/btfm_slim_slave.c
+++ b/drivers/bluetooth/btfm_slim_slave.c
@@ -18,12 +18,12 @@
 };
 
 struct btfmslim_ch slave_txport[] = {
+	{.id = BTFM_BT_SCO_SLIM_TX, .name = "SCO_Tx",
+	.port = SLAVE_SB_PGD_PORT_TX_SCO},
 	{.id = BTFM_FM_SLIM_TX, .name = "FM_Tx1",
 	.port = SLAVE_SB_PGD_PORT_TX1_FM},
 	{.id = BTFM_FM_SLIM_TX, .name = "FM_Tx2",
 	.port = SLAVE_SB_PGD_PORT_TX2_FM},
-	{.id = BTFM_BT_SCO_SLIM_TX, .name = "SCO_Tx",
-	.port = SLAVE_SB_PGD_PORT_TX_SCO},
 	{.id = BTFM_SLIM_NUM_CODEC_DAIS, .name = "",
 	.port = BTFM_SLIM_PGD_PORT_LAST},
 };
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 7f9ea8e..1342f8e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -544,10 +544,9 @@
 					    hdev->bus);
 
 	if (!btrtl_dev->ic_info) {
-		rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
+		rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
 			    lmp_subver, hci_rev, hci_ver);
-		ret = -EINVAL;
-		goto err_free;
+		return btrtl_dev;
 	}
 
 	if (btrtl_dev->ic_info->has_rom_version) {
@@ -602,6 +601,11 @@
 	 * standard btusb. Once that firmware is uploaded, the subver changes
 	 * to a different value.
 	 */
+	if (!btrtl_dev->ic_info) {
+		rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
+		return 0;
+	}
+
 	switch (btrtl_dev->ic_info->lmp_subver) {
 	case RTL_ROM_LMP_8723A:
 	case RTL_ROM_LMP_3499:
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index cd2e5cf..77b67a5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -343,6 +343,7 @@
 	/* Intel Bluetooth devices */
 	{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
 	{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
+	{ USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
 	{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
 	{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
 	{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@ -2054,6 +2055,35 @@
 	return -EILSEQ;
 }
 
+static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
+					     struct intel_boot_params *params,
+					     char *fw_name, size_t len,
+					     const char *suffix)
+{
+	switch (ver->hw_variant) {
+	case 0x0b:	/* SfP */
+	case 0x0c:	/* WsP */
+		snprintf(fw_name, len, "intel/ibt-%u-%u.%s",
+			le16_to_cpu(ver->hw_variant),
+			le16_to_cpu(params->dev_revid),
+			suffix);
+		break;
+	case 0x11:	/* JfP */
+	case 0x12:	/* ThP */
+	case 0x13:	/* HrP */
+	case 0x14:	/* CcP */
+		snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s",
+			le16_to_cpu(ver->hw_variant),
+			le16_to_cpu(ver->hw_revision),
+			le16_to_cpu(ver->fw_revision),
+			suffix);
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
 static int btusb_setup_intel_new(struct hci_dev *hdev)
 {
 	struct btusb_data *data = hci_get_drvdata(hdev);
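
The helper above deduplicates the two firmware-naming schemes that were previously open-coded for both the .sfi and .ddc lookups. A sketch of the strings it produces (the hardware/firmware revision values below are made up for illustration):

#include <stdio.h>

int main(void)
{
	char fw[64];

	/* SfP/WsP (0x0b/0x0c): ibt-<hw_variant>-<dev_revid>.<suffix> */
	snprintf(fw, sizeof(fw), "intel/ibt-%u-%u.%s", 11u, 5u, "sfi");
	printf("%s\n", fw);	/* intel/ibt-11-5.sfi */

	/* JfP/ThP/HrP/CcP (0x11-0x14):
	 * ibt-<hw_variant>-<hw_revision>-<fw_revision>.<suffix> */
	snprintf(fw, sizeof(fw), "intel/ibt-%u-%u-%u.%s", 18u, 1u, 16u, "ddc");
	printf("%s\n", fw);	/* intel/ibt-18-1-16.ddc */
	return 0;
}
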
@@ -2105,7 +2135,7 @@
 	case 0x11:	/* JfP */
 	case 0x12:	/* ThP */
 	case 0x13:	/* HrP */
-	case 0x14:	/* QnJ, IcP */
+	case 0x14:	/* CcP */
 		break;
 	default:
 		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
@@ -2189,23 +2219,9 @@
 	 * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
 	 *
 	 */
-	switch (ver.hw_variant) {
-	case 0x0b:	/* SfP */
-	case 0x0c:	/* WsP */
-		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
-			 le16_to_cpu(ver.hw_variant),
-			 le16_to_cpu(params.dev_revid));
-		break;
-	case 0x11:	/* JfP */
-	case 0x12:	/* ThP */
-	case 0x13:	/* HrP */
-	case 0x14:	/* QnJ, IcP */
-		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
-			 le16_to_cpu(ver.hw_variant),
-			 le16_to_cpu(ver.hw_revision),
-			 le16_to_cpu(ver.fw_revision));
-		break;
-	default:
+	err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
+						sizeof(fwname), "sfi");
+	if (!err) {
 		bt_dev_err(hdev, "Unsupported Intel firmware naming");
 		return -EINVAL;
 	}
@@ -2221,23 +2237,9 @@
 	/* Save the DDC file name for later use to apply once the firmware
 	 * downloading is done.
 	 */
-	switch (ver.hw_variant) {
-	case 0x0b:	/* SfP */
-	case 0x0c:	/* WsP */
-		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
-			 le16_to_cpu(ver.hw_variant),
-			 le16_to_cpu(params.dev_revid));
-		break;
-	case 0x11:	/* JfP */
-	case 0x12:	/* ThP */
-	case 0x13:	/* HrP */
-	case 0x14:	/* QnJ, IcP */
-		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
-			 le16_to_cpu(ver.hw_variant),
-			 le16_to_cpu(ver.hw_revision),
-			 le16_to_cpu(ver.fw_revision));
-		break;
-	default:
+	err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
+						sizeof(fwname), "ddc");
+	if (!err) {
 		bt_dev_err(hdev, "Unsupported Intel firmware naming");
 		return -EINVAL;
 	}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index ddbd8c6..8001323 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -907,6 +907,10 @@
 
 	dev->clk = devm_clk_get(dev->dev, NULL);
 
+	/* Handle deferred probing */
+	if (dev->clk == ERR_PTR(-EPROBE_DEFER))
+		return PTR_ERR(dev->clk);
+
 	dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
 						     GPIOD_OUT_LOW);
 	if (IS_ERR(dev->device_wakeup))
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
index eebbf4d..f249911 100644
--- a/drivers/bus/mhi/devices/mhi_netdev.c
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -550,7 +550,7 @@
 	struct device_node *of_node = mhi_dev->dev.of_node;
 	struct mhi_netdev_priv *mhi_netdev_priv;
 
-	mhi_netdev->alias = of_alias_get_id(of_node, "mhi_netdev");
+	mhi_netdev->alias = of_alias_get_id(of_node, "mhi-netdev");
 	if (mhi_netdev->alias < 0)
 		return -ENODEV;
 
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index fd6b7db..6a785ec 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2019, The Linux Foundation. All rights reserved.*/
 
+#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
@@ -18,6 +19,8 @@
 
 #define MHI_SAT_DRIVER_NAME "mhi_satellite"
 
+static bool mhi_sat_defer_init = true; /* deferred init enabled by default */
+
 /* logging macros */
 #define IPC_LOG_PAGES (10)
 #define IPC_LOG_LVL (MHI_MSG_LVL_INFO)
@@ -282,6 +285,9 @@
 
 	struct mhi_sat_subsys *subsys; /* pointer to subsystem array */
 	unsigned int num_subsys;
+
+	struct dentry *dentry; /* debugfs directory */
+	bool deferred_init_done; /* flag for deferred init protection */
 };
 
 static struct mhi_sat_driver mhi_sat_driver;
@@ -1041,6 +1047,44 @@
 	},
 };
 
+int mhi_sat_trigger_init(void *data, u64 val)
+{
+	struct mhi_sat_subsys *subsys;
+	int i, ret;
+
+	if (mhi_sat_driver.deferred_init_done)
+		return -EIO;
+
+	ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver);
+	if (ret)
+		goto error_sat_trigger_init;
+
+	ret = mhi_driver_register(&mhi_sat_dev_driver);
+	if (ret)
+		goto error_sat_trigger_register;
+
+	mhi_sat_driver.deferred_init_done = true;
+
+	return 0;
+
+error_sat_trigger_register:
+	unregister_rpmsg_driver(&mhi_sat_rpmsg_driver);
+
+error_sat_trigger_init:
+	subsys = mhi_sat_driver.subsys;
+	for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) {
+		ipc_log_context_destroy(subsys->ipc_log);
+		mutex_destroy(&subsys->cntrl_mutex);
+	}
+	kfree(mhi_sat_driver.subsys);
+	mhi_sat_driver.subsys = NULL;
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mhi_sat_debugfs_fops, NULL,
+			mhi_sat_trigger_init, "%llu\n");
+
 static int mhi_sat_init(void)
 {
 	struct mhi_sat_subsys *subsys;
@@ -1066,6 +1110,20 @@
 		subsys->ipc_log = ipc_log_context_create(IPC_LOG_PAGES, log, 0);
 	}
 
+	/* create debugfs entry if defer_init is enabled */
+	if (mhi_sat_defer_init) {
+		mhi_sat_driver.dentry = debugfs_create_dir("mhi_sat", NULL);
+		if (IS_ERR_OR_NULL(mhi_sat_driver.dentry)) {
+			ret = -ENODEV;
+			goto error_sat_init;
+		}
+
+		debugfs_create_file("debug", 0444, mhi_sat_driver.dentry, NULL,
+				    &mhi_sat_debugfs_fops);
+
+		return 0;
+	}
+
 	ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver);
 	if (ret)
 		goto error_sat_init;
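
When mhi_sat_defer_init is set, driver registration is postponed until userspace pokes the debugfs node created above. A sketch of the trigger (not part of this patch; the node is created with mode 0444, so this assumes root, which bypasses the mode bits, and that debugfs is mounted at /sys/kernel/debug):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/mhi_sat/debug", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) < 0)	/* any u64 invokes mhi_sat_trigger_init() */
		perror("write");
	close(fd);
	return 0;
}
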
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index ae3a753..72cd96a 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -889,6 +889,7 @@
 	platform_device_unregister(pd);
 	platform_driver_unregister(&gdrom_driver);
 	kfree(gd.toc);
+	kfree(gd.cd_info);
 }
 
 module_init(init_gdrom);
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 9a49e9e..f94f335 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -58,9 +58,11 @@
 
 #define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME   "audio_pdr_adsprpc"
 #define AUDIO_PDR_ADSP_SERVICE_NAME              "avs/audio"
+#define ADSP_AUDIOPD_NAME                        "msm/adsp/audio_pd"
 
-#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME   "sensors_pdr_adsprpc"
-#define SENSORS_PDR_ADSP_SERVICE_NAME              "tms/servreg"
+#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
+#define SENSORS_PDR_SLPI_SERVICE_NAME            "tms/servreg"
+#define SLPI_SENSORPD_NAME                       "msm/slpi/sensor_pd"
 
 #define RPC_TIMEOUT	(5 * HZ)
 #define BALIGN		128
@@ -88,6 +90,8 @@
 #define SDSP_DOMAIN_ID (2)
 #define CDSP_DOMAIN_ID (3)
 
+#define RH_CID ADSP_DOMAIN_ID
+
 #define PERF_KEYS \
 	"count:flush:map:copy:rpmsg:getargs:putargs:invalidate:invoke:tid:ptr"
 #define FASTRPC_STATIC_HANDLE_KERNEL (1)
@@ -244,6 +248,7 @@
 };
 
 struct fastrpc_static_pd {
+	char *servloc_name;
 	char *spdname;
 	struct notifier_block pdrnb;
 	struct notifier_block get_service_nb;
@@ -274,7 +279,7 @@
 	int vmid;
 	struct secure_vm rhvm;
 	int ramdumpenabled;
-	void *remoteheap_ramdump_dev;
+	void *rh_dump_dev;
 	/* Indicates, if channel is restricted to secure node only */
 	int secure;
 };
@@ -363,7 +368,7 @@
 	int cid;
 	int ssrcount;
 	int pd;
-	char *spdname;
+	char *servloc_name;
 	int file_close;
 	struct fastrpc_apps *apps;
 	struct hlist_head perf;
@@ -386,19 +391,13 @@
 		.subsys = "adsp",
 		.spd = {
 			{
-				.spdname =
+				.servloc_name =
 					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
+				.spdname = ADSP_AUDIOPD_NAME,
 				.pdrnb.notifier_call =
 						fastrpc_pdr_notifier_cb,
 				.cid = ADSP_DOMAIN_ID,
 			},
-			{
-				.spdname =
-				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
-				.pdrnb.notifier_call =
-						fastrpc_pdr_notifier_cb,
-				.cid = ADSP_DOMAIN_ID,
-			}
 		},
 	},
 	{
@@ -415,6 +414,11 @@
 		.subsys = "slpi",
 		.spd = {
 			{
+				.servloc_name =
+				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
+				.spdname = SLPI_SENSORPD_NAME,
+				.pdrnb.notifier_call =
+						fastrpc_pdr_notifier_cb,
 				.cid = SDSP_DOMAIN_ID,
 			}
 		},
@@ -1261,7 +1265,6 @@
 		complete(&ictx->work);
 	}
 	spin_unlock(&me->hlock);
-
 }
 
 
@@ -1294,21 +1297,20 @@
 			fastrpc_notify_users(fl);
 	}
 	spin_unlock(&me->hlock);
-
 }
 
-static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
+static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me,
+		char *servloc_name)
 {
 	struct fastrpc_file *fl;
 	struct hlist_node *n;
 
 	spin_lock(&me->hlock);
 	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
-		if (fl->spdname && !strcmp(spdname, fl->spdname))
+		if (fl->servloc_name && !strcmp(servloc_name, fl->servloc_name))
 			fastrpc_notify_users_staticpd_pdr(fl);
 	}
 	spin_unlock(&me->hlock);
-
 }
 
 static void context_list_ctor(struct fastrpc_ctx_lst *me)
@@ -1931,21 +1933,28 @@
 	return err;
 }
 
-static int fastrpc_get_spd_session(char *name, int *session)
+static int fastrpc_get_spd_session(char *name, int *session, int *cid)
 {
 	struct fastrpc_apps *me = &gfa;
-	int err = 0, i;
+	int err = 0, i, j, match = 0;
 
-	for (i = 0; i < NUM_SESSIONS; i++) {
-		if (!me->channel[0].spd[i].spdname)
-			continue;
-		if (!strcmp(name, me->channel[0].spd[i].spdname))
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		for (j = 0; j < NUM_SESSIONS; j++) {
+			if (!me->channel[i].spd[j].servloc_name)
+				continue;
+			if (!strcmp(name, me->channel[i].spd[j].servloc_name)) {
+				match = 1;
+				break;
+			}
+		}
+		if (match)
 			break;
 	}
-	VERIFY(err, i < NUM_SESSIONS);
+	VERIFY(err, i < NUM_CHANNELS && j < NUM_SESSIONS);
 	if (err)
 		goto bail;
-	*session = i;
+	*cid = i;
+	*session = j;
 bail:
 	return err;
 }
@@ -1985,7 +1994,8 @@
 		if (init->flags == FASTRPC_INIT_ATTACH)
 			fl->pd = 0;
 		else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
-			fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
+			fl->servloc_name =
+				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
 			fl->pd = 2;
 		}
 		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
@@ -2114,8 +2124,9 @@
 		inbuf.pageslen = 0;
 
 		if (!strcmp(proc_name, "audiopd")) {
-			fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
-			VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
+			fl->servloc_name =
+				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
+			err = fastrpc_mmap_remove_pdr(fl);
 			if (err)
 				goto bail;
 		}
@@ -2123,9 +2134,9 @@
 		if (!me->staticpd_flags && !(me->legacy_remote_heap)) {
 			inbuf.pageslen = 1;
 			mutex_lock(&fl->map_mutex);
-			VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
+			err = fastrpc_mmap_create(fl, -1, 0, init->mem,
 				 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
-				 &mem));
+				 &mem);
 			mutex_unlock(&fl->map_mutex);
 			if (err)
 				goto bail;
@@ -2406,6 +2417,9 @@
 	struct fastrpc_apps *me = &gfa;
 	struct ramdump_segment *ramdump_segments_rh = NULL;
 
+	VERIFY(err, fl->cid == RH_CID);
+	if (err)
+		goto bail;
 	do {
 		match = NULL;
 		spin_lock(&me->hlock);
@@ -2421,7 +2435,7 @@
 						match->size, match->flags);
 			if (err)
 				goto bail;
-			if (me->channel[0].ramdumpenabled) {
+			if (me->channel[RH_CID].ramdumpenabled) {
 				ramdump_segments_rh = kcalloc(1,
 				sizeof(struct ramdump_segment), GFP_KERNEL);
 				if (ramdump_segments_rh) {
@@ -2429,7 +2443,7 @@
 					match->phys;
 					ramdump_segments_rh->size = match->size;
 					ret = do_elf_ramdump(
-					 me->channel[0].remoteheap_ramdump_dev,
+					 me->channel[RH_CID].rh_dump_dev,
 					 ramdump_segments_rh, 1);
 					if (ret < 0)
 						pr_err("adsprpc: %s: unable to dump heap (err %d)\n",
@@ -2449,10 +2463,13 @@
 static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
 {
 	struct fastrpc_apps *me = &gfa;
-	int session = 0, err = 0;
+	int session = 0, err = 0, cid = -1;
 
-	err = fastrpc_get_spd_session(AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
-			&session);
+	err = fastrpc_get_spd_session(fl->servloc_name,
+			&session, &cid);
+	if (err)
+		goto bail;
+	VERIFY(err, cid == fl->cid);
 	if (err)
 		goto bail;
 	if (me->channel[fl->cid].spd[session].pdrcount !=
@@ -2464,12 +2481,10 @@
 		me->channel[fl->cid].spd[session].prevpdrcount =
 				me->channel[fl->cid].spd[session].pdrcount;
 	}
-	if (!me->channel[fl->cid].spd[session].ispdup) {
-		VERIFY(err, 0);
-		if (err) {
-			err = -ENOTCONN;
-			goto bail;
-		}
+	if (!me->channel[fl->cid].spd[session].ispdup &&
+		me->channel[fl->cid].spd[session].pdrhandle) {
+		err = -ENOTCONN;
+		goto bail;
 	}
 bail:
 	return err;
@@ -2969,7 +2984,7 @@
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
 			"%s %14s %d\n", "pd", ":", fl->pd);
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
-			"%s %9s %s\n", "spdname", ":", fl->spdname);
+			"%s %9s %s\n", "servloc_name", ":", fl->servloc_name);
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
 			"%s %6s %d\n", "file_close", ":", fl->file_close);
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
@@ -3101,11 +3116,11 @@
 
 	VERIFY(err, fl && fl->sctx);
 	if (err)
-		return err;
+		goto bail;
 	cid = fl->cid;
 	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
 	if (err)
-		return err;
+		goto bail;
 
 	mutex_lock(&me->channel[cid].rpmsg_mutex);
 	VERIFY(err, NULL != me->channel[cid].rpdev);
@@ -3120,12 +3135,9 @@
 	if (me->channel[cid].ssrcount !=
 				 me->channel[cid].prevssrcount) {
 		if (!me->channel[cid].issubsystemup) {
-			VERIFY(err, 0);
-			if (err) {
-				err = -ENOTCONN;
-				mutex_unlock(&me->channel[cid].smd_mutex);
-				goto bail;
-			}
+			err = -ENOTCONN;
+			mutex_unlock(&me->channel[cid].smd_mutex);
+			goto bail;
 		}
 	}
 	fl->ssrcount = me->channel[cid].ssrcount;
@@ -3173,7 +3185,7 @@
 		return err;
 
 	snprintf(strpid, PID_SIZE, "%d", current->pid);
-	buf_size = strlen(current->comm) + strlen(strpid) + 1;
+	buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
 	VERIFY(err, NULL != (fl->debug_buf = kzalloc(buf_size, GFP_KERNEL)));
 	if (err) {
 		kfree(fl);
@@ -3519,27 +3531,32 @@
 {
 	struct fastrpc_apps *me = &gfa;
 	struct fastrpc_channel_ctx *ctx;
-	struct notif_data *notifdata = data;
-	int cid;
+	struct notif_data *notifdata = (struct notif_data *)data;
+	int cid = -1;
 
 	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
 	cid = ctx - &me->channel[0];
 	if (code == SUBSYS_BEFORE_SHUTDOWN) {
+		pr_debug("adsprpc: %s: %s subsystem is restarting\n",
+			__func__, gcinfo[cid].subsys);
 		mutex_lock(&me->channel[cid].smd_mutex);
 		ctx->ssrcount++;
 		ctx->issubsystemup = 0;
 		mutex_unlock(&me->channel[cid].smd_mutex);
-		if (cid == 0)
+		if (cid == RH_CID)
 			me->staticpd_flags = 0;
 	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
-		if (me->channel[0].remoteheap_ramdump_dev &&
-				notifdata->enable_ramdump) {
-			me->channel[0].ramdumpenabled = 1;
+		if (cid == RH_CID) {
+			if (me->channel[RH_CID].rh_dump_dev &&
+					notifdata->enable_ramdump) {
+				me->channel[RH_CID].ramdumpenabled = 1;
+			}
 		}
 	} else if (code == SUBSYS_AFTER_POWERUP) {
+		pr_debug("adsprpc: %s: %s subsystem is up\n",
+			__func__, gcinfo[cid].subsys);
 		ctx->issubsystemup = 1;
 	}
-
 	return NOTIFY_DONE;
 }
 
@@ -3549,26 +3566,30 @@
 {
 	struct fastrpc_apps *me = &gfa;
 	struct fastrpc_static_pd *spd;
-	struct notif_data *notifdata = data;
+	struct notif_data *notifdata = (struct notif_data *)data;
 
 	spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
 	if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
+		pr_debug("adsprpc: %s: %s (%s) is down for PDR\n",
+			__func__, spd->spdname, spd->servloc_name);
 		mutex_lock(&me->channel[spd->cid].smd_mutex);
 		spd->pdrcount++;
 		spd->ispdup = 0;
 		mutex_unlock(&me->channel[spd->cid].smd_mutex);
-		pr_info("adsprpc: %s called for %s (dev %d)\n",
-				__func__, spd->spdname, MAJOR(me->dev_no));
-		if (!strcmp(spd->spdname,
+		if (!strcmp(spd->servloc_name,
 				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
 			me->staticpd_flags = 0;
-		fastrpc_notify_pdr_drivers(me, spd->spdname);
+		fastrpc_notify_pdr_drivers(me, spd->servloc_name);
 	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
-		if (me->channel[0].remoteheap_ramdump_dev &&
-				notifdata->enable_ramdump) {
-			me->channel[0].ramdumpenabled = 1;
+		if (spd->cid == RH_CID) {
+			if (me->channel[RH_CID].rh_dump_dev &&
+					notifdata->enable_ramdump) {
+				me->channel[RH_CID].ramdumpenabled = 1;
+			}
 		}
 	} else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
+		pr_debug("adsprpc: %s: %s (%s) is up\n",
+			__func__, spd->spdname, spd->servloc_name);
 		spd->ispdup = 1;
 	}
 
@@ -3584,18 +3605,20 @@
 
 	spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
 	if (opcode == LOCATOR_DOWN) {
-		pr_err("adsprpc: %s: PD restart notifier locator down\n",
-				__func__);
+		pr_warn("adsprpc: %s: PDR notifier locator is down for %s\n",
+				__func__, spd->servloc_name);
 		return NOTIFY_DONE;
 	}
 	for (i = 0; i < pdr->total_domains; i++) {
-		if ((!strcmp(spd->spdname, "audio_pdr_adsprpc"))
-					&& (!strcmp(pdr->domain_list[i].name,
-						"msm/adsp/audio_pd"))) {
+		if ((!strcmp(spd->servloc_name,
+				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
+				&& (!strcmp(pdr->domain_list[i].name,
+				ADSP_AUDIOPD_NAME))) {
 			goto pdr_register;
-		} else if ((!strcmp(spd->spdname, "sensors_pdr_adsprpc"))
-					&& (!strcmp(pdr->domain_list[i].name,
-						"msm/adsp/sensor_pd"))) {
+		} else if ((!strcmp(spd->servloc_name,
+				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME))
+				&& (!strcmp(pdr->domain_list[i].name,
+				SLPI_SENSORPD_NAME))) {
 			goto pdr_register;
 		}
 	}
@@ -3608,19 +3631,24 @@
 			pdr->domain_list[i].name,
 			pdr->domain_list[i].instance_id,
 			&spd->pdrnb, &curr_state);
+		if (IS_ERR_OR_NULL(spd->pdrhandle))
+			pr_warn("adsprpc: %s: PDR notifier register failed for %s (%s) with err %d\n",
+				__func__, pdr->domain_list[i].name,
+				spd->servloc_name, PTR_ERR(spd->pdrhandle));
+		else
+			pr_info("adsprpc: %s: PDR notifier registered for %s (%s)\n",
+			__func__, pdr->domain_list[i].name, spd->servloc_name);
 	} else {
-		pr_err("adsprpc: %s is already registered\n", spd->spdname);
+		pr_warn("adsprpc: %s: %s (%s) notifier is already registered\n",
+			__func__, pdr->domain_list[i].name, spd->servloc_name);
 	}
 
-	if (IS_ERR(spd->pdrhandle))
-		pr_err("adsprpc: Unable to register notifier\n");
-
 	if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
-		pr_info("adsprpc: %s: %s is up\n", __func__, spd->spdname);
+		pr_debug("adsprpc: %s: %s (%s) PDR service is up\n",
+			__func__, spd->servloc_name, pdr->domain_list[i].name);
 		spd->ispdup = 1;
 	} else if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
-		pr_info("adsprpc: %s: %s is uninitialzed\n",
-			__func__, spd->spdname);
+		spd->ispdup = 0;
 	}
 	return NOTIFY_DONE;
 }
@@ -3791,6 +3819,7 @@
 	uint32_t val;
 	int ret = 0;
 	uint32_t secure_domains;
+	int session = -1, cid = -1;
 
 	if (of_device_is_compatible(dev->of_node,
 					"qcom,msm-fastrpc-compute")) {
@@ -3861,45 +3890,50 @@
 					"qcom,fastrpc-legacy-remote-heap");
 	if (of_property_read_bool(dev->of_node,
 					"qcom,fastrpc-adsp-audio-pdr")) {
-		int session;
-
-		VERIFY(err, !fastrpc_get_spd_session(
-			AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
+		err = fastrpc_get_spd_session(
+			AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session, &cid);
 		if (err)
 			goto spdbail;
-		me->channel[0].spd[session].get_service_nb.notifier_call =
+		me->channel[cid].spd[session].get_service_nb.notifier_call =
 					fastrpc_get_service_location_notify;
 		ret = get_service_location(
 				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
 				AUDIO_PDR_ADSP_SERVICE_NAME,
-				&me->channel[0].spd[session].get_service_nb);
+				&me->channel[cid].spd[session].get_service_nb);
 		if (ret)
-			pr_err("adsprpc: %s: getting ADSP service location failed with %d\n",
-					__func__, ret);
+			pr_warn("adsprpc: %s: get service location failed with %d for %s (%s)\n",
+				__func__, ret, AUDIO_PDR_ADSP_SERVICE_NAME,
+				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME);
+		else
+			pr_debug("adsprpc: %s: service location enabled for %s (%s)\n",
+				__func__, AUDIO_PDR_ADSP_SERVICE_NAME,
+				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME);
 	}
 	if (of_property_read_bool(dev->of_node,
 					"qcom,fastrpc-adsp-sensors-pdr")) {
-		int session;
-
-		VERIFY(err, !fastrpc_get_spd_session(
-			SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
+		err = fastrpc_get_spd_session(
+		SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session, &cid);
 		if (err)
 			goto spdbail;
-		me->channel[0].spd[session].get_service_nb.notifier_call =
+		me->channel[cid].spd[session].get_service_nb.notifier_call =
 					fastrpc_get_service_location_notify;
 		ret = get_service_location(
 				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
-				SENSORS_PDR_ADSP_SERVICE_NAME,
-				&me->channel[0].spd[session].get_service_nb);
+				SENSORS_PDR_SLPI_SERVICE_NAME,
+				&me->channel[cid].spd[session].get_service_nb);
 		if (ret)
-			pr_err("adsprpc: %s: getting sensors service location failed with %d\n",
-					__func__, ret);
+			pr_warn("adsprpc: %s: get service location failed with %d for %s (%s)\n",
+				__func__, ret, SENSORS_PDR_SLPI_SERVICE_NAME,
+				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME);
+		else
+			pr_debug("adsprpc: %s: service location enabled for %s (%s)\n",
+				__func__, SENSORS_PDR_SLPI_SERVICE_NAME,
+				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME);
 	}
 spdbail:
-	err = 0;
-	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
+	err = of_platform_populate(pdev->dev.of_node,
 					  fastrpc_match_table,
-					  NULL, &pdev->dev));
+					  NULL, &pdev->dev);
 	if (err)
 		goto bail;
 bail:
@@ -4012,11 +4046,18 @@
 		me->channel[i].prevssrcount = 0;
 		me->channel[i].issubsystemup = 1;
 		me->channel[i].ramdumpenabled = 0;
-		me->channel[i].remoteheap_ramdump_dev = NULL;
+		me->channel[i].rh_dump_dev = NULL;
 		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
 		me->channel[i].handle = subsys_notif_register_notifier(
 							gcinfo[i].subsys,
 							&me->channel[i].nb);
+		if (IS_ERR_OR_NULL(me->channel[i].handle))
+			pr_warn("adsprpc: %s: SSR notifier register failed for %s with err %d\n",
+				__func__, gcinfo[i].subsys,
+				PTR_ERR(me->channel[i].handle));
+		else
+			pr_info("adsprpc: %s: SSR notifier registered for %s\n",
+				__func__, gcinfo[i].subsys);
 	}
 
 	err = register_rpmsg_driver(&fastrpc_rpmsg_client);
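
The adsprpc hunks above repeatedly replace the VERIFY() wrapper with a direct error assignment wherever the callee already returns a usable error code. A minimal userspace sketch of the difference, using a simplified stand-in for the driver's VERIFY() macro (the real macro is not shown in this patch):

#include <stdio.h>

/* Simplified stand-in for the driver's VERIFY() macro: it only records
 * that a condition failed, discarding the callee's specific error code.
 */
#define VERIFY(err, cond) do { if (!(cond)) (err) = -1; } while (0)

static int mmap_create(void)
{
	return -12;	/* pretend the callee failed with -ENOMEM */
}

int main(void)
{
	int err = 0;

	VERIFY(err, !mmap_create());	/* old style: err collapses to -1 */
	printf("VERIFY style: err = %d\n", err);

	err = mmap_create();		/* new style: err keeps -ENOMEM */
	printf("direct style: err = %d\n", err);
	return 0;
}
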
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index c0a5b1f..4ccc39e 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -32,6 +32,7 @@
 #include <linux/wait.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/nospec.h>
 
 #include <asm/io.h>
 #include <linux/uaccess.h>
@@ -386,7 +387,11 @@
 	TicCard = st_loc.tic_des_from_pc;	/* tic number to send            */
 	IndexCard = NumCard - 1;
 
-	if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
+	if (IndexCard >= MAX_BOARD)
+		return -EINVAL;
+	IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+	if (!apbs[IndexCard].RamIO)
 		return -EINVAL;
 
 #ifdef DEBUG
@@ -697,6 +702,7 @@
 	unsigned char IndexCard;
 	void __iomem *pmem;
 	int ret = 0;
+	static int warncount = 10;
 	volatile unsigned char byte_reset_it;
 	struct st_ram_io *adgl;
 	void __user *argp = (void __user *)arg;
@@ -711,16 +717,12 @@
 	mutex_lock(&ac_mutex);	
 	IndexCard = adgl->num_card-1;
 	 
-	if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
-		static int warncount = 10;
-		if (warncount) {
-			printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
-			warncount--;
-		}
-		kfree(adgl);
-		mutex_unlock(&ac_mutex);
-		return -EINVAL;
-	}
+	if (cmd != 6 && IndexCard >= MAX_BOARD)
+		goto err;
+	IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+	if (cmd != 6 && !apbs[IndexCard].RamIO)
+		goto err;
 
 	switch (cmd) {
 		
@@ -838,5 +840,16 @@
 	kfree(adgl);
 	mutex_unlock(&ac_mutex);
 	return 0;
+
+err:
+	if (warncount) {
+		pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
+			(int)IndexCard + 1);
+		warncount--;
+	}
+	kfree(adgl);
+	mutex_unlock(&ac_mutex);
+	return -EINVAL;
+
 }
 
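The applicom change is the standard Spectre-v1 hardening pattern: bounds-check the user-supplied index, then clamp it with array_index_nospec() before using it, so a mispredicted branch cannot speculatively index out of range. A userspace analog of the pattern (index_nospec() here is a plain stand-in; the kernel helper clamps without a branch):

#include <stdio.h>

#define MAX_BOARD 8

static void *ram_io[MAX_BOARD];

/* Stand-in for the kernel's array_index_nospec(): after the architectural
 * bounds check, force the index into range so it stays safe even under
 * speculation. The real helper does this branchlessly with a mask.
 */
static unsigned int index_nospec(unsigned int idx, unsigned int size)
{
	return idx < size ? idx : 0;
}

static int board_lookup(unsigned int num_card)
{
	unsigned int idx = num_card - 1;

	if (idx >= MAX_BOARD)			/* architectural check */
		return -1;
	idx = index_nospec(idx, MAX_BOARD);	/* speculation clamp */
	return ram_io[idx] ? 0 : -1;
}

int main(void)
{
	printf("board 1  -> %d\n", board_lookup(1));
	printf("board 99 -> %d\n", board_lookup(99));
	return 0;
}
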
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 5dd86a7..db67e08 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -73,6 +73,13 @@
 		.send_event_mask = diag_send_dci_event_mask_remote,
 		.peripheral_status = 0,
 		.mempool = POOL_TYPE_MDM_DCI_WRITE,
+	},
+	{
+		.ctx = DIAGFWD_MDM_DCI_2,
+		.send_log_mask = diag_send_dci_log_mask_remote,
+		.send_event_mask = diag_send_dci_event_mask_remote,
+		.peripheral_status = 0,
+		.mempool = POOL_TYPE_MDM2_DCI_WRITE,
 	}
 #endif
 };
@@ -593,7 +600,7 @@
 		 * (1 byte) + version (1 byte) + length (2 bytes)
 		 */
 		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
-						 DCI_REMOTE_DATA, DCI_MDM_PROC);
+						 DCI_REMOTE_DATA, token);
 		if (err)
 			break;
 		read_bytes += header_len + dci_pkt_len;
@@ -2963,6 +2970,7 @@
 		new_entry->num_buffers = NUM_DCI_PERIPHERALS;
 		break;
 	case DCI_MDM_PROC:
+	case DCI_MDM_2_PROC:
 		new_entry->num_buffers = 1;
 		break;
 	}
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 3acde54..5817066 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
  */
 #ifndef DIAG_DCI_H
 #define DIAG_DCI_H
@@ -58,7 +58,8 @@
 #define DCI_LOCAL_PROC		0
 #define DCI_REMOTE_BASE		1
 #define DCI_MDM_PROC		DCI_REMOTE_BASE
-#define DCI_REMOTE_LAST		(DCI_REMOTE_BASE + 1)
+#define DCI_MDM_2_PROC		(DCI_REMOTE_BASE + 1)
+#define DCI_REMOTE_LAST		(DCI_REMOTE_BASE + 2)
 
 #ifndef CONFIG_DIAGFWD_BRIDGE_CODE
 #define NUM_DCI_PROC		1
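
The index arithmetic in diag_dci.h is easier to see worked out: the second MDM processor slots in directly after the first, and DCI_REMOTE_LAST stays one past the final remote index, so array sizing keeps working. A quick self-contained check using the same defines:

#include <stdio.h>

#define DCI_LOCAL_PROC	0
#define DCI_REMOTE_BASE	1
#define DCI_MDM_PROC	DCI_REMOTE_BASE
#define DCI_MDM_2_PROC	(DCI_REMOTE_BASE + 1)
#define DCI_REMOTE_LAST	(DCI_REMOTE_BASE + 2)

int main(void)
{
	/* Remote procs occupy [DCI_REMOTE_BASE, DCI_REMOTE_LAST). */
	printf("remote procs: %d..%d (%d total)\n",
	       DCI_REMOTE_BASE, DCI_REMOTE_LAST - 1,
	       DCI_REMOTE_LAST - DCI_REMOTE_BASE);
	return 0;
}
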
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 8dcab0d..8f5011f 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifdef CONFIG_DEBUG_FS
@@ -789,7 +789,8 @@
 {
 	char *buf = NULL;
 	int ret = 0;
-	int i = 0;
+	int ch_idx = 0;
+	int dev_idx = 0;
 	unsigned int buf_size;
 	unsigned int bytes_remaining = 0;
 	unsigned int bytes_written = 0;
@@ -810,23 +811,27 @@
 
 	buf_size = ksize(buf);
 	bytes_remaining = buf_size;
-	for (i = diag_dbgfs_mhiinfo_index; i < NUM_MHI_DEV; i++) {
-		mhi_info = &diag_mhi[i];
-		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
-			"id: %d\n"
-			"name: %s\n"
-			"enabled %d\n"
-			"bridge index: %s\n"
-			"mempool: %s\n"
-			"read ch opened: %d\n"
-			"write ch opened: %d\n"
-			"read work pending: %d\n"
-			"read done work pending: %d\n"
-			"open work pending: %d\n"
-			"close work pending: %d\n\n",
-			mhi_info->id,
-			mhi_info->name,
-			mhi_info->enabled,
+	for (dev_idx = diag_dbgfs_mhiinfo_index; dev_idx < NUM_MHI_DEV;
+								dev_idx++) {
+		for (ch_idx = 0; ch_idx < NUM_MHI_CHAN;
+								ch_idx++) {
+			mhi_info = &diag_mhi[dev_idx][ch_idx];
+			bytes_written = scnprintf(buf+bytes_in_buffer,
+						bytes_remaining,
+						"id: %d\n"
+						"name: %s\n"
+						"enabled %d\n"
+						"bridge index: %s\n"
+						"mempool: %s\n"
+						"read ch opened: %d\n"
+						"write ch opened: %d\n"
+						"read work pending: %d\n"
+						"read done work pending: %d\n"
+						"open work pending: %d\n"
+						"close work pending: %d\n\n",
+						mhi_info->id,
+						mhi_info->name,
+						mhi_info->enabled,
 			DIAG_BRIDGE_GET_NAME(mhi_info->dev_id),
 			DIAG_MEMPOOL_GET_NAME(mhi_info->mempool),
 			atomic_read(&mhi_info->read_ch.opened),
@@ -835,15 +840,16 @@
 			work_pending(&mhi_info->read_done_work),
 			work_pending(&mhi_info->open_work),
 			work_pending(&mhi_info->close_work));
-		bytes_in_buffer += bytes_written;
+			bytes_in_buffer += bytes_written;
 
-		/* Check if there is room to add another table entry */
-		bytes_remaining = buf_size - bytes_in_buffer;
+			/* Check if there is room to add another table entry */
+			bytes_remaining = buf_size - bytes_in_buffer;
 
-		if (bytes_remaining < bytes_written)
-			break;
+			if (bytes_remaining < bytes_written)
+				break;
+		}
 	}
-	diag_dbgfs_mhiinfo_index = i+1;
+	diag_dbgfs_mhiinfo_index = dev_idx + 1;
 	*ppos = 0;
 	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
 
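The reworked debugfs read above iterates the new two-dimensional table while keeping the existing resume-index scheme: remember how far the previous read() got and continue from there on the next call. A userspace illustration of the idea (not the driver code), flattening (dev, chan) into a single counter to keep the bookkeeping simple:

#include <stdio.h>

#define NUM_DEV		2
#define NUM_CHAN	2

static int resume_index;

static int emit_entries(char *buf, int buf_len)
{
	int written = 0, n;

	for (; resume_index < NUM_DEV * NUM_CHAN; resume_index++) {
		n = snprintf(buf + written, buf_len - written,
			     "dev %d chan %d\n",
			     resume_index / NUM_CHAN,
			     resume_index % NUM_CHAN);
		if (n >= buf_len - written)
			break;	/* out of room; resume here next time */
		written += n;
	}
	return written;
}

int main(void)
{
	char buf[64];
	int len = emit_entries(buf, sizeof(buf));

	fwrite(buf, 1, len, stdout);
	return 0;
}
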
diff --git a/drivers/char/diag/diag_ipc_logging.h b/drivers/char/diag/diag_ipc_logging.h
index fe754a9..dd4dc3c 100644
--- a/drivers/char/diag/diag_ipc_logging.h
+++ b/drivers/char/diag/diag_ipc_logging.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2015, 2017-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef DIAGIPCLOG_H
@@ -16,6 +16,7 @@
 #define DIAG_DEBUG_MASKS	0x0010
 #define DIAG_DEBUG_POWER	0x0020
 #define DIAG_DEBUG_BRIDGE	0x0040
+#define DIAG_DEBUG_CMD_INFO	0x0080
 
 #ifdef CONFIG_IPC_LOGGING
 extern uint16_t diag_debug_mask;
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index 2ecd05b..ec537f1 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -128,9 +128,10 @@
 int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
 {
 	struct diag_logger_t *logger = NULL;
-	int peripheral;
+	int peripheral = -EINVAL, type = -EINVAL, log_sink;
+	unsigned char *offset = NULL;
 
-	if (proc < 0 || proc >= NUM_MUX_PROC)
+	if (proc < 0 || proc >= NUM_MUX_PROC || !buf)
 		return -EINVAL;
 	if (!diag_mux)
 		return -EIO;
@@ -138,16 +139,38 @@
 	peripheral = diag_md_get_peripheral(ctx);
 	if (peripheral < 0) {
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
-			"diag:%s:%d invalid peripheral = %d\n",
-			__func__, __LINE__, peripheral);
+			"diag: invalid peripheral = %d\n", peripheral);
 		return -EINVAL;
 	}
 
-	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask) {
 		logger = diag_mux->md_ptr;
-	else
+		log_sink = DIAG_MEMORY_DEVICE_MODE;
+	} else {
 		logger = diag_mux->usb_ptr;
+		log_sink = DIAG_USB_MODE;
+	}
 
+	if (!proc) {
+		type = GET_BUF_TYPE(ctx);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Packet from PD: %d, type: %d, len: %d to be written to %s\n",
+			peripheral, type, len,
+			(log_sink ? "MD_device" : "USB"));
+
+		if (type == TYPE_CMD) {
+			if (driver->p_hdlc_disabled[peripheral])
+				offset = buf + 4;
+			else
+				offset = buf;
+
+			DIAG_LOG(DIAG_DEBUG_CMD_INFO,
+				"diag: cmd rsp (%02x %02x %02x %02x) from PD: %d to be written to %s\n",
+				*(offset), *(offset+1), *(offset+2),
+				*(offset+3), peripheral,
+				(log_sink ? "MD_device" : "USB"));
+		}
+	}
 	if (logger && logger->log_ops && logger->log_ops->write)
 		return logger->log_ops->write(proc, buf, len, ctx);
 	return 0;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 768c30f..750413b 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -134,6 +134,7 @@
 #define DIAG_GET_TIME_API	0x21B
 #define DIAG_SET_TIME_API	0x21C
 #define DIAG_GET_DIAG_ID	0x222
+#define DIAG_FEATURE_QUERY	0x225
 #define DIAG_SWITCH_COMMAND	0x081B
 #define DIAG_BUFFERING_MODE	0x080C
 
@@ -175,6 +176,10 @@
 
 #define FEATURE_MASK_LEN	4
 
+#define F_DIAG_EVENT_REPORT	0
+#define F_DIAG_HW_ACCELERATION	1
+#define F_DIAG_MULTI_SIM_MASK	2
+
 #define DIAG_MD_NONE			0
 #define DIAG_MD_PERIPHERAL		1
 
@@ -457,6 +462,12 @@
 	uint8_t result;
 };
 
+struct diag_cmd_feature_query_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t feature_len;
+};
+
 struct diag_pkt_frame_t {
 	uint8_t start;
 	uint8_t version;
@@ -628,6 +639,7 @@
 	struct diagfwd_info *diagfwd_cmd[NUM_PERIPHERALS];
 	struct diagfwd_info *diagfwd_dci_cmd[NUM_PERIPHERALS];
 	struct diag_feature_t feature[NUM_PERIPHERALS];
+	uint32_t apps_feature;
 	struct diag_buffering_mode_t buffering_mode[NUM_MD_SESSIONS];
 	uint8_t buffering_flag[NUM_MD_SESSIONS];
 	struct mutex mode_lock;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 1453d9d..ae3673a 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -42,6 +42,8 @@
 #define STM_RSP_NUM_BYTES		9
 #define RETRY_MAX_COUNT		1000
 
+#define SET_APPS_FEATURE(driver, n) (((driver)->apps_feature) |= (1 << (n)))
+
 struct diag_md_hdlc_reset_work {
 	int pid;
 	struct work_struct work;
@@ -983,6 +985,43 @@
 	return write_len;
 }
 
+int diag_cmd_feature_query(unsigned char *src_buf, int src_len,
+				      unsigned char *dest_buf, int dest_len)
+{
+	int write_len = 0;
+	struct diag_pkt_header_t *header = NULL;
+	struct diag_cmd_feature_query_rsp_t rsp;
+
+	if (!src_buf || !dest_buf || src_len <= sizeof(struct diag_pkt_header_t)
+			|| dest_len <= 0 || dest_len > DIAG_MAX_RSP_SIZE) {
+		pr_err("diag: Feature query, invalid input src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+			src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	header = (struct diag_pkt_header_t *)src_buf;
+
+	rsp.header.cmd_code = header->cmd_code;
+	rsp.header.subsys_id = header->subsys_id;
+	rsp.header.subsys_cmd_code = header->subsys_cmd_code;
+	rsp.version = 1;
+	rsp.feature_len = sizeof(driver->apps_feature);
+	if (dest_len < (sizeof(rsp) + sizeof(driver->apps_feature)))
+		return -EINVAL;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	memcpy(dest_buf + sizeof(rsp), &(driver->apps_feature),
+			sizeof(driver->apps_feature));
+	write_len = sizeof(rsp) + sizeof(driver->apps_feature);
+	return write_len;
+}
+
+static void diag_init_apps_feature(void)
+{
+	driver->apps_feature = 0;
+
+	SET_APPS_FEATURE(driver, F_DIAG_EVENT_REPORT);
+}
+
 void diag_send_error_rsp(unsigned char *buf, int len,
 			int pid)
 {
@@ -1029,8 +1068,8 @@
 	entry.cmd_code_lo = (uint16_t)(*(uint16_t *)temp);
 	temp += sizeof(uint16_t);
 
-	pr_debug("diag: In %s, received cmd %02x %02x %02x\n",
-		 __func__, entry.cmd_code, entry.subsys_id, entry.cmd_code_hi);
+	DIAG_LOG(DIAG_DEBUG_CMD_INFO, "diag: received cmd %02x %02x %02x\n",
+		 entry.cmd_code, entry.subsys_id, entry.cmd_code_hi);
 
 	if (*buf == DIAG_CMD_LOG_ON_DMND && driver->log_on_demand_support &&
 	    driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
@@ -1123,6 +1162,17 @@
 			diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
 		return 0;
 	}
+	/* Check for Diag Feature Query command */
+	else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+		(*(buf+1) == DIAG_SS_DIAG) &&
+		(*(uint16_t *)(buf+2) == DIAG_FEATURE_QUERY)) {
+		write_len = diag_cmd_feature_query(buf, len,
+							driver->apps_rsp_buf,
+							DIAG_MAX_RSP_SIZE);
+		if (write_len > 0)
+			diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+		return 0;
+	}
 	/* Check for download command */
 	else if ((chk_apps_master()) && (*buf == 0x3A)) {
 		/* send response back */
@@ -1910,6 +1960,8 @@
 		driver->feature[i].diag_id_support = 0;
 	}
 
+	diag_init_apps_feature();
+
 	for (i = 0; i < NUM_MD_SESSIONS; i++) {
 		driver->buffering_mode[i].peripheral = i;
 		driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
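
The new feature-query path boils down to a bitmask: each F_DIAG_* define names a bit, SET_APPS_FEATURE() sets it, and the response packet carries the mask after a version byte and a length byte. A compact sketch of the mask handling, reworked here to operate on a bare variable rather than the driver struct:

#include <stdio.h>
#include <stdint.h>

#define F_DIAG_EVENT_REPORT	0
#define F_DIAG_HW_ACCELERATION	1
#define F_DIAG_MULTI_SIM_MASK	2

/* Same shape as the driver macro, minus the driver pointer. */
#define SET_APPS_FEATURE(mask, n)	((mask) |= (1U << (n)))

int main(void)
{
	uint32_t apps_feature = 0;

	SET_APPS_FEATURE(apps_feature, F_DIAG_EVENT_REPORT);

	/* The query response reports sizeof(mask) as feature_len. */
	printf("apps_feature = 0x%08x, feature_len = %zu\n",
	       (unsigned int)apps_feature, sizeof(apps_feature));
	return 0;
}
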
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
index 5b0722d..e4c9032 100644
--- a/drivers/char/diag/diagfwd_bridge.c
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -45,9 +45,9 @@
 		.dci_wq = NULL,
 	},
 	{
-		.id = DIAGFWD_SMUX,
+		.id = DIAGFWD_MDM2,
 		.type = DIAG_DATA_TYPE,
-		.name = "SMUX",
+		.name = "MDM_2",
 		.inited = 0,
 		.ctxt = 0,
 		.dci_read_ptr = NULL,
@@ -68,6 +68,18 @@
 		.dci_read_len = 0,
 		.dci_wq = NULL,
 	},
+	{
+		.id = DIAGFWD_MDM_DCI_2,
+		.type = DIAG_DCI_TYPE,
+		.name = "MDM_DCI_2",
+		.inited = 0,
+		.ctxt = 0,
+		.dci_read_ptr = NULL,
+		.dev_ops = NULL,
+		.dci_read_buf = NULL,
+		.dci_read_len = 0,
+		.dci_wq = NULL,
+	},
 };
 
 static int diagfwd_bridge_mux_connect(int id, int mode)
@@ -75,7 +87,7 @@
 	if (id < 0 || id >= NUM_REMOTE_DEV)
 		return -EINVAL;
 	if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->open)
-		bridge_info[id].dev_ops->open(bridge_info[id].ctxt);
+		bridge_info[id].dev_ops->open(id, bridge_info[id].ctxt);
 	return 0;
 }
 
@@ -101,7 +113,7 @@
 		return -EINVAL;
 	ch = &bridge_info[buf_ctx];
 	if (ch->dev_ops && ch->dev_ops->fwd_complete)
-		ch->dev_ops->fwd_complete(ch->ctxt, buf, len, 0);
+		ch->dev_ops->fwd_complete(ch->id, ch->ctxt, buf, len, 0);
 	return 0;
 }
 
@@ -122,7 +134,7 @@
 	diag_process_remote_dci_read_data(ch->id, ch->dci_read_buf,
 					  ch->dci_read_len);
 	if (ch->dev_ops && ch->dev_ops->fwd_complete) {
-		ch->dev_ops->fwd_complete(ch->ctxt, ch->dci_read_ptr,
+		ch->dev_ops->fwd_complete(ch->id, ch->ctxt, ch->dci_read_ptr,
 					  ch->dci_read_len, 0);
 	}
 }
@@ -134,7 +146,8 @@
 	char wq_name[DIAG_BRIDGE_NAME_SZ + 10];
 
 	if (!ops) {
-		pr_err("diag: Invalid pointers ops: %pK ctxt: %d\n", ops, ctxt);
+		pr_err("diag: Invalid pointers ops: %pK ctxt: %d id: %d\n",
+			ops, ctxt, id);
 		return -EINVAL;
 	}
 
@@ -201,7 +214,7 @@
 	if (ch->type == DIAG_DATA_TYPE) {
 		err = diag_mux_write(BRIDGE_TO_MUX(id), buf, len, id);
 		if (ch->dev_ops && ch->dev_ops->queue_read)
-			ch->dev_ops->queue_read(ch->ctxt);
+			ch->dev_ops->queue_read(id, ch->ctxt);
 		return err;
 	}
 	/*
@@ -277,7 +290,8 @@
 	if (id < 0 || id >= NUM_REMOTE_DEV)
 		return -EINVAL;
 	if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->close)
-		return bridge_info[id].dev_ops->close(bridge_info[id].ctxt);
+		return bridge_info[id].dev_ops->close(bridge_info[id].id,
+						bridge_info[id].ctxt);
 	return 0;
 }
 
@@ -286,8 +300,9 @@
 	if (id < 0 || id >= NUM_REMOTE_DEV)
 		return -EINVAL;
 	if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->write) {
-		return bridge_info[id].dev_ops->write(bridge_info[id].ctxt,
-						      buf, len, 0);
+		return bridge_info[id].dev_ops->write(bridge_info[id].id,
+							bridge_info[id].ctxt,
+							buf, len, 0);
 	}
 	return 0;
 }
@@ -301,7 +316,7 @@
 		if (bridge_info[i].inited &&
 		    bridge_info[i].type == DIAG_DATA_TYPE &&
 		    (bridge_info[i].dev_ops->remote_proc_check &&
-		    bridge_info[i].dev_ops->remote_proc_check())) {
+		    bridge_info[i].dev_ops->remote_proc_check(i))) {
 			remote_dev |= 1 << i;
 		}
 	}
diff --git a/drivers/char/diag/diagfwd_bridge.h b/drivers/char/diag/diagfwd_bridge.h
index 82645fe..8cb0374 100644
--- a/drivers/char/diag/diagfwd_bridge.h
+++ b/drivers/char/diag/diagfwd_bridge.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2012-2014, 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef DIAGFWD_BRIDGE_H
@@ -10,22 +10,25 @@
  * bottom half of this list.
  */
 #define DIAGFWD_MDM		0
-#define DIAGFWD_SMUX		1
+#define DIAGFWD_MDM2		1
 #define NUM_REMOTE_DATA_DEV	2
 #define DIAGFWD_MDM_DCI		NUM_REMOTE_DATA_DEV
-#define NUM_REMOTE_DCI_DEV	(DIAGFWD_MDM_DCI - NUM_REMOTE_DATA_DEV + 1)
+#define DIAGFWD_MDM_DCI_2	(NUM_REMOTE_DATA_DEV + 1)
+#define NUM_REMOTE_DCI_DEV	(DIAGFWD_MDM_DCI_2 - NUM_REMOTE_DATA_DEV + 1)
 #define NUM_REMOTE_DEV		(NUM_REMOTE_DATA_DEV + NUM_REMOTE_DCI_DEV)
 
 #define DIAG_BRIDGE_NAME_SZ	24
 #define DIAG_BRIDGE_GET_NAME(x)	(bridge_info[x].name)
 
 struct diag_remote_dev_ops {
-	int (*open)(int id);
-	int (*close)(int id);
-	int (*queue_read)(int id);
-	int (*write)(int id, unsigned char *buf, int len, int ctxt);
-	int (*fwd_complete)(int id, unsigned char *buf, int len, int ctxt);
-	int (*remote_proc_check)(void);
+	int (*open)(int id, int ch);
+	int (*close)(int id, int ch);
+	int (*queue_read)(int id, int ch);
+	int (*write)(int id, int ch, unsigned char *buf,
+			int len, int ctxt);
+	int (*fwd_complete)(int id, int ch, unsigned char *buf,
+				int len, int ctxt);
+	int (*remote_proc_check)(int id);
 };
 
 struct diagfwd_bridge_info {
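
Every callback in diag_remote_dev_ops now receives the remote-device id alongside the channel/context, so one ops table can serve both MDM links. A minimal sketch of the dispatch shape, with stub names invented for illustration:

#include <stdio.h>

struct diag_remote_dev_ops {
	int (*open)(int id, int ch);
	int (*close)(int id, int ch);
};

/* Hypothetical stubs standing in for the MHI implementations. */
static int stub_open(int id, int ch)
{
	printf("open  dev %d ch %d\n", id, ch);
	return 0;
}

static int stub_close(int id, int ch)
{
	printf("close dev %d ch %d\n", id, ch);
	return 0;
}

static const struct diag_remote_dev_ops ops = {
	.open = stub_open,
	.close = stub_close,
};

int main(void)
{
	ops.open(1, 0);		/* second MDM, data channel */
	return ops.close(1, 0);
}
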
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 9fef6ac..b9d5051 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -1131,6 +1131,9 @@
 		if (peripheral > NUM_PERIPHERALS)
 			peripheral = diag_search_peripheral_by_pd(i);
 
+		if (peripheral < 0 || peripheral > NUM_PERIPHERALS)
+			continue;
+
 		if (!driver->feature[peripheral].peripheral_buffering)
 			continue;
 		switch (driver->buffering_mode[i].mode) {
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index be3212c..fdeca4f 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -36,43 +36,93 @@
 
 #define DIAG_MHI_STRING_SZ		11
 
-struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
+struct diag_mhi_info diag_mhi[NUM_MHI_DEV][NUM_MHI_CHAN] = {
 	{
-		.id = MHI_1,
-		.dev_id = DIAGFWD_MDM,
-		.name = "MDM",
-		.enabled = 0,
-		.num_read = 0,
-		.mempool = POOL_TYPE_MDM,
-		.mempool_init = 0,
-		.mhi_wq = NULL,
-		.mhi_dev = NULL,
-		.read_ch = {
-			.type = TYPE_MHI_READ_CH,
+		{
+			.id = MHI_1,
+			.dev_id = DIAGFWD_MDM,
+			.name = "MDM",
+			.enabled = 0,
+			.num_read = 0,
+			.mempool = POOL_TYPE_MDM,
+			.mempool_init = 0,
+			.mhi_wq = NULL,
+			.mhi_dev = NULL,
+			.read_ch = {
+				.type = TYPE_MHI_READ_CH,
+			},
+			.write_ch = {
+				.type = TYPE_MHI_WRITE_CH,
+			}
 		},
-		.write_ch = {
-			.type = TYPE_MHI_WRITE_CH,
+		{
+			.id = MHI_DCI_1,
+			.dev_id = DIAGFWD_MDM_DCI,
+			.name = "MDM_DCI",
+			.enabled = 0,
+			.num_read = 0,
+			.mempool = POOL_TYPE_MDM_DCI,
+			.mempool_init = 0,
+			.mhi_wq = NULL,
+			.mhi_dev = NULL,
+			.read_ch = {
+				.type = TYPE_MHI_READ_CH,
+			},
+			.write_ch = {
+				.type = TYPE_MHI_WRITE_CH,
+			}
 		}
 	},
 	{
-		.id = MHI_DCI_1,
-		.dev_id = DIAGFWD_MDM_DCI,
-		.name = "MDM_DCI",
-		.enabled = 0,
-		.num_read = 0,
-		.mempool = POOL_TYPE_MDM_DCI,
-		.mempool_init = 0,
-		.mhi_wq = NULL,
-		.mhi_dev = NULL,
-		.read_ch = {
-			.type = TYPE_MHI_READ_CH,
+		{
+			.id = MHI_1,
+			.dev_id = DIAGFWD_MDM2,
+			.name = "MDM_2",
+			.enabled = 0,
+			.num_read = 0,
+			.mempool = POOL_TYPE_MDM2,
+			.mempool_init = 0,
+			.mhi_wq = NULL,
+			.mhi_dev = NULL,
+			.read_ch = {
+				.type = TYPE_MHI_READ_CH,
+			},
+			.write_ch = {
+				.type = TYPE_MHI_WRITE_CH,
+			}
 		},
-		.write_ch = {
-			.type = TYPE_MHI_WRITE_CH,
+		{
+			.id = MHI_DCI_1,
+			.dev_id = DIAGFWD_MDM_DCI_2,
+			.name = "MDM_DCI_2",
+			.enabled = 0,
+			.num_read = 0,
+			.mempool = POOL_TYPE_MDM2_DCI,
+			.mempool_init = 0,
+			.mhi_wq = NULL,
+			.mhi_dev = NULL,
+			.read_ch = {
+				.type = TYPE_MHI_READ_CH,
+			},
+			.write_ch = {
+				.type = TYPE_MHI_WRITE_CH,
+			}
 		}
 	}
-};
 
+};
+static int get_id_from_token(int token)
+{
+	int ch_idx = 0;
+	int dev_idx = 0;
+
+	for (dev_idx = 0; dev_idx < NUM_MHI_DEV; dev_idx++)
+		for (ch_idx = 0; ch_idx < NUM_MHI_CHAN; ch_idx++)
+			if (diag_mhi[dev_idx][ch_idx].dev_id == token)
+				return dev_idx;
+
+	return -EINVAL;
+}
 static int mhi_buf_tbl_add(struct diag_mhi_info *mhi_info, int type,
 			   void *buf, int len)
 {
@@ -228,21 +278,26 @@
 	return 0;
 }
 
-static int mhi_close(int id)
+static int mhi_close(int token, int ch)
 {
-	if (id < 0 || id >= NUM_MHI_DEV) {
-		pr_err("diag: In %s, invalid index %d\n", __func__, id);
+	int dev_idx = get_id_from_token(token);
+
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
+		pr_err("diag: In %s, invalid index %d\n", __func__, dev_idx);
 		return -EINVAL;
 	}
 
-	if (!diag_mhi[id].enabled)
+	if (ch < 0 || ch >= NUM_MHI_CHAN)
+		return -EINVAL;
+
+	if (!diag_mhi[dev_idx][ch].enabled)
 		return -ENODEV;
 	/*
 	 * This function is called whenever the channel needs to be closed
 	 * explicitly by Diag. Close both the read and write channels (denoted
 	 * by CLOSE_CHANNELS flag)
 	 */
-	return __mhi_close(&diag_mhi[id], CLOSE_CHANNELS);
+	return __mhi_close(&diag_mhi[dev_idx][ch], CLOSE_CHANNELS);
 }
 
 static void mhi_close_work_fn(struct work_struct *work)
@@ -259,7 +314,7 @@
 		__mhi_close(mhi_info, CHANNELS_CLOSED);
 }
 
-static int __mhi_open(struct diag_mhi_info *mhi_info, int open_flag)
+static int __mhi_open(struct diag_mhi_info *mhi_info, int token, int open_flag)
 {
 	int err = 0;
 
@@ -294,15 +349,21 @@
 	return 0;
 
 fail:
-	pr_err("diag: Failed to open mhi channlels, err: %d\n", err);
-	mhi_close(mhi_info->id);
+	mhi_close(token, mhi_info->id);
 	return err;
 }
 
-static int mhi_open(int id)
+static int mhi_open(int token, int ch)
 {
-	if (id < 0 || id >= NUM_MHI_DEV) {
-		pr_err("diag: In %s, invalid index %d\n", __func__, id);
+	int dev_idx = get_id_from_token(token);
+
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
+		pr_err("diag: In %s, invalid index %d\n", __func__, dev_idx);
+		return -EINVAL;
+	}
+
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err("diag: In %s, invalid ch %d\n", __func__, ch);
 		return -EINVAL;
 	}
 
@@ -311,9 +372,10 @@
 	 * explicitly by Diag. Open both the read and write channels (denoted by
 	 * OPEN_CHANNELS flag)
 	 */
-	__mhi_open(&diag_mhi[id], OPEN_CHANNELS);
-	diag_remote_dev_open(diag_mhi[id].dev_id);
-	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+	__mhi_open(&diag_mhi[dev_idx][ch], token, OPEN_CHANNELS);
+	diag_remote_dev_open(diag_mhi[dev_idx][ch].dev_id);
+	queue_work(diag_mhi[dev_idx][ch].mhi_wq,
+			&(diag_mhi[dev_idx][ch].read_work));
 
 	return 0;
 }
@@ -441,64 +503,82 @@
 	queue_work(mhi_info->mhi_wq, &mhi_info->read_work);
 }
 
-static int mhi_queue_read(int id)
+static int mhi_queue_read(int token, int ch)
 {
-	if (id < 0 || id >= NUM_MHI_DEV) {
+	int dev_idx = get_id_from_token(token);
+
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
 		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
-				   id);
+				   dev_idx);
 		return -EINVAL;
 	}
-	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err_ratelimited("diag: In %s, invalid chan %d\n", __func__,
+					ch);
+		return -EINVAL;
+	}
+	queue_work(diag_mhi[dev_idx][ch].mhi_wq,
+			&(diag_mhi[dev_idx][ch].read_work));
 	return 0;
 }
 
-static int mhi_write(int id, unsigned char *buf, int len, int ctxt)
+static int mhi_write(int token, int ch, unsigned char *buf, int len, int ctxt)
 {
 	int err = 0;
 	enum MHI_FLAGS mhi_flags = MHI_EOT;
 	unsigned long flags;
-	struct diag_mhi_ch_t *ch = NULL;
+	struct diag_mhi_ch_t *ch_info = NULL;
+	int dev_idx = get_id_from_token(token);
 
-	if (id < 0 || id >= NUM_MHI_DEV) {
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
 		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
-				   id);
+				   dev_idx);
+		return -EINVAL;
+	}
+
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err_ratelimited("diag: In %s, invalid chan %d\n", __func__,
+				   ch);
 		return -EINVAL;
 	}
 
 	if (!buf || len <= 0) {
 		pr_err("diag: In %s, ch %d, invalid buf %pK len %d\n",
-			__func__, id, buf, len);
+			__func__, dev_idx, buf, len);
 		return -EINVAL;
 	}
 
-	if (!diag_mhi[id].enabled) {
+	if (!diag_mhi[dev_idx][ch].enabled) {
 		pr_err_ratelimited("diag: In %s, MHI channel %s is not enabled\n",
-				   __func__, diag_mhi[id].name);
+				   __func__, diag_mhi[dev_idx][ch].name);
 		return -EIO;
 	}
 
-	ch = &diag_mhi[id].write_ch;
-	if (!(atomic_read(&(ch->opened)))) {
+	ch_info = &diag_mhi[dev_idx][ch].write_ch;
+	if (!(atomic_read(&(ch_info->opened)))) {
 		pr_err_ratelimited("diag: In %s, MHI write channel %s is not open\n",
-				   __func__, diag_mhi[id].name);
+				   __func__, diag_mhi[dev_idx][ch].name);
 		return -EIO;
 	}
 
-	spin_lock_irqsave(&ch->lock, flags);
-	err = mhi_buf_tbl_add(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf,
+	spin_lock_irqsave(&ch_info->lock, flags);
+	err = mhi_buf_tbl_add(&diag_mhi[dev_idx][ch], TYPE_MHI_WRITE_CH, buf,
 			      len);
 	if (err) {
-		spin_unlock_irqrestore(&ch->lock, flags);
+		spin_unlock_irqrestore(&ch_info->lock, flags);
 		goto fail;
 	}
 
-	err = mhi_queue_transfer(diag_mhi[id].mhi_dev, DMA_TO_DEVICE, buf,
-				len, mhi_flags);
-	spin_unlock_irqrestore(&ch->lock, flags);
+	err = mhi_queue_transfer(diag_mhi[dev_idx][ch].mhi_dev, DMA_TO_DEVICE,
+					buf, len, mhi_flags);
+	spin_unlock_irqrestore(&ch_info->lock, flags);
 	if (err) {
-		pr_err_ratelimited("diag: In %s, cannot write to MHI channel %pK, len %d, err: %d\n",
-				   __func__, diag_mhi[id].name, len, err);
-		mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, len);
+		pr_err_ratelimited("diag: In %s, cannot write to MHI channel %s, len %d, err: %d\n",
+					__func__, diag_mhi[dev_idx][ch].name,
+					len, err);
+		mhi_buf_tbl_remove(&diag_mhi[dev_idx][ch], TYPE_MHI_WRITE_CH,
+					buf, len);
 		goto fail;
 	}
 
@@ -507,36 +587,54 @@
 	return err;
 }
 
-static int mhi_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+static int mhi_fwd_complete(int token, int ch, unsigned char *buf,
+				int len, int ctxt)
 {
-	if (id < 0 || id >= NUM_MHI_DEV) {
+	int dev_idx = get_id_from_token(token);
+
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
 		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
-				   id);
+				   dev_idx);
 		return -EINVAL;
 	}
 
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err_ratelimited("diag: In %s, invalid chan %d\n", __func__,
+				   ch);
+		return -EINVAL;
+	}
 	if (!buf)
 		return -EINVAL;
 
-	mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_READ_CH, buf, len);
-	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+	mhi_buf_tbl_remove(&diag_mhi[dev_idx][ch], TYPE_MHI_READ_CH,
+				buf, len);
+	queue_work(diag_mhi[dev_idx][ch].mhi_wq,
+			&(diag_mhi[dev_idx][ch].read_work));
 	return 0;
 }
 
-static int mhi_remote_proc_check(void)
+static int mhi_remote_proc_check(int token)
 {
-	return diag_mhi[MHI_1].enabled;
+	int dev_idx = get_id_from_token(token);
+
+	if (dev_idx >= 0 && dev_idx < NUM_MHI_DEV)
+		return diag_mhi[dev_idx][MHI_1].enabled;
+	else
+		return 0;
 }
 
 static struct diag_mhi_info *diag_get_mhi_info(struct mhi_device *mhi_dev)
 {
 	struct diag_mhi_info *mhi_info = NULL;
-	int i;
+	int ch;
+	int dev_idx;
 
-	for (i = 0; i < NUM_MHI_DEV; i++) {
-		mhi_info = &diag_mhi[i];
-		if (mhi_info->mhi_dev == mhi_dev)
-			return mhi_info;
+	for (dev_idx = 0; dev_idx < NUM_MHI_DEV; dev_idx++) {
+		for (ch = 0; ch < NUM_MHI_CHAN; ch++) {
+			mhi_info = &diag_mhi[dev_idx][ch];
+			if (mhi_info->mhi_dev == mhi_dev)
+				return mhi_info;
+		}
 	}
 	return NULL;
 }
@@ -635,22 +733,46 @@
 static int diag_mhi_probe(struct mhi_device *mhi_dev,
 			const struct mhi_device_id *id)
 {
-	int index = id->driver_data;
+	int dev_idx;
+	int ch = id->driver_data;
 	unsigned long flags;
-	struct diag_mhi_info *mhi_info = &diag_mhi[index];
+	struct diag_mhi_info *mhi_info;
 
+	switch (mhi_dev->dev_id) {
+	case MHI_DEV_ID_1:
+		dev_idx = 0;
+		break;
+	case MHI_DEV_ID_2:
+		dev_idx = 1;
+		break;
+	default:
+		return 0;
+	}
+
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
+		pr_err_ratelimited(" In %s invalid dev index %d\n", __func__,
+					dev_idx);
+		return 0;
+	}
+
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err_ratelimited(" In %s invalid channel %d\n", __func__, ch);
+		return 0;
+	}
+
+	mhi_info = &diag_mhi[dev_idx][ch];
 	DIAG_LOG(DIAG_DEBUG_BRIDGE,
-		"received probe for %d\n",
-		index);
-	diag_mhi[index].mhi_dev = mhi_dev;
+		"received probe for dev:%d ch:%d\n",
+		dev_idx, ch);
+	mhi_info->mhi_dev = mhi_dev;
 	DIAG_LOG(DIAG_DEBUG_BRIDGE,
 		"diag: mhi device is ready to open\n");
 	spin_lock_irqsave(&mhi_info->lock, flags);
 	mhi_info->enabled = 1;
 	spin_unlock_irqrestore(&mhi_info->lock, flags);
-	__mhi_open(&diag_mhi[index], OPEN_CHANNELS);
-	queue_work(diag_mhi[index].mhi_wq,
-			   &(diag_mhi[index].open_work));
+	__mhi_open(mhi_info, mhi_info->dev_id, OPEN_CHANNELS);
+	queue_work(diag_mhi[dev_idx][ch].mhi_wq,
+			   &(diag_mhi[dev_idx][ch].open_work));
 	return 0;
 }
 
@@ -663,70 +785,87 @@
 	.remote_proc_check = mhi_remote_proc_check,
 };
 
-static void diag_mhi_dev_exit(int dev)
+static void diag_mhi_dev_exit(int dev_idx, int ch)
 {
 	struct diag_mhi_info *mhi_info = NULL;
 
-	mhi_info = &diag_mhi[dev];
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
+		pr_err_ratelimited(" In %s invalid dev index %d\n", __func__,
+					dev_idx);
+		return;
+	}
+
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err_ratelimited(" In %s invalid channel %d\n", __func__, ch);
+		return;
+	}
+
+	mhi_info = &diag_mhi[dev_idx][ch];
 	if (!mhi_info)
 		return;
 	if (mhi_info->mhi_wq)
 		destroy_workqueue(mhi_info->mhi_wq);
-	mhi_close(mhi_info->id);
+	mhi_close(mhi_info->dev_id, mhi_info->id);
 	if (mhi_info->mempool_init)
 		diagmem_exit(driver, mhi_info->mempool);
 }
 
 int diag_mhi_init(void)
 {
-	int i;
-	int err = 0;
+	int ch, dev_idx, err = 0;
 	struct diag_mhi_info *mhi_info = NULL;
 	char wq_name[DIAG_MHI_NAME_SZ + DIAG_MHI_STRING_SZ];
 
-	for (i = 0; i < NUM_MHI_DEV; i++) {
-		mhi_info = &diag_mhi[i];
-		spin_lock_init(&mhi_info->lock);
-		spin_lock_init(&mhi_info->read_ch.lock);
-		spin_lock_init(&mhi_info->write_ch.lock);
-		INIT_LIST_HEAD(&mhi_info->read_ch.buf_tbl);
-		INIT_LIST_HEAD(&mhi_info->write_ch.buf_tbl);
-		atomic_set(&(mhi_info->read_ch.opened), 0);
-		atomic_set(&(mhi_info->write_ch.opened), 0);
-		INIT_WORK(&(mhi_info->read_work), mhi_read_work_fn);
-		INIT_LIST_HEAD(&mhi_info->read_done_list);
-		INIT_WORK(&(mhi_info->read_done_work), mhi_read_done_work_fn);
-		INIT_WORK(&(mhi_info->open_work), mhi_open_work_fn);
-		INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn);
-		strlcpy(wq_name, "diag_mhi_", sizeof(wq_name));
-		strlcat(wq_name, mhi_info->name, sizeof(wq_name));
-		diagmem_init(driver, mhi_info->mempool);
-		mhi_info->mempool_init = 1;
-		mhi_info->mhi_wq = create_singlethread_workqueue(wq_name);
-		if (!mhi_info->mhi_wq)
-			goto fail;
-		err = diagfwd_bridge_register(mhi_info->dev_id, mhi_info->id,
-					      &diag_mhi_fwd_ops);
-		if (err) {
-			pr_err("diag: Unable to register MHI channel %d with bridge, err: %d\n",
-			       i, err);
-			goto fail;
+	for (dev_idx = 0; dev_idx < NUM_MHI_DEV; dev_idx++) {
+		for (ch = 0; ch < NUM_MHI_CHAN; ch++) {
+			mhi_info = &diag_mhi[dev_idx][ch];
+			spin_lock_init(&mhi_info->lock);
+			spin_lock_init(&mhi_info->read_ch.lock);
+			spin_lock_init(&mhi_info->write_ch.lock);
+			INIT_LIST_HEAD(&mhi_info->read_ch.buf_tbl);
+			INIT_LIST_HEAD(&mhi_info->write_ch.buf_tbl);
+			atomic_set(&(mhi_info->read_ch.opened), 0);
+			atomic_set(&(mhi_info->write_ch.opened), 0);
+			INIT_WORK(&(mhi_info->read_work), mhi_read_work_fn);
+			INIT_LIST_HEAD(&mhi_info->read_done_list);
+			INIT_WORK(&(mhi_info->read_done_work),
+					mhi_read_done_work_fn);
+			INIT_WORK(&(mhi_info->open_work), mhi_open_work_fn);
+			INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn);
+			strlcpy(wq_name, "diag_mhi_", sizeof(wq_name));
+			strlcat(wq_name, mhi_info->name, sizeof(wq_name));
+			diagmem_init(driver, mhi_info->mempool);
+			mhi_info->mempool_init = 1;
+			mhi_info->mhi_wq =
+				create_singlethread_workqueue(wq_name);
+			if (!mhi_info->mhi_wq)
+				goto fail;
+			err = diagfwd_bridge_register(mhi_info->dev_id,
+							mhi_info->id,
+							&diag_mhi_fwd_ops);
+			if (err) {
+				pr_err("diag: Unable to register MHI channel %d with bridge dev:%d, err: %d\n",
+					ch, dev_idx, err);
+				goto fail;
+			}
+			DIAG_LOG(DIAG_DEBUG_BRIDGE,
+					"mhi dev %d port %d initialized\n",
+					dev_idx, ch);
 		}
-		DIAG_LOG(DIAG_DEBUG_BRIDGE, "mhi port %d is initailzed\n", i);
 	}
-
 	return 0;
 fail:
-	diag_mhi_dev_exit(i);
+	diag_mhi_dev_exit(dev_idx, ch);
 	return -ENOMEM;
 }
 
 void diag_mhi_exit(void)
 {
-	int i;
+	int ch, dev_idx;
 
-	for (i = 0; i < NUM_MHI_DEV; i++)
-		diag_mhi_dev_exit(i);
+	for (dev_idx = 0; dev_idx < NUM_MHI_DEV; dev_idx++)
+		for (ch = 0; ch < NUM_MHI_CHAN; ch++)
+			diag_mhi_dev_exit(dev_idx, ch);
 }
 
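get_id_from_token() is the glue between the bridge tokens (DIAGFWD_MDM, DIAGFWD_MDM2, and the DCI variants) and the new two-dimensional diag_mhi table: it scans both axes for the slot whose dev_id matches the token and returns the device row. The same search in a self-contained form, with token values taken from diagfwd_bridge.h:

#include <stdio.h>

#define NUM_MHI_DEV	2
#define NUM_MHI_CHAN	2

struct mhi_slot {
	int dev_id;	/* bridge token owning this slot */
};

static struct mhi_slot diag_mhi[NUM_MHI_DEV][NUM_MHI_CHAN] = {
	{ { 0 /* DIAGFWD_MDM */ },  { 2 /* DIAGFWD_MDM_DCI */ } },
	{ { 1 /* DIAGFWD_MDM2 */ }, { 3 /* DIAGFWD_MDM_DCI_2 */ } },
};

static int get_id_from_token(int token)
{
	int dev_idx, ch_idx;

	for (dev_idx = 0; dev_idx < NUM_MHI_DEV; dev_idx++)
		for (ch_idx = 0; ch_idx < NUM_MHI_CHAN; ch_idx++)
			if (diag_mhi[dev_idx][ch_idx].dev_id == token)
				return dev_idx;
	return -1;
}

int main(void)
{
	printf("DIAGFWD_MDM_DCI_2 -> dev %d\n", get_id_from_token(3));
	printf("unknown token     -> dev %d\n", get_id_from_token(42));
	return 0;
}
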
 static const struct mhi_device_id diag_mhi_match_table[] = {
diff --git a/drivers/char/diag/diagfwd_mhi.h b/drivers/char/diag/diagfwd_mhi.h
index 3a94109..0fcc6af 100644
--- a/drivers/char/diag/diagfwd_mhi.h
+++ b/drivers/char/diag/diagfwd_mhi.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef DIAGFWD_MHI_H
@@ -29,12 +29,17 @@
 #define MHI_1			0
 #define MHI_DCI_1		1
 #define NUM_MHI_DEV		2
+#define NUM_MHI_CHAN		2
 
 #define TYPE_MHI_READ_CH	0
 #define TYPE_MHI_WRITE_CH	1
 
 #define DIAG_MHI_NAME_SZ	24
 
+/* The MHI device ids below come from the MHI controller */
+#define MHI_DEV_ID_1 0x306
+#define MHI_DEV_ID_2 0x1101
+
 struct diag_mhi_buf_tbl_t {
 	struct list_head link;
 	unsigned char *buf;
@@ -69,8 +74,7 @@
 	spinlock_t lock;
 };
 
-extern struct diag_mhi_info diag_mhi[NUM_MHI_DEV];
-
+extern struct diag_mhi_info diag_mhi[NUM_MHI_DEV][NUM_MHI_CHAN];
 int diag_mhi_init(void);
 void diag_mhi_exit(void);
 void diag_register_with_mhi(void);
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 1fb82bd..e85acd7 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -444,6 +444,7 @@
 	struct diagfwd_buf_t *temp_fwdinfo_upd = NULL;
 	int flag_buf_1 = 0, flag_buf_2 = 0;
 	uint8_t peripheral, temp_diagid_val;
+	unsigned char *buf_offset = NULL;
 
 	if (!fwd_info || !buf || len <= 0) {
 		diag_ws_release();
@@ -498,9 +499,24 @@
 		}
 
 		while (processed < len) {
+			/* Debug log to check diag_id header validity*/
 			pr_debug("diag_fr:untagged packet buf contents: %02x %02x %02x %02x\n",
 			 *temp_buf_main, *(temp_buf_main+1),
 			 *(temp_buf_main+2), *(temp_buf_main+3));
+
+			/* Debug log ONLY for CMD channel */
+			if (fwd_info->type == TYPE_CMD) {
+				/* buf_offset accounts for the diag_id
+				 * header and the non-HDLC header
+				 */
+				buf_offset = temp_buf_main + 8;
+
+				DIAG_LOG(DIAG_DEBUG_CMD_INFO,
+				"diag: cmd rsp (%02x %02x %02x %02x) received from peripheral: %d\n",
+				*(buf_offset), *(buf_offset+1),
+				*(buf_offset+2), *(buf_offset+3), peripheral);
+			}
+
 			packet_len =
 				*(uint16_t *) (temp_buf_main + 2);
 			if (packet_len > PERIPHERAL_BUF_SZ)
@@ -605,7 +621,7 @@
 {
 	int err = 0;
 	int write_len = 0;
-	unsigned char *write_buf = NULL;
+	unsigned char *write_buf = NULL, *buf_offset = NULL;
 	struct diagfwd_buf_t *temp_buf = NULL;
 	uint8_t hdlc_disabled = 0;
 
@@ -631,6 +647,16 @@
 
 	hdlc_disabled = driver->p_hdlc_disabled[fwd_info->peripheral];
 
+	if (fwd_info->type == TYPE_CMD) {
+		/* buf_offset accounts for the non-HDLC header */
+		buf_offset = buf + 4;
+
+		DIAG_LOG(DIAG_DEBUG_CMD_INFO,
+		"diag: cmd rsp(%02x %02x %02x %02x) received from peripheral: %d\n",
+		*(buf_offset), *(buf_offset+1), *(buf_offset+2),
+		*(buf_offset+3), fwd_info->peripheral);
+	}
+
 	if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
 		if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
 			temp_buf = fwd_info->buf_1;
@@ -1124,8 +1150,9 @@
 	int err = 0;
 	uint8_t retry_count = 0;
 	uint8_t max_retries = 3;
+	unsigned char *temp_buf = NULL;
 
-	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !buf)
 		return -EINVAL;
 
 	if (type == TYPE_CMD || type == TYPE_DCI_CMD) {
@@ -1156,6 +1183,17 @@
 	if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
 		return -EIO;
 
+	if (type == TYPE_CMD) {
+		temp_buf = (unsigned char *)(buf);
+		/* Only raw bytes are sent to the peripheral, so
+		 * HDLC/non-HDLC framing need not be considered
+		 */
+		DIAG_LOG(DIAG_DEBUG_CMD_INFO,
+		"diag: cmd (%02x %02x %02x %02x) ready to be written to p: %d\n",
+		*(temp_buf), *(temp_buf+1), *(temp_buf+2), *(temp_buf+3),
+		peripheral);
+	}
+
 	while (retry_count < max_retries) {
 		err = 0;
 		err = fwd_info->p_ops->write(fwd_info->ctxt, buf, len);
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 202f0a1..27c1f64 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -352,6 +352,7 @@
 static void diag_state_close_socket(void *ctxt);
 static int diag_socket_write(void *ctxt, unsigned char *buf, int len);
 static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_socket_drop_data(struct diag_socket_info *info);
 static void diag_socket_queue_read(void *ctxt);
 
 static struct diag_peripheral_ops socket_ops = {
@@ -613,6 +614,9 @@
 		return;
 	}
 
+	if (!info->fwd_ctxt && info->port_type == PORT_TYPE_SERVER)
+		diag_socket_drop_data(info);
+
 	if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
 		diagfwd_buffers_init(info->fwd_ctxt);
 
@@ -683,6 +687,46 @@
 	}
 }
 
+static void diag_socket_drop_data(struct diag_socket_info *info)
+{
+	int err = 0;
+	int pkt_len = 0;
+	int read_len = 0;
+	unsigned char *temp = NULL;
+	struct kvec iov;
+	struct msghdr read_msg = {NULL, 0};
+	struct sockaddr_qrtr src_addr = {0};
+	unsigned long flags;
+
+	temp = vzalloc(PERIPHERAL_BUF_SZ);
+	if (!temp)
+		return;
+
+	while (info->data_ready > 0) {
+		iov.iov_base = temp;
+		iov.iov_len = PERIPHERAL_BUF_SZ;
+		read_msg.msg_name = &src_addr;
+		read_msg.msg_namelen = sizeof(src_addr);
+		err = info->hdl->ops->ioctl(info->hdl, TIOCINQ,
+					(unsigned long)&pkt_len);
+		if (err || pkt_len < 0)
+			break;
+		spin_lock_irqsave(&info->lock, flags);
+		if (info->data_ready > 0) {
+			info->data_ready--;
+		} else {
+			spin_unlock_irqrestore(&info->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&info->lock, flags);
+		read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
+					  pkt_len, MSG_DONTWAIT);
+		pr_debug("%s : %s drop total bytes: %d\n", __func__,
+			info->name, read_len);
+	}
+	vfree(temp);
+}
+
 static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len)
 {
 	int err = 0;
@@ -693,8 +737,8 @@
 	int qrtr_ctrl_recd = 0;
 	uint8_t buf_full = 0;
 	unsigned char *temp = NULL;
-	struct kvec iov = {0};
-	struct msghdr read_msg = {0};
+	struct kvec iov;
+	struct msghdr read_msg = {NULL, 0};
 	struct sockaddr_qrtr src_addr = {0};
 	struct diag_socket_info *info;
 	struct mutex *channel_mutex;
@@ -769,7 +813,13 @@
 		}
 
 		spin_lock_irqsave(&info->lock, flags);
-		info->data_ready--;
+		if (info->data_ready > 0) {
+			info->data_ready--;
+		} else {
+			spin_unlock_irqrestore(&info->lock, flags);
+			mutex_unlock(&info->socket_info_mutex);
+			break;
+		}
 		spin_unlock_irqrestore(&info->lock, flags);
 
 		read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
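
diag_socket_drop_data() drains stale packets while no forwarding context exists: query how many bytes are queued, then read them away non-blocking so they never reach the mux. A userspace analog of that loop using FIONREAD (the request TIOCINQ resolves to on Linux sockets) and MSG_DONTWAIT, where the driver itself uses the socket's ioctl op and kernel_recvmsg():

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

static void drop_pending(int fd, char *scratch, int scratch_len)
{
	int avail = 0;

	for (;;) {
		/* How much is queued? Stop once the socket is empty. */
		if (ioctl(fd, FIONREAD, &avail) < 0 || avail <= 0)
			break;
		/* Read it away without blocking; discard the contents. */
		if (recv(fd, scratch, scratch_len, MSG_DONTWAIT) <= 0)
			break;
	}
}

int main(void)
{
	int sv[2];
	char scratch[64];

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0)
		return 1;
	send(sv[0], "stale", 5, 0);
	drop_pending(sv[1], scratch, sizeof(scratch));
	printf("queue drained\n");
	return 0;
}
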
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index dac895d..2a7d6c3 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -307,6 +307,20 @@
 
 	  If unsure, say Y.
 
+config HW_RANDOM_MSM_LEGACY
+	tristate "QTI MSM Random Number Generator support (LEGACY)"
+	depends on HW_RANDOM && ARCH_QCOM
+	select CRYPTO_AES
+	select CRYPTO_ECB
+	help
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on QTI MSM SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called msm_rng.
+
+	  If unsure, say Y.
+
 config HW_RANDOM_ST
 	tristate "ST Microelectronics HW Random Number Generator support"
 	depends on HW_RANDOM && ARCH_STI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index e35ec3c..65eed19 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_HW_RANDOM_HISI)	+= hisi-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
 obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
+obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += msm_rng.o
 obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
 obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
 obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
new file mode 100644
index 0000000..4479b1d
--- /dev/null
+++ b/drivers/char/hw_random/msm_rng.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2013, 2015, 2017-2019 The Linux Foundation. All rights
+ * reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/qrng.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/internal/rng.h>
+
+#include <linux/sched/signal.h>
+
+#define DRIVER_NAME "msm_rng"
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT_OFFSET    0x0000
+#define PRNG_STATUS_OFFSET	0x0004
+#define PRNG_LFSR_CFG_OFFSET	0x0100
+#define PRNG_CONFIG_OFFSET	0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK	0xFFFF0000
+#define PRNG_LFSR_CFG_CLOCKS	0x0000DDDD
+#define PRNG_CONFIG_MASK	0xFFFFFFFD
+#define PRNG_HW_ENABLE		0x00000002
+
+#define MAX_HW_FIFO_DEPTH 16                     /* FIFO is 16 words deep */
+#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide  */
+
+#define RETRY_MAX_CNT		5	/* max retry times to read register */
+#define RETRY_DELAY_INTERVAL	440	/* retry delay interval in us */
+
+struct msm_rng_device {
+	struct platform_device *pdev;
+	void __iomem *base;
+	struct clk *prng_clk;
+	uint32_t qrng_perf_client;
+	struct mutex rng_lock;
+};
+
+struct msm_rng_device msm_rng_device_info;
+static struct msm_rng_device *msm_rng_dev_cached;
+struct mutex cached_rng_lock;
+static long msm_rng_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	long ret = 0;
+
+	switch (cmd) {
+	case QRNG_IOCTL_RESET_BUS_BANDWIDTH:
+		pr_debug("calling msm_rng_bus_scale(LOW)\n");
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_device_info.qrng_perf_client, 0);
+		if (ret)
+			pr_err("failed qrng_reset_bus_bw, ret = %ld\n", ret);
+		break;
+	default:
+		pr_err("Unsupported IOCTL call\n");
+		break;
+	}
+	return ret;
+}
+
+/*
+ *
+ *  This function reads the hardware random bit generator directly and
+ *  returns the data back to the caller
+ *
+ */
+static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
+					void *data, size_t max)
+{
+	struct platform_device *pdev;
+	void __iomem *base;
+	size_t currsize = 0;
+	u32 val = 0;
+	u32 *retdata = data;
+	int ret;
+	int failed = 0;
+
+	pdev = msm_rng_dev->pdev;
+	base = msm_rng_dev->base;
+
+	/* no room for word data */
+	if (max < 4)
+		return 0;
+
+	mutex_lock(&msm_rng_dev->rng_lock);
+
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 1);
+		if (ret) {
+			pr_err("bus_scale_client_update_req failed\n");
+			goto bus_err;
+		}
+	}
+	/* enable PRNG clock */
+	if (msm_rng_dev->prng_clk) {
+		ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+		if (ret) {
+			pr_err("failed to enable prng clock\n");
+			goto err;
+		}
+	}
+	/* read random data from h/w */
+	do {
+		/* check status bit if data is available */
+		if (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
+				& 0x00000001)) {
+			if (failed++ == RETRY_MAX_CNT) {
+				if (currsize == 0)
+					pr_err("Data not available\n");
+				break;
+			}
+			udelay(RETRY_DELAY_INTERVAL);
+		} else {
+
+			/* read FIFO */
+			val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
+
+			/* write data back to callers pointer */
+			*(retdata++) = val;
+			currsize += 4;
+			/* make sure we stay on 32bit boundary */
+			if ((max - currsize) < 4)
+				break;
+		}
+
+	} while (currsize < max);
+
+	/* vote to turn off clock */
+	if (msm_rng_dev->prng_clk)
+		clk_disable_unprepare(msm_rng_dev->prng_clk);
+err:
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 0);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed\n");
+	}
+bus_err:
+	mutex_unlock(&msm_rng_dev->rng_lock);
+
+	val = 0L;
+	return currsize;
+}
+static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct msm_rng_device *msm_rng_dev;
+	int rv = 0;
+
+	msm_rng_dev = (struct msm_rng_device *)rng->priv;
+	rv = msm_rng_direct_read(msm_rng_dev, data, max);
+
+	return rv;
+}
+
+
+static struct hwrng msm_rng = {
+	.name = DRIVER_NAME,
+	.read = msm_rng_read,
+	.quality = 1024,
+};
+
+static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev)
+{
+	unsigned long val = 0;
+	unsigned long reg_val = 0;
+	int ret = 0;
+
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 1);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed\n");
+	}
+	/* Enable the PRNG CLK */
+	if (msm_rng_dev->prng_clk) {
+		ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+		if (ret) {
+			dev_err(&(msm_rng_dev->pdev)->dev,
+				"failed to enable clock in probe\n");
+			return -EPERM;
+		}
+	}
+
+	/* Enable PRNG h/w only if it is NOT ON */
+	val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) &
+					PRNG_HW_ENABLE;
+	/* PRNG H/W is not ON */
+	if (val != PRNG_HW_ENABLE) {
+		val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+		val &= PRNG_LFSR_CFG_MASK;
+		val |= PRNG_LFSR_CFG_CLOCKS;
+		writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+
+		/* The PRNG CONFIG register should be first written */
+		mb();
+
+		reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET)
+						& PRNG_CONFIG_MASK;
+		reg_val |= PRNG_HW_ENABLE;
+		writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET);
+
+		/* The PRNG clk should be disabled only after we enable the
+		 * PRNG h/w by writing to the PRNG CONFIG register.
+		 */
+		mb();
+	}
+	if (msm_rng_dev->prng_clk)
+		clk_disable_unprepare(msm_rng_dev->prng_clk);
+
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 0);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed\n");
+	}
+
+	return 0;
+}
+
+static const struct file_operations msm_rng_fops = {
+	.unlocked_ioctl = msm_rng_ioctl,
+};
+static struct class *msm_rng_class;
+static struct cdev msm_rng_cdev;
+
+static int msm_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct msm_rng_device *msm_rng_dev = NULL;
+	void __iomem *base = NULL;
+	bool configure_qrng = true;
+	int error = 0;
+	int ret = 0;
+	struct device *dev;
+
+	struct msm_bus_scale_pdata *qrng_platform_support = NULL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "invalid address\n");
+		error = -EFAULT;
+		goto err_exit;
+	}
+
+	msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
+	if (!msm_rng_dev) {
+		error = -ENOMEM;
+		goto err_exit;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		error = -ENOMEM;
+		goto err_iomap;
+	}
+	msm_rng_dev->base = base;
+
+	/* create a handle for clock control */
+	if (pdev->dev.of_node) {
+		if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-clock-support")) {
+			msm_rng_dev->prng_clk = NULL;
+		} else {
+			if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,msm-rng-iface-clk")) {
+				msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+							"iface_clk");
+			} else {
+				msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+							 "core_clk");
+			}
+		}
+	}
+
+	if (IS_ERR(msm_rng_dev->prng_clk)) {
+		dev_err(&pdev->dev, "failed to register clock source\n");
+		error = -EPERM;
+		goto err_clk_get;
+	}
+
+	/* save away pdev and register driver data */
+	msm_rng_dev->pdev = pdev;
+	platform_set_drvdata(pdev, msm_rng_dev);
+
+	if (pdev->dev.of_node) {
+		/* Register bus client */
+		qrng_platform_support = msm_bus_cl_get_pdata(pdev);
+		msm_rng_dev->qrng_perf_client = msm_bus_scale_register_client(
+						qrng_platform_support);
+		msm_rng_device_info.qrng_perf_client =
+					msm_rng_dev->qrng_perf_client;
+		if (!msm_rng_dev->qrng_perf_client)
+			pr_err("Unable to register bus client\n");
+	}
+
+	/* Enable rng h/w for the targets which can access the entire
+	 * address space of PRNG.
+	 */
+	if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-qrng-config")))
+		configure_qrng = false;
+	if (configure_qrng) {
+		error = msm_rng_enable_hw(msm_rng_dev);
+		if (error)
+			goto rollback_clk;
+	}
+
+	mutex_init(&msm_rng_dev->rng_lock);
+	mutex_init(&cached_rng_lock);
+
+	/* register with hwrng framework */
+	msm_rng.priv = (unsigned long) msm_rng_dev;
+	error = hwrng_register(&msm_rng);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register hwrng\n");
+		error = -EPERM;
+		goto rollback_clk;
+	}
+	ret = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);
+
+	msm_rng_class = class_create(THIS_MODULE, "msm-rng");
+	if (IS_ERR(msm_rng_class)) {
+		pr_err("class_create failed\n");
+		error = PTR_ERR(msm_rng_class);
+		goto unregister_chrdev;
+	}
+
+	dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
+				NULL, "msm-rng");
+	if (IS_ERR(dev)) {
+		pr_err("Device create failed\n");
+		error = PTR_ERR(dev);
+		goto unregister_chrdev;
+	}
+	cdev_init(&msm_rng_cdev, &msm_rng_fops);
+	msm_rng_dev_cached = msm_rng_dev;
+	return error;
+
+unregister_chrdev:
+	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+rollback_clk:
+	if (msm_rng_dev->prng_clk)
+		clk_put(msm_rng_dev->prng_clk);
+err_clk_get:
+	iounmap(msm_rng_dev->base);
+err_iomap:
+	kzfree(msm_rng_dev);
+err_exit:
+	return error;
+}
+
+static int msm_rng_remove(struct platform_device *pdev)
+{
+	struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev);
+
+	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+	hwrng_unregister(&msm_rng);
+	if (msm_rng_dev->prng_clk)
+		clk_put(msm_rng_dev->prng_clk);
+	iounmap(msm_rng_dev->base);
+	platform_set_drvdata(pdev, NULL);
+	if (msm_rng_dev->qrng_perf_client)
+		msm_bus_scale_unregister_client(msm_rng_dev->qrng_perf_client);
+
+	kzfree(msm_rng_dev);
+	msm_rng_dev_cached = NULL;
+	return 0;
+}
+
+static int qrng_get_random(struct crypto_rng *tfm, const u8 *src,
+				unsigned int slen, u8 *rdata,
+				unsigned int dlen)
+{
+	int sizeread = 0;
+	int rv = -EFAULT;
+
+	if (!msm_rng_dev_cached) {
+		pr_err("%s: msm_rng_dev is not initialized\n", __func__);
+		rv = -ENODEV;
+		goto err_exit;
+	}
+
+	if (!rdata) {
+		pr_err("%s: data buffer is null\n", __func__);
+		rv = -EINVAL;
+		goto err_exit;
+	}
+
+	if (signal_pending(current) ||
+		mutex_lock_interruptible(&cached_rng_lock)) {
+		pr_err("%s: mutex lock interrupted\n", __func__);
+		rv = -ERESTARTSYS;
+		goto err_exit;
+	}
+	sizeread = msm_rng_direct_read(msm_rng_dev_cached, rdata, dlen);
+
+	if (sizeread == dlen)
+		rv = 0;
+
+	mutex_unlock(&cached_rng_lock);
+err_exit:
+	return rv;
+}
+
+static int qrng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+	return 0;
+}
+
+static struct rng_alg rng_algs[] = { {
+	.generate	= qrng_get_random,
+	.seed		= qrng_reset,
+	.seedsize	= 0,
+	.base		= {
+		.cra_name		= "qrng",
+		.cra_driver_name	= "fips_hw_qrng",
+		.cra_priority		= 300,
+		.cra_ctxsize		= 0,
+		.cra_module		= THIS_MODULE,
+	}
+} };
+
+static const struct of_device_id qrng_match[] = {
+	{	.compatible = "qcom,msm-rng",
+	},
+	{},
+};
+
+static struct platform_driver rng_driver = {
+	.probe      = msm_rng_probe,
+	.remove     = msm_rng_remove,
+	.driver     = {
+		.name   = DRIVER_NAME,
+		.of_match_table = qrng_match,
+	},
+};
+
+static int __init msm_rng_init(void)
+{
+	int ret;
+
+	msm_rng_dev_cached = NULL;
+	ret = platform_driver_register(&rng_driver);
+	if (ret) {
+		pr_err("%s: platform_driver_register error:%d\n",
+			__func__, ret);
+		goto err_exit;
+	}
+	ret = crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+	if (ret) {
+		pr_err("%s: crypto_register_rngs error:%d\n",
+			__func__, ret);
+		/* don't leave the platform driver registered on failure */
+		platform_driver_unregister(&rng_driver);
+	}
+
+err_exit:
+	return ret;
+}
+
+module_init(msm_rng_init);
+
+static void __exit msm_rng_exit(void)
+{
+	crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+	platform_driver_unregister(&rng_driver);
+}
+
+module_exit(msm_rng_exit);
+
+MODULE_DESCRIPTION("QTI MSM Random Number Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 7fc9612..d5f7a12 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -29,6 +29,7 @@
 #include <linux/moduleparam.h>
 #include <linux/workqueue.h>
 #include <linux/uuid.h>
+#include <linux/nospec.h>
 
 #define PFX "IPMI message handler: "
 
@@ -61,7 +62,8 @@
 { }
 #endif
 
-static int initialized;
+static bool initialized;
+static bool drvregistered;
 
 enum ipmi_panic_event_op {
 	IPMI_SEND_PANIC_EVENT_NONE,
@@ -611,7 +613,7 @@
 
 static LIST_HEAD(ipmi_interfaces);
 static DEFINE_MUTEX(ipmi_interfaces_mutex);
-DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
+struct srcu_struct ipmi_interfaces_srcu;
 
 /*
  * List of watchers that want to know when smi's are added and deleted.
@@ -719,7 +721,15 @@
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 {
 	struct ipmi_smi *intf;
-	int index;
+	int index, rv;
+
+	/*
+	 * Make sure the driver is actually initialized, this handles
+	 * problems with initialization order.
+	 */
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	mutex_lock(&smi_watchers_mutex);
 
@@ -883,7 +893,7 @@
 
 		if (user) {
 			user->handler->ipmi_recv_hndl(msg, user->handler_data);
-			release_ipmi_user(msg->user, index);
+			release_ipmi_user(user, index);
 		} else {
 			/* User went away, give up. */
 			ipmi_free_recv_msg(msg);
@@ -1075,7 +1085,7 @@
 {
 	unsigned long flags;
 	struct ipmi_user *new_user;
-	int           rv = 0, index;
+	int           rv, index;
 	struct ipmi_smi *intf;
 
 	/*
@@ -1093,18 +1103,9 @@
 	 * Make sure the driver is actually initialized, this handles
 	 * problems with initialization order.
 	 */
-	if (!initialized) {
-		rv = ipmi_init_msghandler();
-		if (rv)
-			return rv;
-
-		/*
-		 * The init code doesn't return an error if it was turned
-		 * off, but it won't initialize.  Check that.
-		 */
-		if (!initialized)
-			return -ENODEV;
-	}
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
 	if (!new_user)
@@ -1182,6 +1183,7 @@
 static void free_user(struct kref *ref)
 {
 	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+	cleanup_srcu_struct(&user->release_barrier);
 	kfree(user);
 }
 
@@ -1258,7 +1260,6 @@
 {
 	_ipmi_destroy_user(user);
 
-	cleanup_srcu_struct(&user->release_barrier);
 	kref_put(&user->refcount, free_user);
 
 	return 0;
@@ -1297,10 +1298,12 @@
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		user->intf->addrinfo[channel].address = address;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@@ -1317,10 +1320,12 @@
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		*address = user->intf->addrinfo[channel].address;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@@ -1337,10 +1342,12 @@
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		user->intf->addrinfo[channel].lun = LUN & 0x3;
+	}
 	release_ipmi_user(user, index);
 
 	return 0;
@@ -1357,10 +1364,12 @@
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		*address = user->intf->addrinfo[channel].lun;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@@ -2184,6 +2193,7 @@
 {
 	if (addr->channel >= IPMI_MAX_CHANNELS)
 		return -EINVAL;
+	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
 	*lun = intf->addrinfo[addr->channel].lun;
 	*saddr = intf->addrinfo[addr->channel].address;
 	return 0;
@@ -3294,17 +3304,9 @@
 	 * Make sure the driver is actually initialized, this handles
 	 * problems with initialization order.
 	 */
-	if (!initialized) {
-		rv = ipmi_init_msghandler();
-		if (rv)
-			return rv;
-		/*
-		 * The init code doesn't return an error if it was turned
-		 * off, but it won't initialize.  Check that.
-		 */
-		if (!initialized)
-			return -ENODEV;
-	}
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
 	if (!intf)
@@ -5020,6 +5022,22 @@
 	return NOTIFY_DONE;
 }
 
+/* Must be called with ipmi_interfaces_mutex held. */
+static int ipmi_register_driver(void)
+{
+	int rv;
+
+	if (drvregistered)
+		return 0;
+
+	rv = driver_register(&ipmidriver.driver);
+	if (rv)
+		pr_err("Could not register IPMI driver\n");
+	else
+		drvregistered = true;
+	return rv;
+}
+
 static struct notifier_block panic_block = {
 	.notifier_call	= panic_event,
 	.next		= NULL,
@@ -5030,66 +5048,74 @@
 {
 	int rv;
 
+	mutex_lock(&ipmi_interfaces_mutex);
+	rv = ipmi_register_driver();
+	if (rv)
+		goto out;
 	if (initialized)
-		return 0;
+		goto out;
 
-	rv = driver_register(&ipmidriver.driver);
-	if (rv) {
-		pr_err(PFX "Could not register IPMI driver\n");
-		return rv;
-	}
-
-	pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n");
+	init_srcu_struct(&ipmi_interfaces_srcu);
 
 	timer_setup(&ipmi_timer, ipmi_timeout, 0);
 	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 
 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
-	initialized = 1;
+	initialized = true;
 
-	return 0;
+out:
+	mutex_unlock(&ipmi_interfaces_mutex);
+	return rv;
 }
 
 static int __init ipmi_init_msghandler_mod(void)
 {
-	ipmi_init_msghandler();
-	return 0;
+	int rv;
+
+	pr_info("version " IPMI_DRIVER_VERSION "\n");
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	rv = ipmi_register_driver();
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	return rv;
 }
 
 static void __exit cleanup_ipmi(void)
 {
 	int count;
 
-	if (!initialized)
-		return;
+	if (initialized) {
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &panic_block);
 
-	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
+		/*
+		 * This can't be called if any interfaces exist, so no worry
+		 * about shutting down the interfaces.
+		 */
 
-	/*
-	 * This can't be called if any interfaces exist, so no worry
-	 * about shutting down the interfaces.
-	 */
+		/*
+		 * Tell the timer to stop, then wait for it to stop.  This
+		 * avoids problems with race conditions removing the timer
+		 * here.
+		 */
+		atomic_inc(&stop_operation);
+		del_timer_sync(&ipmi_timer);
 
-	/*
-	 * Tell the timer to stop, then wait for it to stop.  This
-	 * avoids problems with race conditions removing the timer
-	 * here.
-	 */
-	atomic_inc(&stop_operation);
-	del_timer_sync(&ipmi_timer);
+		initialized = false;
 
-	driver_unregister(&ipmidriver.driver);
-
-	initialized = 0;
-
-	/* Check for buffer leaks. */
-	count = atomic_read(&smi_msg_inuse_count);
-	if (count != 0)
-		pr_warn(PFX "SMI message count %d at exit\n", count);
-	count = atomic_read(&recv_msg_inuse_count);
-	if (count != 0)
-		pr_warn(PFX "recv message count %d at exit\n", count);
+		/* Check for buffer leaks. */
+		count = atomic_read(&smi_msg_inuse_count);
+		if (count != 0)
+			pr_warn(PFX "SMI message count %d at exit\n", count);
+		count = atomic_read(&recv_msg_inuse_count);
+		if (count != 0)
+			pr_warn(PFX "recv message count %d at exit\n", count);
+		cleanup_srcu_struct(&ipmi_interfaces_srcu);
+	}
+	if (drvregistered)
+		driver_unregister(&ipmidriver.driver);
 }
 module_exit(cleanup_ipmi);
 
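Note: the ipmi_msghandler changes above move one-time setup out of module init
so that any entry point (user creation, watcher registration, interface
registration) can trigger it safely.  The underlying idiom is mutex-guarded
idempotent initialization; a generic sketch, where do_one_time_setup() is a
hypothetical placeholder:

	static DEFINE_MUTEX(init_lock);
	static bool init_done;

	static int ensure_initialized(void)
	{
		int rv = 0;

		mutex_lock(&init_lock);
		if (!init_done) {
			rv = do_one_time_setup();	/* hypothetical */
			if (!rv)
				init_done = true;
		}
		mutex_unlock(&init_lock);
		return rv;
	}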
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 9b78672..76c2010 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -630,8 +630,9 @@
 
 		/* Remove the multi-part read marker. */
 		len -= 2;
+		data += 2;
 		for (i = 0; i < len; i++)
-			ssif_info->data[i] = data[i+2];
+			ssif_info->data[i] = data[i];
 		ssif_info->multi_len = len;
 		ssif_info->multi_pos = 1;
 
@@ -659,8 +660,19 @@
 		}
 
 		blocknum = data[0];
+		len--;
+		data++;
 
-		if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
+		if (blocknum != 0xff && len != 31) {
+			/* All blocks but the last must have 31 data bytes. */
+			result = -EIO;
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info("Received middle message <31\n");
+
+			goto continue_op;
+		}
+
+		if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
 			/* Received message too big, abort the operation. */
 			result = -E2BIG;
 			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -669,16 +681,14 @@
 			goto continue_op;
 		}
 
-		/* Remove the blocknum from the data. */
-		len--;
 		for (i = 0; i < len; i++)
-			ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
+			ssif_info->data[i + ssif_info->multi_len] = data[i];
 		ssif_info->multi_len += len;
 		if (blocknum == 0xff) {
 			/* End of read */
 			len = ssif_info->multi_len;
 			data = ssif_info->data;
-		} else if (blocknum + 1 != ssif_info->multi_pos) {
+		} else if (blocknum != ssif_info->multi_pos) {
 			/*
 			 * Out of sequence block, just abort.  Block
 			 * numbers start at zero for the second block,
@@ -706,6 +716,7 @@
 		}
 	}
 
+ continue_op:
 	if (result < 0) {
 		ssif_inc_stat(ssif_info, receive_errors);
 	} else {
@@ -713,8 +724,6 @@
 		ssif_inc_stat(ssif_info, received_message_parts);
 	}
 
-
- continue_op:
 	if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
 		pr_info(PFX "DONE 1: state = %d, result=%d.\n",
 			ssif_info->ssif_state, result);
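
Note: the reworked SSIF multi-part read above strips the block number before
any length check, which makes the framing rules explicit: middle blocks carry
exactly 31 data bytes, block numbers must arrive in sequence, and 0xff marks
the final block.  A condensed, illustrative sketch of those checks
(IPMI_MAX_MSG_LENGTH is 272 in this tree):

	static int ssif_check_block(u8 blocknum, size_t payload_len,
				    size_t total_len, u8 expected)
	{
		if (blocknum != 0xff && payload_len != 31)
			return -EIO;	/* middle blocks are always 31 bytes */
		if (total_len + payload_len > 272 /* IPMI_MAX_MSG_LENGTH */)
			return -E2BIG;	/* reassembled message too big */
		if (blocknum != 0xff && blocknum != expected)
			return -EIO;	/* out-of-sequence block */
		return 0;
	}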
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index b5e3103..e43c876 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -59,6 +59,7 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/serial_8250.h>
+#include <linux/nospec.h>
 #include "smapi.h"
 #include "mwavedd.h"
 #include "3780i.h"
@@ -289,6 +290,8 @@
 						ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			PRINTK_3(TRACE_MWAVE,
 				"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
 				" ipcnum %x entry usIntCount %x\n",
@@ -317,6 +320,8 @@
 						" Invalid ipcnum %x\n", ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			PRINTK_3(TRACE_MWAVE,
 				"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
 				" ipcnum %x, usIntCount %x\n",
@@ -383,6 +388,8 @@
 						ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			mutex_lock(&mwave_mutex);
 			if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
 				pDrvData->IPCs[ipcnum].bIsEnabled = false;
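
Note: all three mwave ioctl paths now clamp ipcnum after its bounds check.
array_index_nospec() narrows the index even on the speculative path, so a
mispredicted bounds-check branch cannot be abused as a Spectre-v1
out-of-bounds read gadget.  The general shape, sketched with a hypothetical
table and helper:

	#include <linux/nospec.h>

	static int read_slot(const int *table, size_t nr, size_t idx, int *out)
	{
		if (idx >= nr)
			return -EINVAL;
		/* Clamp idx under speculation as well as architecturally. */
		idx = array_index_nospec(idx, nr);
		*out = table[idx];
		return 0;
	}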
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 7d958ff..1010cb7 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -477,13 +477,15 @@
 
 	if (need_locality) {
 		rc = tpm_request_locality(chip, flags);
-		if (rc < 0)
-			goto out_no_locality;
+		if (rc < 0) {
+			need_locality = false;
+			goto out_locality;
+		}
 	}
 
 	rc = tpm_cmd_ready(chip, flags);
 	if (rc)
-		goto out;
+		goto out_locality;
 
 	rc = tpm2_prepare_space(chip, space, ordinal, buf);
 	if (rc)
@@ -547,14 +549,13 @@
 		dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
 
 out:
-	rc = tpm_go_idle(chip, flags);
-	if (rc)
-		goto out;
+	/* may fail but do not override previous error value in rc */
+	tpm_go_idle(chip, flags);
 
+out_locality:
 	if (need_locality)
 		tpm_relinquish_locality(chip, flags);
 
-out_no_locality:
 	if (chip->ops->clk_enable != NULL)
 		chip->ops->clk_enable(chip, false);
 
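Note: the tpm_transmit rework above collapses two exit labels into one ordered
unwind: tpm_go_idle() may fail without clobbering the earlier return code, and
the locality is relinquished only if it was actually acquired.  The error-path
discipline, sketched with hypothetical helpers:

	static int do_transaction(void)
	{
		bool have_locality = false;
		int rc;

		rc = acquire_locality();	/* hypothetical helper */
		if (rc)
			goto out_locality;
		have_locality = true;

		rc = make_ready();
		if (rc)
			goto out_locality;

		rc = run_command();

		go_idle();		/* may fail; rc is preserved */
	out_locality:
		if (have_locality)
			release_locality();
		return rc;
	}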
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index caa86b1..f74f451 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -369,6 +369,7 @@
 	struct device *dev = chip->dev.parent;
 	struct i2c_client *client = to_i2c_client(dev);
 	u32 ordinal;
+	unsigned long duration;
 	size_t count = 0;
 	int burst_count, bytes2write, retries, rc = -EIO;
 
@@ -455,10 +456,12 @@
 		return rc;
 	}
 	ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
-	rc = i2c_nuvoton_wait_for_data_avail(chip,
-					     tpm_calc_ordinal_duration(chip,
-								       ordinal),
-					     &priv->read_queue);
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		duration = tpm2_calc_ordinal_duration(chip, ordinal);
+	else
+		duration = tpm_calc_ordinal_duration(chip, ordinal);
+
+	rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
 	if (rc) {
 		dev_err(dev, "%s() timeout command duration\n", __func__);
 		i2c_nuvoton_ready(chip);
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index decffb3..a738af8 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -262,8 +262,10 @@
 
 		if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
 			src = VC5_PRIM_SRC_SHDN_EN_XTAL;
-		if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
+		else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
 			src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
+		else /* Invalid; should have been caught by vc5_probe() */
+			return -EINVAL;
 	}
 
 	return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index fce7ab4..19ba6f4 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -50,6 +50,17 @@
 };
 
 static LIST_HEAD(clk_handoff_vdd_list);
+static bool vdd_class_handoff_completed;
+static DEFINE_MUTEX(vdd_class_list_lock);
+/*
+ * clk_rate_change_list is used during clk_core_set_rate_nolock() calls to
+ * handle vdd_class vote tracking.  core->rate_change_node is added to
+ * clk_rate_change_list when core->new_rate requires a different voltage level
+ * (core->new_vdd_class_vote) than core->vdd_class_vote.  Elements are removed
+ * from the list after unvoting core->vdd_class_vote immediately before
+ * returning from clk_core_set_rate_nolock().
+ */
+static LIST_HEAD(clk_rate_change_list);
 
 /***    private data structures    ***/
 
@@ -67,9 +78,7 @@
 	unsigned long		rate;
 	unsigned long		req_rate;
 	unsigned long		new_rate;
-	unsigned long		old_rate;
 	struct clk_core		*new_parent;
-	struct clk_core		*old_parent;
 	struct clk_core		*new_child;
 	unsigned long		flags;
 	bool			orphan;
@@ -93,6 +102,9 @@
 #endif
 	struct kref		ref;
 	struct clk_vdd_class	*vdd_class;
+	int			vdd_class_vote;
+	int			new_vdd_class_vote;
+	struct list_head	rate_change_node;
 	unsigned long		*rate_max;
 	int			num_rate_max;
 };
@@ -708,9 +720,11 @@
 	mutex_lock(&vdd_class->lock);
 
 	if (WARN(!vdd_class->level_votes[level],
-				"Reference counts are incorrect for %s level %d\n",
-				vdd_class->class_name, level))
+			"Reference counts are incorrect for %s level %d\n",
+			vdd_class->class_name, level)) {
+		rc = -EINVAL;
 		goto out;
+	}
 
 	vdd_class->level_votes[level]--;
 
@@ -774,29 +788,43 @@
 static int clk_vdd_class_init(struct clk_vdd_class *vdd)
 {
 	struct clk_handoff_vdd *v;
+	int ret = 0;
 
 	if (vdd->skip_handoff)
 		return 0;
 
+	mutex_lock(&vdd_class_list_lock);
+
 	list_for_each_entry(v, &clk_handoff_vdd_list, list) {
 		if (v->vdd_class == vdd)
-			return 0;
+			goto done;
 	}
 
-	pr_debug("voting for vdd_class %s\n", vdd->class_name);
+	if (!vdd_class_handoff_completed) {
+		pr_debug("voting for vdd_class %s\n", vdd->class_name);
 
-	if (clk_vote_vdd_level(vdd, vdd->num_levels - 1))
-		pr_err("failed to vote for %s\n", vdd->class_name);
+		ret = clk_vote_vdd_level(vdd, vdd->num_levels - 1);
+		if (ret) {
+			pr_err("failed to vote for %s, ret=%d\n",
+				vdd->class_name, ret);
+			goto done;
+		}
+	}
 
 	v = kmalloc(sizeof(*v), GFP_KERNEL);
-	if (!v)
-		return -ENOMEM;
+	if (!v) {
+		ret = -ENOMEM;
+		goto done;
+	}
 
 	v->vdd_class = vdd;
 
 	list_add_tail(&v->list, &clk_handoff_vdd_list);
 
-	return 0;
+done:
+	mutex_unlock(&vdd_class_list_lock);
+
+	return ret;
 }
 
 /***        clk api        ***/
@@ -967,7 +995,11 @@
 
 	trace_clk_unprepare_complete(core);
 
-	clk_unvote_rate_vdd(core, core->rate);
+	if (core->vdd_class) {
+		clk_unvote_vdd_level(core->vdd_class, core->vdd_class_vote);
+		core->vdd_class_vote = 0;
+		core->new_vdd_class_vote = 0;
+	}
 
 	clk_core_unprepare(core->parent);
 }
@@ -1024,6 +1056,11 @@
 			clk_core_unprepare(core->parent);
 			return ret;
 		}
+		if (core->vdd_class) {
+			core->vdd_class_vote
+				= clk_find_vdd_level(core, core->rate);
+			core->new_vdd_class_vote = core->vdd_class_vote;
+		}
 
 		if (core->ops->prepare)
 			ret = core->ops->prepare(core->hw);
@@ -1032,6 +1069,8 @@
 
 		if (ret) {
 			clk_unvote_rate_vdd(core, core->rate);
+			core->vdd_class_vote = 0;
+			core->new_vdd_class_vote = 0;
 			goto unprepare;
 		}
 	}
@@ -1372,12 +1411,15 @@
 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
 		clk_unprepare_unused_subtree(core);
 
+	mutex_lock(&vdd_class_list_lock);
 	list_for_each_entry_safe(v, v_temp, &clk_handoff_vdd_list, list) {
 		clk_unvote_vdd_level(v->vdd_class,
 				v->vdd_class->num_levels - 1);
 		list_del(&v->list);
 		kfree(v);
 	}
+	vdd_class_handoff_completed = true;
+	mutex_unlock(&vdd_class_list_lock);
 
 	clk_prepare_unlock();
 
@@ -1915,12 +1957,59 @@
 	return ret;
 }
 
-static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
+/*
+ * Vote for the voltage level required for core->new_rate.  Keep track of all
+ * clocks with a changed voltage level in clk_rate_change_list.
+ */
+static int clk_vote_new_rate_vdd(struct clk_core *core)
+{
+	int cur_level, next_level;
+	int ret;
+
+	if (IS_ERR_OR_NULL(core) || !core->vdd_class)
+		return 0;
+
+	if (!clk_core_is_prepared(core))
+		return 0;
+
+	cur_level = core->new_vdd_class_vote;
+	next_level = clk_find_vdd_level(core, core->new_rate);
+	if (cur_level == next_level)
+		return 0;
+
+	ret = clk_vote_vdd_level(core->vdd_class, next_level);
+	if (ret)
+		return ret;
+
+	core->new_vdd_class_vote = next_level;
+
+	if (list_empty(&core->rate_change_node)) {
+		list_add(&core->rate_change_node, &clk_rate_change_list);
+	} else {
+		/*
+		 * A different new_rate has been determined for a clock that
+		 * was already encountered in the clock tree traversal so the
+		 * level that was previously voted for it should be removed.
+		 */
+		ret = clk_unvote_vdd_level(core->vdd_class, cur_level);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
 			     struct clk_core *new_parent, u8 p_index)
 {
 	struct clk_core *child;
+	int ret;
 
 	core->new_rate = new_rate;
+	ret = clk_vote_new_rate_vdd(core);
+	if (ret)
+		return ret;
+
 	core->new_parent = new_parent;
 	core->new_parent_index = p_index;
 	/* include clk in new parent's PRE_RATE_CHANGE notifications */
@@ -1930,8 +2019,12 @@
 
 	hlist_for_each_entry(child, &core->children, child_node) {
 		child->new_rate = clk_recalc(child, new_rate);
-		clk_calc_subtree(child, child->new_rate, NULL, 0);
+		ret = clk_calc_subtree(child, child->new_rate, NULL, 0);
+		if (ret)
+			return ret;
 	}
+
+	return 0;
 }
 
 /*
@@ -2024,7 +2117,9 @@
 	if (!clk_is_rate_level_valid(core, rate))
 		return NULL;
 
-	clk_calc_subtree(core, new_rate, parent, p_index);
+	ret = clk_calc_subtree(core, new_rate, parent, p_index);
+	if (ret)
+		return NULL;
 
 	return top;
 }
@@ -2083,7 +2178,7 @@
 	struct clk_core *parent = NULL;
 	int rc = 0;
 
-	core->old_rate = old_rate = core->rate;
+	old_rate = core->rate;
 
 	if (core->new_parent) {
 		parent = core->new_parent;
@@ -2097,8 +2192,6 @@
 	if (rc)
 		return rc;
 
-	core->old_parent = core->parent;
-
 	if (core->flags & CLK_SET_RATE_UNGATE) {
 		unsigned long flags;
 
@@ -2112,7 +2205,6 @@
 
 	if (core->new_parent && core->new_parent != core->parent) {
 		old_parent = __clk_set_parent_before(core, core->new_parent);
-		core->old_parent = old_parent;
 		trace_clk_set_parent(core, core->new_parent);
 
 		if (core->ops->set_rate_and_parent) {
@@ -2211,72 +2303,68 @@
 	return ret ? 0 : req.rate;
 }
 
-static int vote_vdd_up(struct clk_core *core)
+/*
+ * Unvote for the voltage level required for each core->new_vdd_class_vote in
+ * clk_rate_change_list.  This is used to undo voltage requests when an
+ * error is encountered before any physical rate change has taken place.
+ */
+static void clk_unvote_new_rate_vdd(void)
 {
-	struct clk_core *parent = NULL;
-	int ret, cur_level, next_level;
+	struct clk_core *core;
 
-	/* sanity */
-	if (IS_ERR_OR_NULL(core))
-		return 0;
-
-	if (core->vdd_class) {
-		cur_level = clk_find_vdd_level(core, core->rate);
-		next_level = clk_find_vdd_level(core, core->new_rate);
-		if (cur_level == next_level)
-			return 0;
+	list_for_each_entry(core, &clk_rate_change_list, rate_change_node) {
+		clk_unvote_vdd_level(core->vdd_class, core->new_vdd_class_vote);
+		core->new_vdd_class_vote = core->vdd_class_vote;
 	}
+}
 
-	/* save parent rate, if it exists */
-	if (core->new_parent)
-		parent = core->new_parent;
-	else if (core->parent)
-		parent = core->parent;
+/*
+ * Unvote for the voltage level required for each core->vdd_class_vote in
+ * clk_rate_change_list.
+ */
+static int clk_unvote_old_rate_vdd(void)
+{
+	struct clk_core *core;
+	int ret;
 
-	if (core->prepare_count && core->new_rate) {
-		ret = clk_vote_rate_vdd(core, core->new_rate);
+	list_for_each_entry(core, &clk_rate_change_list, rate_change_node) {
+		ret = clk_unvote_vdd_level(core->vdd_class,
+					   core->vdd_class_vote);
 		if (ret)
 			return ret;
 	}
 
-	vote_vdd_up(parent);
-
 	return 0;
 }
 
-static int vote_vdd_down(struct clk_core *core)
+/*
+ * In the case that rate setting fails, apply the max voltage level needed
+ * by either the old or new rate for each changed clock.
+ */
+static void clk_vote_safe_vdd(void)
 {
-	struct clk_core *parent;
-	unsigned long rate;
-	int cur_level, old_level;
+	struct clk_core *core;
 
-	/* sanity */
-	if (IS_ERR_OR_NULL(core))
-		return 0;
-
-	rate = core->old_rate;
-
-	/* New rate set was a failure */
-	if (DIV_ROUND_CLOSEST(core->rate, 1000) !=
-		DIV_ROUND_CLOSEST(core->new_rate, 1000))
-		rate = core->new_rate;
-
-	if (core->vdd_class) {
-		cur_level = clk_find_vdd_level(core, core->rate);
-		old_level = clk_find_vdd_level(core, core->old_rate);
-		if ((cur_level == old_level)
-			|| !core->vdd_class->level_votes[old_level])
-			return 0;
+	list_for_each_entry(core, &clk_rate_change_list, rate_change_node) {
+		if (core->vdd_class_vote > core->new_vdd_class_vote) {
+			clk_vote_vdd_level(core->vdd_class,
+						core->vdd_class_vote);
+			clk_unvote_vdd_level(core->vdd_class,
+						core->new_vdd_class_vote);
+			core->new_vdd_class_vote = core->vdd_class_vote;
+		}
 	}
+}
 
-	parent = core->old_parent;
+static void clk_cleanup_vdd_votes(void)
+{
+	struct clk_core *core, *temp;
 
-	if (core->prepare_count && rate)
-		clk_unvote_rate_vdd(core, rate);
-
-	vote_vdd_down(parent);
-
-	return 0;
+	list_for_each_entry_safe(core, temp, &clk_rate_change_list,
+				 rate_change_node) {
+		core->vdd_class_vote = core->new_vdd_class_vote;
+		list_del_init(&core->rate_change_node);
+	}
 }
 
 static int clk_core_set_rate_nolock(struct clk_core *core,
@@ -2285,6 +2373,15 @@
 	struct clk_core *top, *fail_clk;
 	unsigned long rate;
 	int ret = 0;
+	/*
+	 * The prepare lock ensures mutual exclusion with other tasks.
+	 * set_rate_nesting_count is a static so that it can be incremented in
+	 * the case of reentrancy caused by a set_rate() ops callback itself
+	 * calling clk_set_rate().  That way, the voltage level votes for the
+	 * old rates are safely removed when the original invocation of this
+	 * function completes.
+	 */
+	static unsigned int set_rate_nesting_count;
 
 	if (!core)
 		return 0;
@@ -2301,12 +2398,14 @@
 
 	/* calculate new rates and get the topmost changed clock */
 	top = clk_calc_new_rates(core, req_rate);
-	if (!top)
-		return -EINVAL;
+	if (!top) {
+		ret = -EINVAL;
+		goto pre_rate_change_err;
+	}
 
 	ret = clk_pm_runtime_get(core);
 	if (ret)
-		return ret;
+		goto pre_rate_change_err;
 
 	/* notify that we are about to change rates */
 	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
@@ -2315,33 +2414,45 @@
 				fail_clk->name, req_rate);
 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
 		ret = -EBUSY;
-		goto err;
+		clk_pm_runtime_put(core);
+		goto pre_rate_change_err;
 	}
 
-	/* Enforce the VDD for new frequency */
-	ret = vote_vdd_up(core);
-	if (ret)
-		goto err;
-
 	/* change the rates */
+	set_rate_nesting_count++;
 	ret = clk_change_rate(top);
+	set_rate_nesting_count--;
 	if (ret) {
 		pr_err("%s: failed to set %s clock to run at %lu\n", __func__,
 				top->name, req_rate);
 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-		/* Release vdd requirements for new frequency. */
-		vote_vdd_down(core);
-		goto err;
+		clk_vote_safe_vdd();
+		goto post_rate_change_err;
 	}
 
 	core->req_rate = req_rate;
-	/* Release vdd requirements for old frequency. */
-	vote_vdd_down(core);
 
-err:
+post_rate_change_err:
+	/*
+	 * Only remove vdd_class level votes for old clock rates after all
+	 * nested clk_set_rate() calls have completed.
+	 */
+	if (set_rate_nesting_count == 0) {
+		ret |= clk_unvote_old_rate_vdd();
+		clk_cleanup_vdd_votes();
+	}
+
 	clk_pm_runtime_put(core);
 
 	return ret;
+
+pre_rate_change_err:
+	if (set_rate_nesting_count == 0) {
+		clk_unvote_new_rate_vdd();
+		clk_cleanup_vdd_votes();
+	}
+
+	return ret;
 }
 
 /**
@@ -3164,7 +3275,7 @@
 	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
 	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
 	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
-	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+	seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
 	seq_printf(s, "\"duty_cycle\": %u",
 		   clk_core_get_scaled_duty_cycle(c, 100000));
 }
@@ -4204,6 +4315,7 @@
 	};
 
 	INIT_HLIST_HEAD(&core->clks);
+	INIT_LIST_HEAD(&core->rate_change_node);
 
 	hw->clk = __clk_create_clk(hw, NULL, NULL);
 	if (IS_ERR(hw->clk)) {
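
Note: clk_core_set_rate_nolock() above relies on a static nesting counter
because a set_rate() op may itself call clk_set_rate() while the same task
holds the prepare lock.  Only the outermost invocation may drop the old-rate
voltage votes; an inner call must not unvote levels the outer call still
depends on.  A stripped-down sketch of the guard (change_rate() and
cleanup_old_votes() are placeholders):

	static int set_rate_locked(unsigned long rate)
	{
		/* Safe as a static: callers serialize on the prepare lock. */
		static unsigned int nesting;
		int ret;

		nesting++;
		ret = change_rate(rate);	/* may re-enter this function */
		nesting--;

		if (nesting == 0)		/* outermost call only */
			cleanup_old_votes();
		return ret;
	}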
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
index 15af423..f5d54a6 100644
--- a/drivers/clk/imgtec/clk-boston.c
+++ b/drivers/clk/imgtec/clk-boston.c
@@ -73,27 +73,32 @@
 	hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
 	if (IS_ERR(hw)) {
 		pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
-		return;
+		goto error;
 	}
 	onecell->hws[BOSTON_CLK_INPUT] = hw;
 
 	hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
 	if (IS_ERR(hw)) {
 		pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
-		return;
+		goto error;
 	}
 	onecell->hws[BOSTON_CLK_SYS] = hw;
 
 	hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
 	if (IS_ERR(hw)) {
 		pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
-		return;
+		goto error;
 	}
 	onecell->hws[BOSTON_CLK_CPU] = hw;
 
 	err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell);
 	if (err)
 		pr_err("failed to add DT provider: %d\n", err);
+
+	return;
+
+error:
+	kfree(onecell);
 }
 
 /*
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index 9903652..e695622 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -154,7 +154,7 @@
 
 struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
 			     u8 width, void __iomem *busy_reg, u8 busy_shift,
-			     const char **parent_names, int num_parents)
+			     const char * const *parent_names, int num_parents)
 {
 	struct clk_busy_mux *busy;
 	struct clk *clk;
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index c9b327e..44817c1 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -70,7 +70,7 @@
 };
 
 struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
-			      u8 shift, u8 width, const char **parents,
+			      u8 shift, u8 width, const char * const *parents,
 			      int num_parents, void (*fixup)(u32 *val))
 {
 	struct clk_fixup_mux *fixup_mux;
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 8c7c2fc..c509324 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -508,8 +508,12 @@
 	 * lvds1_gate and lvds2_gate are pseudo-gates.  Both can be
 	 * independently configured as clock inputs or outputs.  We treat
 	 * the "output_enable" bit as a gate, even though it's really just
-	 * enabling clock output.
+	 * enabling clock output.  Initially the gate bits are cleared;
+	 * otherwise the exclusive configuration chosen by software that ran
+	 * before the clock driver would be locked in, with no way to
+	 * change it.
 	 */
+	writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
 	clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
 	clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));
 
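Note: the writel(readl(...) & ~0x3c00, ...) added above is a plain
read-modify-write that clears the four LVDS gate/select bits before the
exclusive gates are registered.  Written as a named helper, the idiom is
(clear_bits() is illustrative, not a kernel API):

	static inline void clear_bits(void __iomem *reg, u32 mask)
	{
		writel(readl(reg) & ~mask, reg);
	}

	/* Equivalent to the call above: */
	clear_bits(base + 0x160, 0x3c00);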
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index eb6bcbf..390e3e0 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -17,6 +17,8 @@
 
 #include "clk.h"
 
+#define CCDR				0x4
+#define BM_CCM_CCDR_MMDC_CH0_MASK	(1 << 17)
 #define CCSR			0xc
 #define BM_CCSR_PLL1_SW_CLK_SEL	(1 << 2)
 #define CACRR			0x10
@@ -409,6 +411,10 @@
 	clks[IMX6SL_CLK_USDHC3]       = imx_clk_gate2("usdhc3",       "usdhc3_podf",       base + 0x80, 6);
 	clks[IMX6SL_CLK_USDHC4]       = imx_clk_gate2("usdhc4",       "usdhc4_podf",       base + 0x80, 8);
 
+	/* Ensure the MMDC CH0 handshake is bypassed */
+	writel_relaxed(readl_relaxed(base + CCDR) |
+		BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
+
 	imx_check_clocks(clks, ARRAY_SIZE(clks));
 
 	clk_data.clks = clks;
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 8076ec0..e65c111 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -63,14 +63,14 @@
 
 struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
 			     u8 width, void __iomem *busy_reg, u8 busy_shift,
-			     const char **parent_names, int num_parents);
+			     const char * const *parent_names, int num_parents);
 
 struct clk *imx_clk_fixup_divider(const char *name, const char *parent,
 				  void __iomem *reg, u8 shift, u8 width,
 				  void (*fixup)(u32 *val));
 
 struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
-			      u8 shift, u8 width, const char **parents,
+			      u8 shift, u8 width, const char * const *parents,
 			      int num_parents, void (*fixup)(u32 *val));
 
 static inline struct clk *imx_clk_fixed(const char *name, int rate)
@@ -79,7 +79,8 @@
 }
 
 static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
-		u8 shift, u8 width, const char **parents, int num_parents)
+			u8 shift, u8 width, const char * const *parents,
+			int num_parents)
 {
 	return clk_register_mux(NULL, name, parents, num_parents,
 			CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
@@ -192,7 +193,8 @@
 }
 
 static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
-		u8 shift, u8 width, const char **parents, int num_parents)
+			u8 shift, u8 width, const char * const *parents,
+			int num_parents)
 {
 	return clk_register_mux(NULL, name, parents, num_parents,
 			CLK_SET_RATE_NO_REPARENT, reg, shift,
@@ -200,7 +202,8 @@
 }
 
 static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
-		u8 shift, u8 width, const char **parents, int num_parents)
+			u8 shift, u8 width, const char * const *parents,
+			int num_parents)
 {
 	return clk_register_mux(NULL, name, parents, num_parents,
 			CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
@@ -208,8 +211,9 @@
 }
 
 static inline struct clk *imx_clk_mux_flags(const char *name,
-		void __iomem *reg, u8 shift, u8 width, const char **parents,
-		int num_parents, unsigned long flags)
+			void __iomem *reg, u8 shift, u8 width,
+			const char * const *parents, int num_parents,
+			unsigned long flags)
 {
 	return clk_register_mux(NULL, name, parents, num_parents,
 			flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0,
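
Note: the clk.h prototype changes add a second const.  A parameter of type
const char * const * promises that the callee modifies neither the strings nor
the pointer array itself, so parent lists can be declared fully const and
placed in read-only data.  An illustrative caller, assuming the imx_clk_mux()
prototype above:

	static const char * const parents[] = { "osc", "pll1" };

	/* Compiles cleanly with the new prototypes; the old
	 * `const char **` form needed a cast or a mutable array. */
	clk = imx_clk_mux("mux", base + 0x10, 0, 1,
			  parents, ARRAY_SIZE(parents));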
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 74697e1..9d79ff8 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -568,13 +568,14 @@
 };
 
 static const struct clk_div_table cpu_scale_table[] = {
-	{ .val = 2, .div = 4 },
-	{ .val = 3, .div = 6 },
-	{ .val = 4, .div = 8 },
-	{ .val = 5, .div = 10 },
-	{ .val = 6, .div = 12 },
-	{ .val = 7, .div = 14 },
-	{ .val = 8, .div = 16 },
+	{ .val = 1, .div = 4 },
+	{ .val = 2, .div = 6 },
+	{ .val = 3, .div = 8 },
+	{ .val = 4, .div = 10 },
+	{ .val = 5, .div = 12 },
+	{ .val = 6, .div = 14 },
+	{ .val = 7, .div = 16 },
+	{ .val = 8, .div = 18 },
 	{ /* sentinel */ },
 };
 
@@ -582,7 +583,7 @@
 	.data = &(struct clk_regmap_div_data){
 		.offset =  HHI_SYS_CPU_CLK_CNTL1,
 		.shift = 20,
-		.width = 9,
+		.width = 10,
 		.table = cpu_scale_table,
 		.flags = CLK_DIVIDER_ALLOW_ZERO,
 	},
@@ -595,20 +596,27 @@
 	},
 };
 
+static u32 mux_table_cpu_scale_out_sel[] = { 0, 1, 3 };
 static struct clk_regmap meson8b_cpu_scale_out_sel = {
 	.data = &(struct clk_regmap_mux_data){
 		.offset = HHI_SYS_CPU_CLK_CNTL0,
 		.mask = 0x3,
 		.shift = 2,
+		.table = mux_table_cpu_scale_out_sel,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "cpu_scale_out_sel",
 		.ops = &clk_regmap_mux_ro_ops,
+		/*
+		 * NOTE: We are skipping the parent with value 0x2 (which is
+		 * "cpu_div3") because it results in a duty cycle of 33% which
+		 * makes the system unstable and can result in a lockup of the
+		 * whole system.
+		 */
 		.parent_names = (const char *[]) { "cpu_in_sel",
 						   "cpu_div2",
-						   "cpu_div3",
 						   "cpu_scale_div" },
-		.num_parents = 4,
+		.num_parents = 3,
 		.flags = CLK_SET_RATE_PARENT,
 	},
 };
@@ -626,7 +634,8 @@
 						  "cpu_scale_out_sel" },
 		.num_parents = 2,
 		.flags = (CLK_SET_RATE_PARENT |
-			  CLK_SET_RATE_NO_REPARENT),
+			  CLK_SET_RATE_NO_REPARENT |
+			  CLK_IS_CRITICAL),
 	},
 };
 
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 46e5628..3d7bbea 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -366,3 +366,29 @@
 	  LITO devices.
 	  Say Y if you want to support video devices and functionality such as
 	  video encode/decode.
+
+config SM_CAMCC_LITO
+	tristate "LITO Camera Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the camera clock controller on Qualcomm Technologies, Inc.
+	  LITO devices.
+	  Say Y if you want to support camera devices and functionality such as
+	  capturing pictures.
+
+config SM_DISPCC_LITO
+	tristate "LITO Display Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the display clock controller on Qualcomm Technologies, Inc.
+	  LITO devices.
+	  Say Y if you want to support display devices and functionality such as
+	  splash screen.
+
+config SM_GPUCC_LITO
+	tristate "LITO Graphics Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the graphics clock controller on Qualcomm Technologies, Inc.
+	  LITO devices.
+	  Say Y if you want to support graphics controller devices.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index c8d9225..8fce145 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -52,8 +52,11 @@
 obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
 obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
 obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+obj-$(CONFIG_SM_CAMCC_LITO) += camcc-lito.o
+obj-$(CONFIG_SM_DISPCC_LITO) += dispcc-lito.o
 obj-$(CONFIG_SM_GCC_LITO) += gcc-lito.o
 obj-$(CONFIG_SM_VIDEOCC_LITO) += videocc-lito.o
+obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o
 obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
 
 obj-y += mdss/
diff --git a/drivers/clk/qcom/camcc-kona.c b/drivers/clk/qcom/camcc-kona.c
index c859e3c..25873aa 100644
--- a/drivers/clk/qcom/camcc-kona.c
+++ b/drivers/clk/qcom/camcc-kona.c
@@ -280,6 +280,9 @@
 	.config_ctl_val = 0x08200920,
 	.config_ctl_hi_val = 0x05008011,
 	.config_ctl_hi1_val = 0x00000000,
+	.test_ctl_val = 0x00010000,
+	.test_ctl_hi_val = 0x00000000,
+	.test_ctl_hi1_val = 0x00000000,
 	.user_ctl_val = 0x00000100,
 	.user_ctl_hi_val = 0x00000000,
 	.user_ctl_hi1_val = 0x00000000,
@@ -446,6 +449,7 @@
 		.name = "cam_cc_sbi_div_clk_src",
 		.parent_names = (const char *[]){ "cam_cc_ife_0_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
diff --git a/drivers/clk/qcom/camcc-lito.c b/drivers/clk/qcom/camcc-lito.c
new file mode 100644
index 0000000..10842bb
--- /dev/null
+++ b/drivers/clk/qcom/camcc-lito.c
@@ -0,0 +1,2377 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,camcc-lito.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "vdd-level.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_BI_TCXO_MX,
+	P_CAM_CC_PLL0_OUT_EVEN,
+	P_CAM_CC_PLL0_OUT_MAIN,
+	P_CAM_CC_PLL0_OUT_ODD,
+	P_CAM_CC_PLL1_OUT_EVEN,
+	P_CAM_CC_PLL2_OUT_AUX,
+	P_CAM_CC_PLL2_OUT_EARLY,
+	P_CAM_CC_PLL2_OUT_MAIN,
+	P_CAM_CC_PLL3_OUT_EVEN,
+	P_CAM_CC_PLL4_OUT_EVEN,
+	P_CHIP_SLEEP_CLK,
+	P_CORE_BI_PLL_TEST_SE,
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL0_OUT_MAIN, 1 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL0_OUT_ODD, 3 },
+	{ P_CAM_CC_PLL2_OUT_MAIN, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"cam_cc_pll0",
+	"cam_cc_pll0_out_even",
+	"cam_cc_pll0_out_odd",
+	"cam_cc_pll2_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+	{ P_BI_TCXO_MX, 0 },
+	{ P_CAM_CC_PLL2_OUT_AUX, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"cam_cc_pll2_out_aux",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL0_OUT_MAIN, 1 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL0_OUT_ODD, 3 },
+	{ P_CAM_CC_PLL1_OUT_EVEN, 4 },
+	{ P_CAM_CC_PLL2_OUT_EARLY, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"cam_cc_pll0",
+	"cam_cc_pll0_out_even",
+	"cam_cc_pll0_out_odd",
+	"cam_cc_pll1_out_even",
+	"cam_cc_pll2",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL0_OUT_MAIN, 1 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL0_OUT_ODD, 3 },
+	{ P_CAM_CC_PLL2_OUT_EARLY, 5 },
+	{ P_CAM_CC_PLL4_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_3[] = {
+	"bi_tcxo",
+	"cam_cc_pll0",
+	"cam_cc_pll0_out_even",
+	"cam_cc_pll0_out_odd",
+	"cam_cc_pll2",
+	"cam_cc_pll4_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL3_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_4[] = {
+	"bi_tcxo",
+	"cam_cc_pll3_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL4_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_5[] = {
+	"bi_tcxo",
+	"cam_cc_pll4_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL1_OUT_EVEN, 4 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_6[] = {
+	"bi_tcxo",
+	"cam_cc_pll1_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+	{ P_CHIP_SLEEP_CLK, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_7[] = {
+	"chip_sleep_clk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL0_OUT_ODD, 3 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_8[] = {
+	"bi_tcxo",
+	"cam_cc_pll0_out_odd",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_9[] = {
+	"bi_tcxo",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco lucid_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
+static struct pll_vco zonda_vco[] = {
+	{ 595200000, 3600000000, 0 },
+};
+
+static const struct alpha_pll_config cam_cc_pll0_config = {
+	.l = 0x3E,
+	.cal_l = 0x44,
+	.alpha = 0x8000,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00003101,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll0_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll0_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+	{ 0x3, 3 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+	.offset = 0x0,
+	.post_div_shift = 12,
+	.post_div_table = post_div_table_cam_cc_pll0_out_odd,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll0_out_odd",
+		.parent_names = (const char *[]){ "cam_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+	.l = 0x1F,
+	.cal_l = 0x44,
+	.alpha = 0x4000,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000101,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+	.offset = 0x1000,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll1",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+	.offset = 0x1000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll1_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll1_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll1" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+	.l = 0x32,
+	.cal_l = 0x32,
+	.alpha = 0x0,
+	.config_ctl_val = 0x08200920,
+	.config_ctl_hi_val = 0x05008001,
+	.config_ctl_hi1_val = 0x00000000,
+	.user_ctl_val = 0x00000108,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+	.offset = 0x2000,
+	.vco_table = zonda_vco,
+	.num_vco = ARRAY_SIZE(zonda_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll2",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_zonda_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_LOWER] = 1600000000,
+				[VDD_LOW] = 2000000000,
+				[VDD_NOMINAL] = 2900000000,
+				[VDD_HIGH] = 3600000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux = {
+	.offset = 0x2000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll2_out_aux,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux),
+	.width = 2,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll2_out_aux",
+		.parent_names = (const char *[]){ "cam_cc_pll2" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_zonda_ops,
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_main[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
+	.offset = 0x2000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll2_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_main),
+	.width = 2,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll2_out_main",
+		.parent_names = (const char *[]){ "cam_cc_pll2" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_zonda_ops,
+	},
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+	.l = 0x27,
+	.cal_l = 0x44,
+	.alpha = 0x9555,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000101,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+	.offset = 0x3000,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll3",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+	.offset = 0x3000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll3_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll3_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll3" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+	.l = 0x27,
+	.cal_l = 0x44,
+	.alpha = 0x9555,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000101,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+	.offset = 0x4000,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll4",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+	.offset = 0x4000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll4_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll4_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll4" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+	.cmd_rcgr = 0x7010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_bps_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_bps_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 200000000,
+			[VDD_LOW] = 400000000,
+			[VDD_LOW_L1] = 480000000,
+			[VDD_NOMINAL] = 600000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+	F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+	.cmd_rcgr = 0xc12c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_camnoc_axi_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 150000000,
+			[VDD_LOW] = 240000000,
+			[VDD_LOW_L1] = 320000000,
+			[VDD_NOMINAL] = 400000000,
+			[VDD_HIGH] = 480000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+	.cmd_rcgr = 0xc0c4,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cci_0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 37500000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+	.cmd_rcgr = 0xc0e0,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cci_1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 37500000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+	.cmd_rcgr = 0xa064,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_2,
+	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cphy_rx_clk_src",
+		.parent_names = cam_cc_parent_names_2,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 384000000,
+			[VDD_LOW_L1] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+	.cmd_rcgr = 0x6004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi0phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+	.cmd_rcgr = 0x6028,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi1phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+	.cmd_rcgr = 0x604c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi2phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+	.cmd_rcgr = 0x6070,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi3phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+	.cmd_rcgr = 0x703c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_fast_ahb_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW] = 200000000,
+			[VDD_LOW_L1] = 300000000,
+			[VDD_NOMINAL] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(380000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+	.cmd_rcgr = 0xc09c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_3,
+	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_fd_core_clk_src",
+		.parent_names = cam_cc_parent_names_3,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 380000000,
+			[VDD_LOW] = 384000000,
+			[VDD_LOW_L1] = 480000000,
+			[VDD_NOMINAL] = 600000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+	.cmd_rcgr = 0xc074,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_icp_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_icp_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 400000000,
+			[VDD_LOW_L1] = 600000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(380000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(510000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(760000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+	.cmd_rcgr = 0xa010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_4,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_0_clk_src",
+		.parent_names = cam_cc_parent_names_4,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 380000000,
+			[VDD_LOW] = 510000000,
+			[VDD_LOW_L1] = 637000000,
+			[VDD_NOMINAL] = 760000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+	.cmd_rcgr = 0xa03c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_2,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_0_csid_clk_src",
+		.parent_names = cam_cc_parent_names_2,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 384000000,
+			[VDD_LOW_L1] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(380000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(510000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(760000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+	.cmd_rcgr = 0xb010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_5,
+	.freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_1_clk_src",
+		.parent_names = cam_cc_parent_names_5,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 380000000,
+			[VDD_LOW] = 510000000,
+			[VDD_LOW_L1] = 637000000,
+			[VDD_NOMINAL] = 760000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+	.cmd_rcgr = 0xb034,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_2,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_1_csid_clk_src",
+		.parent_names = cam_cc_parent_names_2,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 384000000,
+			[VDD_LOW_L1] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+	.cmd_rcgr = 0xc004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_lite_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 320000000,
+			[VDD_LOW] = 400000000,
+			[VDD_LOW_L1] = 480000000,
+			[VDD_NOMINAL] = 600000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+	.cmd_rcgr = 0xc020,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_2,
+	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_lite_csid_clk_src",
+		.parent_names = cam_cc_parent_names_2,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 384000000,
+			[VDD_LOW_L1] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+	F(430000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+	F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+	.cmd_rcgr = 0x8010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_6,
+	.freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ipe_0_clk_src",
+		.parent_names = cam_cc_parent_names_6,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 430000000,
+			[VDD_LOW_L1] = 520000000,
+			[VDD_NOMINAL] = 600000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+	.cmd_rcgr = 0xc048,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_bps_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_jpeg_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 200000000,
+			[VDD_LOW] = 400000000,
+			[VDD_LOW_L1] = 480000000,
+			[VDD_NOMINAL] = 600000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+	.cmd_rcgr = 0xc100,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_lrme_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_lrme_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 240000000,
+			[VDD_LOW] = 300000000,
+			[VDD_LOW_L1] = 320000000,
+			[VDD_NOMINAL] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+	F(19200000, P_BI_TCXO_MX, 1, 0, 0),
+	F(24000000, P_CAM_CC_PLL2_OUT_AUX, 1, 1, 20),
+	F(34285714, P_CAM_CC_PLL2_OUT_AUX, 14, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+	.cmd_rcgr = 0x5004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk0_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_mx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 34285714},
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+	.cmd_rcgr = 0x5024,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk1_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_mx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 34285714},
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+	.cmd_rcgr = 0x5044,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk2_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_mx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 34285714},
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+	.cmd_rcgr = 0x5064,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk3_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_mx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 34285714},
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+	.cmd_rcgr = 0x5084,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk4_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_mx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 34285714},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+	F(32000, P_CHIP_SLEEP_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+	.cmd_rcgr = 0xc1a4,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_7,
+	.freq_tbl = ftbl_cam_cc_sleep_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_sleep_clk_src",
+		.parent_names = cam_cc_parent_names_7,
+		.num_parents = 2,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 32000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(80000000, P_CAM_CC_PLL0_OUT_ODD, 5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+	.cmd_rcgr = 0x7058,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_8,
+	.freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_slow_ahb_clk_src",
+		.parent_names = cam_cc_parent_names_8,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 80000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+	.cmd_rcgr = 0xc188,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_9,
+	.freq_tbl = ftbl_cam_cc_xo_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_xo_clk_src",
+		.parent_names = cam_cc_parent_names_9,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+	.halt_reg = 0x7070,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7070,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+	.halt_reg = 0x7054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+	.halt_reg = 0x7038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+	.halt_reg = 0x7028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_bps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+	.halt_reg = 0xc148,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc148,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_camnoc_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+	.halt_reg = 0xc150,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc150,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_camnoc_dcd_xo_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+	.halt_reg = 0xc0dc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc0dc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cci_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cci_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+	.halt_reg = 0xc0f8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc0f8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cci_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cci_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+	.halt_reg = 0xc184,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0xc184,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_core_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+	.halt_reg = 0xc124,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc124,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cpas_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+	.halt_reg = 0x601c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x601c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi0phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi0phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+	.halt_reg = 0x6040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi1phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi1phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+	.halt_reg = 0x6064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi2phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi2phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+	.halt_reg = 0x6088,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6088,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi3phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi3phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+	.halt_reg = 0x6020,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+	.halt_reg = 0x6044,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+	.halt_reg = 0x6068,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6068,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy2_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+	.halt_reg = 0x608c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x608c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy3_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+	.halt_reg = 0xc0b4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc0b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_fd_core_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fd_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+	.halt_reg = 0xc0bc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc0bc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_fd_core_uar_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fd_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_gdsc_clk = {
+	.halt_reg = 0xc1a0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc1a0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_gdsc_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_xo_clk_src",
+			},
+			.num_parents = 1,
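+			/* CLK_IS_CRITICAL keeps this clock enabled at all times. */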
+			.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+	.halt_reg = 0xc094,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc094,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+	.halt_reg = 0xc08c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc08c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_icp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+	.halt_reg = 0xa080,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+	.halt_reg = 0xa028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+	.halt_reg = 0xa07c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa07c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+	.halt_reg = 0xa054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+	.halt_reg = 0xa038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_dsp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+	.halt_reg = 0xb058,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb058,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+	.halt_reg = 0xb028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+	.halt_reg = 0xb054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+	.halt_reg = 0xb04c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb04c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+	.halt_reg = 0xb030,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_dsp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+	.halt_reg = 0xc01c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc01c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_lite_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+	.halt_reg = 0xc040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+	.halt_reg = 0xc038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_lite_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+	.halt_reg = 0x8040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+	.halt_reg = 0x803c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x803c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+	.halt_reg = 0x8038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+	.halt_reg = 0x8028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ipe_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+	.halt_reg = 0x9028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+	.halt_reg = 0x9024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+	.halt_reg = 0x9020,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_clk = {
+	.halt_reg = 0x9010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ipe_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+	.halt_reg = 0xc060,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc060,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_jpeg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_jpeg_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+	.halt_reg = 0xc118,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc118,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_lrme_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_lrme_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+	.halt_reg = 0x501c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x501c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+	.halt_reg = 0x503c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x503c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+	.halt_reg = 0x505c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x505c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk2_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+	.halt_reg = 0x507c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x507c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk3_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+	.halt_reg = 0x509c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x509c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk4_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk4_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_sleep_clk = {
+	.halt_reg = 0xc1bc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc1bc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_sleep_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *cam_cc_lito_clocks[] = {
+	[CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+	[CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+	[CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+	[CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+	[CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+	[CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+	[CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+	[CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+	[CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+	[CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+	[CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+	[CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+	[CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+	[CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+	[CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+	[CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+	[CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+	[CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+	[CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+	[CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+	[CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+	[CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+	[CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+	[CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+	[CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+	[CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+	[CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+	[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+	[CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+	[CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+	[CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+	[CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr,
+	[CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+	[CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+	[CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+	[CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+	[CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+	[CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+	[CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+	[CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+	[CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+	[CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+	[CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+	[CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+	[CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+	[CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+	[CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+	[CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+	[CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+	[CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+	[CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+	[CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+	[CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+	[CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+	[CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+	[CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+	[CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+	[CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+	[CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+	[CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+	[CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+	[CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+	[CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+	[CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+	[CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+	[CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+	[CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+	[CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+	[CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+	[CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+	[CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+	[CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+	[CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+	[CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+	[CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+	[CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+	[CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+	[CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+	[CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+	[CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+	[CAM_CC_PLL2_OUT_AUX] = &cam_cc_pll2_out_aux.clkr,
+	[CAM_CC_PLL2_OUT_MAIN] = &cam_cc_pll2_out_main.clkr,
+	[CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+	[CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+	[CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+	[CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+	[CAM_CC_SLEEP_CLK] = &cam_cc_sleep_clk.clkr,
+	[CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+	[CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+	[CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static const struct regmap_config cam_cc_lito_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0xd028,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_lito_desc = {
+	.config = &cam_cc_lito_regmap_config,
+	.clks = cam_cc_lito_clocks,
+	.num_clks = ARRAY_SIZE(cam_cc_lito_clocks),
+};
+
+static const struct of_device_id cam_cc_lito_match_table[] = {
+	{ .compatible = "qcom,lito-camcc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_lito_match_table);
+
+static int cam_cc_lito_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct clk *clk;
+	int ret;
+
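+	/* Confirm the AHB clock is available; defer probe if it is not ready. */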
+	clk = clk_get(&pdev->dev, "cfg_ahb_clk");
+	if (IS_ERR(clk)) {
+		if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get ahb clock handle\n");
+		return PTR_ERR(clk);
+	}
+	clk_put(clk);
+
+	vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(vdd_mx.regulator[0])) {
+		if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_mx regulator\n");
+		return PTR_ERR(vdd_mx.regulator[0]);
+	}
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	regmap = qcom_cc_map(pdev, &cam_cc_lito_desc);
+	if (IS_ERR(regmap)) {
+		dev_err(&pdev->dev, "Failed to map the cam CC registers\n");
+		return PTR_ERR(regmap);
+	}
+
+	clk_lucid_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+	clk_lucid_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+	clk_zonda_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+	clk_lucid_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+	clk_lucid_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+
+	ret = qcom_cc_really_probe(pdev, &cam_cc_lito_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register CAM CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered CAM CC clocks\n");
+	return 0;
+}
+
+static struct platform_driver cam_cc_lito_driver = {
+	.probe = cam_cc_lito_probe,
+	.driver = {
+		.name = "lito-camcc",
+		.of_match_table = cam_cc_lito_match_table,
+	},
+};
+
+static int __init cam_cc_lito_init(void)
+{
+	return platform_driver_register(&cam_cc_lito_driver);
+}
+subsys_initcall(cam_cc_lito_init);
+
+static void __exit cam_cc_lito_exit(void)
+{
+	platform_driver_unregister(&cam_cc_lito_driver);
+}
+module_exit(cam_cc_lito_exit);
+
+MODULE_DESCRIPTION("QTI CAM_CC LITO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cam_cc-lito");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index afc76d1..5fc3c80 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -161,7 +161,8 @@
 
 /* ZONDA PLL specific offsets */
 #define ZONDA_PLL_OUT_MASK	0x9
-
+#define ZONDA_STAY_IN_CFA	BIT(16)
+#define ZONDA_PLL_FREQ_LOCK_DET	BIT(29)
 
 #define pll_alpha_width(p)					\
 		((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ?	\
@@ -216,6 +217,9 @@
 #define wait_for_pll_enable_lock(pll) \
 	wait_for_pll(pll, PLL_LOCK_DET, 0, "enable")
 
+#define wait_for_zonda_pll_freq_lock(pll) \
+	wait_for_pll(pll, ZONDA_PLL_FREQ_LOCK_DET, 0, "freq enable")
+
 #define wait_for_pll_disable(pll) \
 	wait_for_pll(pll, PLL_ACTIVE_FLAG, 1, "disable")
 
@@ -879,8 +883,7 @@
 				config->test_ctl_hi1_val);
 
 	regmap_update_bits(regmap, PLL_MODE(pll),
-			 PLL_UPDATE_BYPASS,
-			 PLL_UPDATE_BYPASS);
+			 PLL_BYPASSNL, 0);
 
 	/* Disable PLL output */
 	regmap_update_bits(regmap, PLL_MODE(pll),
@@ -900,7 +903,7 @@
 static int clk_zonda_pll_enable(struct clk_hw *hw)
 {
 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
-	u32 val;
+	u32 val, test_ctl_val;
 	int ret;
 
 	ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
@@ -937,7 +940,15 @@
 	regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
 						PLL_OPMODE_RUN);
 
-	ret = wait_for_pll_enable_lock(pll);
+	ret = regmap_read(pll->clkr.regmap, PLL_TEST_CTL(pll), &test_ctl_val);
+	if (ret)
+		return ret;
+
+	/* If the PLL is in CFA mode, poll for frequency lock */
+	if (test_ctl_val & ZONDA_STAY_IN_CFA)
+		ret = wait_for_zonda_pll_freq_lock(pll);
+	else
+		ret = wait_for_pll_enable_lock(pll);
 	if (ret)
 		return ret;
 
@@ -1001,6 +1012,7 @@
 {
 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
 	unsigned long rrate;
+	u32 test_ctl_val;
 	u32 l;
 	u64 a;
 	int ret;
@@ -1022,7 +1034,16 @@
 	/* Wait before polling for the frequency latch */
 	udelay(5);
 
-	ret = wait_for_pll_enable_lock(pll);
+	/* Check whether the PLL is set to stay in CFA mode */
+	ret = regmap_read(pll->clkr.regmap, PLL_TEST_CTL(pll), &test_ctl_val);
+	if (ret)
+		return ret;
+
+	/* If the PLL is in CFA mode, poll for frequency lock */
+	if (test_ctl_val & ZONDA_STAY_IN_CFA)
+		ret = wait_for_zonda_pll_freq_lock(pll);
+	else
+		ret = wait_for_pll_enable_lock(pll);
 	if (ret)
 		return ret;
 
@@ -1512,6 +1533,10 @@
 void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 				const struct alpha_pll_config *config)
 {
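+	/* An already-enabled PLL is assumed configured; leave it untouched. */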
+	if (lucid_pll_is_enabled(pll, regmap))
+		return;
+
 	if (config->l)
 		regmap_write(regmap, PLL_L_VAL(pll), config->l);
 
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a31079c..6e2b645 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -1324,6 +1324,10 @@
 	if (cfg & mask)
 		f->pre_div = cfg & mask;
 
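+	/* Save the MND mode bits before cfg is masked for the source select below */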
+	mode = cfg & CFG_MODE_MASK;
+	mode >>= CFG_MODE_SHIFT;
+
 	cfg &= CFG_SRC_SEL_MASK;
 	cfg >>= CFG_SRC_SEL_SHIFT;
 
@@ -1338,8 +1341,6 @@
 		}
 	}
 
-	mode = cfg & CFG_MODE_MASK;
-	mode >>= CFG_MODE_SHIFT;
 	if (mode) {
 		mask = BIT(rcg->mnd_width) - 1;
 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
index 1ee75a5..a9f4753 100644
--- a/drivers/clk/qcom/clk-regmap-divider.c
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -43,8 +35,10 @@
 {
 	struct clk_regmap_div *divider = to_clk_regmap_div(hw);
 
-	return divider_round_rate(hw, rate, prate, NULL, divider->width,
-				  CLK_DIVIDER_ROUND_CLOSEST);
+	return divider_round_rate(hw, rate, prate, divider->table,
+				  divider->width,
+				  CLK_DIVIDER_ROUND_CLOSEST |
+				  divider->flags);
 }
 
 static int div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -54,8 +48,9 @@
 	struct clk_regmap *clkr = &divider->clkr;
 	u32 div;
 
-	div = divider_get_val(rate, parent_rate, NULL, divider->width,
-			      CLK_DIVIDER_ROUND_CLOSEST);
+	div = divider_get_val(rate, parent_rate, divider->table,
+			      divider->width, CLK_DIVIDER_ROUND_CLOSEST |
+			      divider->flags);
 
 	return regmap_update_bits(clkr->regmap, divider->reg,
 				  (BIT(divider->width) - 1) << divider->shift,
@@ -73,8 +68,9 @@
 	div >>= divider->shift;
 	div &= BIT(divider->width) - 1;
 
-	return divider_recalc_rate(hw, parent_rate, div, NULL,
-				   CLK_DIVIDER_ROUND_CLOSEST, divider->width);
+	return divider_recalc_rate(hw, parent_rate, div, divider->table,
+				   CLK_DIVIDER_ROUND_CLOSEST | divider->flags,
+				   divider->width);
 }
 
 const struct clk_ops clk_regmap_div_ops = {
diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h
index f61fdf9..37c9901 100644
--- a/drivers/clk/qcom/clk-regmap-divider.h
+++ b/drivers/clk/qcom/clk-regmap-divider.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014,2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __QCOM_CLK_REGMAP_DIVIDER_H__
@@ -10,11 +10,12 @@
 #include "clk-regmap.h"
 
 struct clk_regmap_div {
-	u32			reg;
-	u32			shift;
-	u32			width;
-	u32			flags;
-	struct clk_regmap	clkr;
+	u32				reg;
+	u32				shift;
+	u32				width;
+	u32				flags;
+	const struct clk_div_table	*table;
+	struct clk_regmap		clkr;
 };
 
 extern const struct clk_ops clk_regmap_div_ops;
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index d7f3b9e..55f6a3b 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -281,6 +281,33 @@
 	.num_clks = ARRAY_SIZE(kona_rpmh_clocks),
 };
 
+DEFINE_CLK_RPMH_ARC(lito, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
+DEFINE_CLK_RPMH_VRM(lito, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk1, rf_clk1_ao, "rfclkd1", 1);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk2, rf_clk2_ao, "rfclkd2", 1);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk3, rf_clk3_ao, "rfclkd3", 1);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk4, rf_clk4_ao, "rfclkd4", 1);
+
+static struct clk_hw *lito_rpmh_clocks[] = {
+	[RPMH_CXO_CLK]		= &lito_bi_tcxo.hw,
+	[RPMH_CXO_CLK_A]	= &lito_bi_tcxo_ao.hw,
+	[RPMH_LN_BB_CLK3]	= &lito_ln_bb_clk3.hw,
+	[RPMH_LN_BB_CLK3_A]	= &lito_ln_bb_clk3_ao.hw,
+	[RPMH_RF_CLK1]		= &lito_rf_clk1.hw,
+	[RPMH_RF_CLK1_A]	= &lito_rf_clk1_ao.hw,
+	[RPMH_RF_CLK2]		= &lito_rf_clk2.hw,
+	[RPMH_RF_CLK2_A]	= &lito_rf_clk2_ao.hw,
+	[RPMH_RF_CLK3]		= &lito_rf_clk3.hw,
+	[RPMH_RF_CLK3_A]	= &lito_rf_clk3_ao.hw,
+	[RPMH_RF_CLK4]		= &lito_rf_clk4.hw,
+	[RPMH_RF_CLK4_A]	= &lito_rf_clk4_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_lito = {
+	.clks = lito_rpmh_clocks,
+	.num_clks = ARRAY_SIZE(lito_rpmh_clocks),
+};
+
 static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
 					 void *data)
 {
@@ -358,6 +385,7 @@
 static const struct of_device_id clk_rpmh_match_table[] = {
 	{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
 	{ .compatible = "qcom,kona-rpmh-clk", .data = &clk_rpmh_kona},
+	{ .compatible = "qcom,lito-rpmh-clk", .data = &clk_rpmh_lito},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
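 
 [Note: once a node with the new "qcom,lito-rpmh-clk" compatible probes,
 consumers resolve these clocks through the common clock framework as usual.
 A hedged consumer-side sketch — the device pointer and the "bi_tcxo"
 connection id are illustrative assumptions, not mandated by this patch:
 
 	struct clk *xo;
 	int ret;
 
 	xo = devm_clk_get(dev, "bi_tcxo");	/* id assumed from clock-names */
 	if (IS_ERR(xo))
 		return PTR_ERR(xo);
 
 	ret = clk_prepare_enable(xo);	/* sends the RPMh vote for the resource */
 	if (ret)
 		return ret;
 ]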
diff --git a/drivers/clk/qcom/debugcc-kona.c b/drivers/clk/qcom/debugcc-kona.c
index e0fbd63..4947287 100644
--- a/drivers/clk/qcom/debugcc-kona.c
+++ b/drivers/clk/qcom/debugcc-kona.c
@@ -19,7 +19,7 @@
 static struct measure_clk_data debug_mux_priv = {
 	.ctl_reg = 0x62038,
 	.status_reg = 0x6203C,
-	.xo_div4_cbcr = 0x43008,
+	.xo_div4_cbcr = 0x4300C,
 };
 
 static const char *const debug_mux_parent_names[] = {
@@ -28,7 +28,6 @@
 	"cam_cc_bps_axi_clk",
 	"cam_cc_bps_clk",
 	"cam_cc_camnoc_axi_clk",
-	"cam_cc_camnoc_dcd_xo_clk",
 	"cam_cc_cci_0_clk",
 	"cam_cc_cci_1_clk",
 	"cam_cc_core_ahb_clk",
@@ -47,7 +46,6 @@
 	"cam_cc_csiphy5_clk",
 	"cam_cc_fd_core_clk",
 	"cam_cc_fd_core_uar_clk",
-	"cam_cc_gdsc_clk",
 	"cam_cc_icp_ahb_clk",
 	"cam_cc_icp_clk",
 	"cam_cc_ife_0_ahb_clk",
@@ -95,8 +93,6 @@
 	"disp_cc_mdss_byte1_intf_clk",
 	"disp_cc_mdss_dp_aux1_clk",
 	"disp_cc_mdss_dp_aux_clk",
-	"disp_cc_mdss_dp_crypto1_clk",
-	"disp_cc_mdss_dp_crypto_clk",
 	"disp_cc_mdss_dp_link1_clk",
 	"disp_cc_mdss_dp_link1_intf_clk",
 	"disp_cc_mdss_dp_link_clk",
@@ -120,28 +116,22 @@
 	"disp_cc_mdss_rscc_ahb_clk",
 	"disp_cc_mdss_rscc_vsync_clk",
 	"disp_cc_mdss_vsync_clk",
-	"disp_cc_xo_clk",
 	"gcc_aggre_noc_pcie_tbu_clk",
 	"gcc_aggre_ufs_card_axi_clk",
 	"gcc_aggre_ufs_phy_axi_clk",
 	"gcc_aggre_usb3_prim_axi_clk",
 	"gcc_aggre_usb3_sec_axi_clk",
-	"gcc_boot_rom_ahb_clk",
 	"gcc_camera_ahb_clk",
 	"gcc_camera_hf_axi_clk",
 	"gcc_camera_sf_axi_clk",
-	"gcc_camera_xo_clk",
 	"gcc_cfg_noc_usb3_prim_axi_clk",
 	"gcc_cfg_noc_usb3_sec_axi_clk",
-	"gcc_cpuss_ahb_clk",
-	"gcc_cpuss_dvm_bus_clk",
 	"gcc_cpuss_rbcpr_clk",
 	"gcc_ddrss_gpu_axi_clk",
+	"gcc_ddrss_pcie_sf_tbu_clk",
 	"gcc_disp_ahb_clk",
 	"gcc_disp_hf_axi_clk",
 	"gcc_disp_sf_axi_clk",
-	"gcc_disp_xo_clk",
-	"gcc_dpm_ahb_clk",
 	"gcc_dpm_clk",
 	"gcc_gp1_clk",
 	"gcc_gp2_clk",
@@ -151,7 +141,6 @@
 	"gcc_gpu_gpll0_div_clk_src",
 	"gcc_gpu_memnoc_gfx_clk",
 	"gcc_gpu_snoc_dvm_gfx_clk",
-	"gcc_npu_at_clk",
 	"gcc_npu_axi_clk",
 	"gcc_npu_bwmon_axi_clk",
 	"gcc_npu_bwmon_cfg_ahb_clk",
@@ -159,7 +148,6 @@
 	"gcc_npu_dma_clk",
 	"gcc_npu_gpll0_clk_src",
 	"gcc_npu_gpll0_div_clk_src",
-	"gcc_npu_trig_clk",
 	"gcc_pcie0_phy_refgen_clk",
 	"gcc_pcie1_phy_refgen_clk",
 	"gcc_pcie2_phy_refgen_clk",
@@ -183,14 +171,9 @@
 	"gcc_pcie_2_slv_q2a_axi_clk",
 	"gcc_pcie_phy_aux_clk",
 	"gcc_pdm2_clk",
-	"gcc_pdm_ahb_clk",
-	"gcc_pdm_xo4_clk",
 	"gcc_prng_ahb_clk",
-	"gcc_qmip_camera_nrt_ahb_clk",
-	"gcc_qmip_camera_rt_ahb_clk",
-	"gcc_qmip_disp_ahb_clk",
-	"gcc_qmip_video_cvp_ahb_clk",
-	"gcc_qmip_video_vcodec_ahb_clk",
+	"gcc_qupv3_wrap0_core_2x_clk",
+	"gcc_qupv3_wrap0_core_clk",
 	"gcc_qupv3_wrap0_s0_clk",
 	"gcc_qupv3_wrap0_s1_clk",
 	"gcc_qupv3_wrap0_s2_clk",
@@ -199,31 +182,27 @@
 	"gcc_qupv3_wrap0_s5_clk",
 	"gcc_qupv3_wrap0_s6_clk",
 	"gcc_qupv3_wrap0_s7_clk",
+	"gcc_qupv3_wrap1_core_2x_clk",
+	"gcc_qupv3_wrap1_core_clk",
 	"gcc_qupv3_wrap1_s0_clk",
 	"gcc_qupv3_wrap1_s1_clk",
 	"gcc_qupv3_wrap1_s2_clk",
 	"gcc_qupv3_wrap1_s3_clk",
 	"gcc_qupv3_wrap1_s4_clk",
 	"gcc_qupv3_wrap1_s5_clk",
+	"gcc_qupv3_wrap2_core_2x_clk",
+	"gcc_qupv3_wrap2_core_clk",
 	"gcc_qupv3_wrap2_s0_clk",
 	"gcc_qupv3_wrap2_s1_clk",
 	"gcc_qupv3_wrap2_s2_clk",
 	"gcc_qupv3_wrap2_s3_clk",
 	"gcc_qupv3_wrap2_s4_clk",
 	"gcc_qupv3_wrap2_s5_clk",
-	"gcc_qupv3_wrap_0_m_ahb_clk",
-	"gcc_qupv3_wrap_0_s_ahb_clk",
-	"gcc_qupv3_wrap_1_m_ahb_clk",
-	"gcc_qupv3_wrap_1_s_ahb_clk",
-	"gcc_qupv3_wrap_2_m_ahb_clk",
-	"gcc_qupv3_wrap_2_s_ahb_clk",
 	"gcc_sdcc2_ahb_clk",
 	"gcc_sdcc2_apps_clk",
 	"gcc_sdcc4_ahb_clk",
 	"gcc_sdcc4_apps_clk",
 	"gcc_sys_noc_cpuss_ahb_clk",
-	"gcc_tsif_ahb_clk",
-	"gcc_tsif_inactivity_timers_clk",
 	"gcc_tsif_ref_clk",
 	"gcc_ufs_card_ahb_clk",
 	"gcc_ufs_card_axi_clk",
@@ -243,10 +222,8 @@
 	"gcc_ufs_phy_unipro_core_clk",
 	"gcc_usb30_prim_master_clk",
 	"gcc_usb30_prim_mock_utmi_clk",
-	"gcc_usb30_prim_sleep_clk",
 	"gcc_usb30_sec_master_clk",
 	"gcc_usb30_sec_mock_utmi_clk",
-	"gcc_usb30_sec_sleep_clk",
 	"gcc_usb3_prim_phy_aux_clk",
 	"gcc_usb3_prim_phy_com_aux_clk",
 	"gcc_usb3_prim_phy_pipe_clk",
@@ -256,18 +233,17 @@
 	"gcc_video_ahb_clk",
 	"gcc_video_axi0_clk",
 	"gcc_video_axi1_clk",
-	"gcc_video_xo_clk",
 	"gpu_cc_ahb_clk",
-	"gpu_cc_crc_ahb_clk",
-	"gpu_cc_cx_apb_clk",
 	"gpu_cc_cx_gmu_clk",
 	"gpu_cc_cx_snoc_dvm_clk",
-	"gpu_cc_cxo_aon_clk",
-	"gpu_cc_cxo_clk",
 	"gpu_cc_gx_gmu_clk",
 	"gpu_cc_gx_vsense_clk",
-	"npu_cc_aon_clk",
-	"npu_cc_atb_clk",
+	"measure_only_cnoc_clk",
+	"measure_only_gpu_cc_cx_gfx3d_clk",
+	"measure_only_gpu_cc_cx_gfx3d_slv_clk",
+	"measure_only_gpu_cc_gx_gfx3d_clk",
+	"measure_only_ipa_2x_clk",
+	"measure_only_snoc_clk",
 	"npu_cc_bto_core_clk",
 	"npu_cc_bwmon_clk",
 	"npu_cc_cal_hm0_cdc_clk",
@@ -283,7 +259,6 @@
 	"npu_cc_dl_llm_clk",
 	"npu_cc_dpm_clk",
 	"npu_cc_dpm_temp_clk",
-	"npu_cc_dpm_xo_clk",
 	"npu_cc_dsp_ahbm_clk",
 	"npu_cc_dsp_ahbs_clk",
 	"npu_cc_dsp_axi_clk",
@@ -293,20 +268,15 @@
 	"npu_cc_llm_clk",
 	"npu_cc_llm_curr_clk",
 	"npu_cc_llm_temp_clk",
-	"npu_cc_llm_xo_clk",
-	"npu_cc_noc_ahb_clk",
 	"npu_cc_noc_axi_clk",
 	"npu_cc_noc_dma_clk",
-	"npu_cc_rsc_xo_clk",
 	"npu_cc_s2p_clk",
-	"npu_cc_xo_clk",
 	"video_cc_ahb_clk",
 	"video_cc_mvs0_clk",
 	"video_cc_mvs0c_clk",
 	"video_cc_mvs1_clk",
 	"video_cc_mvs1_div2_clk",
 	"video_cc_mvs1c_clk",
-	"video_cc_xo_clk",
 };
 
 static struct clk_debug_mux gcc_debug_mux = {
@@ -319,574 +289,514 @@
 	.post_div_mask = 0xF,
 	.post_div_shift = 0,
 	MUX_SRC_LIST(
-		{ "cam_cc_bps_ahb_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_bps_ahb_clk", 0x55, 2, CAM_CC,
 			0x18, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_bps_areg_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_bps_areg_clk", 0x55, 2, CAM_CC,
 			0x17, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_bps_axi_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_bps_axi_clk", 0x55, 2, CAM_CC,
 			0x16, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_bps_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_bps_clk", 0x55, 2, CAM_CC,
 			0x14, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_camnoc_axi_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_camnoc_axi_clk", 0x55, 2, CAM_CC,
 			0x3C, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_camnoc_dcd_xo_clk", 0x55, 1, CAM_CC,
-			0x3D, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_cci_0_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_cci_0_clk", 0x55, 2, CAM_CC,
 			0x39, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_cci_1_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_cci_1_clk", 0x55, 2, CAM_CC,
 			0x3A, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_core_ahb_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_core_ahb_clk", 0x55, 2, CAM_CC,
 			0x40, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_cpas_ahb_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_cpas_ahb_clk", 0x55, 2, CAM_CC,
 			0x3B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csi0phytimer_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csi0phytimer_clk", 0x55, 2, CAM_CC,
 			0x8, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csi1phytimer_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csi1phytimer_clk", 0x55, 2, CAM_CC,
 			0xA, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csi2phytimer_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csi2phytimer_clk", 0x55, 2, CAM_CC,
 			0xC, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csi3phytimer_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csi3phytimer_clk", 0x55, 2, CAM_CC,
 			0xE, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csi4phytimer_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csi4phytimer_clk", 0x55, 2, CAM_CC,
 			0x10, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csi5phytimer_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csi5phytimer_clk", 0x55, 2, CAM_CC,
 			0x12, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csiphy0_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csiphy0_clk", 0x55, 2, CAM_CC,
 			0x9, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csiphy1_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csiphy1_clk", 0x55, 2, CAM_CC,
 			0xB, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csiphy2_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csiphy2_clk", 0x55, 2, CAM_CC,
 			0xD, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csiphy3_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csiphy3_clk", 0x55, 2, CAM_CC,
 			0xF, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csiphy4_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csiphy4_clk", 0x55, 2, CAM_CC,
 			0x11, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_csiphy5_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_csiphy5_clk", 0x55, 2, CAM_CC,
 			0x13, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_fd_core_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_fd_core_clk", 0x55, 2, CAM_CC,
 			0x37, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_fd_core_uar_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_fd_core_uar_clk", 0x55, 2, CAM_CC,
 			0x38, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_gdsc_clk", 0x55, 1, CAM_CC,
-			0x41, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_icp_ahb_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_icp_ahb_clk", 0x55, 2, CAM_CC,
 			0x36, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_icp_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_icp_clk", 0x55, 2, CAM_CC,
 			0x35, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_0_ahb_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_0_ahb_clk", 0x55, 2, CAM_CC,
 			0x26, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_0_areg_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_0_areg_clk", 0x55, 2, CAM_CC,
 			0x1F, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_0_axi_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_0_axi_clk", 0x55, 2, CAM_CC,
 			0x25, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_0_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_0_clk", 0x55, 2, CAM_CC,
 			0x1E, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_0_cphy_rx_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_0_cphy_rx_clk", 0x55, 2, CAM_CC,
 			0x24, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_0_csid_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_0_csid_clk", 0x55, 2, CAM_CC,
 			0x22, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_0_dsp_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_0_dsp_clk", 0x55, 2, CAM_CC,
 			0x21, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_1_ahb_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_1_ahb_clk", 0x55, 2, CAM_CC,
 			0x2E, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_1_areg_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_1_areg_clk", 0x55, 2, CAM_CC,
 			0x29, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_1_axi_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_1_axi_clk", 0x55, 2, CAM_CC,
 			0x2D, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_1_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_1_clk", 0x55, 2, CAM_CC,
 			0x27, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_1_cphy_rx_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_1_cphy_rx_clk", 0x55, 2, CAM_CC,
 			0x2C, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_1_csid_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_1_csid_clk", 0x55, 2, CAM_CC,
 			0x2B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_1_dsp_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_1_dsp_clk", 0x55, 2, CAM_CC,
 			0x2A, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_lite_ahb_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_lite_ahb_clk", 0x55, 2, CAM_CC,
 			0x32, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_lite_axi_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_lite_axi_clk", 0x55, 2, CAM_CC,
 			0x49, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_lite_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_lite_clk", 0x55, 2, CAM_CC,
 			0x2F, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_lite_cphy_rx_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_lite_cphy_rx_clk", 0x55, 2, CAM_CC,
 			0x31, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ife_lite_csid_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ife_lite_csid_clk", 0x55, 2, CAM_CC,
 			0x30, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ipe_0_ahb_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ipe_0_ahb_clk", 0x55, 2, CAM_CC,
 			0x1D, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ipe_0_areg_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ipe_0_areg_clk", 0x55, 2, CAM_CC,
 			0x1C, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ipe_0_axi_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ipe_0_axi_clk", 0x55, 2, CAM_CC,
 			0x1B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_ipe_0_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_ipe_0_clk", 0x55, 2, CAM_CC,
 			0x19, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_jpeg_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_jpeg_clk", 0x55, 2, CAM_CC,
 			0x33, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_mclk0_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_mclk0_clk", 0x55, 2, CAM_CC,
 			0x1, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_mclk1_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_mclk1_clk", 0x55, 2, CAM_CC,
 			0x2, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_mclk2_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_mclk2_clk", 0x55, 2, CAM_CC,
 			0x3, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_mclk3_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_mclk3_clk", 0x55, 2, CAM_CC,
 			0x4, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_mclk4_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_mclk4_clk", 0x55, 2, CAM_CC,
 			0x5, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_mclk5_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_mclk5_clk", 0x55, 2, CAM_CC,
 			0x6, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_mclk6_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_mclk6_clk", 0x55, 2, CAM_CC,
 			0x7, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_sbi_ahb_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_sbi_ahb_clk", 0x55, 2, CAM_CC,
 			0x4E, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_sbi_axi_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_sbi_axi_clk", 0x55, 2, CAM_CC,
 			0x4D, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_sbi_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_sbi_clk", 0x55, 2, CAM_CC,
 			0x4A, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_sbi_cphy_rx_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_sbi_cphy_rx_clk", 0x55, 2, CAM_CC,
 			0x4C, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_sbi_csid_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_sbi_csid_clk", 0x55, 2, CAM_CC,
 			0x4B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_sbi_ife_0_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_sbi_ife_0_clk", 0x55, 2, CAM_CC,
 			0x4F, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "cam_cc_sbi_ife_1_clk", 0x55, 1, CAM_CC,
+		{ "cam_cc_sbi_ife_1_clk", 0x55, 2, CAM_CC,
 			0x50, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
-		{ "disp_cc_mdss_ahb_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_ahb_clk", 0x56, 2, DISP_CC,
 			0x2B, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_byte0_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_byte0_clk", 0x56, 2, DISP_CC,
 			0x15, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_byte0_intf_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_byte0_intf_clk", 0x56, 2, DISP_CC,
 			0x16, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_byte1_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_byte1_clk", 0x56, 2, DISP_CC,
 			0x17, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_byte1_intf_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_byte1_intf_clk", 0x56, 2, DISP_CC,
 			0x18, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_aux1_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_dp_aux1_clk", 0x56, 2, DISP_CC,
 			0x25, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_aux_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_dp_aux_clk", 0x56, 2, DISP_CC,
 			0x20, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_crypto1_clk", 0x56, 1, DISP_CC,
-			0x24, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_crypto_clk", 0x56, 1, DISP_CC,
-			0x1D, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_link1_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_dp_link1_clk", 0x56, 2, DISP_CC,
 			0x22, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_link1_intf_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_dp_link1_intf_clk", 0x56, 2, DISP_CC,
 			0x23, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_link_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_dp_link_clk", 0x56, 2, DISP_CC,
 			0x1B, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_link_intf_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_dp_link_intf_clk", 0x56, 2, DISP_CC,
 			0x1C, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_pixel1_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_dp_pixel1_clk", 0x56, 2, DISP_CC,
 			0x1F, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_pixel2_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_dp_pixel2_clk", 0x56, 2, DISP_CC,
 			0x21, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_pixel_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_dp_pixel_clk", 0x56, 2, DISP_CC,
 			0x1E, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_edp_aux_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_edp_aux_clk", 0x56, 2, DISP_CC,
 			0x29, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_edp_gtc_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_edp_gtc_clk", 0x56, 2, DISP_CC,
 			0x2A, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_edp_link_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_edp_link_clk", 0x56, 2, DISP_CC,
 			0x27, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_edp_link_intf_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_edp_link_intf_clk", 0x56, 2, DISP_CC,
 			0x28, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_edp_pixel_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_edp_pixel_clk", 0x56, 2, DISP_CC,
 			0x26, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_esc0_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_esc0_clk", 0x56, 2, DISP_CC,
 			0x19, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_esc1_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_esc1_clk", 0x56, 2, DISP_CC,
 			0x1A, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_mdp_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_mdp_clk", 0x56, 2, DISP_CC,
 			0x11, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_mdp_lut_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_mdp_lut_clk", 0x56, 2, DISP_CC,
 			0x13, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_non_gdsc_ahb_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_non_gdsc_ahb_clk", 0x56, 2, DISP_CC,
 			0x2C, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_pclk0_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_pclk0_clk", 0x56, 2, DISP_CC,
 			0xF, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_pclk1_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_pclk1_clk", 0x56, 2, DISP_CC,
 			0x10, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_rot_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_rot_clk", 0x56, 2, DISP_CC,
 			0x12, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_rscc_ahb_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_rscc_ahb_clk", 0x56, 2, DISP_CC,
 			0x2E, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_rscc_vsync_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_rscc_vsync_clk", 0x56, 2, DISP_CC,
 			0x2D, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_vsync_clk", 0x56, 1, DISP_CC,
+		{ "disp_cc_mdss_vsync_clk", 0x56, 2, DISP_CC,
 			0x14, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_xo_clk", 0x56, 1, DISP_CC,
-			0x36, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "gcc_aggre_noc_pcie_tbu_clk", 0x36, 1, GCC,
-			0x36, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_aggre_ufs_card_axi_clk", 0x142, 1, GCC,
-			0x142, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_aggre_ufs_phy_axi_clk", 0x141, 1, GCC,
-			0x141, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_aggre_usb3_prim_axi_clk", 0x13F, 1, GCC,
-			0x13F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_aggre_usb3_sec_axi_clk", 0x140, 1, GCC,
-			0x140, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_boot_rom_ahb_clk", 0xA3, 1, GCC,
-			0xA3, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_camera_ahb_clk", 0x44, 1, GCC,
-			0x44, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_camera_hf_axi_clk", 0x4D, 1, GCC,
-			0x4D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_camera_sf_axi_clk", 0x4E, 1, GCC,
-			0x4E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_camera_xo_clk", 0x52, 1, GCC,
-			0x52, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_cfg_noc_usb3_prim_axi_clk", 0x21, 1, GCC,
-			0x21, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_cfg_noc_usb3_sec_axi_clk", 0x22, 1, GCC,
-			0x22, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_cpuss_ahb_clk", 0xE0, 1, GCC,
-			0xE0, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_cpuss_dvm_bus_clk", 0xE4, 1, GCC,
-			0xE4, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_cpuss_rbcpr_clk", 0xE1, 1, GCC,
-			0xE1, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ddrss_gpu_axi_clk", 0xC4, 1, GCC,
-			0xC4, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_disp_ahb_clk", 0x45, 1, GCC,
-			0x45, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_disp_hf_axi_clk", 0x4F, 1, GCC,
-			0x4F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_disp_sf_axi_clk", 0x50, 1, GCC,
-			0x50, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_disp_xo_clk", 0x53, 1, GCC,
-			0x53, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_dpm_ahb_clk", 0x198, 1, GCC,
-			0x198, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_dpm_clk", 0x197, 1, GCC,
-			0x197, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_gp1_clk", 0xEF, 1, GCC,
-			0xEF, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_gp2_clk", 0xF0, 1, GCC,
-			0xF0, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_gp3_clk", 0xF1, 1, GCC,
-			0xF1, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_gpu_cfg_ahb_clk", 0x161, 1, GCC,
-			0x161, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_gpu_gpll0_clk_src", 0x167, 1, GCC,
-			0x167, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_gpu_gpll0_div_clk_src", 0x168, 1, GCC,
-			0x168, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_gpu_memnoc_gfx_clk", 0x164, 1, GCC,
-			0x164, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_gpu_snoc_dvm_gfx_clk", 0x166, 1, GCC,
-			0x166, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_npu_at_clk", 0x17D, 1, GCC,
-			0x17D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_npu_axi_clk", 0x17A, 1, GCC,
-			0x17A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_npu_bwmon_axi_clk", 0x19A, 1, GCC,
-			0x19A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_npu_bwmon_cfg_ahb_clk", 0x199, 1, GCC,
-			0x199, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_npu_cfg_ahb_clk", 0x179, 1, GCC,
-			0x179, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_npu_dma_clk", 0x17B, 1, GCC,
-			0x17B, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_npu_gpll0_clk_src", 0x17E, 1, GCC,
-			0x17E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_npu_gpll0_div_clk_src", 0x17F, 1, GCC,
-			0x17F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_npu_trig_clk", 0x17C, 1, GCC,
-			0x17C, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie0_phy_refgen_clk", 0x103, 1, GCC,
-			0x103, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie1_phy_refgen_clk", 0x104, 1, GCC,
-			0x104, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie2_phy_refgen_clk", 0x105, 1, GCC,
-			0x105, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_0_aux_clk", 0xF6, 1, GCC,
-			0xF6, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_0_cfg_ahb_clk", 0xF5, 1, GCC,
-			0xF5, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_0_mstr_axi_clk", 0xF4, 1, GCC,
-			0xF4, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_0_pipe_clk", 0xF7, 1, GCC,
-			0xF7, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_0_slv_axi_clk", 0xF3, 1, GCC,
-			0xF3, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_0_slv_q2a_axi_clk", 0xF2, 1, GCC,
-			0xF2, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_1_aux_clk", 0xFE, 1, GCC,
-			0xFE, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_1_cfg_ahb_clk", 0xFD, 1, GCC,
-			0xFD, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_1_mstr_axi_clk", 0xFC, 1, GCC,
-			0xFC, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_1_pipe_clk", 0xFF, 1, GCC,
-			0xFF, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_1_slv_axi_clk", 0xFB, 1, GCC,
-			0xFB, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_1_slv_q2a_axi_clk", 0xFA, 1, GCC,
-			0xFA, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_2_aux_clk", 0x191, 1, GCC,
-			0x191, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_2_cfg_ahb_clk", 0x190, 1, GCC,
-			0x190, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_2_mstr_axi_clk", 0x18F, 1, GCC,
-			0x18F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_2_pipe_clk", 0x192, 1, GCC,
-			0x192, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_2_slv_axi_clk", 0x18E, 1, GCC,
-			0x18E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_2_slv_q2a_axi_clk", 0x18D, 1, GCC,
-			0x18D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pcie_phy_aux_clk", 0x102, 1, GCC,
-			0x102, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pdm2_clk", 0x9D, 1, GCC,
-			0x9D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pdm_ahb_clk", 0x9B, 1, GCC,
-			0x9B, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_pdm_xo4_clk", 0x9C, 1, GCC,
-			0x9C, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_prng_ahb_clk", 0x9E, 1, GCC,
-			0x9E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qmip_camera_nrt_ahb_clk", 0x48, 1, GCC,
-			0x48, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qmip_camera_rt_ahb_clk", 0x49, 1, GCC,
-			0x49, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qmip_disp_ahb_clk", 0x4A, 1, GCC,
-			0x4A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qmip_video_cvp_ahb_clk", 0x46, 1, GCC,
-			0x46, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qmip_video_vcodec_ahb_clk", 0x47, 1, GCC,
-			0x47, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap0_s0_clk", 0x89, 1, GCC,
-			0x89, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap0_s1_clk", 0x8A, 1, GCC,
-			0x8A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap0_s2_clk", 0x8B, 1, GCC,
-			0x8B, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap0_s3_clk", 0x8C, 1, GCC,
-			0x8C, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap0_s4_clk", 0x8D, 1, GCC,
-			0x8D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap0_s5_clk", 0x8E, 1, GCC,
-			0x8E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap0_s6_clk", 0x8F, 1, GCC,
-			0x8F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap0_s7_clk", 0x90, 1, GCC,
-			0x90, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap1_s0_clk", 0x95, 1, GCC,
-			0x95, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap1_s1_clk", 0x96, 1, GCC,
-			0x96, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap1_s2_clk", 0x97, 1, GCC,
-			0x97, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap1_s3_clk", 0x98, 1, GCC,
-			0x98, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap1_s4_clk", 0x99, 1, GCC,
-			0x99, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap1_s5_clk", 0x9A, 1, GCC,
-			0x9A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap2_s0_clk", 0x185, 1, GCC,
-			0x185, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap2_s1_clk", 0x186, 1, GCC,
-			0x186, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap2_s2_clk", 0x187, 1, GCC,
-			0x187, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap2_s3_clk", 0x188, 1, GCC,
-			0x188, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap2_s4_clk", 0x189, 1, GCC,
-			0x189, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap2_s5_clk", 0x18A, 1, GCC,
-			0x18A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap_0_m_ahb_clk", 0x85, 1, GCC,
-			0x85, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap_0_s_ahb_clk", 0x86, 1, GCC,
-			0x86, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap_1_m_ahb_clk", 0x91, 1, GCC,
-			0x91, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap_1_s_ahb_clk", 0x92, 1, GCC,
-			0x92, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap_2_m_ahb_clk", 0x181, 1, GCC,
-			0x181, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_qupv3_wrap_2_s_ahb_clk", 0x182, 1, GCC,
-			0x182, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_sdcc2_ahb_clk", 0x82, 1, GCC,
-			0x82, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_sdcc2_apps_clk", 0x81, 1, GCC,
-			0x81, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_sdcc4_ahb_clk", 0x84, 1, GCC,
-			0x84, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_sdcc4_apps_clk", 0x83, 1, GCC,
-			0x83, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_sys_noc_cpuss_ahb_clk", 0xC, 1, GCC,
-			0xC, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_tsif_ahb_clk", 0x9F, 1, GCC,
-			0x9F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_tsif_inactivity_timers_clk", 0xA1, 1, GCC,
-			0xA1, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_tsif_ref_clk", 0xA0, 1, GCC,
-			0xA0, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_card_ahb_clk", 0x107, 1, GCC,
-			0x107, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_card_axi_clk", 0x106, 1, GCC,
-			0x106, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_card_ice_core_clk", 0x10D, 1, GCC,
-			0x10D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_card_phy_aux_clk", 0x10E, 1, GCC,
-			0x10E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_card_rx_symbol_0_clk", 0x109, 1, GCC,
-			0x109, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_card_rx_symbol_1_clk", 0x10F, 1, GCC,
-			0x10F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_card_tx_symbol_0_clk", 0x108, 1, GCC,
-			0x108, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_card_unipro_core_clk", 0x10C, 1, GCC,
-			0x10C, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_phy_ahb_clk", 0x113, 1, GCC,
-			0x113, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_phy_axi_clk", 0x112, 1, GCC,
-			0x112, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_phy_ice_core_clk", 0x119, 1, GCC,
-			0x119, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_phy_phy_aux_clk", 0x11A, 1, GCC,
-			0x11A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_phy_rx_symbol_0_clk", 0x115, 1, GCC,
-			0x115, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_phy_rx_symbol_1_clk", 0x11B, 1, GCC,
-			0x11B, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_phy_tx_symbol_0_clk", 0x114, 1, GCC,
-			0x114, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_ufs_phy_unipro_core_clk", 0x118, 1, GCC,
-			0x118, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb30_prim_master_clk", 0x6E, 1, GCC,
-			0x6E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb30_prim_mock_utmi_clk", 0x70, 1, GCC,
-			0x70, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb30_prim_sleep_clk", 0x6F, 1, GCC,
-			0x6F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb30_sec_master_clk", 0x75, 1, GCC,
-			0x75, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb30_sec_mock_utmi_clk", 0x77, 1, GCC,
-			0x77, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb30_sec_sleep_clk", 0x76, 1, GCC,
-			0x76, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb3_prim_phy_aux_clk", 0x71, 1, GCC,
-			0x71, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb3_prim_phy_com_aux_clk", 0x72, 1, GCC,
-			0x72, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb3_prim_phy_pipe_clk", 0x73, 1, GCC,
-			0x73, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb3_sec_phy_aux_clk", 0x78, 1, GCC,
-			0x78, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb3_sec_phy_com_aux_clk", 0x79, 1, GCC,
-			0x79, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_usb3_sec_phy_pipe_clk", 0x7A, 1, GCC,
-			0x7A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_video_ahb_clk", 0x43, 1, GCC,
-			0x43, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_video_axi0_clk", 0x4B, 1, GCC,
-			0x4B, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_video_axi1_clk", 0x4C, 1, GCC,
-			0x4C, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gcc_video_xo_clk", 0x51, 1, GCC,
-			0x51, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
-		{ "gpu_cc_ahb_clk", 0x163, 1, GPU_CC,
+		{ "gcc_aggre_noc_pcie_tbu_clk", 0x36, 2, GCC,
+			0x36, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_aggre_ufs_card_axi_clk", 0x142, 2, GCC,
+			0x142, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_aggre_ufs_phy_axi_clk", 0x141, 2, GCC,
+			0x141, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_aggre_usb3_prim_axi_clk", 0x13F, 2, GCC,
+			0x13F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_aggre_usb3_sec_axi_clk", 0x140, 2, GCC,
+			0x140, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_camera_ahb_clk", 0x44, 2, GCC,
+			0x44, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_camera_hf_axi_clk", 0x4D, 2, GCC,
+			0x4D, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_camera_sf_axi_clk", 0x4E, 2, GCC,
+			0x4E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_cfg_noc_usb3_prim_axi_clk", 0x21, 2, GCC,
+			0x21, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_cfg_noc_usb3_sec_axi_clk", 0x22, 2, GCC,
+			0x22, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_cpuss_rbcpr_clk", 0xE1, 2, GCC,
+			0xE1, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ddrss_gpu_axi_clk", 0xC4, 2, GCC,
+			0xC4, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ddrss_pcie_sf_tbu_clk", 0xC5, 2, GCC,
+			0xC5, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_disp_ahb_clk", 0x45, 2, GCC,
+			0x45, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_disp_hf_axi_clk", 0x4F, 2, GCC,
+			0x4F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_disp_sf_axi_clk", 0x50, 2, GCC,
+			0x50, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_dpm_clk", 0x197, 2, GCC,
+			0x197, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_gp1_clk", 0xEF, 2, GCC,
+			0xEF, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_gp2_clk", 0xF0, 2, GCC,
+			0xF0, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_gp3_clk", 0xF1, 2, GCC,
+			0xF1, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_gpu_cfg_ahb_clk", 0x161, 2, GCC,
+			0x161, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_gpu_gpll0_clk_src", 0x167, 2, GCC,
+			0x167, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_gpu_gpll0_div_clk_src", 0x168, 2, GCC,
+			0x168, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_gpu_memnoc_gfx_clk", 0x164, 2, GCC,
+			0x164, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_gpu_snoc_dvm_gfx_clk", 0x166, 2, GCC,
+			0x166, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_npu_axi_clk", 0x17A, 2, GCC,
+			0x17A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_npu_bwmon_axi_clk", 0x19A, 2, GCC,
+			0x19A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_npu_bwmon_cfg_ahb_clk", 0x199, 2, GCC,
+			0x199, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_npu_cfg_ahb_clk", 0x179, 2, GCC,
+			0x179, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_npu_dma_clk", 0x17B, 2, GCC,
+			0x17B, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_npu_gpll0_clk_src", 0x17E, 2, GCC,
+			0x17E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_npu_gpll0_div_clk_src", 0x17F, 2, GCC,
+			0x17F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie0_phy_refgen_clk", 0x103, 2, GCC,
+			0x103, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie1_phy_refgen_clk", 0x104, 2, GCC,
+			0x104, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie2_phy_refgen_clk", 0x105, 2, GCC,
+			0x105, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_0_aux_clk", 0xF6, 2, GCC,
+			0xF6, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_0_cfg_ahb_clk", 0xF5, 2, GCC,
+			0xF5, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_0_mstr_axi_clk", 0xF4, 2, GCC,
+			0xF4, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_0_pipe_clk", 0xF7, 2, GCC,
+			0xF7, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_0_slv_axi_clk", 0xF3, 2, GCC,
+			0xF3, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_0_slv_q2a_axi_clk", 0xF2, 2, GCC,
+			0xF2, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_1_aux_clk", 0xFE, 2, GCC,
+			0xFE, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_1_cfg_ahb_clk", 0xFD, 2, GCC,
+			0xFD, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_1_mstr_axi_clk", 0xFC, 2, GCC,
+			0xFC, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_1_pipe_clk", 0xFF, 2, GCC,
+			0xFF, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_1_slv_axi_clk", 0xFB, 2, GCC,
+			0xFB, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_1_slv_q2a_axi_clk", 0xFA, 2, GCC,
+			0xFA, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_2_aux_clk", 0x191, 2, GCC,
+			0x191, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_2_cfg_ahb_clk", 0x190, 2, GCC,
+			0x190, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_2_mstr_axi_clk", 0x18F, 2, GCC,
+			0x18F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_2_pipe_clk", 0x192, 2, GCC,
+			0x192, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_2_slv_axi_clk", 0x18E, 2, GCC,
+			0x18E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_2_slv_q2a_axi_clk", 0x18D, 2, GCC,
+			0x18D, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pcie_phy_aux_clk", 0x102, 2, GCC,
+			0x102, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_pdm2_clk", 0x9D, 2, GCC,
+			0x9D, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_prng_ahb_clk", 0x9E, 2, GCC,
+			0x9E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap0_core_2x_clk", 0x88, 2, GCC,
+			0x88, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap0_core_clk", 0x87, 2, GCC,
+			0x87, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap0_s0_clk", 0x89, 2, GCC,
+			0x89, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap0_s1_clk", 0x8A, 2, GCC,
+			0x8A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap0_s2_clk", 0x8B, 2, GCC,
+			0x8B, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap0_s3_clk", 0x8C, 2, GCC,
+			0x8C, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap0_s4_clk", 0x8D, 2, GCC,
+			0x8D, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap0_s5_clk", 0x8E, 2, GCC,
+			0x8E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap0_s6_clk", 0x8F, 2, GCC,
+			0x8F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap0_s7_clk", 0x90, 2, GCC,
+			0x90, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap1_core_2x_clk", 0x94, 2, GCC,
+			0x94, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap1_core_clk", 0x93, 2, GCC,
+			0x93, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap1_s0_clk", 0x95, 2, GCC,
+			0x95, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap1_s1_clk", 0x96, 2, GCC,
+			0x96, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap1_s2_clk", 0x97, 2, GCC,
+			0x97, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap1_s3_clk", 0x98, 2, GCC,
+			0x98, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap1_s4_clk", 0x99, 2, GCC,
+			0x99, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap1_s5_clk", 0x9A, 2, GCC,
+			0x9A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap2_core_2x_clk", 0x184, 2, GCC,
+			0x184, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap2_core_clk", 0x183, 2, GCC,
+			0x183, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap2_s0_clk", 0x185, 2, GCC,
+			0x185, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap2_s1_clk", 0x186, 2, GCC,
+			0x186, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap2_s2_clk", 0x187, 2, GCC,
+			0x187, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap2_s3_clk", 0x188, 2, GCC,
+			0x188, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap2_s4_clk", 0x189, 2, GCC,
+			0x189, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_qupv3_wrap2_s5_clk", 0x18A, 2, GCC,
+			0x18A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_sdcc2_ahb_clk", 0x82, 2, GCC,
+			0x82, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_sdcc2_apps_clk", 0x81, 2, GCC,
+			0x81, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_sdcc4_ahb_clk", 0x84, 2, GCC,
+			0x84, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_sdcc4_apps_clk", 0x83, 2, GCC,
+			0x83, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_sys_noc_cpuss_ahb_clk", 0xC, 2, GCC,
+			0xC, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_tsif_ref_clk", 0xA0, 2, GCC,
+			0xA0, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_card_ahb_clk", 0x107, 2, GCC,
+			0x107, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_card_axi_clk", 0x106, 2, GCC,
+			0x106, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_card_ice_core_clk", 0x10D, 2, GCC,
+			0x10D, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_card_phy_aux_clk", 0x10E, 2, GCC,
+			0x10E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_card_rx_symbol_0_clk", 0x109, 2, GCC,
+			0x109, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_card_rx_symbol_1_clk", 0x10F, 2, GCC,
+			0x10F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_card_tx_symbol_0_clk", 0x108, 2, GCC,
+			0x108, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_card_unipro_core_clk", 0x10C, 2, GCC,
+			0x10C, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_phy_ahb_clk", 0x113, 2, GCC,
+			0x113, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_phy_axi_clk", 0x112, 2, GCC,
+			0x112, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_phy_ice_core_clk", 0x119, 2, GCC,
+			0x119, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_phy_phy_aux_clk", 0x11A, 2, GCC,
+			0x11A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_phy_rx_symbol_0_clk", 0x115, 2, GCC,
+			0x115, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_phy_rx_symbol_1_clk", 0x11B, 2, GCC,
+			0x11B, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_phy_tx_symbol_0_clk", 0x114, 2, GCC,
+			0x114, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_ufs_phy_unipro_core_clk", 0x118, 2, GCC,
+			0x118, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_usb30_prim_master_clk", 0x6E, 2, GCC,
+			0x6E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_usb30_prim_mock_utmi_clk", 0x70, 2, GCC,
+			0x70, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_usb30_sec_master_clk", 0x75, 2, GCC,
+			0x75, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_usb30_sec_mock_utmi_clk", 0x77, 2, GCC,
+			0x77, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_usb3_prim_phy_aux_clk", 0x71, 2, GCC,
+			0x71, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_usb3_prim_phy_com_aux_clk", 0x72, 2, GCC,
+			0x72, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_usb3_prim_phy_pipe_clk", 0x73, 2, GCC,
+			0x73, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_usb3_sec_phy_aux_clk", 0x78, 2, GCC,
+			0x78, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_usb3_sec_phy_com_aux_clk", 0x79, 2, GCC,
+			0x79, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_usb3_sec_phy_pipe_clk", 0x7A, 2, GCC,
+			0x7A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_video_ahb_clk", 0x43, 2, GCC,
+			0x43, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_video_axi0_clk", 0x4B, 2, GCC,
+			0x4B, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gcc_video_axi1_clk", 0x4C, 2, GCC,
+			0x4C, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "gpu_cc_ahb_clk", 0x163, 2, GPU_CC,
 			0x10, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
-		{ "gpu_cc_crc_ahb_clk", 0x163, 1, GPU_CC,
-			0x11, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
-		{ "gpu_cc_cx_apb_clk", 0x163, 1, GPU_CC,
-			0x14, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
-		{ "gpu_cc_cx_gmu_clk", 0x163, 1, GPU_CC,
+		{ "gpu_cc_cx_gmu_clk", 0x163, 2, GPU_CC,
 			0x18, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
-		{ "gpu_cc_cx_snoc_dvm_clk", 0x163, 1, GPU_CC,
+		{ "gpu_cc_cx_snoc_dvm_clk", 0x163, 2, GPU_CC,
 			0x15, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
-		{ "gpu_cc_cxo_aon_clk", 0x163, 1, GPU_CC,
-			0xA, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
-		{ "gpu_cc_cxo_clk", 0x163, 1, GPU_CC,
-			0x19, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
-		{ "gpu_cc_gx_gmu_clk", 0x163, 1, GPU_CC,
+		{ "gpu_cc_gx_gmu_clk", 0x163, 2, GPU_CC,
 			0xF, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
-		{ "gpu_cc_gx_vsense_clk", 0x163, 1, GPU_CC,
+		{ "gpu_cc_gx_vsense_clk", 0x163, 2, GPU_CC,
 			0xC, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
-		{ "npu_cc_aon_clk", 0x180, 1, NPU_CC,
-			0x5, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_atb_clk", 0x180, 1, NPU_CC,
-			0x17, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_bto_core_clk", 0x180, 1, NPU_CC,
+		{ "measure_only_cnoc_clk", 0x19, 2, GCC,
+			0x19, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "measure_only_gpu_cc_cx_gfx3d_clk", 0x163, 2, GPU_CC,
+			0x1A, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
+		{ "measure_only_gpu_cc_cx_gfx3d_slv_clk", 0x163, 2, GPU_CC,
+			0x1B, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
+		{ "measure_only_gpu_cc_gx_gfx3d_clk", 0x163, 2, GPU_CC,
+			0xB, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
+		{ "measure_only_ipa_2x_clk", 0x147, 2, GCC,
+			0x147, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "measure_only_snoc_clk", 0x7, 2, GCC,
+			0x7, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+		{ "npu_cc_bto_core_clk", 0x180, 2, NPU_CC,
 			0x19, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_bwmon_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_bwmon_clk", 0x180, 2, NPU_CC,
 			0x18, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_cal_hm0_cdc_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_cal_hm0_cdc_clk", 0x180, 2, NPU_CC,
 			0xB, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_cal_hm0_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_cal_hm0_clk", 0x180, 2, NPU_CC,
 			0x2, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_cal_hm0_dpm_ip_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_cal_hm0_dpm_ip_clk", 0x180, 2, NPU_CC,
 			0xC, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_cal_hm0_perf_cnt_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_cal_hm0_perf_cnt_clk", 0x180, 2, NPU_CC,
 			0xD, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_cal_hm1_cdc_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_cal_hm1_cdc_clk", 0x180, 2, NPU_CC,
 			0xE, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_cal_hm1_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_cal_hm1_clk", 0x180, 2, NPU_CC,
 			0x3, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_cal_hm1_dpm_ip_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_cal_hm1_dpm_ip_clk", 0x180, 2, NPU_CC,
 			0xF, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_cal_hm1_perf_cnt_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_cal_hm1_perf_cnt_clk", 0x180, 2, NPU_CC,
 			0x10, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_core_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_core_clk", 0x180, 2, NPU_CC,
 			0x4, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_dl_dpm_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_dl_dpm_clk", 0x180, 2, NPU_CC,
 			0x23, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_dl_llm_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_dl_llm_clk", 0x180, 2, NPU_CC,
 			0x22, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_dpm_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_dpm_clk", 0x180, 2, NPU_CC,
 			0x8, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_dpm_temp_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_dpm_temp_clk", 0x180, 2, NPU_CC,
 			0x14, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_dpm_xo_clk", 0x180, 1, NPU_CC,
-			0xA, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_dsp_ahbm_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_dsp_ahbm_clk", 0x180, 2, NPU_CC,
 			0x1C, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_dsp_ahbs_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_dsp_ahbs_clk", 0x180, 2, NPU_CC,
 			0x1B, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_dsp_axi_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_dsp_axi_clk", 0x180, 2, NPU_CC,
 			0x1E, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_dsp_bwmon_ahb_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_dsp_bwmon_ahb_clk", 0x180, 2, NPU_CC,
 			0x1D, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_dsp_bwmon_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_dsp_bwmon_clk", 0x180, 2, NPU_CC,
 			0x1F, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_isense_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_isense_clk", 0x180, 2, NPU_CC,
 			0x7, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_llm_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_llm_clk", 0x180, 2, NPU_CC,
 			0x6, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_llm_curr_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_llm_curr_clk", 0x180, 2, NPU_CC,
 			0x21, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_llm_temp_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_llm_temp_clk", 0x180, 2, NPU_CC,
 			0x15, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_llm_xo_clk", 0x180, 1, NPU_CC,
-			0x9, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_noc_ahb_clk", 0x180, 1, NPU_CC,
-			0x13, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_noc_axi_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_noc_axi_clk", 0x180, 2, NPU_CC,
 			0x12, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_noc_dma_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_noc_dma_clk", 0x180, 2, NPU_CC,
 			0x11, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_rsc_xo_clk", 0x180, 1, NPU_CC,
-			0x1A, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_s2p_clk", 0x180, 1, NPU_CC,
+		{ "npu_cc_s2p_clk", 0x180, 2, NPU_CC,
 			0x16, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "npu_cc_xo_clk", 0x180, 1, NPU_CC,
-			0x1, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
-		{ "video_cc_ahb_clk", 0x57, 1, VIDEO_CC,
+		{ "video_cc_ahb_clk", 0x57, 2, VIDEO_CC,
 			0x7, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
-		{ "video_cc_mvs0_clk", 0x57, 1, VIDEO_CC,
+		{ "video_cc_mvs0_clk", 0x57, 2, VIDEO_CC,
 			0x3, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
-		{ "video_cc_mvs0c_clk", 0x57, 1, VIDEO_CC,
+		{ "video_cc_mvs0c_clk", 0x57, 2, VIDEO_CC,
 			0x1, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
-		{ "video_cc_mvs1_clk", 0x57, 1, VIDEO_CC,
+		{ "video_cc_mvs1_clk", 0x57, 2, VIDEO_CC,
 			0x5, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
-		{ "video_cc_mvs1_div2_clk", 0x57, 1, VIDEO_CC,
+		{ "video_cc_mvs1_div2_clk", 0x57, 2, VIDEO_CC,
 			0x8, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
-		{ "video_cc_mvs1c_clk", 0x57, 1, VIDEO_CC,
+		{ "video_cc_mvs1c_clk", 0x57, 2, VIDEO_CC,
 			0x9, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
-		{ "video_cc_xo_clk", 0x57, 1, VIDEO_CC,
-			0xB, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
 	),
 	.hw.init = &(struct clk_init_data){
 		.name = "gcc_debug_mux",
diff --git a/drivers/clk/qcom/dispcc-kona.c b/drivers/clk/qcom/dispcc-kona.c
index 8cd708f..3835bb7 100644
--- a/drivers/clk/qcom/dispcc-kona.c
+++ b/drivers/clk/qcom/dispcc-kona.c
@@ -31,6 +31,8 @@
 
 static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM, 1, vdd_corner);
 
+#define DISP_CC_MISC_CMD	0x8000
+
 enum {
 	P_BI_TCXO,
 	P_CHIP_SLEEP_CLK,
@@ -306,115 +308,6 @@
 	},
 };
 
-
-static struct clk_regmap_div disp_cc_mdss_spdm_dp_crypto_div_clk_src = {
-	.reg = 0x6034,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_dp_crypto_div_clk_src",
-		.parent_names =
-			(const char *[]){ "disp_cc_mdss_dp_crypto_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_dp_pixel1_div_clk_src = {
-	.reg = 0x603c,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_dp_pixel1_div_clk_src",
-		.parent_names =
-			(const char *[]){ "disp_cc_mdss_dp_pixel1_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_dp_pixel_div_clk_src = {
-	.reg = 0x6038,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_dp_pixel_div_clk_src",
-		.parent_names =
-			(const char *[]){ "disp_cc_mdss_dp_pixel_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_mdp_div_clk_src = {
-	.reg = 0x602c,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_mdp_div_clk_src",
-		.parent_names = (const char *[]){ "disp_cc_mdss_mdp_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_pclk0_div_clk_src = {
-	.reg = 0x6024,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_pclk0_div_clk_src",
-		.parent_names =
-			(const char *[]){ "disp_cc_mdss_pclk0_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_pclk1_div_clk_src = {
-	.reg = 0x6028,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_pclk1_div_clk_src",
-		.parent_names =
-			(const char *[]){ "disp_cc_mdss_pclk1_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_rot_div_clk_src = {
-	.reg = 0x6030,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_rot_div_clk_src",
-		.parent_names = (const char *[]){ "disp_cc_mdss_rot_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_pll_test_div_clk_src = {
-	.reg = 0x5014,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_pll_test_div_clk_src",
-		.parent_names = (const char *[]){ "disp_cc_pll0" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
 static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
@@ -529,60 +422,6 @@
 	},
 };
 
-static const struct freq_tbl ftbl_disp_cc_mdss_dp_crypto1_clk_src[] = {
-	F( 108000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 180000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 360000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 540000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	{ }
-};
-
-static struct clk_rcg2 disp_cc_mdss_dp_crypto1_clk_src = {
-	.cmd_rcgr = 0x2228,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = disp_cc_parent_map_0,
-	.freq_tbl = ftbl_disp_cc_mdss_dp_crypto1_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "disp_cc_mdss_dp_crypto1_clk_src",
-		.parent_names = disp_cc_parent_names_0,
-		.num_parents = 8,
-		.flags = CLK_GET_RATE_NOCACHE,
-		.ops = &clk_rcg2_ops,
-		.vdd_class = &vdd_mm,
-		.num_rate_max = VDD_NUM,
-		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 12800,
-			[VDD_LOWER] = 108000,
-			[VDD_LOW] = 180000,
-			[VDD_LOW_L1] = 360000,
-			[VDD_NOMINAL] = 540000},
-	},
-};
-
-static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
-	.cmd_rcgr = 0x2194,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = disp_cc_parent_map_0,
-	.freq_tbl = ftbl_disp_cc_mdss_dp_crypto1_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "disp_cc_mdss_dp_crypto_clk_src",
-		.parent_names = disp_cc_parent_names_0,
-		.num_parents = 8,
-		.flags = CLK_GET_RATE_NOCACHE,
-		.ops = &clk_rcg2_ops,
-		.vdd_class = &vdd_mm,
-		.num_rate_max = VDD_NUM,
-		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 12800,
-			[VDD_LOWER] = 108000,
-			[VDD_LOW] = 180000,
-			[VDD_LOW_L1] = 360000,
-			[VDD_NOMINAL] = 540000},
-	},
-};
-
 static const struct freq_tbl ftbl_disp_cc_mdss_dp_link1_clk_src[] = {
 	F( 162000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
 	F( 270000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
@@ -978,38 +817,6 @@
 	},
 };
 
-static struct clk_rcg2 disp_cc_xo_clk_src = {
-	.cmd_rcgr = 0x6044,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = disp_cc_parent_map_1,
-	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "disp_cc_xo_clk_src",
-		.parent_names = disp_cc_parent_names_1,
-		.num_parents = 2,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
-		.vdd_class = &vdd_mm,
-		.num_rate_max = VDD_NUM,
-		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 19200000},
-	},
-};
-
-static struct clk_branch disp_cc_debug_clk = {
-	.halt_reg = 0x500c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x500c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_debug_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch disp_cc_mdss_ahb_clk = {
 	.halt_reg = 0x2080,
 	.halt_check = BRANCH_HALT,
@@ -1136,42 +943,6 @@
 	},
 };
 
-static struct clk_branch disp_cc_mdss_dp_crypto1_clk = {
-	.halt_reg = 0x2064,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x2064,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_dp_crypto1_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_dp_crypto1_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
-	.halt_reg = 0x2048,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x2048,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_dp_crypto_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_dp_crypto_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch disp_cc_mdss_dp_link1_clk = {
 	.halt_reg = 0x205c,
 	.halt_check = BRANCH_HALT,
@@ -1567,145 +1338,6 @@
 	},
 };
 
-static struct clk_branch disp_cc_mdss_spdm_debug_clk = {
-	.halt_reg = 0x6020,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6020,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_debug_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_dp_crypto_clk = {
-	.halt_reg = 0x6014,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6014,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_dp_crypto_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_dp_crypto_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_dp_pixel1_clk = {
-	.halt_reg = 0x601c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x601c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_dp_pixel1_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_dp_pixel1_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_dp_pixel_clk = {
-	.halt_reg = 0x6018,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6018,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_dp_pixel_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_dp_pixel_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_mdp_clk = {
-	.halt_reg = 0x600c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x600c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_mdp_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_mdp_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_pclk0_clk = {
-	.halt_reg = 0x6004,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6004,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_pclk0_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_pclk0_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_pclk1_clk = {
-	.halt_reg = 0x6008,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6008,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_pclk1_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_pclk1_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_rot_clk = {
-	.halt_reg = 0x6010,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6010,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_rot_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_rot_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch disp_cc_mdss_vsync_clk = {
 	.halt_reg = 0x2024,
 	.halt_check = BRANCH_HALT,
@@ -1724,24 +1356,6 @@
 	},
 };
 
-static struct clk_branch disp_cc_pll_test_clk = {
-	.halt_reg = 0x5018,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x5018,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_pll_test_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_pll_test_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch disp_cc_sleep_clk = {
 	.halt_reg = 0x6078,
 	.halt_check = BRANCH_HALT,
@@ -1768,10 +1382,6 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "disp_cc_xo_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_xo_clk_src",
-			},
-			.num_parents = 1,
 			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
@@ -1779,7 +1389,6 @@
 };
 
 static struct clk_regmap *disp_cc_kona_clocks[] = {
-	[DISP_CC_DEBUG_CLK] = &disp_cc_debug_clk.clkr,
 	[DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
 	[DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
 	[DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
@@ -1794,11 +1403,6 @@
 	[DISP_CC_MDSS_DP_AUX1_CLK_SRC] = &disp_cc_mdss_dp_aux1_clk_src.clkr,
 	[DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
 	[DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
-	[DISP_CC_MDSS_DP_CRYPTO1_CLK] = &disp_cc_mdss_dp_crypto1_clk.clkr,
-	[DISP_CC_MDSS_DP_CRYPTO1_CLK_SRC] =
-		&disp_cc_mdss_dp_crypto1_clk_src.clkr,
-	[DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
-	[DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
 	[DISP_CC_MDSS_DP_LINK1_CLK] = &disp_cc_mdss_dp_link1_clk.clkr,
 	[DISP_CC_MDSS_DP_LINK1_CLK_SRC] = &disp_cc_mdss_dp_link1_clk_src.clkr,
 	[DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC] =
@@ -1842,46 +1446,18 @@
 	[DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
 	[DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
 	[DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
-	[DISP_CC_MDSS_SPDM_DEBUG_CLK] = &disp_cc_mdss_spdm_debug_clk.clkr,
-	[DISP_CC_MDSS_SPDM_DP_CRYPTO_CLK] =
-		&disp_cc_mdss_spdm_dp_crypto_clk.clkr,
-	[DISP_CC_MDSS_SPDM_DP_CRYPTO_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_dp_crypto_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_DP_PIXEL1_CLK] =
-		&disp_cc_mdss_spdm_dp_pixel1_clk.clkr,
-	[DISP_CC_MDSS_SPDM_DP_PIXEL1_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_dp_pixel1_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_DP_PIXEL_CLK] = &disp_cc_mdss_spdm_dp_pixel_clk.clkr,
-	[DISP_CC_MDSS_SPDM_DP_PIXEL_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_dp_pixel_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_MDP_CLK] = &disp_cc_mdss_spdm_mdp_clk.clkr,
-	[DISP_CC_MDSS_SPDM_MDP_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_mdp_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_PCLK0_CLK] = &disp_cc_mdss_spdm_pclk0_clk.clkr,
-	[DISP_CC_MDSS_SPDM_PCLK0_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_pclk0_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_PCLK1_CLK] = &disp_cc_mdss_spdm_pclk1_clk.clkr,
-	[DISP_CC_MDSS_SPDM_PCLK1_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_pclk1_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_ROT_CLK] = &disp_cc_mdss_spdm_rot_clk.clkr,
-	[DISP_CC_MDSS_SPDM_ROT_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_rot_div_clk_src.clkr,
 	[DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
 	[DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
 	[DISP_CC_PLL0] = &disp_cc_pll0.clkr,
 	[DISP_CC_PLL1] = &disp_cc_pll1.clkr,
-	[DISP_CC_PLL_TEST_CLK] = &disp_cc_pll_test_clk.clkr,
-	[DISP_CC_PLL_TEST_DIV_CLK_SRC] = &disp_cc_pll_test_div_clk_src.clkr,
 	[DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
 	[DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
 	[DISP_CC_XO_CLK] = &disp_cc_xo_clk.clkr,
-	[DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
 };
 
 static const struct qcom_reset_map disp_cc_kona_resets[] = {
 	[DISP_CC_MDSS_CORE_BCR] = { 0x2000 },
 	[DISP_CC_MDSS_RSCC_BCR] = { 0x4000 },
-	[DISP_CC_MDSS_SPDM_BCR] = { 0x6000 },
 };
 
 static const struct regmap_config disp_cc_kona_regmap_config = {
@@ -1938,6 +1514,9 @@
 	clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
 	clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
 
+	/* Enable clock gating for MDP clocks */
+	regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
 	ret = qcom_cc_really_probe(pdev, &disp_cc_kona_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register Display CC clocks\n");
diff --git a/drivers/clk/qcom/dispcc-lito.c b/drivers/clk/qcom/dispcc-lito.c
new file mode 100644
index 0000000..48833b46
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-lito.c
@@ -0,0 +1,1152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,dispcc-lito.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+#include "vdd-level.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CHIP_SLEEP_CLK,
+	P_CORE_BI_PLL_TEST_SE,
+	P_DISP_CC_PLL0_OUT_EVEN,
+	P_DISP_CC_PLL0_OUT_MAIN,
+	P_DP_PHY_PLL_LINK_CLK,
+	P_DP_PHY_PLL_VCO_DIV_CLK,
+	P_DSI0_PHY_PLL_OUT_BYTECLK,
+	P_DSI0_PHY_PLL_OUT_DSICLK,
+	P_DSI1_PHY_PLL_OUT_BYTECLK,
+	P_DSI1_PHY_PLL_OUT_DSICLK,
+	P_GPLL0_OUT_MAIN,
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+	{ P_DSI1_PHY_PLL_OUT_BYTECLK, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"dsi0_phy_pll_out_byteclk",
+	"dsi1_phy_pll_out_byteclk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DP_PHY_PLL_LINK_CLK, 1 },
+	{ P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"dp_phy_pll_link_clk",
+	"dp_phy_pll_vco_div_clk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DISP_CC_PLL0_OUT_MAIN, 1 },
+	{ P_GPLL0_OUT_MAIN, 4 },
+	{ P_DISP_CC_PLL0_OUT_EVEN, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_3[] = {
+	"bi_tcxo",
+	"disp_cc_pll0",
+	"gcc_disp_gpll0_clk_src",
+	"disp_cc_pll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+	{ P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_4[] = {
+	"bi_tcxo",
+	"dsi0_phy_pll_out_dsiclk",
+	"dsi1_phy_pll_out_dsiclk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 4 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_5[] = {
+	"bi_tcxo",
+	"gcc_disp_gpll0_clk_src",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+	{ P_CHIP_SLEEP_CLK, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_6[] = {
+	"chip_sleep_clk",
+	"core_bi_pll_test_se",
+};
+
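+/* The Lucid PLL VCO operates between 249.6 MHz and 2 GHz */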
+static struct pll_vco lucid_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
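+/* Initial PLL0 settings; programmed at probe via clk_lucid_pll_configure() */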
+static const struct alpha_pll_config disp_cc_pll0_config = {
+	.l = 0x16,
+	.cal_l = 0x44,
+	.alpha = 0x6555,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000001,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+	.reg = 0x2128,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "disp_cc_mdss_byte0_div_clk_src",
+		.parent_names =
+			(const char *[]){ "disp_cc_mdss_byte0_clk_src" },
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+	.reg = 0x2144,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "disp_cc_mdss_byte1_div_clk_src",
+		.parent_names =
+			(const char *[]){ "disp_cc_mdss_byte1_clk_src" },
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+	.reg = 0x2190,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "disp_cc_mdss_dp_link_div_clk_src",
+		.parent_names =
+			(const char *[]){ "disp_cc_mdss_dp_link_clk_src" },
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+	F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+	{ }
+};
+
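+/* enable_safe_config parks the RCG on a safe configuration while disabled */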
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+	.cmd_rcgr = 0x22bc,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_5,
+	.freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_ahb_clk_src",
+		.parent_names = disp_cc_parent_names_5,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 37500000,
+			[VDD_NOMINAL] = 75000000},
+	},
+};
+
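+/* Byte clocks follow the DSI PHY PLL rate, so no local frequency table */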
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+	.cmd_rcgr = 0x2110,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_byte0_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_byte2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 187500000,
+			[VDD_LOW] = 300000000,
+			[VDD_LOW_L1] = 358000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+	.cmd_rcgr = 0x212c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_byte1_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_byte2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 187500000,
+			[VDD_LOW] = 300000000,
+			[VDD_LOW_L1] = 358000000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+	.cmd_rcgr = 0x21dc,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_aux_clk_src",
+		.parent_names = disp_cc_parent_names_2,
+		.num_parents = 2,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_crypto_clk_src[] = {
+	F(108000, P_DP_PHY_PLL_LINK_CLK, 3, 0, 0),
+	F(180000, P_DP_PHY_PLL_LINK_CLK, 3, 0, 0),
+	F(360000, P_DP_PHY_PLL_LINK_CLK, 1.5, 0, 0),
+	F(540000, P_DP_PHY_PLL_LINK_CLK, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+	.cmd_rcgr = 0x2194,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_crypto_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_crypto_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 180000,
+			[VDD_LOW_L1] = 360000,
+			[VDD_NOMINAL] = 540000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
+	F(162000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+	F(270000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+	F(540000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+	F(810000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+	.cmd_rcgr = 0x2178,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_link_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 270000,
+			[VDD_LOW_L1] = 540000,
+			[VDD_NOMINAL] = 810000},
+	},
+};
+
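+/* DP pixel clocks use clk_dp_ops with a 16-bit M/N divider off the PHY VCO */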
+static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
+	.cmd_rcgr = 0x21c4,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_pixel1_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_dp_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 337500000,
+			[VDD_NOMINAL] = 675000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+	.cmd_rcgr = 0x21ac,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_pixel_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_dp_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 337500000,
+			[VDD_NOMINAL] = 675000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+	.cmd_rcgr = 0x2148,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_esc0_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+	.cmd_rcgr = 0x2160,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_esc1_clk_src",
+		.parent_names = disp_cc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+	F(345000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+	F(460000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+	.cmd_rcgr = 0x20c8,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_3,
+	.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_mdp_clk_src",
+		.parent_names = disp_cc_parent_names_3,
+		.num_parents = 5,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 200000000,
+			[VDD_LOW] = 300000000,
+			[VDD_LOW_L1] = 345000000,
+			[VDD_NOMINAL] = 460000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+	.cmd_rcgr = 0x2098,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_pclk0_clk_src",
+		.parent_names = disp_cc_parent_names_4,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_pixel_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 525000000,
+			[VDD_LOW_L1] = 625000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+	.cmd_rcgr = 0x20b0,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_pclk1_clk_src",
+		.parent_names = disp_cc_parent_names_4,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.ops = &clk_pixel_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 525000000,
+			[VDD_LOW_L1] = 625000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+	.cmd_rcgr = 0x20e0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_3,
+	.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_rot_clk_src",
+		.parent_names = disp_cc_parent_names_3,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 200000000,
+			[VDD_LOW] = 300000000,
+			[VDD_LOW_L1] = 345000000,
+			[VDD_NOMINAL] = 460000000},
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+	.cmd_rcgr = 0x20f8,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_vsync_clk_src",
+		.parent_names = disp_cc_parent_names_2,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+	F(32000, P_CHIP_SLEEP_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+	.cmd_rcgr = 0x6060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_6,
+	.freq_tbl = ftbl_disp_cc_sleep_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_sleep_clk_src",
+		.parent_names = disp_cc_parent_names_6,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 32000},
+	},
+};
+
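+/* The XO source is a fixed 19.2 MHz clock derived from bi_tcxo */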
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+	.cmd_rcgr = 0x6044,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_xo_clk_src",
+		.parent_names = disp_cc_parent_names_2,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+	.halt_reg = 0x2080,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_ahb_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+	.halt_reg = 0x2028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+	.halt_reg = 0x202c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x202c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_intf_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte0_div_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+	.halt_reg = 0x2030,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte1_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+	.halt_reg = 0x2034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte1_intf_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_byte1_div_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+	.halt_reg = 0x2054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_aux_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+	.halt_reg = 0x2048,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_crypto_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_crypto_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+	.halt_reg = 0x2040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_link_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_link_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+	.halt_reg = 0x2044,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_link_intf_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_link_div_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel1_clk = {
+	.halt_reg = 0x2050,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_pixel1_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_pixel1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+	.halt_reg = 0x204c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x204c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_pixel_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_pixel_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+	.halt_reg = 0x2038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_esc0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_esc0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+	.halt_reg = 0x203c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x203c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_esc1_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_esc1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+	.halt_reg = 0x200c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x200c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_mdp_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_mdp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+	.halt_reg = 0x201c,
+	.halt_check = BRANCH_VOTED,
+	.clkr = {
+		.enable_reg = 0x201c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_mdp_lut_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_mdp_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+	.halt_reg = 0x4004,
+	.halt_check = BRANCH_VOTED,
+	.clkr = {
+		.enable_reg = 0x4004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_non_gdsc_ahb_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+	.halt_reg = 0x2004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_pclk0_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_pclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+	.halt_reg = 0x2008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_pclk1_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_pclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+	.halt_reg = 0x2014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rot_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_rot_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+	.halt_reg = 0x400c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x400c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rscc_ahb_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+	.halt_reg = 0x4008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rscc_vsync_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_vsync_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+	.halt_reg = 0x2024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_vsync_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_vsync_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+	.halt_reg = 0x6078,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_sleep_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_xo_clk = {
+	.halt_reg = 0x605c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x605c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_xo_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *disp_cc_lito_clocks[] = {
+	[DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+	[DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+	[DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+	[DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+	[DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+	[DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+	[DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+	[DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+	[DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+	[DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] =
+		&disp_cc_mdss_dp_link_div_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] = &disp_cc_mdss_dp_pixel1_clk_src.clkr,
+	[DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+	[DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+	[DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+	[DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+	[DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+	[DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+	[DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+	[DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+	[DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+	[DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+	[DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+	[DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+	[DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+	[DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+	[DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+	[DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+	[DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+	[DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+	[DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+	[DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+	[DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+	[DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+	[DISP_CC_XO_CLK] = &disp_cc_xo_clk.clkr,
+	[DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct regmap_config disp_cc_lito_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x10000,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_lito_desc = {
+	.config = &disp_cc_lito_regmap_config,
+	.clks = disp_cc_lito_clocks,
+	.num_clks = ARRAY_SIZE(disp_cc_lito_clocks),
+};
+
+static const struct of_device_id disp_cc_lito_match_table[] = {
+	{ .compatible = "qcom,lito-dispcc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_lito_match_table);
+
+static int disp_cc_lito_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct clk *clk;
+	int ret;
+
+	regmap = qcom_cc_map(pdev, &disp_cc_lito_desc);
+	if (IS_ERR(regmap)) {
+		dev_err(&pdev->dev, "Failed to map the disp_cc registers\n");
+		return PTR_ERR(regmap);
+	}
+
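+	/* Only check that cfg_ahb_clk is available; defer probe otherwise */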
+	clk = clk_get(&pdev->dev, "cfg_ahb_clk");
+	if (IS_ERR(clk)) {
+		if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get ahb clock handle\n");
+		return PTR_ERR(clk);
+	}
+	clk_put(clk);
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
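+	/* Configure PLL0 before registering the clocks sourced from it */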
+	clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+	ret = qcom_cc_really_probe(pdev, &disp_cc_lito_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register Display CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered Display CC clocks\n");
+	return 0;
+}
+
+static struct platform_driver disp_cc_lito_driver = {
+	.probe = disp_cc_lito_probe,
+	.driver = {
+		.name = "lito-dispcc",
+		.of_match_table = disp_cc_lito_match_table,
+	},
+};
+
+static int __init disp_cc_lito_init(void)
+{
+	return platform_driver_register(&disp_cc_lito_driver);
+}
+subsys_initcall(disp_cc_lito_init);
+
+static void __exit disp_cc_lito_exit(void)
+{
+	platform_driver_unregister(&disp_cc_lito_driver);
+}
+module_exit(disp_cc_lito_exit);
+
+MODULE_DESCRIPTION("QTI DISP_CC LITO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lito-dispcc");
diff --git a/drivers/clk/qcom/gcc-kona.c b/drivers/clk/qcom/gcc-kona.c
index c9036ad..ec44288 100644
--- a/drivers/clk/qcom/gcc-kona.c
+++ b/drivers/clk/qcom/gcc-kona.c
@@ -219,6 +219,7 @@
 		.name = "gcc_cpuss_ahb_postdiv_clk_src",
 		.parent_names = (const char *[]){ "gcc_cpuss_ahb_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -232,6 +233,7 @@
 		.parent_names =
 			(const char *[]){ "gcc_usb30_prim_mock_utmi_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -245,6 +247,7 @@
 		.parent_names =
 			(const char *[]){ "gcc_usb30_sec_mock_utmi_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -1966,19 +1969,6 @@
 	},
 };
 
-static struct clk_branch gcc_npu_at_clk = {
-	.halt_reg = 0x4d014,
-	.halt_check = BRANCH_VOTED,
-	.clkr = {
-		.enable_reg = 0x4d014,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_npu_at_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_npu_axi_clk = {
 	.halt_reg = 0x4d008,
 	.halt_check = BRANCH_VOTED,
@@ -2081,19 +2071,6 @@
 	},
 };
 
-static struct clk_branch gcc_npu_trig_clk = {
-	.halt_reg = 0x4d010,
-	.halt_check = BRANCH_VOTED,
-	.clkr = {
-		.enable_reg = 0x4d010,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_npu_trig_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_pcie0_phy_refgen_clk = {
 	.halt_reg = 0x6f02c,
 	.halt_check = BRANCH_HALT,
@@ -4030,7 +4007,6 @@
 	[GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr,
 	[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
 	[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
-	[GCC_NPU_AT_CLK] = &gcc_npu_at_clk.clkr,
 	[GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
 	[GCC_NPU_BWMON_AXI_CLK] = &gcc_npu_bwmon_axi_clk.clkr,
 	[GCC_NPU_BWMON_CFG_AHB_CLK] = &gcc_npu_bwmon_cfg_ahb_clk.clkr,
@@ -4038,7 +4014,6 @@
 	[GCC_NPU_DMA_CLK] = &gcc_npu_dma_clk.clkr,
 	[GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
 	[GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
-	[GCC_NPU_TRIG_CLK] = &gcc_npu_trig_clk.clkr,
 	[GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
 	[GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr,
 	[GCC_PCIE2_PHY_REFGEN_CLK] = &gcc_pcie2_phy_refgen_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-lito.c b/drivers/clk/qcom/gcc-lito.c
index 3ed2867..0c02861 100644
--- a/drivers/clk/qcom/gcc-lito.c
+++ b/drivers/clk/qcom/gcc-lito.c
@@ -280,31 +280,6 @@
 	},
 };
 
-static const struct freq_tbl ftbl_gcc_dpm_clk_src[] = {
-	F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0),
-	{ }
-};
-
-static struct clk_rcg2 gcc_dpm_clk_src = {
-	.cmd_rcgr = 0x4600c,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = gcc_parent_map_0,
-	.freq_tbl = ftbl_gcc_dpm_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "gcc_dpm_clk_src",
-		.parent_names = gcc_parent_names_0,
-		.num_parents = 4,
-		.ops = &clk_rcg2_ops,
-		.vdd_class = &vdd_cx,
-		.num_rate_max = VDD_NUM,
-		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_LOWER] = 100000000,
-			[VDD_LOW] = 150000000,
-			[VDD_LOW_L1] = 200000000},
-	},
-};
-
 static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
@@ -1315,39 +1290,6 @@
 	},
 };
 
-static struct clk_branch gcc_dpm_ahb_clk = {
-	.halt_reg = 0x46008,
-	.halt_check = BRANCH_HALT,
-	.hwcg_reg = 0x46008,
-	.hwcg_bit = 1,
-	.clkr = {
-		.enable_reg = 0x46008,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_dpm_ahb_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gcc_dpm_clk = {
-	.halt_reg = 0x46004,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x46004,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_dpm_clk",
-			.parent_names = (const char *[]){
-				"gcc_dpm_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_gp1_clk = {
 	.halt_reg = 0x64000,
 	.halt_check = BRANCH_HALT,
@@ -1512,6 +1454,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_npu_bwmon2_axi_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -1525,6 +1468,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_npu_bwmon_axi_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -1538,6 +1482,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_npu_bwmon_cfg_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -2565,9 +2510,6 @@
 	[GCC_DISP_THROTTLE_HF_AXI_CLK] = &gcc_disp_throttle_hf_axi_clk.clkr,
 	[GCC_DISP_THROTTLE_SF_AXI_CLK] = &gcc_disp_throttle_sf_axi_clk.clkr,
 	[GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
-	[GCC_DPM_AHB_CLK] = &gcc_dpm_ahb_clk.clkr,
-	[GCC_DPM_CLK] = &gcc_dpm_clk.clkr,
-	[GCC_DPM_CLK_SRC] = &gcc_dpm_clk_src.clkr,
 	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
 	[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
 	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
@@ -2690,11 +2632,14 @@
 	[GCC_PRNG_BCR] = { 0x34000 },
 	[GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
 	[GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+	[GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
 	[GCC_SDCC1_BCR] = { 0x26000 },
 	[GCC_SDCC2_BCR] = { 0x14000 },
 	[GCC_SDCC4_BCR] = { 0x16000 },
 	[GCC_UFS_PHY_BCR] = { 0x77000 },
 	[GCC_USB30_PRIM_BCR] = { 0xf000 },
+	[GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+	[GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
 	[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
 };
 
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index fa1a196..3bf11a6 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -131,8 +131,8 @@
 	"core_bi_pll_test_se",
 };
 
-static const char * const gcc_parent_names_7[] = {
-	"bi_tcxo",
+static const char * const gcc_parent_names_7_ao[] = {
+	"bi_tcxo_ao",
 	"gpll0",
 	"gpll0_out_even",
 	"core_bi_pll_test_se",
@@ -144,6 +144,12 @@
 	"core_bi_pll_test_se",
 };
 
+static const char * const gcc_parent_names_8_ao[] = {
+	"bi_tcxo_ao",
+	"gpll0",
+	"core_bi_pll_test_se",
+};
+
 static const struct parent_map gcc_parent_map_10[] = {
 	{ P_BI_TCXO, 0 },
 	{ P_GPLL0_OUT_MAIN, 1 },
@@ -226,7 +232,7 @@
 	.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_cpuss_ahb_clk_src",
-		.parent_names = gcc_parent_names_7,
+		.parent_names = gcc_parent_names_7_ao,
 		.num_parents = 4,
 		.ops = &clk_rcg2_ops,
 	},
@@ -245,7 +251,7 @@
 	.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_cpuss_rbcpr_clk_src",
-		.parent_names = gcc_parent_names_8,
+		.parent_names = gcc_parent_names_8_ao,
 		.num_parents = 3,
 		.ops = &clk_rcg2_ops,
 	},
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index 7751839..163073e 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -26,6 +26,7 @@
 #define PWR_ON_MASK		BIT(31)
 #define CLK_DIS_WAIT_MASK	(0xF << 12)
 #define CLK_DIS_WAIT_SHIFT	(12)
+#define RETAIN_FF_ENABLE_MASK	BIT(11)
 #define SW_OVERRIDE_MASK	BIT(2)
 #define HW_CONTROL_MASK		BIT(1)
 #define SW_COLLAPSE_MASK	BIT(0)
@@ -57,6 +58,7 @@
 	bool			toggle_mem;
 	bool			toggle_periph;
 	bool			toggle_logic;
+	bool			retain_ff_enable;
 	bool			resets_asserted;
 	bool			root_en;
 	bool			force_root_en;
@@ -311,6 +313,11 @@
 				goto end;
 			}
 		}
+
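+		/* Honour "qcom,retain-regs" by setting the RETAIN_FF_ENABLE bit */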
+		if (sc->retain_ff_enable && !(regval & RETAIN_FF_ENABLE_MASK)) {
+			regval |= RETAIN_FF_ENABLE_MASK;
+			regmap_write(sc->regmap, REG_OFFSET, regval);
+		}
 	} else {
 		for (i = 0; i < sc->reset_count; i++)
 			reset_control_deassert(sc->reset_clocks[i]);
@@ -735,6 +742,8 @@
 	retain_periph = of_property_read_bool(pdev->dev.of_node,
 					    "qcom,retain-periph");
 	sc->toggle_periph = !retain_periph;
+	sc->retain_ff_enable = of_property_read_bool(pdev->dev.of_node,
+						"qcom,retain-regs");
 	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
 						"qcom,skip-logic-collapse");
 	support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
diff --git a/drivers/clk/qcom/gpucc-kona.c b/drivers/clk/qcom/gpucc-kona.c
index 34cb1b6..8d00c6b 100644
--- a/drivers/clk/qcom/gpucc-kona.c
+++ b/drivers/clk/qcom/gpucc-kona.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
 
 #define pr_fmt(fmt) "clk: %s: " fmt, __func__
 
@@ -26,6 +26,12 @@
 #include "reset.h"
 #include "vdd-level.h"
 
+#define CX_GMU_CBCR_SLEEP_SHIFT	4
+#define CX_GMU_CBCR_SLEEP_MASK	GENMASK(7, 4)
+#define CX_GMU_CBCR_WAKE_SHIFT	8
+#define CX_GMU_CBCR_WAKE_MASK	GENMASK(11, 8)
+
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
 
@@ -136,7 +142,7 @@
 
 static struct clk_branch gpu_cc_crc_ahb_clk = {
 	.halt_reg = 0x107c,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x107c,
 		.enable_mask = BIT(0),
@@ -149,7 +155,7 @@
 
 static struct clk_branch gpu_cc_cx_apb_clk = {
 	.halt_reg = 0x1088,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x1088,
 		.enable_mask = BIT(0),
@@ -180,12 +186,16 @@
 
 static struct clk_branch gpu_cc_cx_qdss_at_clk = {
 	.halt_reg = 0x1080,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x1080,
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gpu_cc_cx_qdss_at_clk",
+			.parent_names = (const char *[]){
+				"qdss_qmp_clk",
+			},
+			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -193,12 +203,16 @@
 
 static struct clk_branch gpu_cc_cx_qdss_trig_clk = {
 	.halt_reg = 0x1094,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x1094,
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gpu_cc_cx_qdss_trig_clk",
+			.parent_names = (const char *[]){
+				"qdss_qmp_clk",
+			},
+			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -206,12 +220,16 @@
 
 static struct clk_branch gpu_cc_cx_qdss_tsctr_clk = {
 	.halt_reg = 0x1084,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x1084,
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gpu_cc_cx_qdss_tsctr_clk",
+			.parent_names = (const char *[]){
+				"qdss_qmp_clk",
+			},
+			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -219,7 +237,7 @@
 
 static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
 	.halt_reg = 0x108c,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x108c,
 		.enable_mask = BIT(0),
@@ -232,7 +250,7 @@
 
 static struct clk_branch gpu_cc_cxo_aon_clk = {
 	.halt_reg = 0x1004,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x1004,
 		.enable_mask = BIT(0),
@@ -276,12 +294,16 @@
 
 static struct clk_branch gpu_cc_gx_qdss_tsctr_clk = {
 	.halt_reg = 0x105c,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x105c,
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gpu_cc_gx_qdss_tsctr_clk",
+			.parent_names = (const char *[]){
+				"qdss_qmp_clk",
+			},
+			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -289,7 +311,7 @@
 
 static struct clk_branch gpu_cc_gx_vsense_clk = {
 	.halt_reg = 0x1058,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x1058,
 		.enable_mask = BIT(0),
@@ -302,7 +324,7 @@
 
 static struct clk_branch gpu_cc_sleep_clk = {
 	.halt_reg = 0x1090,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x1090,
 		.enable_mask = BIT(0),
@@ -403,6 +425,7 @@
 {
 	struct regmap *regmap;
 	struct clk *clk;
+	unsigned int value, mask;
 	int i, ret;
 
 	regmap = qcom_cc_map(pdev, &gpu_cc_kona_desc);
@@ -433,6 +456,12 @@
 			return PTR_ERR(clk);
 	}
 
+	/* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+	mask = CX_GMU_CBCR_SLEEP_MASK | CX_GMU_CBCR_WAKE_MASK;
+	value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
+	regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg,
+							mask, value);
+
 	ret = qcom_cc_really_probe(pdev, &gpu_cc_kona_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register GPU CC clocks\n");
diff --git a/drivers/clk/qcom/gpucc-lito.c b/drivers/clk/qcom/gpucc-lito.c
new file mode 100644
index 0000000..92c2f19
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-lito.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gpucc-lito.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "reset.h"
+#include "vdd-level.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
+
+#define CX_GMU_CBCR_SLEEP_MASK		0xF
+#define CX_GMU_CBCR_SLEEP_SHIFT		4
+#define CX_GMU_CBCR_WAKE_MASK		0xF
+#define CX_GMU_CBCR_WAKE_SHIFT		8
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GPLL0_OUT_MAIN,
+	P_GPLL0_OUT_MAIN_DIV,
+	P_GPU_CC_PLL0_OUT_MAIN,
+	P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPU_CC_PLL0_OUT_MAIN, 1 },
+	{ P_GPU_CC_PLL1_OUT_MAIN, 3 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_MAIN_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"gpu_cc_pll0",
+	"gpu_cc_pll1",
+	"gcc_gpu_gpll0_clk_src",
+	"gcc_gpu_gpll0_div_clk_src",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_MAIN_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"gcc_gpu_gpll0_clk_src",
+	"gcc_gpu_gpll0_div_clk_src",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco lucid_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+	.l = 0x1A,
+	.cal_l = 0x44,
+	.alpha = 0xAAA,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000001,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+	.offset = 0x100,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_pll1",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+	F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+	.cmd_rcgr = 0x1120,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gpu_cc_parent_map_0,
+	.freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_gmu_clk_src",
+		.parent_names = gpu_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 200000000,
+			[VDD_LOW] = 500000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gpu_cc_rbcpr_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(50000000, P_GPLL0_OUT_MAIN_DIV, 6, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_rbcpr_clk_src = {
+	.cmd_rcgr = 0x10b0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gpu_cc_parent_map_1,
+	.freq_tbl = ftbl_gpu_cc_rbcpr_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_rbcpr_clk_src",
+		.parent_names = gpu_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_NOMINAL] = 50000000},
+	},
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+	.halt_reg = 0x1078,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+	.halt_reg = 0x107c,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x107c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_crc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+	.halt_reg = 0x1088,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1088,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_apb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+	.halt_reg = 0x1098,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1098,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gmu_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gmu_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+	.halt_reg = 0x108c,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x108c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_snoc_dvm_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+	.halt_reg = 0x1004,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_aon_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+	.halt_reg = 0x109c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x109c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+	.halt_reg = 0x1064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_gmu_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gmu_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+	.halt_reg = 0x1058,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1058,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_vsense_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_rbcpr_clk = {
+	.halt_reg = 0x10f0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10f0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_rbcpr_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_rbcpr_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+	.halt_reg = 0x1090,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1090,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_sleep_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Measure-only clock for gpu_cc_cx_gfx3d_clk. */
+static struct clk_dummy measure_only_gpu_cc_cx_gfx3d_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_gpu_cc_cx_gfx3d_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+/* Measure-only clock for gpu_cc_cx_gfx3d_slv_clk. */
+static struct clk_dummy measure_only_gpu_cc_cx_gfx3d_slv_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_gpu_cc_cx_gfx3d_slv_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+/* Measure-only clock for gpu_cc_gx_gfx3d_clk. */
+static struct clk_dummy measure_only_gpu_cc_gx_gfx3d_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_gpu_cc_gx_gfx3d_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+struct clk_hw *gpu_cc_lito_hws[] = {
+	[MEASURE_ONLY_GPU_CC_CX_GFX3D_CLK] =
+		&measure_only_gpu_cc_cx_gfx3d_clk.hw,
+	[MEASURE_ONLY_GPU_CC_CX_GFX3D_SLV_CLK] =
+		&measure_only_gpu_cc_cx_gfx3d_slv_clk.hw,
+	[MEASURE_ONLY_GPU_CC_GX_GFX3D_CLK] =
+		&measure_only_gpu_cc_gx_gfx3d_clk.hw,
+};
+
+static struct clk_regmap *gpu_cc_lito_clocks[] = {
+	[GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+	[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+	[GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+	[GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+	[GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+	[GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+	[GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+	[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+	[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+	[GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+	[GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+	[GPU_CC_RBCPR_CLK] = &gpu_cc_rbcpr_clk.clkr,
+	[GPU_CC_RBCPR_CLK_SRC] = &gpu_cc_rbcpr_clk_src.clkr,
+	[GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static const struct regmap_config gpu_cc_lito_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x8008,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_lito_desc = {
+	.config = &gpu_cc_lito_regmap_config,
+	.hwclks = gpu_cc_lito_hws,
+	.num_hwclks = ARRAY_SIZE(gpu_cc_lito_hws),
+	.clks = gpu_cc_lito_clocks,
+	.num_clks = ARRAY_SIZE(gpu_cc_lito_clocks),
+};
+
+static const struct of_device_id gpu_cc_lito_match_table[] = {
+	{ .compatible = "qcom,gpucc-lito" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_lito_match_table);
+
+static int gpu_cc_lito_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	unsigned int value, mask;
+	int ret;
+
+	regmap = qcom_cc_map(pdev, &gpu_cc_lito_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(vdd_mx.regulator[0])) {
+		if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_mx regulator\n");
+		return PTR_ERR(vdd_mx.regulator[0]);
+	}
+
+	clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+	/* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+	mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+	mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+	value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
+	regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg,
+								mask, value);
+
+	ret = qcom_cc_really_probe(pdev, &gpu_cc_lito_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GPU CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered GPU CC clocks\n");
+	return 0;
+}
+
+static struct platform_driver gpu_cc_lito_driver = {
+	.probe = gpu_cc_lito_probe,
+	.driver = {
+		.name = "gpu_cc-lito",
+		.of_match_table = gpu_cc_lito_match_table,
+	},
+};
+
+static int __init gpu_cc_lito_init(void)
+{
+	return platform_driver_register(&gpu_cc_lito_driver);
+}
+subsys_initcall(gpu_cc_lito_init);
+
+static void __exit gpu_cc_lito_exit(void)
+{
+	platform_driver_unregister(&gpu_cc_lito_driver);
+}
+module_exit(gpu_cc_lito_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC LITO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gpu_cc-lito");
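
Both GPU CC files program the same WAKE/SLEEP fields of the GMU CBCR, but spell the masks differently: gpucc-kona.c defines pre-shifted masks with GENMASK(), while gpucc-lito.c defines 4-bit field masks and shifts them at the call site. A standalone sanity check (userspace, with a simplified 32-bit GENMASK) showing the two conventions cover identical bits:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	uint32_t kona = GENMASK(7, 4) | GENMASK(11, 8);	/* pre-shifted  */
	uint32_t lito = (0xFu << 4) | (0xFu << 8);	/* shifted late */

	assert(kona == lito);
	printf("0x%x\n", kona);	/* 0xff0: bits 4-11 of the CBCR */
	return 0;
}
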
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c
index a4840da..1457715 100644
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c
@@ -19,6 +19,7 @@
 #define DP_PHY_PD_CTL				0x0018
 #define DP_PHY_MODE				0x001C
 
+#define DP_PHY_AUX_CFG1				0x0024
 #define DP_PHY_AUX_CFG2				0x0028
 
 #define DP_PHY_VCO_DIV				0x0070
@@ -405,7 +406,8 @@
 	struct dp_pll_db_7nm *pdb = (struct dp_pll_db_7nm *)dp_res->priv;
 	u32 bias_en, drvr_en;
 
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0x24);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG1, 0x13);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0xA4);
 	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
 	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
 	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c
index 2533e7d..e1e7abc 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c
@@ -175,6 +175,7 @@
 #define PLL_CMODE_2				0x0254
 #define PLL_ANALOG_CONTROLS_FIVE_1		0x0258
 #define PLL_ANALOG_CONTROLS_FIVE_2		0x025C
+#define PLL_PERF_OPTIMIZE			0x0260
 
 /* Register Offsets from PHY base address */
 #define PHY_CMN_CLK_CFG0	0x010
@@ -254,6 +255,13 @@
 	return (rsc->pll_interface_type == MDSS_DSI_PLL_7NM_V2) ? true : false;
 }
 
+static inline bool dsi_pll_7nm_is_hw_revision_v4_1(
+		struct mdss_pll_resources *rsc)
+{
+	return (rsc->pll_interface_type == MDSS_DSI_PLL_7NM_V4_1) ?
+		true : false;
+}
+
 static inline int pll_reg_read(void *context, unsigned int reg,
 					unsigned int *val)
 {
@@ -521,10 +529,25 @@
 
 	dec = div_u64(dec_multiple, multiplier);
 
-	if (dsi_pll_7nm_is_hw_revision_v1(rsc))
+	switch (rsc->pll_interface_type) {
+	case MDSS_DSI_PLL_7NM:
 		regs->pll_clock_inverters = 0x0;
-	else
+		break;
+	case MDSS_DSI_PLL_7NM_V2:
 		regs->pll_clock_inverters = 0x28;
+		break;
+	case MDSS_DSI_PLL_7NM_V4_1:
+	default:
+		if (pll_freq <= 1000000000)
+			regs->pll_clock_inverters = 0xA0;
+		else if (pll_freq <= 2500000000)
+			regs->pll_clock_inverters = 0x20;
+		else if (pll_freq <= 3020000000)
+			regs->pll_clock_inverters = 0x00;
+		else
+			regs->pll_clock_inverters = 0x40;
+		break;
+	}
 
 	regs->pll_lockdet_rate = config->lock_timer;
 	regs->decimal_div_start = dec;
@@ -608,14 +631,35 @@
 				  struct mdss_pll_resources *rsc)
 {
 	void __iomem *pll_base = rsc->pll_base;
+	u64 vco_rate = rsc->vco_current_rate;
 
-	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE_1, 0x01);
-	MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x00);
+	switch (rsc->pll_interface_type) {
+	case MDSS_DSI_PLL_7NM:
+	case MDSS_DSI_PLL_7NM_V2:
+		MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE_1, 0x01);
+		MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x00);
+		break;
+	case MDSS_DSI_PLL_7NM_V4_1:
+	default:
+		if (vco_rate < 3100000000)
+			MDSS_PLL_REG_W(pll_base,
+					PLL_ANALOG_CONTROLS_FIVE_1, 0x01);
+		else
+			MDSS_PLL_REG_W(pll_base,
+					PLL_ANALOG_CONTROLS_FIVE_1, 0x03);
+
+		if (vco_rate < 1520000000)
+			MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x08);
+		else if (vco_rate < 2990000000)
+			MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x01);
+		else
+			MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x00);
+
+		break;
+	}
 
 	if (dsi_pll_7nm_is_hw_revision_v1(rsc))
 		MDSS_PLL_REG_W(pll_base, PLL_GEAR_BAND_SELECT_CONTROLS, 0x21);
-	else
-		MDSS_PLL_REG_W(pll_base, PLL_GEAR_BAND_SELECT_CONTROLS, 0x22);
 
 	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE, 0x01);
 	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_TWO, 0x03);
@@ -638,10 +682,21 @@
 	MDSS_PLL_REG_W(pll_base, PLL_PFILT, 0x2f);
 	MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x2a);
 
-	if (dsi_pll_7nm_is_hw_revision_v1(rsc))
+	switch (rsc->pll_interface_type) {
+	case MDSS_DSI_PLL_7NM:
 		MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x30);
-	else
+		break;
+	case MDSS_DSI_PLL_7NM_V2:
 		MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x22);
+		break;
+	case MDSS_DSI_PLL_7NM_V4_1:
+	default:
+		MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x3F);
+		break;
+	}
+
+	if (dsi_pll_7nm_is_hw_revision_v4_1(rsc))
+		MDSS_PLL_REG_W(pll_base, PLL_PERF_OPTIMIZE, 0x22);
 }
 
 static void dsi_pll_init_val(struct mdss_pll_resources *rsc)
@@ -1447,7 +1502,7 @@
 
 static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
-	.min_rate = 1500000000UL,
+	.min_rate = 1000000000UL,
 	.max_rate = 3500000000UL,
 	.hw.init = &(struct clk_init_data){
 			.name = "dsi0pll_vco_clk",
@@ -1460,7 +1515,7 @@
 
 static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
-	.min_rate = 1500000000UL,
+	.min_rate = 1000000000UL,
 	.max_rate = 3500000000UL,
 	.hw.init = &(struct clk_init_data){
 			.name = "dsi1pll_vco_clk",
@@ -1807,6 +1862,12 @@
 		dsi0pll_byteclk_mux.clkr.regmap = rmap;
 
 		dsi0pll_vco_clk.priv = pll_res;
+
+		if (dsi_pll_7nm_is_hw_revision_v4_1(pll_res)) {
+			dsi0pll_vco_clk.min_rate = 600000000;
+			dsi0pll_vco_clk.max_rate = 5000000000;
+		}
+
 		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_7nm[i]);
@@ -1849,6 +1910,11 @@
 		dsi1pll_byteclk_mux.clkr.regmap = rmap;
 		dsi1pll_vco_clk.priv = pll_res;
 
+		if (dsi_pll_7nm_is_hw_revision_v4_1(pll_res)) {
+			dsi1pll_vco_clk.min_rate = 600000000;
+			dsi1pll_vco_clk.max_rate = 5000000000;
+		}
+
 		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_7nm[i]);
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index e5647f3..7b95b10 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -136,6 +136,8 @@
 		pll_res->pll_interface_type = MDSS_DSI_PLL_7NM;
 	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_7nm_v2"))
 		pll_res->pll_interface_type = MDSS_DSI_PLL_7NM_V2;
+	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_7nm_v4_1"))
+		pll_res->pll_interface_type = MDSS_DSI_PLL_7NM_V4_1;
 	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_28lpm"))
 		pll_res->pll_interface_type = MDSS_DSI_PLL_28LPM;
 	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_14nm"))
@@ -168,6 +170,7 @@
 		break;
 	case MDSS_DSI_PLL_7NM:
 	case MDSS_DSI_PLL_7NM_V2:
+	case MDSS_DSI_PLL_7NM_V4_1:
 		rc = dsi_pll_clock_register_7nm(pdev, pll_res);
 		break;
 	case MDSS_DP_PLL_7NM:
@@ -226,7 +229,7 @@
 
 	label = of_get_property(pdev->dev.of_node, "label", NULL);
 	if (!label)
-		pr_info("%d: MDSS pll label not specified\n");
+		pr_info("MDSS pll label not specified\n");
 	else
 		pr_info("MDSS pll label = %s\n", label);
 
@@ -349,6 +352,7 @@
 	{.compatible = "qcom,mdss_dp_pll_10nm"},
 	{.compatible = "qcom,mdss_dsi_pll_7nm"},
 	{.compatible = "qcom,mdss_dsi_pll_7nm_v2"},
+	{.compatible = "qcom,mdss_dsi_pll_7nm_v4_1"},
 	{.compatible = "qcom,mdss_dp_pll_7nm"},
 	{.compatible = "qcom,mdss_dsi_pll_28lpm"},
 	{.compatible = "qcom,mdss_dsi_pll_14nm"},
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 3153aa0a..17edd24 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -38,6 +38,7 @@
 	MDSS_DP_PLL_10NM,
 	MDSS_DSI_PLL_7NM,
 	MDSS_DSI_PLL_7NM_V2,
+	MDSS_DSI_PLL_7NM_V4_1,
 	MDSS_DP_PLL_7NM,
 	MDSS_DSI_PLL_28LPM,
 	MDSS_DSI_PLL_14NM,
diff --git a/drivers/clk/qcom/mdss/mdss_pll_trace.h b/drivers/clk/qcom/mdss/mdss_pll_trace.h
index fd193bf..cf46c7f 100644
--- a/drivers/clk/qcom/mdss/mdss_pll_trace.h
+++ b/drivers/clk/qcom/mdss/mdss_pll_trace.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #if !defined(_MDSS_PLL_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
@@ -13,7 +13,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mdss_pll
 #undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE mdss_pll_trace
+#define TRACE_INCLUDE_FILE ../../drivers/clk/qcom/mdss/mdss_pll_trace
 
 
 TRACE_EVENT(mdss_pll_lock_start,
diff --git a/drivers/clk/qcom/npucc-kona.c b/drivers/clk/qcom/npucc-kona.c
index 0f29925..2734d9e 100644
--- a/drivers/clk/qcom/npucc-kona.c
+++ b/drivers/clk/qcom/npucc-kona.c
@@ -477,6 +477,10 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "npu_cc_atb_clk",
+			.parent_names = (const char *[]){
+				"qdss_qmp_clk",
+			},
+			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
 	},
diff --git a/drivers/clk/qcom/videocc-kona.c b/drivers/clk/qcom/videocc-kona.c
index ca90ec4..8698403 100644
--- a/drivers/clk/qcom/videocc-kona.c
+++ b/drivers/clk/qcom/videocc-kona.c
@@ -87,9 +87,9 @@
 };
 
 static const struct alpha_pll_config video_pll0_config = {
-	.l = 0x14,
+	.l = 0x25,
 	.cal_l = 0x44,
-	.alpha = 0xD555,
+	.alpha = 0x8000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -121,9 +121,9 @@
 };
 
 static const struct alpha_pll_config video_pll1_config = {
-	.l = 0x14,
+	.l = 0x29,
 	.cal_l = 0x44,
-	.alpha = 0xD555,
+	.alpha = 0xFAAA,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -162,6 +162,7 @@
 		.name = "video_cc_mvs0_div_clk_src",
 		.parent_names = (const char *[]){ "video_cc_mvs0_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -174,6 +175,7 @@
 		.name = "video_cc_mvs0c_div2_div_clk_src",
 		.parent_names = (const char *[]){ "video_cc_mvs0_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -186,6 +188,7 @@
 		.name = "video_cc_mvs1_div_clk_src",
 		.parent_names = (const char *[]){ "video_cc_mvs1_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -198,6 +201,7 @@
 		.name = "video_cc_mvs1c_div2_div_clk_src",
 		.parent_names = (const char *[]){ "video_cc_mvs1_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -223,12 +227,10 @@
 };
 
 static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
-	F(400000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	F(720000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	F(1014000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	F(1098000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	F(1332000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
-	F(1599000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	{ }
 };
 
@@ -248,22 +250,18 @@
 		.vdd_class = &vdd_mm,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 400000000,
 			[VDD_LOWER] = 720000000,
 			[VDD_LOW] = 1014000000,
 			[VDD_LOW_L1] = 1098000000,
-			[VDD_NOMINAL] = 1332000000,
-			[VDD_HIGH] = 1599000000},
+			[VDD_NOMINAL] = 1332000000},
 	},
 };
 
 static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
-	F(400000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
 	F(806000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
 	F(1040000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
 	F(1098000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
 	F(1332000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
-	F(1599000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
 	{ }
 };
 
@@ -283,12 +281,10 @@
 		.vdd_class = &vdd_mm,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 400000000,
 			[VDD_LOWER] = 806000000,
 			[VDD_LOW] = 1040000000,
 			[VDD_LOW_L1] = 1098000000,
-			[VDD_NOMINAL] = 1332000000,
-			[VDD_HIGH] = 1599000000},
+			[VDD_NOMINAL] = 1332000000},
 	},
 };
 
@@ -312,7 +308,7 @@
 		.vdd_class = &vdd_mm,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 32000},
+			[VDD_LOWER] = 32000},
 	},
 };
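
The new L/alpha values line up with the trimmed frequency tables. Assuming a Lucid PLL produces f_out = f_ref * (l + alpha / 2^16) from the 19.2 MHz reference (the 16-bit fractional width is an assumption, not stated in the patch), the old settings programmed the deleted 400 MHz VDD_MIN rate, while the new ones program the lowest remaining rows, 720 MHz and ~806 MHz. A quick check:

#include <stdio.h>

static double lucid_rate(unsigned int l, unsigned int alpha)
{
	return 19200000.0 * (l + alpha / 65536.0);
}

int main(void)
{
	printf("old pll0/pll1: %.0f Hz\n", lucid_rate(0x14, 0xD555)); /* ~400 MHz */
	printf("new pll0:      %.0f Hz\n", lucid_rate(0x25, 0x8000)); /*  720 MHz */
	printf("new pll1:      %.0f Hz\n", lucid_rate(0x29, 0xFAAA)); /* ~806 MHz */
	return 0;
}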
 
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 67e73fd..69fb3af 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -382,7 +382,7 @@
 	COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
 			RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
 			RK2928_CLKGATE_CON(0), 13, GFLAGS),
-	COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT,
+	COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
 			RK2928_CLKSEL_CON(9), 0,
 			RK2928_CLKGATE_CON(0), 14, GFLAGS,
 			&common_spdif_fracmux),
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 2d5d8b4..c4d0b6f 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -43,7 +43,7 @@
 	/* Read mdiv and fdiv from the fdbck register */
 	reg = readl(socfpgaclk->hw.reg + 0x4);
 	mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
-	vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
+	vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
 
 	return (unsigned long)vco_freq;
 }
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 5b238fc..8281dfb 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -12,17 +12,17 @@
 
 #include "stratix10-clk.h"
 
-static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
-					"f2s_free_clk",};
+static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
+					"f2s-free-clk",};
 static const char * const cntr_mux[] = { "main_pll", "periph_pll",
-					 "osc1", "cb_intosc_hs_div2_clk",
-					 "f2s_free_clk"};
-static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
+					 "osc1", "cb-intosc-hs-div2-clk",
+					 "f2s-free-clk"};
+static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
 
 static const char * const noc_free_mux[] = {"main_noc_base_clk",
 					    "peri_noc_base_clk",
-					    "osc1", "cb_intosc_hs_div2_clk",
-					    "f2s_free_clk"};
+					    "osc1", "cb-intosc-hs-div2-clk",
+					    "f2s-free-clk"};
 
 static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
 static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
@@ -33,14 +33,14 @@
 static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
 static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
 
-static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
+static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
 static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
 static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
 
 static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
 					    "peri_mpu_base_clk",
-					    "osc1", "cb_intosc_hs_div2_clk",
-					    "f2s_free_clk"};
+					    "osc1", "cb-intosc-hs-div2-clk",
+					    "f2s-free-clk"};
 
 /* clocks in AO (always on) controller */
 static const struct stratix10_pll_clock s10_pll_clks[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 13eb5b2..c40d572 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -366,10 +366,10 @@
 static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
 					    "pll-audio-2x", "pll-audio" };
 static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
-			       0x0b0, 16, 2, BIT(31), 0);
+			       0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
 
 static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
-			       0x0b4, 16, 2, BIT(31), 0);
+			       0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
 
 /* TODO: the parent for most of the USB clocks is not known */
 static SUNXI_CCU_GATE(usb_phy0_clk,	"usb-phy0",	"osc24M",
@@ -446,7 +446,7 @@
 static SUNXI_CCU_GATE(ac_dig_clk,	"ac-dig",	"pll-audio",
 		      0x140, BIT(31), CLK_SET_RATE_PARENT);
 static SUNXI_CCU_GATE(ac_dig_4x_clk,	"ac-dig-4x",	"pll-audio-4x",
-		      0x140, BIT(30), 0);
+		      0x140, BIT(30), CLK_SET_RATE_PARENT);
 static SUNXI_CCU_GATE(avs_clk,		"avs",		"osc24M",
 		      0x144, BIT(31), 0);
 
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 4e20733..9e3944f 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -19,6 +19,17 @@
 	unsigned long	m, min_m, max_m;
 };
 
+static unsigned long ccu_nm_calc_rate(unsigned long parent,
+				      unsigned long n, unsigned long m)
+{
+	u64 rate = parent;
+
+	rate *= n;
+	do_div(rate, m);
+
+	return rate;
+}
+
 static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
 			     struct _ccu_nm *nm)
 {
@@ -28,7 +39,8 @@
 
 	for (_n = nm->min_n; _n <= nm->max_n; _n++) {
 		for (_m = nm->min_m; _m <= nm->max_m; _m++) {
-			unsigned long tmp_rate = parent * _n  / _m;
+			unsigned long tmp_rate = ccu_nm_calc_rate(parent,
+								  _n, _m);
 
 			if (tmp_rate > rate)
 				continue;
@@ -100,7 +112,7 @@
 	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
 		rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
 	else
-		rate = parent_rate * n / m;
+		rate = ccu_nm_calc_rate(parent_rate, n, m);
 
 	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
 		rate /= nm->fixed_post_div;
@@ -142,7 +154,7 @@
 	_nm.max_m = nm->m.max ?: 1 << nm->m.width;
 
 	ccu_nm_find_best(*parent_rate, rate, &_nm);
-	rate = *parent_rate * _nm.n / _nm.m;
+	rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
 
 	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
 		rate /= nm->fixed_post_div;
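
The point of ccu_nm_calc_rate() is overflow avoidance: on 32-bit targets, parent * n is computed in unsigned long and can wrap before the division. A userspace stand-in for the helper (do_div() becomes a plain 64-bit division here):

#include <stdint.h>
#include <stdio.h>

static unsigned long ccu_nm_calc_rate(unsigned long parent,
				      unsigned long n, unsigned long m)
{
	uint64_t rate = parent;

	rate *= n;		/* full 64-bit product */
	rate /= m;		/* do_div(rate, m) in the kernel */
	return (unsigned long)rate;
}

int main(void)
{
	/* 24 MHz * 200 = 4.8e9: would wrap a 32-bit unsigned long */
	printf("%lu\n", ccu_nm_calc_rate(24000000UL, 200, 2));
	return 0;
}
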
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 269d359..edc31bb 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -133,9 +133,11 @@
 	struct tegra_dfll_soc_data *soc;
 
 	soc = tegra_dfll_unregister(pdev);
-	if (IS_ERR(soc))
+	if (IS_ERR(soc)) {
 		dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
 			PTR_ERR(soc));
+		return PTR_ERR(soc);
+	}
 
 	tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
 
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index ccfb4d9..079f0be 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -367,8 +367,10 @@
 	num_dividers = i;
 
 	tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
-	if (!tmp)
+	if (!tmp) {
+		*table = ERR_PTR(-ENOMEM);
 		return -ENOMEM;
+	}
 
 	valid_div = 0;
 	*width = 0;
@@ -403,6 +405,7 @@
 {
 	struct clk_omap_divider *div;
 	struct clk_omap_reg *reg;
+	int ret;
 
 	if (!setup)
 		return NULL;
@@ -422,6 +425,12 @@
 		div->flags |= CLK_DIVIDER_POWER_OF_TWO;
 
 	div->table = _get_div_table_from_setup(setup, &div->width);
+	if (IS_ERR(div->table)) {
+		ret = PTR_ERR(div->table);
+		kfree(div);
+		return ERR_PTR(ret);
+	}
+
 
 	div->shift = setup->bit_shift;
 	div->latch = -EINVAL;
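
The divider fix distinguishes two outcomes that both used to look like NULL: a legitimately absent table and a failed allocation. After the change, _get_div_table_from_setup() reports the failure through *table as an ERR_PTR and the caller unwinds instead of registering a half-built divider. A userspace sketch of the NULL-vs-ERR_PTR convention, with stand-in macros mirroring the kernel's encoding of small negative errnos at the top of the address range:

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define ERR_PTR(e)	((void *)(long)(e))
#define PTR_ERR(p)	((long)(p))
#define ENOMEM		12

static int consume_table(void *table)
{
	if (IS_ERR(table))
		return (int)PTR_ERR(table);	/* propagate -ENOMEM */
	if (!table)
		return 0;			/* no table is legal */
	return 1;				/* would use the table */
}

int main(void)
{
	printf("%d %d\n", consume_table(0), consume_table(ERR_PTR(-ENOMEM)));
	return 0;
}
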
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a11f4ba..316d48d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -290,6 +290,7 @@
 
 config ARC_TIMERS
 	bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
+	depends on GENERIC_SCHED_CLOCK
 	select TIMER_OF
 	help
 	  These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index 20da9b1..b28970c 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -23,6 +23,7 @@
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/sched_clock.h>
 
 #include <soc/arc/timers.h>
 #include <soc/arc/mcip.h>
@@ -88,6 +89,11 @@
 	return (((u64)h) << 32) | l;
 }
 
+static notrace u64 arc_gfrc_clock_read(void)
+{
+	return arc_read_gfrc(NULL);
+}
+
 static struct clocksource arc_counter_gfrc = {
 	.name   = "ARConnect GFRC",
 	.rating = 400,
@@ -111,6 +117,8 @@
 	if (ret)
 		return ret;
 
+	sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
+
 	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
 }
 TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
@@ -139,6 +147,11 @@
 	return (((u64)h) << 32) | l;
 }
 
+static notrace u64 arc_rtc_clock_read(void)
+{
+	return arc_read_rtc(NULL);
+}
+
 static struct clocksource arc_counter_rtc = {
 	.name   = "ARCv2 RTC",
 	.rating = 350,
@@ -170,6 +183,8 @@
 
 	write_aux_reg(AUX_RTC_CTRL, 1);
 
+	sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);
+
 	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
 }
 TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
@@ -185,6 +200,11 @@
 	return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
 }
 
+static notrace u64 arc_timer1_clock_read(void)
+{
+	return arc_read_timer1(NULL);
+}
+
 static struct clocksource arc_counter_timer1 = {
 	.name   = "ARC Timer1",
 	.rating = 300,
@@ -209,6 +229,8 @@
 	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
 	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
 
+	sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
+
 	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
 }
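
The wrappers exist purely to adapt signatures: a clocksource read callback takes a struct clocksource *, while sched_clock_register() wants a bare u64 (*)(void) (notrace, since sched_clock feeds the tracer's own timestamps). A compilable sketch of the adapter shape, with stand-in types:

#include <stddef.h>
#include <stdio.h>

typedef unsigned long long u64;
struct clocksource;

static u64 arc_read_gfrc(struct clocksource *cs)
{
	(void)cs;
	return 42;	/* stands in for the hi/lo GFRC register read */
}

static u64 arc_gfrc_clock_read(void)	/* u64 (*)(void), as required */
{
	return arc_read_gfrc(NULL);
}

int main(void)
{
	printf("%llu\n", arc_gfrc_clock_read());
	return 0;
}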
 
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 62d2469..9701107 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -181,8 +181,7 @@
 	int irq;
 	struct clk *clk;
 	unsigned long rate;
-	struct device_node *pri_node;
-	struct device_node *sec_node;
+	struct device_node *alias_node;
 
 	base = of_io_request_and_map(node, 0, "integrator-timer");
 	if (IS_ERR(base))
@@ -204,7 +203,18 @@
 		return err;
 	}
 
-	pri_node = of_find_node_by_path(path);
+	alias_node = of_find_node_by_path(path);
+
+	/*
+	 * The pointer is used as an identifier not as a pointer, we
+	 * The pointer is used only as an identifier, not dereferenced,
+	 * so we can drop the refcount on the of_node immediately after
+	 * getting it.
+	of_node_put(alias_node);
+
+	if (node == alias_node)
+		/* The primary timer lacks IRQ, use as clocksource */
+		return integrator_clocksource_init(rate, base);
 
 	err = of_property_read_string(of_aliases,
 				"arm,timer-secondary", &path);
@@ -213,14 +223,11 @@
 		return err;
 	}
 
+	alias_node = of_find_node_by_path(path);
 
-	sec_node = of_find_node_by_path(path);
+	of_node_put(alias_node);
 
-	if (node == pri_node)
-		/* The primary timer lacks IRQ, use as clocksource */
-		return integrator_clocksource_init(rate, base);
-
-	if (node == sec_node) {
+	if (node == alias_node) {
 		/* The secondary timer will drive the clock event */
 		irq = irq_of_parse_and_map(node, 0);
 		return integrator_clockevent_init(rate, base, irq);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index ed5e424..ad48fd5 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -250,6 +250,7 @@
 {
 	struct cn_msg *msg;
 	struct proc_event *ev;
+	struct task_struct *parent;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
@@ -262,8 +263,14 @@
 	ev->what = PROC_EVENT_COREDUMP;
 	ev->event_data.coredump.process_pid = task->pid;
 	ev->event_data.coredump.process_tgid = task->tgid;
-	ev->event_data.coredump.parent_pid = task->real_parent->pid;
-	ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
+
+	rcu_read_lock();
+	if (pid_alive(task)) {
+		parent = rcu_dereference(task->real_parent);
+		ev->event_data.coredump.parent_pid = parent->pid;
+		ev->event_data.coredump.parent_tgid = parent->tgid;
+	}
+	rcu_read_unlock();
 
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
@@ -276,6 +283,7 @@
 {
 	struct cn_msg *msg;
 	struct proc_event *ev;
+	struct task_struct *parent;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
@@ -290,8 +298,14 @@
 	ev->event_data.exit.process_tgid = task->tgid;
 	ev->event_data.exit.exit_code = task->exit_code;
 	ev->event_data.exit.exit_signal = task->exit_signal;
-	ev->event_data.exit.parent_pid = task->real_parent->pid;
-	ev->event_data.exit.parent_tgid = task->real_parent->tgid;
+
+	rcu_read_lock();
+	if (pid_alive(task)) {
+		parent = rcu_dereference(task->real_parent);
+		ev->event_data.exit.parent_pid = parent->pid;
+		ev->event_data.exit.parent_tgid = parent->tgid;
+	}
+	rcu_read_unlock();
 
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
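
Both hunks wrap the real_parent access in the same guard: without rcu_read_lock() and a pid_alive() check, a racing exit can free the parent between the listener-count test and the dereference. The shape of the guard, as a compilable userspace sketch with stand-in RCU primitives:

#include <stdio.h>

struct task_struct { int pid, tgid; struct task_struct *real_parent; };

static void rcu_read_lock(void) { }	/* stand-ins for the kernel API */
static void rcu_read_unlock(void) { }
static int pid_alive(const struct task_struct *t)
{
	return t->real_parent != NULL;	/* crude stand-in check */
}
#define rcu_dereference(p) (p)

static void fill_parent_ids(const struct task_struct *task,
			    int *pid, int *tgid)
{
	rcu_read_lock();
	if (pid_alive(task)) {
		const struct task_struct *parent =
			rcu_dereference(task->real_parent);

		*pid = parent->pid;
		*tgid = parent->tgid;
	}
	rcu_read_unlock();	/* ids stay 0 if the task already died */
}

int main(void)
{
	struct task_struct parent = { 100, 100, NULL };
	struct task_struct child = { 200, 200, &parent };
	int pid = 0, tgid = 0;

	fill_parent_ids(&child, &pid, &tgid);
	printf("%d %d\n", pid, tgid);
	return 0;
}
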
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2d59e72..5279839 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,7 +26,6 @@
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/sched/cpufreq.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
@@ -359,7 +358,7 @@
 		}
 
 		cpufreq_stats_record_transition(policy, freqs->new);
-		cpufreq_times_record_transition(freqs);
+		cpufreq_times_record_transition(policy, freqs->new);
 		policy->cur = freqs->new;
 	}
 }
@@ -556,13 +555,13 @@
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
 static ssize_t show_boost(struct kobject *kobj,
-				 struct attribute *attr, char *buf)
+			  struct kobj_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
 }
 
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
-				  const char *buf, size_t count)
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t count)
 {
 	int ret, enable;
 
@@ -1543,17 +1542,16 @@
 {
 	unsigned int ret_freq = 0;
 
-	if (!cpufreq_driver->get)
+	if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
 		return ret_freq;
 
 	ret_freq = cpufreq_driver->get(policy->cpu);
 
 	/*
-	 * Updating inactive policies is invalid, so avoid doing that.  Also
-	 * if fast frequency switching is used with the given policy, the check
+	 * If fast frequency switching is used with the given policy, the check
 	 * against policy->cur is pointless, so skip it in that case too.
 	 */
-	if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+	if (policy->fast_switch_enabled)
 		return ret_freq;
 
 	if (ret_freq && policy->cur &&
@@ -1582,10 +1580,7 @@
 
 	if (policy) {
 		down_read(&policy->rwsem);
-
-		if (!policy_is_inactive(policy))
-			ret_freq = __cpufreq_get(policy);
-
+		ret_freq = __cpufreq_get(policy);
 		up_read(&policy->rwsem);
 
 		cpufreq_cpu_put(policy);
@@ -1874,9 +1869,15 @@
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq)
 {
+	int ret;
+
 	target_freq = clamp_val(target_freq, policy->min, policy->max);
 
-	return cpufreq_driver->fast_switch(policy, target_freq);
+	ret = cpufreq_driver->fast_switch(policy, target_freq);
+	if (ret)
+		cpufreq_times_record_transition(policy, ret);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
 
@@ -2283,7 +2284,6 @@
 		ret = cpufreq_start_governor(policy);
 		if (!ret) {
 			pr_debug("cpufreq: governor change\n");
-			sched_cpufreq_governor_change(policy, old_gov);
 			return 0;
 		}
 		cpufreq_exit_governor(policy);
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
index a43eeee..2883d67 100644
--- a/drivers/cpufreq/cpufreq_times.c
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -32,11 +32,17 @@
 static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
 static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
 
+struct concurrent_times {
+	atomic64_t active[NR_CPUS];
+	atomic64_t policy[NR_CPUS];
+};
+
 struct uid_entry {
 	uid_t uid;
 	unsigned int max_state;
 	struct hlist_node hash;
 	struct rcu_head rcu;
+	struct concurrent_times *concurrent_times;
 	u64 time_in_state[0];
 };
 
@@ -87,6 +93,7 @@
 static struct uid_entry *find_or_register_uid_locked(uid_t uid)
 {
 	struct uid_entry *uid_entry, *temp;
+	struct concurrent_times *times;
 	unsigned int max_state = READ_ONCE(next_offset);
 	size_t alloc_size = sizeof(*uid_entry) + max_state *
 		sizeof(uid_entry->time_in_state[0]);
@@ -115,9 +122,15 @@
 	uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
 	if (!uid_entry)
 		return NULL;
+	times = kzalloc(sizeof(*times), GFP_ATOMIC);
+	if (!times) {
+		kfree(uid_entry);
+		return NULL;
+	}
 
 	uid_entry->uid = uid;
 	uid_entry->max_state = max_state;
+	uid_entry->concurrent_times = times;
 
 	hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
 
@@ -180,10 +193,12 @@
 
 static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	(*pos)++;
+	do {
+		(*pos)++;
 
-	if (*pos >= HASH_SIZE(uid_hash_table))
-		return NULL;
+		if (*pos >= HASH_SIZE(uid_hash_table))
+			return NULL;
+	} while (hlist_empty(&uid_hash_table[*pos]));
 
 	return &uid_hash_table[*pos];
 }
@@ -207,7 +222,8 @@
 				if (freqs->freq_table[i] ==
 				    CPUFREQ_ENTRY_INVALID)
 					continue;
-				seq_printf(m, " %d", freqs->freq_table[i]);
+				seq_put_decimal_ull(m, " ",
+						    freqs->freq_table[i]);
 			}
 		}
 		seq_putc(m, '\n');
@@ -216,13 +232,16 @@
 	rcu_read_lock();
 
 	hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
-		if (uid_entry->max_state)
-			seq_printf(m, "%d:", uid_entry->uid);
+		if (uid_entry->max_state) {
+			seq_put_decimal_ull(m, "", uid_entry->uid);
+			seq_putc(m, ':');
+		}
 		for (i = 0; i < uid_entry->max_state; ++i) {
+			u64 time;
 			if (freq_index_invalid(i))
 				continue;
-			seq_printf(m, " %lu", (unsigned long)nsec_to_clock_t(
-					   uid_entry->time_in_state[i]));
+			time = nsec_to_clock_t(uid_entry->time_in_state[i]);
+			seq_put_decimal_ull(m, " ", time);
 		}
 		if (uid_entry->max_state)
 			seq_putc(m, '\n');
@@ -232,6 +251,86 @@
 	return 0;
 }
 
+static int concurrent_time_seq_show(struct seq_file *m, void *v,
+	atomic64_t *(*get_times)(struct concurrent_times *))
+{
+	struct uid_entry *uid_entry;
+	int i, num_possible_cpus = num_possible_cpus();
+
+	rcu_read_lock();
+
+	hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
+		atomic64_t *times = get_times(uid_entry->concurrent_times);
+
+		seq_put_decimal_ull(m, "", (u64)uid_entry->uid);
+		seq_putc(m, ':');
+
+		for (i = 0; i < num_possible_cpus; ++i) {
+			u64 time = nsec_to_clock_t(atomic64_read(&times[i]));
+
+			seq_put_decimal_ull(m, " ", time);
+		}
+		seq_putc(m, '\n');
+	}
+
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static inline atomic64_t *get_active_times(struct concurrent_times *times)
+{
+	return times->active;
+}
+
+static int concurrent_active_time_seq_show(struct seq_file *m, void *v)
+{
+	if (v == uid_hash_table) {
+		seq_put_decimal_ull(m, "cpus: ", num_possible_cpus());
+		seq_putc(m, '\n');
+	}
+
+	return concurrent_time_seq_show(m, v, get_active_times);
+}
+
+static inline atomic64_t *get_policy_times(struct concurrent_times *times)
+{
+	return times->policy;
+}
+
+static int concurrent_policy_time_seq_show(struct seq_file *m, void *v)
+{
+	int i;
+	struct cpu_freqs *freqs, *last_freqs = NULL;
+
+	if (v == uid_hash_table) {
+		int cnt = 0;
+
+		for_each_possible_cpu(i) {
+			freqs = all_freqs[i];
+			if (!freqs)
+				continue;
+			if (freqs != last_freqs) {
+				if (last_freqs) {
+					seq_put_decimal_ull(m, ": ", cnt);
+					seq_putc(m, ' ');
+					cnt = 0;
+				}
+				seq_put_decimal_ull(m, "policy", i);
+
+				last_freqs = freqs;
+			}
+			cnt++;
+		}
+		if (last_freqs) {
+			seq_put_decimal_ull(m, ": ", cnt);
+			seq_putc(m, '\n');
+		}
+	}
+
+	return concurrent_time_seq_show(m, v, get_policy_times);
+}
+
 void cpufreq_task_times_init(struct task_struct *p)
 {
 	unsigned long flags;
@@ -326,11 +425,16 @@
 {
 	unsigned long flags;
 	unsigned int state;
+	unsigned int active_cpu_cnt = 0;
+	unsigned int policy_cpu_cnt = 0;
+	unsigned int policy_first_cpu;
 	struct uid_entry *uid_entry;
 	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+	struct cpufreq_policy *policy;
 	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
+	int cpu = 0;
 
-	if (!freqs || p->flags & PF_EXITING)
+	if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
 		return;
 
 	state = freqs->offset + READ_ONCE(freqs->last_index);
@@ -346,6 +450,42 @@
 	if (uid_entry && state < uid_entry->max_state)
 		uid_entry->time_in_state[state] += cputime;
 	spin_unlock_irqrestore(&uid_lock, flags);
+
+	rcu_read_lock();
+	uid_entry = find_uid_entry_rcu(uid);
+	if (!uid_entry) {
+		rcu_read_unlock();
+		return;
+	}
+
+	for_each_possible_cpu(cpu)
+		if (!idle_cpu(cpu))
+			++active_cpu_cnt;
+
+	atomic64_add(cputime,
+		     &uid_entry->concurrent_times->active[active_cpu_cnt - 1]);
+
+	policy = cpufreq_cpu_get(task_cpu(p));
+	if (!policy) {
+		/*
+		 * This CPU may have just come up and not have a cpufreq policy
+		 * yet.
+		 */
+		rcu_read_unlock();
+		return;
+	}
+
+	for_each_cpu(cpu, policy->related_cpus)
+		if (!idle_cpu(cpu))
+			++policy_cpu_cnt;
+
+	policy_first_cpu = cpumask_first(policy->related_cpus);
+	cpufreq_cpu_put(policy);
+
+	atomic64_add(cputime,
+		     &uid_entry->concurrent_times->policy[policy_first_cpu +
+							  policy_cpu_cnt - 1]);
+	rcu_read_unlock();
 }
 
 void cpufreq_times_create_policy(struct cpufreq_policy *policy)
@@ -387,6 +527,14 @@
 		all_freqs[cpu] = freqs;
 }
 
+static void uid_entry_reclaim(struct rcu_head *rcu)
+{
+	struct uid_entry *uid_entry = container_of(rcu, struct uid_entry, rcu);
+
+	kfree(uid_entry->concurrent_times);
+	kfree(uid_entry);
+}
+
 void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
 {
 	struct uid_entry *uid_entry;
@@ -400,7 +548,7 @@
 			hash, uid_start) {
 			if (uid_start == uid_entry->uid) {
 				hash_del_rcu(&uid_entry->hash);
-				kfree_rcu(uid_entry, rcu);
+				call_rcu(&uid_entry->rcu, uid_entry_reclaim);
 			}
 		}
 	}
@@ -408,24 +556,17 @@
 	spin_unlock_irqrestore(&uid_lock, flags);
 }
 
-void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+	unsigned int new_freq)
 {
 	int index;
-	struct cpu_freqs *freqs = all_freqs[freq->cpu];
-	struct cpufreq_policy *policy;
-
+	struct cpu_freqs *freqs = all_freqs[policy->cpu];
 	if (!freqs)
 		return;
 
-	policy = cpufreq_cpu_get(freq->cpu);
-	if (!policy)
-		return;
-
-	index = cpufreq_frequency_table_get_index(policy, freq->new);
+	index = cpufreq_frequency_table_get_index(policy, new_freq);
 	if (index >= 0)
 		WRITE_ONCE(freqs->last_index, index);
-
-	cpufreq_cpu_put(policy);
 }
 
 static const struct seq_operations uid_time_in_state_seq_ops = {
@@ -453,11 +594,55 @@
 	.release	= seq_release,
 };
 
+static const struct seq_operations concurrent_active_time_seq_ops = {
+	.start = uid_seq_start,
+	.next = uid_seq_next,
+	.stop = uid_seq_stop,
+	.show = concurrent_active_time_seq_show,
+};
+
+static int concurrent_active_time_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &concurrent_active_time_seq_ops);
+}
+
+static const struct file_operations concurrent_active_time_fops = {
+	.open		= concurrent_active_time_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static const struct seq_operations concurrent_policy_time_seq_ops = {
+	.start = uid_seq_start,
+	.next = uid_seq_next,
+	.stop = uid_seq_stop,
+	.show = concurrent_policy_time_seq_show,
+};
+
+static int concurrent_policy_time_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &concurrent_policy_time_seq_ops);
+}
+
+static const struct file_operations concurrent_policy_time_fops = {
+	.open		= concurrent_policy_time_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 static int __init cpufreq_times_init(void)
 {
 	proc_create_data("uid_time_in_state", 0444, NULL,
 			 &uid_time_in_state_fops, NULL);
 
+	proc_create_data("uid_concurrent_active_time", 0444, NULL,
+			 &concurrent_active_time_fops, NULL);
+
+	proc_create_data("uid_concurrent_policy_time", 0444, NULL,
+			 &concurrent_policy_time_fops, NULL);
+
 	return 0;
 }
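
The accounting above spreads each cputime delta across two histogram rows: active[] is indexed by the number of non-idle CPUs minus one, while policy[] packs one sub-array per cpufreq policy starting at that policy's first CPU, indexed by the count of non-idle CPUs within the policy. A worked example of the bucket arithmetic (illustrative; assumes 8 CPUs split into policies {0-3} and {4-7}):

#include <stdio.h>

int main(void)
{
	int active_cpu_cnt = 5;		/* non-idle CPUs system-wide    */
	int policy_first_cpu = 4;	/* task runs on CPU 5           */
	int policy_cpu_cnt = 3;		/* non-idle CPUs in that policy */

	/* active[4]: five cores were running concurrently */
	printf("active bucket: %d\n", active_cpu_cnt - 1);

	/* policy[6]: the sub-array for CPUs 4-7 starts at index 4 */
	printf("policy bucket: %d\n", policy_first_cpu + policy_cpu_cnt - 1);
	return 0;
}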
 
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b6a1aad..a005711 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -833,7 +833,7 @@
 /************************** sysfs begin ************************/
 #define show_one(file_name, object)					\
 	static ssize_t show_##file_name					\
-	(struct kobject *kobj, struct attribute *attr, char *buf)	\
+	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
 	{								\
 		return sprintf(buf, "%u\n", global.object);		\
 	}
@@ -842,7 +842,7 @@
 static int intel_pstate_update_status(const char *buf, size_t size);
 
 static ssize_t show_status(struct kobject *kobj,
-			   struct attribute *attr, char *buf)
+			   struct kobj_attribute *attr, char *buf)
 {
 	ssize_t ret;
 
@@ -853,7 +853,7 @@
 	return ret;
 }
 
-static ssize_t store_status(struct kobject *a, struct attribute *b,
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
 			    const char *buf, size_t count)
 {
 	char *p = memchr(buf, '\n', count);
@@ -867,7 +867,7 @@
 }
 
 static ssize_t show_turbo_pct(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	struct cpudata *cpu;
 	int total, no_turbo, turbo_pct;
@@ -893,7 +893,7 @@
 }
 
 static ssize_t show_num_pstates(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	struct cpudata *cpu;
 	int total;
@@ -914,7 +914,7 @@
 }
 
 static ssize_t show_no_turbo(struct kobject *kobj,
-			     struct attribute *attr, char *buf)
+			     struct kobj_attribute *attr, char *buf)
 {
 	ssize_t ret;
 
@@ -936,7 +936,7 @@
 	return ret;
 }
 
-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 			      const char *buf, size_t count)
 {
 	unsigned int input;
@@ -983,7 +983,7 @@
 	return count;
 }
 
-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
 				  const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1013,7 +1013,7 @@
 	return count;
 }
 
-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
 				  const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1045,12 +1045,13 @@
 }
 
 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%u\n", hwp_boost);
 }
 
-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+				       struct kobj_attribute *b,
 				       const char *buf, size_t count)
 {
 	unsigned int input;
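
All of these conversions chase the same type mismatch: sysfs files created on a bare kobject are backed by struct kobj_attribute, and the handlers are invoked through that type, so declaring them with plain struct attribute only worked via an implicit cast and breaks indirect-call checking such as CFI. Trimmed to the two callbacks (the real struct also embeds a struct attribute), the expected shapes are:

#include <stddef.h>
#include <sys/types.h>

struct kobject;

struct kobj_attribute {
	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf);
	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count);
};
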
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 9df4413..5e81669 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -11,17 +11,21 @@
 #include <linux/of_platform.h>
 #include <linux/pm_opp.h>
 #include <linux/energy_model.h>
+#include <linux/sched.h>
+#include <linux/cpu_cooling.h>
 
 #define LUT_MAX_ENTRIES			40U
 #define CORE_COUNT_VAL(val)		(((val) & (GENMASK(18, 16))) >> 16)
 #define LUT_ROW_SIZE			4
 #define CLK_HW_DIV			2
+#define CYCLE_CNTR_OFFSET(c, m)		((c - cpumask_first(m) + 1) * 4)
 
 enum {
 	REG_ENABLE,
 	REG_FREQ_LUT_TABLE,
 	REG_VOLT_LUT_TABLE,
 	REG_PERF_STATE,
+	REG_CYCLE_CNTR,
 
 	REG_ARRAY_SIZE,
 };
@@ -35,15 +39,56 @@
 	unsigned long cpu_hw_rate;
 };
 
+struct cpufreq_counter {
+	u64 total_cycle_counter;
+	u32 prev_cycle_counter;
+	spinlock_t lock;
+};
+
 static const u16 cpufreq_qcom_std_offsets[REG_ARRAY_SIZE] = {
 	[REG_ENABLE]		= 0x0,
 	[REG_FREQ_LUT_TABLE]	= 0x100,
 	[REG_VOLT_LUT_TABLE]	= 0x200,
 	[REG_PERF_STATE]	= 0x320,
+	[REG_CYCLE_CNTR]	= 0x3c4,
 };
 
+
+static struct cpufreq_counter qcom_cpufreq_counter[NR_CPUS];
 static struct cpufreq_qcom *qcom_freq_domain_map[NR_CPUS];
 
+static u64 qcom_cpufreq_get_cpu_cycle_counter(int cpu)
+{
+	struct cpufreq_counter *cpu_counter;
+	struct cpufreq_qcom *cpu_domain;
+	u64 cycle_counter_ret;
+	unsigned long flags;
+	u16 offset;
+	u32 val;
+
+	cpu_domain = qcom_freq_domain_map[cpu];
+	cpu_counter = &qcom_cpufreq_counter[cpu];
+	spin_lock_irqsave(&cpu_counter->lock, flags);
+
+	offset = CYCLE_CNTR_OFFSET(cpu, &cpu_domain->related_cpus);
+	val = readl_relaxed(cpu_domain->reg_bases[REG_CYCLE_CNTR] + offset);
+
+	if (val < cpu_counter->prev_cycle_counter) {
+		/* Handle counter overflow */
+		cpu_counter->total_cycle_counter += UINT_MAX -
+			cpu_counter->prev_cycle_counter + val;
+		cpu_counter->prev_cycle_counter = val;
+	} else {
+		cpu_counter->total_cycle_counter += val -
+			cpu_counter->prev_cycle_counter;
+		cpu_counter->prev_cycle_counter = val;
+	}
+	cycle_counter_ret = cpu_counter->total_cycle_counter;
+	spin_unlock_irqrestore(&cpu_counter->lock, flags);
+
+	return cycle_counter_ret;
+}
+
 static int
 qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
 			     unsigned int index)
@@ -134,6 +179,35 @@
 	NULL
 };
 
+static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
+{
+	static struct thermal_cooling_device *cdev[NR_CPUS];
+	struct device_node *np;
+	unsigned int cpu = policy->cpu;
+
+	if (cdev[cpu])
+		return;
+
+	np = of_cpu_device_node_get(cpu);
+	if (WARN_ON(!np))
+		return;
+
+	/*
+	 * For now, just register the cooling device; the thermal DT
+	 * code takes care of matching them.
+	 */
+	if (of_find_property(np, "#cooling-cells", NULL)) {
+		cdev[cpu] = of_cpufreq_cooling_register(policy);
+		if (IS_ERR(cdev[cpu])) {
+			pr_err("running cpufreq for CPU%d without cooling dev: %ld\n",
+			       cpu, PTR_ERR(cdev[cpu]));
+			cdev[cpu] = NULL;
+		}
+	}
+
+	of_node_put(np);
+}
+
 static struct cpufreq_driver cpufreq_qcom_hw_driver = {
 	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
 			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
@@ -145,6 +219,7 @@
 	.name		= "qcom-cpufreq-hw",
 	.attr		= qcom_cpufreq_hw_attr,
 	.boost_enabled	= true,
+	.ready		= qcom_cpufreq_ready,
 };
 
 static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
@@ -342,6 +417,9 @@
 static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
 {
 	int rc;
+	struct cpu_cycle_counter_cb cycle_counter_cb = {
+		.get_cpu_cycle_counter = qcom_cpufreq_get_cpu_cycle_counter,
+	};
 
 	/* Get the bases of cpufreq for domains */
 	rc = qcom_resources_init(pdev);
@@ -356,7 +434,14 @@
 		return rc;
 	}
 
+	rc = register_cpu_cycle_counter_cb(&cycle_counter_cb);
+	if (rc) {
+		dev_err(&pdev->dev, "cycle counter cb failed to register\n");
+		return rc;
+	}
+
 	dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 
 	return 0;
 }
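
The qcom-cpufreq-hw hunk above folds a 32-bit hardware cycle counter into a monotonic 64-bit total, detecting wraparound when a fresh reading is smaller than the previous one. A minimal userspace model of that arithmetic, with hypothetical names, mirroring the driver's use of UINT_MAX in the overflow branch:

#include <stdint.h>

struct cycle_acc {
	uint64_t total;	/* monotonic 64-bit accumulation */
	uint32_t prev;	/* last raw 32-bit hardware reading */
};

static uint64_t cycle_acc_update(struct cycle_acc *acc, uint32_t hw)
{
	if (hw < acc->prev)	/* the 32-bit counter wrapped */
		acc->total += (uint64_t)UINT32_MAX - acc->prev + hw;
	else
		acc->total += hw - acc->prev;
	acc->prev = hw;
	return acc->total;
}

In the driver the update runs under a per-CPU spinlock, since the read-modify-write of prev/total must be atomic against concurrent readers.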
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 80a7f8d..80001120 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -53,9 +53,9 @@
 	int ret;
 	struct scmi_data *priv = policy->driver_data;
 	struct scmi_perf_ops *perf_ops = handle->perf_ops;
-	u64 freq = policy->freq_table[index].frequency * 1000;
+	u64 freq = policy->freq_table[index].frequency;
 
-	ret = perf_ops->freq_set(handle, priv->domain_id, freq, false);
+	ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
 	if (!ret)
 		arch_set_freq_scale(policy->related_cpus, freq,
 				    policy->cpuinfo.max_freq);
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index db2ede5..b44476a 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -167,6 +167,7 @@
 {
 	int ret;
 	struct device_node *root = of_find_node_by_path("/");
+	const struct of_device_id *match_id;
 
 	if (!root)
 		return -ENODEV;
@@ -174,7 +175,11 @@
 	/*
 	 * Initialize the driver just for a compliant set of machines
 	 */
-	if (!of_match_node(compatible_machine_match, root))
+	match_id = of_match_node(compatible_machine_match, root);
+
+	of_node_put(root);
+
+	if (!match_id)
 		return -ENODEV;
 
 	if (!mcpm_is_available())
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 9e56bc4..74c2479 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -247,7 +247,13 @@
 		return -ENODEV;
 
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		if (lppaca_shared_proc(get_lppaca())) {
+		/*
+		 * Use local_paca instead of get_lppaca() since
+		 * preemption is not disabled, and it is not required in
+		 * fact, since lppaca_ptr does not need to be the value
+		 * associated to the current CPU, it can be from any CPU.
+		 */
+		if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
 			cpuidle_state_table = shared_states;
 			max_idle_state = ARRAY_SIZE(shared_states);
 		} else {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f9374dd..470ff59 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -131,10 +131,6 @@
 	int		interval_ptr;
 };
 
-
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-
 static inline int get_loadavg(unsigned long load)
 {
 	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index a8c4ce0..668cd3e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -681,6 +681,7 @@
 	depends on ARCH_BCM_IPROC
 	depends on MAILBOX
 	default m
+	select CRYPTO_AUTHENC
 	select CRYPTO_DES
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
@@ -761,4 +762,8 @@
 
 source "drivers/crypto/hisilicon/Kconfig"
 
+if ARCH_QCOM
+source "drivers/crypto/msm/Kconfig"
+endif
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9cd4c0c..e2ca339 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -21,7 +21,7 @@
 obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
 n2_crypto-y := n2_core.o n2_asm.o
-obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_ICE) += msm/
 obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
 obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 2d1f1db..cd46463 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -2845,44 +2845,28 @@
 	struct spu_hw *spu = &iproc_priv.spu;
 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
-	const u8 *origkey = key;
-	const unsigned int origkeylen = keylen;
-
-	int ret = 0;
+	struct crypto_authenc_keys keys;
+	int ret;
 
 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
 		 keylen);
 	flow_dump("  key: ", key, keylen);
 
-	if (!RTA_OK(rta, keylen))
-		goto badkey;
-	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-		goto badkey;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+	ret = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (ret)
 		goto badkey;
 
-	param = RTA_DATA(rta);
-	ctx->enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
-
-	if (keylen < ctx->enckeylen)
-		goto badkey;
-	if (ctx->enckeylen > MAX_KEY_SIZE)
+	if (keys.enckeylen > MAX_KEY_SIZE ||
+	    keys.authkeylen > MAX_KEY_SIZE)
 		goto badkey;
 
-	ctx->authkeylen = keylen - ctx->enckeylen;
+	ctx->enckeylen = keys.enckeylen;
+	ctx->authkeylen = keys.authkeylen;
 
-	if (ctx->authkeylen > MAX_KEY_SIZE)
-		goto badkey;
-
-	memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
 	/* May end up padding auth key. So make sure it's zeroed. */
 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
-	memcpy(ctx->authkey, key, ctx->authkeylen);
+	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
 
 	switch (ctx->alg->cipher_info.alg) {
 	case CIPHER_ALG_DES:
@@ -2890,7 +2874,7 @@
 			u32 tmp[DES_EXPKEY_WORDS];
 			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
 
-			if (des_ekey(tmp, key) == 0) {
+			if (des_ekey(tmp, keys.enckey) == 0) {
 				if (crypto_aead_get_flags(cipher) &
 				    CRYPTO_TFM_REQ_WEAK_KEY) {
 					crypto_aead_set_flags(cipher, flags);
@@ -2905,7 +2889,7 @@
 		break;
 	case CIPHER_ALG_3DES:
 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
-			const u32 *K = (const u32 *)key;
+			const u32 *K = (const u32 *)keys.enckey;
 			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
 
 			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2940,7 @@
 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
 		ctx->fallback_cipher->base.crt_flags |=
 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
-		ret =
-		    crypto_aead_setkey(ctx->fallback_cipher, origkey,
-				       origkeylen);
+		ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
 		if (ret) {
 			flow_log("  fallback setkey() returned:%d\n", ret);
 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
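
The cipher.c hunk above replaces open-coded rtattr parsing of the authenc key blob with crypto_authenc_extractkeys(), which validates the encoded enckeylen against the actual blob size before splitting it into authentication and encryption halves. A plain-C model of the layout and the bounds check (simplified; the real blob carries the enc key length in a big-endian rtattr header, and names here are hypothetical):

#include <stddef.h>
#include <errno.h>

struct split_keys {
	const unsigned char *authkey, *enckey;
	size_t authkeylen, enckeylen;
};

/* blob = [auth key][enc key]; enckeylen comes from the blob's header. */
static int split_authenc_blob(struct split_keys *out,
			      const unsigned char *blob, size_t bloblen,
			      size_t enckeylen)
{
	if (enckeylen > bloblen)	/* reject before computing authkeylen */
		return -EINVAL;
	out->enckeylen = enckeylen;
	out->authkeylen = bloblen - enckeylen;
	out->authkey = blob;
	out->enckey = blob + out->authkeylen;
	return 0;
}

Skipping that bounds check is exactly what made the hand-rolled parsers unsafe: a crafted enckeylen larger than the blob yields an underflowed authkeylen and out-of-bounds key copies.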
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 43975ab..f84ca2f 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1131,13 +1131,16 @@
 
 	desc = edesc->hw_desc;
 
-	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, state->buf_dma)) {
-		dev_err(jrdev, "unable to map src\n");
-		goto unmap;
-	}
+	if (buflen) {
+		state->buf_dma = dma_map_single(jrdev, buf, buflen,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, state->buf_dma)) {
+			dev_err(jrdev, "unable to map src\n");
+			goto unmap;
+		}
 
-	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+	}
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
index 2ae6124..5d54ebc 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
@@ -73,7 +73,7 @@
 static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
 {
 	struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
-	void *fctx;
+	struct crypto_ctx_hdr *chdr;
 
 	/* get the first device */
 	nctx->ndev = nitrox_get_first_device();
@@ -81,12 +81,14 @@
 		return -ENODEV;
 
 	/* allocate nitrox crypto context */
-	fctx = crypto_alloc_context(nctx->ndev);
-	if (!fctx) {
+	chdr = crypto_alloc_context(nctx->ndev);
+	if (!chdr) {
 		nitrox_put_device(nctx->ndev);
 		return -ENOMEM;
 	}
-	nctx->u.ctx_handle = (uintptr_t)fctx;
+	nctx->chdr = chdr;
+	nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
+					 sizeof(struct ctx_hdr));
 	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
 				    sizeof(struct nitrox_kcrypt_request));
 	return 0;
@@ -102,7 +104,7 @@
 
 		memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
 		memset(&fctx->auth, 0, sizeof(struct auth_keys));
-		crypto_free_context((void *)fctx);
+		crypto_free_context((void *)nctx->chdr);
 	}
 	nitrox_put_device(nctx->ndev);
 
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4d31df0..28baf1a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -146,20 +146,31 @@
 void *crypto_alloc_context(struct nitrox_device *ndev)
 {
 	struct ctx_hdr *ctx;
+	struct crypto_ctx_hdr *chdr;
 	void *vaddr;
 	dma_addr_t dma;
 
-	vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
-	if (!vaddr)
+	chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
+	if (!chdr)
 		return NULL;
 
+	vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
+	if (!vaddr) {
+		kfree(chdr);
+		return NULL;
+	}
+
 	/* fill meta data */
 	ctx = vaddr;
 	ctx->pool = ndev->ctx_pool;
 	ctx->dma = dma;
 	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
 
-	return ((u8 *)vaddr + sizeof(struct ctx_hdr));
+	chdr->pool = ndev->ctx_pool;
+	chdr->dma = dma;
+	chdr->vaddr = vaddr;
+
+	return chdr;
 }
 
 /**
@@ -168,13 +179,14 @@
  */
 void crypto_free_context(void *ctx)
 {
-	struct ctx_hdr *ctxp;
+	struct crypto_ctx_hdr *ctxp;
 
 	if (!ctx)
 		return;
 
-	ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
-	dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
+	ctxp = ctx;
+	dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
+	kfree(ctxp);
 }
 
 /**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index d091b6f..19f0a20 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -181,12 +181,19 @@
 	struct auth_keys auth;
 };
 
+struct crypto_ctx_hdr {
+	struct dma_pool *pool;
+	dma_addr_t dma;
+	void *vaddr;
+};
+
 struct nitrox_crypto_ctx {
 	struct nitrox_device *ndev;
 	union {
 		u64 ctx_handle;
 		struct flexi_crypto_context *fctx;
 	} u;
+	struct crypto_ctx_hdr *chdr;
 };
 
 struct nitrox_kcrypt_request {
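
The nitrox change keeps a small host-allocated crypto_ctx_hdr alongside each DMA-pool allocation, recording the pool, DMA address, and virtual address at alloc time so the free path no longer back-computes the pool pointer from the context pointer. A minimal model of the same alloc/free pairing, using malloc in place of the DMA pool (hypothetical names):

#include <stdlib.h>

struct ctx_hdr_model {
	void *pool;	/* stands in for the dma_pool */
	void *vaddr;	/* base of the pool allocation */
};

static struct ctx_hdr_model *ctx_alloc(void *pool, size_t size)
{
	struct ctx_hdr_model *h = malloc(sizeof(*h));

	if (!h)
		return NULL;
	h->vaddr = malloc(size);	/* dma_pool_alloc() in the driver */
	if (!h->vaddr) {
		free(h);
		return NULL;
	}
	h->pool = pool;
	return h;
}

static void ctx_free(struct ctx_hdr_model *h)
{
	if (!h)
		return;
	free(h->vaddr);	/* dma_pool_free(h->pool, h->vaddr, ...) in the driver */
	free(h);
}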
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 01b82b8..5852d29 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -540,13 +540,12 @@
 			  unsigned int keylen)
 {
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct rtattr *rta = (struct rtattr *)key;
 	struct cc_crypto_req cc_req = {};
-	struct crypto_authenc_key_param *param;
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
-	int rc = -EINVAL;
 	unsigned int seq_len = 0;
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	const u8 *enckey, *authkey;
+	int rc;
 
 	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
 		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -554,35 +553,33 @@
 	/* STAT_PHASE_0: Init and sanity checks */
 
 	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
-		if (!RTA_OK(rta, keylen))
+		struct crypto_authenc_keys keys;
+
+		rc = crypto_authenc_extractkeys(&keys, key, keylen);
+		if (rc)
 			goto badkey;
-		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-			goto badkey;
-		if (RTA_PAYLOAD(rta) < sizeof(*param))
-			goto badkey;
-		param = RTA_DATA(rta);
-		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
-		key += RTA_ALIGN(rta->rta_len);
-		keylen -= RTA_ALIGN(rta->rta_len);
-		if (keylen < ctx->enc_keylen)
-			goto badkey;
-		ctx->auth_keylen = keylen - ctx->enc_keylen;
+		enckey = keys.enckey;
+		authkey = keys.authkey;
+		ctx->enc_keylen = keys.enckeylen;
+		ctx->auth_keylen = keys.authkeylen;
 
 		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 			/* the nonce is stored in bytes at end of key */
+			rc = -EINVAL;
 			if (ctx->enc_keylen <
 			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
 				goto badkey;
 			/* Copy nonce from last 4 bytes in CTR key to
 			 *  first 4 bytes in CTR IV
 			 */
-			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
-			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
-			       CTR_RFC3686_NONCE_SIZE);
+			memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+			       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
 			/* Set CTR key size */
 			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 		}
 	} else { /* non-authenc - has just one key */
+		enckey = key;
+		authkey = NULL;
 		ctx->enc_keylen = keylen;
 		ctx->auth_keylen = 0;
 	}
@@ -594,13 +591,14 @@
 	/* STAT_PHASE_1: Copy key to ctx */
 
 	/* Get key material */
-	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+	memcpy(ctx->enckey, enckey, ctx->enc_keylen);
 	if (ctx->enc_keylen == 24)
 		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
 	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
-		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+		memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+		       ctx->auth_keylen);
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
-		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
 		if (rc)
 			goto badkey;
 	}
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 461b97e..1ff8738 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -303,7 +303,10 @@
 
 static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
 {
-	int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+	int hdrlen;
+
+	hdrlen = sizeof(struct fw_ulptx_wr) +
+		 sizeof(struct chcr_ipsec_req) + kctx_len;
 
 	hdrlen += sizeof(struct cpl_tx_pkt);
 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6988012..f4f3e9a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1361,23 +1361,18 @@
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
-	void *err;
 
 	if (cryptlen + authsize > max_len) {
 		dev_err(dev, "length exceeds h/w max limit\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (ivsize)
-		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-
 	if (!dst || dst == src) {
 		src_len = assoclen + cryptlen + authsize;
 		src_nents = sg_nents_for_len(src, src_len);
 		if (src_nents < 0) {
 			dev_err(dev, "Invalid number of src SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@
 		src_nents = sg_nents_for_len(src, src_len);
 		if (src_nents < 0) {
 			dev_err(dev, "Invalid number of src SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
 		dst_nents = sg_nents_for_len(dst, dst_len);
 		if (dst_nents < 0) {
 			dev_err(dev, "Invalid number of dst SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
@@ -1423,11 +1416,14 @@
 	/* if it's an ahash, add space for a second desc next to the first one */
 	if (is_sec1 && !dst)
 		alloc_len += sizeof(struct talitos_desc);
+	alloc_len += ivsize;
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
-	if (!edesc) {
-		err = ERR_PTR(-ENOMEM);
-		goto error_sg;
+	if (!edesc)
+		return ERR_PTR(-ENOMEM);
+	if (ivsize) {
+		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
+		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 	}
 	memset(&edesc->desc, 0, sizeof(edesc->desc));
 
@@ -1445,10 +1441,6 @@
 						     DMA_BIDIRECTIONAL);
 	}
 	return edesc;
-error_sg:
-	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
-	return err;
 }
 
 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
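
The talitos rework above appends ivsize bytes to the single edesc allocation and copies the caller's IV there before DMA-mapping it, so the IV's lifetime matches the descriptor's and the old error_sg unwind for a dangling iv_dma mapping can be dropped. A small model of carrying a variable-size tail inside one allocation (hypothetical names; malloc stands in for the GFP_DMA kmalloc):

#include <stdlib.h>
#include <string.h>

struct edesc_model {
	size_t body_len;
	/* ivsize bytes of IV live at the end of this same allocation */
};

static struct edesc_model *edesc_alloc(size_t body_len,
				       const void *iv, size_t ivsize)
{
	struct edesc_model *e = malloc(sizeof(*e) + body_len + ivsize);

	if (!e)
		return NULL;
	e->body_len = body_len;
	if (ivsize)	/* the driver DMA-maps this tail copy of the IV */
		memcpy((unsigned char *)(e + 1) + body_len, iv, ivsize);
	return e;	/* one free() tears down descriptor and IV together */
}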
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index d2663a4..a92a66b 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -556,7 +556,7 @@
 		desc = dmaengine_prep_slave_sg(channel,
 				ctx->device->dma.sg_src,
 				ctx->device->dma.sg_src_len,
-				direction, DMA_CTRL_ACK);
+				DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 		break;
 
 	case DMA_FROM_DEVICE:
@@ -580,7 +580,7 @@
 		desc = dmaengine_prep_slave_sg(channel,
 				ctx->device->dma.sg_dst,
 				ctx->device->dma.sg_dst_len,
-				direction,
+				DMA_DEV_TO_MEM,
 				DMA_CTRL_ACK |
 				DMA_PREP_INTERRUPT);
 
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 633321a..a0bb8a6 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -166,7 +166,7 @@
 		__func__);
 	desc = dmaengine_prep_slave_sg(channel,
 			ctx->device->dma.sg, ctx->device->dma.sg_len,
-			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+			DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 	if (!desc) {
 		dev_err(ctx->device->dev,
 			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 99e2aac..2c1f459 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -48,9 +48,8 @@
 	percpu_ref_exit(ref);
 }
 
-static void dax_pmem_percpu_kill(void *data)
+static void dax_pmem_percpu_kill(struct percpu_ref *ref)
 {
-	struct percpu_ref *ref = data;
 	struct dax_pmem *dax_pmem = to_dax_pmem(ref);
 
 	dev_dbg(dax_pmem->dev, "trace\n");
@@ -112,17 +111,10 @@
 	}
 
 	dax_pmem->pgmap.ref = &dax_pmem->ref;
+	dax_pmem->pgmap.kill = dax_pmem_percpu_kill;
 	addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
-	if (IS_ERR(addr)) {
-		devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
-		percpu_ref_exit(&dax_pmem->ref);
+	if (IS_ERR(addr))
 		return PTR_ERR(addr);
-	}
-
-	rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
-							&dax_pmem->ref);
-	if (rc)
-		return rc;
 
 	/* adjust the dax_region resource to the start of data */
 	memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index 8b74ecc..a0ac20b 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -580,15 +580,16 @@
 		WARN(1, "Invalid\n");
 		return 0;
 	case MON2:
-		count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
+		count = readl_relaxed(MON2_ZONE_MAX(m, zone));
 		break;
 	case MON3:
 		count = readl_relaxed(MON3_ZONE_MAX(m, zone));
-		if (count)
-			count++;
 		break;
 	}
 
+	if (count)
+		count++;
+
 	return count;
 }
 
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 9ccb000..2ebf83f 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -72,28 +72,28 @@
 	return ERR_PTR(-ENODEV);
 }
 
-static unsigned long find_available_min_freq(struct devfreq *devfreq)
+static long find_available_min_freq(struct devfreq *devfreq)
 {
 	struct dev_pm_opp *opp;
-	unsigned long min_freq = 0;
+	long min_freq = 0;
 
 	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
 	if (IS_ERR(opp))
-		min_freq = 0;
+		min_freq = PTR_ERR(opp);
 	else
 		dev_pm_opp_put(opp);
 
 	return min_freq;
 }
 
-static unsigned long find_available_max_freq(struct devfreq *devfreq)
+static long find_available_max_freq(struct devfreq *devfreq)
 {
 	struct dev_pm_opp *opp;
-	unsigned long max_freq = ULONG_MAX;
+	long max_freq = LONG_MAX;
 
 	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
 	if (IS_ERR(opp))
-		max_freq = 0;
+		max_freq = PTR_ERR(opp);
 	else
 		dev_pm_opp_put(opp);
 
@@ -494,20 +494,25 @@
 {
 	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
 	int ret;
+	long freq;
 
 	mutex_lock(&devfreq->lock);
 
-	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
-	if (!devfreq->scaling_min_freq) {
+	freq = find_available_min_freq(devfreq);
+	if (freq < 0) {
+		devfreq->scaling_min_freq = 0;
 		mutex_unlock(&devfreq->lock);
 		return -EINVAL;
 	}
+	devfreq->scaling_min_freq = freq;
 
-	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
-	if (!devfreq->scaling_max_freq) {
+	freq = find_available_max_freq(devfreq);
+	if (freq < 0) {
+		devfreq->scaling_max_freq = 0;
 		mutex_unlock(&devfreq->lock);
 		return -EINVAL;
 	}
+	devfreq->scaling_max_freq = freq;
 
 	ret = update_devfreq(devfreq);
 	mutex_unlock(&devfreq->lock);
@@ -562,6 +567,7 @@
 	struct devfreq *devfreq;
 	struct devfreq_governor *governor;
 	int err = 0;
+	long freq;
 
 	if (!dev || !profile || !governor_name) {
 		dev_err(dev, "%s: Invalid parameters.\n", __func__);
@@ -606,21 +612,21 @@
 		mutex_lock(&devfreq->lock);
 	}
 
-	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
-	if (!devfreq->scaling_min_freq) {
+	freq = find_available_min_freq(devfreq);
+	if (freq < 0) {
 		mutex_unlock(&devfreq->lock);
 		err = -EINVAL;
 		goto err_dev;
 	}
-	devfreq->min_freq = devfreq->scaling_min_freq;
+	devfreq->min_freq = devfreq->scaling_min_freq = freq;
 
-	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
-	if (!devfreq->scaling_max_freq) {
+	freq = find_available_max_freq(devfreq);
+	if (freq < 0) {
 		mutex_unlock(&devfreq->lock);
 		err = -EINVAL;
 		goto err_dev;
 	}
-	devfreq->max_freq = devfreq->scaling_max_freq;
+	devfreq->max_freq = devfreq->scaling_max_freq = freq;
 
 	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
 	err = device_register(&devfreq->dev);
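
The devfreq change switches the OPP lookups from returning 0 as an out-of-band failure marker to returning a negative errno in a signed long, which both distinguishes "no OPP found" from a legitimate minimum of 0 and propagates the real error code. The signed-return idiom it adopts, in a standalone sketch:

#include <errno.h>

/* Returns a frequency (>= 0) on success or a negative errno on failure. */
static long find_min_freq_model(int have_opp, unsigned long freq)
{
	if (!have_opp)
		return -ENODEV;	/* callers test `ret < 0`, not `ret == 0` */
	return (long)freq;
}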
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index ed3b785..0dece83 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -30,4 +30,15 @@
 	  WARNING: improper use of this can result in deadlocking kernel
 	  drivers from userspace. Intended for test and debug only.
 
+config DEBUG_DMA_BUF_REF
+	bool "DEBUG Reference Count"
+	depends on STACKDEPOT
+	depends on DMA_SHARED_BUFFER
+	default n
+	help
+	  Save stack traces for every call to dma_buf_get and dma_buf_put, to
+	  help debug memory leaks. Potential leaks may be found by manually
+	  matching the get/put call stacks.  This feature consumes extra memory
+	  in order to save the stack traces using STACKDEPOT.
+
 endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index c33bf88..dcbc33f 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,4 @@
 obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
+obj-$(CONFIG_DEBUG_DMA_BUF_REF)	+= dma-buf-ref.o
diff --git a/drivers/dma-buf/dma-buf-ref.c b/drivers/dma-buf/dma-buf-ref.c
new file mode 100644
index 0000000..6298574
--- /dev/null
+++ b/drivers/dma-buf/dma-buf-ref.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/stackdepot.h>
+#include <linux/stacktrace.h>
+#include <linux/seq_file.h>
+
+#define DMA_BUF_STACK_DEPTH (16)
+
+struct dma_buf_ref {
+	struct list_head list;
+	depot_stack_handle_t handle;
+	int count;
+};
+
+void dma_buf_ref_init(struct dma_buf *dmabuf)
+{
+	INIT_LIST_HEAD(&dmabuf->refs);
+}
+
+void dma_buf_ref_destroy(struct dma_buf *dmabuf)
+{
+	struct dma_buf_ref *r, *n;
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry_safe(r, n, &dmabuf->refs, list) {
+		list_del(&r->list);
+		kfree(r);
+	}
+	mutex_unlock(&dmabuf->lock);
+}
+
+static void dma_buf_ref_insert_handle(struct dma_buf *dmabuf,
+				      depot_stack_handle_t handle,
+				      int count)
+{
+	struct dma_buf_ref *r;
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry(r, &dmabuf->refs, list) {
+		if (r->handle == handle) {
+			r->count += count;
+			goto out;
+		}
+	}
+
+	r = kzalloc(sizeof(*r), GFP_KERNEL);
+	if (!r)
+		goto out;
+
+	INIT_LIST_HEAD(&r->list);
+	r->handle = handle;
+	r->count = count;
+	list_add(&r->list, &dmabuf->refs);
+
+out:
+	mutex_unlock(&dmabuf->lock);
+}
+
+void dma_buf_ref_mod(struct dma_buf *dmabuf, int nr)
+{
+	unsigned long entries[DMA_BUF_STACK_DEPTH];
+	struct stack_trace trace = {
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = DMA_BUF_STACK_DEPTH,
+		.skip = 1
+	};
+	depot_stack_handle_t handle;
+
+	save_stack_trace(&trace);
+	if (trace.nr_entries != 0 &&
+	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	handle = depot_save_stack(&trace, GFP_KERNEL);
+	if (!handle)
+		return;
+
+	dma_buf_ref_insert_handle(dmabuf, handle, nr);
+}
+
+/*
+ * Called with dmabuf->lock held.
+ */
+int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf)
+{
+	char *buf;
+	struct dma_buf_ref *ref;
+	int count = 0;
+	struct stack_trace trace;
+
+	buf = (void *)__get_free_page(GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	list_for_each_entry(ref, &dmabuf->refs, list) {
+		count += ref->count;
+
+		seq_printf(s, "References: %d\n", ref->count);
+		depot_fetch_stack(ref->handle, &trace);
+		snprint_stack_trace(buf, PAGE_SIZE, &trace, 0);
+		seq_puts(s, buf);
+		seq_putc(s, '\n');
+	}
+
+	seq_printf(s, "Total references: %d\n\n\n", count);
+	free_page((unsigned long)buf);
+
+	return 0;
+}
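
dma-buf-ref.c deduplicates reference events by their stack-depot handle: identical call stacks collapse to one list node whose count is adjusted by +1/-1, so a leak shows up as a stack whose net count stays positive. A userspace model of the dedup-and-count step, using an integer key in place of depot_stack_handle_t (hypothetical names):

#include <stdlib.h>

struct ref_rec {
	struct ref_rec *next;
	unsigned int handle;	/* stands in for depot_stack_handle_t */
	int count;
};

static void ref_mod(struct ref_rec **list, unsigned int handle, int nr)
{
	struct ref_rec *r;

	for (r = *list; r; r = r->next)
		if (r->handle == handle) {	/* same call stack seen before */
			r->count += nr;
			return;
		}
	r = calloc(1, sizeof(*r));
	if (!r)
		return;	/* tracking is best-effort, as in the driver */
	r->handle = handle;
	r->count = nr;
	r->next = *list;
	*list = r;
}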
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5b0c24f..db82aae 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -36,6 +36,9 @@
 #include <linux/mm.h>
 #include <linux/kernel.h>
 #include <linux/atomic.h>
+#include <linux/sched/signal.h>
+#include <linux/fdtable.h>
+#include <linux/list_sort.h>
 
 #include <uapi/linux/dma-buf.h>
 
@@ -48,6 +51,19 @@
 	struct mutex lock;
 };
 
+struct dma_info {
+	struct dma_buf *dmabuf;
+	struct list_head head;
+};
+
+struct dma_proc {
+	char name[TASK_COMM_LEN];
+	pid_t pid;
+	size_t size;
+	struct list_head dma_bufs;
+	struct list_head head;
+};
+
 static struct dma_buf_list db_list;
 
 static int dma_buf_release(struct inode *inode, struct file *file)
@@ -71,12 +87,14 @@
 	 */
 	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
 
-	dmabuf->ops->release(dmabuf);
-
 	mutex_lock(&db_list.lock);
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
+	dmabuf->ops->release(dmabuf);
+
+	dma_buf_ref_destroy(dmabuf);
+
 	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
 		reservation_object_fini(dmabuf->resv);
 
@@ -457,6 +475,7 @@
 	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 	dmabuf->name = bufname;
+	dmabuf->ktime = ktime_get();
 
 	if (!resv) {
 		resv = (struct reservation_object *)&dmabuf[1];
@@ -477,6 +496,9 @@
 	mutex_init(&dmabuf->lock);
 	INIT_LIST_HEAD(&dmabuf->attachments);
 
+	dma_buf_ref_init(dmabuf);
+	dma_buf_ref_mod(dmabuf, 1);
+
 	mutex_lock(&db_list.lock);
 	list_add(&dmabuf->list_node, &db_list.head);
 	mutex_unlock(&db_list.lock);
@@ -538,6 +560,7 @@
 		fput(file);
 		return ERR_PTR(-EINVAL);
 	}
+	dma_buf_ref_mod(file->private_data, 1);
 
 	return file->private_data;
 }
@@ -558,6 +581,7 @@
 	if (WARN_ON(!dmabuf || !dmabuf->file))
 		return;
 
+	dma_buf_ref_mod(dmabuf, -1);
 	fput(dmabuf->file);
 }
 EXPORT_SYMBOL_GPL(dma_buf_put);
@@ -1203,6 +1227,8 @@
 		seq_printf(s, "Total %d devices attached\n\n",
 				attach_count);
 
+		dma_buf_ref_show(s, buf_obj);
+
 		count++;
 		size += buf_obj->size;
 		mutex_unlock(&buf_obj->lock);
@@ -1226,6 +1252,157 @@
 	.release        = single_release,
 };
 
+static bool list_contains(struct list_head *list, struct dma_buf *info)
+{
+	struct dma_info *curr;
+
+	list_for_each_entry(curr, list, head)
+		if (curr->dmabuf == info)
+			return true;
+
+	return false;
+}
+
+static int get_dma_info(const void *data, struct file *file, unsigned int n)
+{
+	struct dma_proc *dma_proc;
+	struct dma_info *dma_info;
+
+	dma_proc = (struct dma_proc *)data;
+	if (!is_dma_buf_file(file))
+		return 0;
+
+	if (list_contains(&dma_proc->dma_bufs, file->private_data))
+		return 0;
+
+	dma_info = kzalloc(sizeof(*dma_info), GFP_ATOMIC);
+	if (!dma_info)
+		return -ENOMEM;
+
+	get_file(file);
+	dma_info->dmabuf = file->private_data;
+	dma_proc->size += dma_info->dmabuf->size / SZ_1K;
+	list_add(&dma_info->head, &dma_proc->dma_bufs);
+	return 0;
+}
+
+static void write_proc(struct seq_file *s, struct dma_proc *proc)
+{
+	struct dma_info *tmp;
+
+	seq_printf(s, "\n%s (PID %d) size: %zu\nDMA Buffers:\n",
+		proc->name, proc->pid, proc->size);
+	seq_printf(s, "%-8s\t%-8s\t%-8s\n",
+		"Name", "Size (KB)", "Time Alive (sec)");
+
+	list_for_each_entry(tmp, &proc->dma_bufs, head) {
+		struct dma_buf *dmabuf = tmp->dmabuf;
+		ktime_t elapmstime = ktime_ms_delta(ktime_get(), dmabuf->ktime);
+
+		elapmstime = ktime_divns(elapmstime, MSEC_PER_SEC);
+		seq_printf(s, "%-8s\t%-8zu\t%-8lld\n",
+				dmabuf->name,
+				dmabuf->size / SZ_1K,
+				elapmstime);
+	}
+}
+
+static void free_proc(struct dma_proc *proc)
+{
+	struct dma_info *tmp, *n;
+
+	list_for_each_entry_safe(tmp, n, &proc->dma_bufs, head) {
+		dma_buf_put(tmp->dmabuf);
+		list_del(&tmp->head);
+		kfree(tmp);
+	}
+	kfree(proc);
+}
+
+static int dmacmp(void *unused, struct list_head *a, struct list_head *b)
+{
+	struct dma_info *a_buf, *b_buf;
+
+	a_buf = list_entry(a, struct dma_info, head);
+	b_buf = list_entry(b, struct dma_info, head);
+	return b_buf->dmabuf->size - a_buf->dmabuf->size;
+}
+
+static int proccmp(void *unused, struct list_head *a, struct list_head *b)
+{
+	struct dma_proc *a_proc, *b_proc;
+
+	a_proc = list_entry(a, struct dma_proc, head);
+	b_proc = list_entry(b, struct dma_proc, head);
+	return b_proc->size - a_proc->size;
+}
+
+static int dma_procs_debug_show(struct seq_file *s, void *unused)
+{
+	struct task_struct *task, *thread;
+	struct files_struct *files;
+	int ret = 0;
+	struct dma_proc *tmp, *n;
+	LIST_HEAD(plist);
+
+	read_lock(&tasklist_lock);
+	for_each_process(task) {
+		struct files_struct *group_leader_files = NULL;
+
+		tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+		if (!tmp) {
+			ret = -ENOMEM;
+			read_unlock(&tasklist_lock);
+			goto mem_err;
+		}
+		INIT_LIST_HEAD(&tmp->dma_bufs);
+		for_each_thread(task, thread) {
+			task_lock(thread);
+			if (unlikely(!group_leader_files))
+				group_leader_files = task->group_leader->files;
+			files = thread->files;
+			if (files && (group_leader_files != files ||
+				      thread == task->group_leader))
+				ret = iterate_fd(files, 0, get_dma_info, tmp);
+			task_unlock(thread);
+		}
+		if (ret || list_empty(&tmp->dma_bufs))
+			goto skip;
+		list_sort(NULL, &tmp->dma_bufs, dmacmp);
+		get_task_comm(tmp->name, task);
+		tmp->pid = task->tgid;
+		list_add(&tmp->head, &plist);
+		continue;
+skip:
+		free_proc(tmp);
+	}
+	read_unlock(&tasklist_lock);
+
+	list_sort(NULL, &plist, proccmp);
+	list_for_each_entry(tmp, &plist, head)
+		write_proc(s, tmp);
+
+	ret = 0;
+mem_err:
+	list_for_each_entry_safe(tmp, n, &plist, head) {
+		list_del(&tmp->head);
+		free_proc(tmp);
+	}
+	return ret;
+}
+
+static int dma_procs_debug_open(struct inode *f_inode, struct file *file)
+{
+	return single_open(file, dma_procs_debug_show, NULL);
+}
+
+static const struct file_operations dma_procs_debug_fops = {
+	.open           = dma_procs_debug_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release
+};
+
 static struct dentry *dma_buf_debugfs_dir;
 
 static int dma_buf_init_debugfs(void)
@@ -1246,6 +1423,17 @@
 		debugfs_remove_recursive(dma_buf_debugfs_dir);
 		dma_buf_debugfs_dir = NULL;
 		err = PTR_ERR(d);
+		return err;
+	}
+
+	d = debugfs_create_file("dmaprocs", 0444, dma_buf_debugfs_dir,
+				NULL, &dma_procs_debug_fops);
+
+	if (IS_ERR(d)) {
+		pr_debug("dma_buf: debugfs: failed to create node dmaprocs\n");
+		debugfs_remove_recursive(dma_buf_debugfs_dir);
+		dma_buf_debugfs_dir = NULL;
+		err = PTR_ERR(d);
 	}
 
 	return err;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4bf7256..a75b95f 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -203,6 +203,7 @@
 	u32				save_cim;
 	u32				save_cnda;
 	u32				save_cndc;
+	u32				irq_status;
 	unsigned long			status;
 	struct tasklet_struct		tasklet;
 	struct dma_slave_config		sconfig;
@@ -1580,8 +1581,8 @@
 	struct at_xdmac_desc	*desc;
 	u32			error_mask;
 
-	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
-		 __func__, atchan->status);
+	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+		__func__, atchan->irq_status);
 
 	error_mask = AT_XDMAC_CIS_RBEIS
 		     | AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@
 
 	if (at_xdmac_chan_is_cyclic(atchan)) {
 		at_xdmac_handle_cyclic(atchan);
-	} else if ((atchan->status & AT_XDMAC_CIS_LIS)
-		   || (atchan->status & error_mask)) {
+	} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
+		   || (atchan->irq_status & error_mask)) {
 		struct dma_async_tx_descriptor  *txd;
 
-		if (atchan->status & AT_XDMAC_CIS_RBEIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
 			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
-		if (atchan->status & AT_XDMAC_CIS_WBEIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
 			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
-		if (atchan->status & AT_XDMAC_CIS_ROIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
 			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
 
 		spin_lock_bh(&atchan->lock);
@@ -1652,7 +1653,7 @@
 			atchan = &atxdmac->chan[i];
 			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
 			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
-			atchan->status = chan_status & chan_imr;
+			atchan->irq_status = chan_status & chan_imr;
 			dev_vdbg(atxdmac->dma.dev,
 				 "%s: chan%d: imr=0x%x, status=0x%x\n",
 				 __func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@
 				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
 				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
 
-			if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
 				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 
 			tasklet_schedule(&atchan->tasklet);
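
The at_xdmac fix splits the channel's long-lived status bookkeeping from irq_status, a snapshot of CIS masked by CIM taken in the interrupt handler, so the tasklet decodes exactly the events that raised the IRQ rather than a field that may have changed since. The snapshot-then-defer shape, in outline (hypothetical names):

#include <stdint.h>

struct chan_model {
	uint32_t status;	/* long-lived channel state bits */
	uint32_t irq_status;	/* events latched at interrupt time */
};

/* interrupt context: latch the masked event bits for deferred handling */
static void irq_top_half(struct chan_model *c, uint32_t cis, uint32_t cim)
{
	c->irq_status = cis & cim;
	/* tasklet_schedule(...) in the driver */
}

/* deferred context: decode only what the IRQ actually reported */
static int saw_transfer_end(const struct chan_model *c, uint32_t lis_bit)
{
	return (c->irq_status & lis_bit) != 0;
}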
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 847f84a..2b11d96 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -415,38 +415,32 @@
 	}
 }
 
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static int bcm2835_dma_abort(struct bcm2835_chan *c)
 {
-	unsigned long cs;
+	void __iomem *chan_base = c->chan_base;
 	long int timeout = 10000;
 
-	cs = readl(chan_base + BCM2835_DMA_CS);
-	if (!(cs & BCM2835_DMA_ACTIVE))
+	/*
+	 * A zero control block address means the channel is idle.
+	 * (The ACTIVE flag in the CS register is not a reliable indicator.)
+	 */
+	if (!readl(chan_base + BCM2835_DMA_ADDR))
 		return 0;
 
 	/* Write 0 to the active bit - Pause the DMA */
 	writel(0, chan_base + BCM2835_DMA_CS);
 
 	/* Wait for any current AXI transfer to complete */
-	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+	while ((readl(chan_base + BCM2835_DMA_CS) &
+		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
 		cpu_relax();
-		cs = readl(chan_base + BCM2835_DMA_CS);
-	}
 
-	/* We'll un-pause when we set of our next DMA */
+	/* Peripheral might be stuck and fail to signal AXI write responses */
 	if (!timeout)
-		return -ETIMEDOUT;
+		dev_err(c->vc.chan.device->dev,
+			"failed to complete outstanding writes\n");
 
-	if (!(cs & BCM2835_DMA_ACTIVE))
-		return 0;
-
-	/* Terminate the control block chain */
-	writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
-	/* Abort the whole DMA */
-	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
-	       chan_base + BCM2835_DMA_CS);
-
+	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
 	return 0;
 }
 
@@ -485,8 +479,15 @@
 
 	spin_lock_irqsave(&c->vc.lock, flags);
 
-	/* Acknowledge interrupt */
-	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+	/*
+	 * Clear the INT flag to receive further interrupts. Keep the channel
+	 * active in case the descriptor is cyclic or in case the client has
+	 * already terminated the descriptor and issued a new one. (May happen
+	 * if this IRQ handler is threaded.) If the channel is finished, it
+	 * will remain idle despite the ACTIVE flag being set.
+	 */
+	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+	       c->chan_base + BCM2835_DMA_CS);
 
 	d = c->desc;
 
@@ -494,11 +495,7 @@
 		if (d->cyclic) {
 			/* call the cyclic callback */
 			vchan_cyclic_callback(&d->vd);
-
-			/* Keep the DMA engine running */
-			writel(BCM2835_DMA_ACTIVE,
-			       c->chan_base + BCM2835_DMA_CS);
-		} else {
+		} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
 			vchan_cookie_complete(&c->desc->vd);
 			bcm2835_dma_start_desc(c);
 		}
@@ -796,7 +793,6 @@
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
 	unsigned long flags;
-	int timeout = 10000;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&c->vc.lock, flags);
@@ -806,27 +802,11 @@
 	list_del_init(&c->node);
 	spin_unlock(&d->lock);
 
-	/*
-	 * Stop DMA activity: we assume the callback will not be called
-	 * after bcm_dma_abort() returns (even if it does, it will see
-	 * c->desc is NULL and exit.)
-	 */
+	/* stop DMA activity */
 	if (c->desc) {
 		vchan_terminate_vdesc(&c->desc->vd);
 		c->desc = NULL;
-		bcm2835_dma_abort(c->chan_base);
-
-		/* Wait for stopping */
-		while (--timeout) {
-			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
-						BCM2835_DMA_ACTIVE))
-				break;
-
-			cpu_relax();
-		}
-
-		if (!timeout)
-			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+		bcm2835_dma_abort(c);
 	}
 
 	vchan_get_all_descriptors(&c->vc, &head);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index aa1712b..7b7fba0 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -642,11 +642,9 @@
 			srcs[i] = um->addr[i] + src_off;
 			ret = dma_mapping_error(dev->dev, um->addr[i]);
 			if (ret) {
-				dmaengine_unmap_put(um);
 				result("src mapping error", total_tests,
 				       src_off, dst_off, len, ret);
-				failed_tests++;
-				continue;
+				goto error_unmap_continue;
 			}
 			um->to_cnt++;
 		}
@@ -661,11 +659,9 @@
 					       DMA_BIDIRECTIONAL);
 			ret = dma_mapping_error(dev->dev, dsts[i]);
 			if (ret) {
-				dmaengine_unmap_put(um);
 				result("dst mapping error", total_tests,
 				       src_off, dst_off, len, ret);
-				failed_tests++;
-				continue;
+				goto error_unmap_continue;
 			}
 			um->bidi_cnt++;
 		}
@@ -693,12 +689,10 @@
 		}
 
 		if (!tx) {
-			dmaengine_unmap_put(um);
 			result("prep error", total_tests, src_off,
 			       dst_off, len, ret);
 			msleep(100);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		}
 
 		done->done = false;
@@ -707,12 +701,10 @@
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
-			dmaengine_unmap_put(um);
 			result("submit error", total_tests, src_off,
 			       dst_off, len, ret);
 			msleep(100);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		}
 		dma_async_issue_pending(chan);
 
@@ -725,16 +717,14 @@
 			dmaengine_unmap_put(um);
 			result("test timed out", total_tests, src_off, dst_off,
 			       len, 0);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		} else if (status != DMA_COMPLETE) {
 			dmaengine_unmap_put(um);
 			result(status == DMA_ERROR ?
 			       "completion error status" :
 			       "completion busy status", total_tests, src_off,
 			       dst_off, len, ret);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		}
 
 		dmaengine_unmap_put(um);
@@ -779,6 +769,12 @@
 			verbose_result("test passed", total_tests, src_off,
 				       dst_off, len, 0);
 		}
+
+		continue;
+
+error_unmap_continue:
+		dmaengine_unmap_put(um);
+		failed_tests++;
 	}
 	ktime = ktime_sub(ktime_get(), ktime);
 	ktime = ktime_sub(ktime, comparetime);
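
The dmatest rework collapses five copies of "dmaengine_unmap_put(); failed_tests++; continue;" into a single error_unmap_continue label at the bottom of the loop body, the usual kernel pattern for consolidating unwind code. Its shape, reduced to essentials:

/* Sketch of the consolidated-unwind loop used above; errors are simulated. */
static int run_tests(int n)
{
	int failed = 0, i;

	for (i = 0; i < n; i++) {
		void *um = (void *)&i;	/* per-iteration resource stand-in */

		if (i % 3 == 1)		/* stand-in for a mapping error */
			goto error_unmap_continue;
		if (i % 5 == 4)		/* stand-in for a submit error */
			goto error_unmap_continue;

		continue;		/* success path skips the unwind */

error_unmap_continue:
		(void)um;		/* dmaengine_unmap_put(um) in the driver */
		failed++;
	}
	return failed;
}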
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 75b6ff0..118d371 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -617,7 +617,7 @@
 {
 	struct imxdma_channel *imxdmac = (void *)data;
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
-	struct imxdma_desc *desc;
+	struct imxdma_desc *desc, *next_desc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&imxdma->lock, flags);
@@ -647,10 +647,10 @@
 	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
 
 	if (!list_empty(&imxdmac->ld_queue)) {
-		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
-					node);
+		next_desc = list_first_entry(&imxdmac->ld_queue,
+					     struct imxdma_desc, node);
 		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
-		if (imxdma_xfer_desc(desc) < 0)
+		if (imxdma_xfer_desc(next_desc) < 0)
 			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
 				 __func__, imxdmac->channel);
 	}
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index c74a88b..73de6a6 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -163,7 +163,7 @@
 	u32 ctrl;
 	u64 nxtdscraddr;
 	u64 rsvd;
-}; __aligned(64)
+};
 
 /**
  * struct zynqmp_dma_desc_sw - Per Transaction structure
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 472c88a..92f843ea 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -119,6 +119,11 @@
 }
 EXPORT_SYMBOL_GPL(scmi_driver_unregister);
 
+static void scmi_device_release(struct device *dev)
+{
+	kfree(to_scmi_dev(dev));
+}
+
 struct scmi_device *
 scmi_device_create(struct device_node *np, struct device *parent, int protocol)
 {
@@ -138,6 +143,7 @@
 	scmi_dev->dev.parent = parent;
 	scmi_dev->dev.of_node = np;
 	scmi_dev->dev.bus = &scmi_bus_type;
+	scmi_dev->dev.release = scmi_device_release;
 	dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
 
 	retval = device_register(&scmi_dev->dev);
@@ -156,9 +162,8 @@
 void scmi_device_destroy(struct scmi_device *scmi_dev)
 {
 	scmi_handle_put(scmi_dev->handle);
-	device_unregister(&scmi_dev->dev);
 	ida_simple_remove(&scmi_bus_id, scmi_dev->id);
-	kfree(scmi_dev);
+	device_unregister(&scmi_dev->dev);
 }
 
 void scmi_set_handle(struct scmi_device *scmi_dev)
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c516276..d984509 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -9,7 +9,10 @@
 cflags-$(CONFIG_X86_64)		:= -mcmodel=small
 cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ -O2 \
 				   -fPIC -fno-strict-aliasing -mno-red-zone \
-				   -mno-mmx -mno-sse -fshort-wchar
+				   -mno-mmx -mno-sse -fshort-wchar \
+				   -Wno-pointer-sign \
+				   $(call cc-disable-warning, address-of-packed-member) \
+				   $(call cc-disable-warning, gnu)
 
 # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
 # disable the stackleak plugin
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index aa66cbf..b0aeffd 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -173,6 +173,13 @@
 static DEFINE_SEMAPHORE(efi_runtime_lock);
 
 /*
+ * Expose the EFI runtime lock to the UV platform
+ */
+#ifdef CONFIG_X86_UV
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
+#endif
+
+/*
  * Calls the appropriate efi_runtime_service() with the appropriate
  * arguments.
  *
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 9336ffd..fceaafd 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -318,7 +318,12 @@
 static efi_status_t
 check_var_size(u32 attributes, unsigned long size)
 {
-	const struct efivar_operations *fops = __efivars->ops;
+	const struct efivar_operations *fops;
+
+	if (!__efivars)
+		return EFI_UNSUPPORTED;
+
+	fops = __efivars->ops;
 
 	if (!fops->query_variable_store)
 		return EFI_UNSUPPORTED;
@@ -329,7 +334,12 @@
 static efi_status_t
 check_var_size_nonblocking(u32 attributes, unsigned long size)
 {
-	const struct efivar_operations *fops = __efivars->ops;
+	const struct efivar_operations *fops;
+
+	if (!__efivars)
+		return EFI_UNSUPPORTED;
+
+	fops = __efivars->ops;
 
 	if (!fops->query_variable_store)
 		return EFI_UNSUPPORTED;
@@ -429,13 +439,18 @@
 int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
 		void *data, bool duplicates, struct list_head *head)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	unsigned long variable_name_size = 1024;
 	efi_char16_t *variable_name;
 	efi_status_t status;
 	efi_guid_t vendor_guid;
 	int err = 0;
 
+	if (!__efivars)
+		return -EFAULT;
+
+	ops = __efivars->ops;
+
 	variable_name = kzalloc(variable_name_size, GFP_KERNEL);
 	if (!variable_name) {
 		printk(KERN_ERR "efivars: Memory allocation failed.\n");
@@ -583,12 +598,14 @@
  */
 int __efivar_entry_delete(struct efivar_entry *entry)
 {
-	const struct efivar_operations *ops = __efivars->ops;
 	efi_status_t status;
 
-	status = ops->set_variable(entry->var.VariableName,
-				   &entry->var.VendorGuid,
-				   0, 0, NULL);
+	if (!__efivars)
+		return -EINVAL;
+
+	status = __efivars->ops->set_variable(entry->var.VariableName,
+					      &entry->var.VendorGuid,
+					      0, 0, NULL);
 
 	return efi_status_to_err(status);
 }
@@ -607,12 +624,17 @@
  */
 int efivar_entry_delete(struct efivar_entry *entry)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_status_t status;
 
 	if (down_interruptible(&efivars_lock))
 		return -EINTR;
 
+	if (!__efivars) {
+		up(&efivars_lock);
+		return -EINVAL;
+	}
+	ops = __efivars->ops;
 	status = ops->set_variable(entry->var.VariableName,
 				   &entry->var.VendorGuid,
 				   0, 0, NULL);
@@ -650,13 +672,19 @@
 int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
 		     unsigned long size, void *data, struct list_head *head)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_status_t status;
 	efi_char16_t *name = entry->var.VariableName;
 	efi_guid_t vendor = entry->var.VendorGuid;
 
 	if (down_interruptible(&efivars_lock))
 		return -EINTR;
+
+	if (!__efivars) {
+		up(&efivars_lock);
+		return -EINVAL;
+	}
+	ops = __efivars->ops;
 	if (head && efivar_entry_find(name, vendor, head, false)) {
 		up(&efivars_lock);
 		return -EEXIST;
@@ -687,12 +715,17 @@
 efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
 			     u32 attributes, unsigned long size, void *data)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_status_t status;
 
 	if (down_trylock(&efivars_lock))
 		return -EBUSY;
 
+	if (!__efivars) {
+		up(&efivars_lock);
+		return -EINVAL;
+	}
+
 	status = check_var_size_nonblocking(attributes,
 					    size + ucs2_strsize(name, 1024));
 	if (status != EFI_SUCCESS) {
@@ -700,6 +733,7 @@
 		return -ENOSPC;
 	}
 
+	ops = __efivars->ops;
 	status = ops->set_variable_nonblocking(name, &vendor, attributes,
 					       size, data);
 
@@ -727,9 +761,13 @@
 int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
 			  bool block, unsigned long size, void *data)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_status_t status;
 
+	if (!__efivars)
+		return -EINVAL;
+
+	ops = __efivars->ops;
 	if (!ops->query_variable_store)
 		return -ENOSYS;
 
@@ -829,13 +867,18 @@
  */
 int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_status_t status;
 
 	*size = 0;
 
 	if (down_interruptible(&efivars_lock))
 		return -EINTR;
+	if (!__efivars) {
+		up(&efivars_lock);
+		return -EINVAL;
+	}
+	ops = __efivars->ops;
 	status = ops->get_variable(entry->var.VariableName,
 				   &entry->var.VendorGuid, NULL, size, NULL);
 	up(&efivars_lock);
@@ -861,12 +904,14 @@
 int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
 		       unsigned long *size, void *data)
 {
-	const struct efivar_operations *ops = __efivars->ops;
 	efi_status_t status;
 
-	status = ops->get_variable(entry->var.VariableName,
-				   &entry->var.VendorGuid,
-				   attributes, size, data);
+	if (!__efivars)
+		return -EINVAL;
+
+	status = __efivars->ops->get_variable(entry->var.VariableName,
+					      &entry->var.VendorGuid,
+					      attributes, size, data);
 
 	return efi_status_to_err(status);
 }
@@ -882,14 +927,19 @@
 int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
 		     unsigned long *size, void *data)
 {
-	const struct efivar_operations *ops = __efivars->ops;
 	efi_status_t status;
 
 	if (down_interruptible(&efivars_lock))
 		return -EINTR;
-	status = ops->get_variable(entry->var.VariableName,
-				   &entry->var.VendorGuid,
-				   attributes, size, data);
+
+	if (!__efivars) {
+		up(&efivars_lock);
+		return -EINVAL;
+	}
+
+	status = __efivars->ops->get_variable(entry->var.VariableName,
+					      &entry->var.VendorGuid,
+					      attributes, size, data);
 	up(&efivars_lock);
 
 	return efi_status_to_err(status);
@@ -921,7 +971,7 @@
 int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
 			      unsigned long *size, void *data, bool *set)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_char16_t *name = entry->var.VariableName;
 	efi_guid_t *vendor = &entry->var.VendorGuid;
 	efi_status_t status;
@@ -940,6 +990,11 @@
 	if (down_interruptible(&efivars_lock))
 		return -EINTR;
 
+	if (!__efivars) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	/*
 	 * Ensure that the available space hasn't shrunk below the safe level
 	 */
@@ -956,6 +1011,8 @@
 		}
 	}
 
+	ops = __efivars->ops;
+
 	status = ops->set_variable(name, vendor, attributes, *size, data);
 	if (status != EFI_SUCCESS) {
 		err = efi_status_to_err(status);
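
Each efivars entry point above now revalidates the __efivars global after taking (or while holding) the lock, since unregistration can clear it between a caller's earlier check and its use. The guard pattern, abstracted into a standalone sketch (lock()/unlock() stand in for down_interruptible()/up() on efivars_lock):

#include <errno.h>

struct ops_model { int (*do_op)(void); };
static struct ops_model *g_ops;	/* may be cleared by unregistration */

static void lock(void) { }	/* placeholder for the real semaphore */
static void unlock(void) { }

static int guarded_op(void)
{
	int ret;

	lock();
	if (!g_ops) {		/* re-check under the lock, not before it */
		unlock();
		return -EINVAL;
	}
	ret = g_ops->do_op();
	unlock();
	return ret;
}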
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 6bc8e66..c51462f 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -542,6 +542,7 @@
 	case ISCSI_BOOT_TGT_NIC_ASSOC:
 	case ISCSI_BOOT_TGT_CHAP_TYPE:
 		rc = S_IRUGO;
+		break;
 	case ISCSI_BOOT_TGT_NAME:
 		if (tgt->tgt_name_len)
 			rc = S_IRUGO;
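
The iscsi_ibft one-liner adds the break that was missing after the grouped cases, which otherwise fell through into the ISCSI_BOOT_TGT_NAME test and could clobber rc. The bug class in miniature:

/* Without the break, case 1 falls through and case 2 can overwrite rc. */
static int mode_for(int field, int has_name)
{
	int rc = 0;

	switch (field) {
	case 1:			/* always-readable fields */
		rc = 0444;
		break;		/* the fix: stop here instead of falling through */
	case 2:			/* name field: readable only when present */
		if (has_name)
			rc = 0444;
		break;
	}
	return rc;
}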
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
index 0f3fc4d..b7fa43d 100644
--- a/drivers/firmware/qcom/tz_log.c
+++ b/drivers/firmware/qcom/tz_log.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/debugfs.h>
 #include <linux/errno.h>
@@ -877,7 +877,7 @@
 
 	if (desc.ret[0] != QSEOS_RESULT_SUCCESS) {
 		pr_err(
-		"%s: scm_call to register log buf failed, resp result =%d\n",
+		"%s: scm_call to register log buf failed, resp result =%lld\n",
 		__func__, desc.ret[0]);
 		goto err;
 	}
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 7fa7936..7a42c19 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -403,6 +403,7 @@
 	struct altera_cvp_conf *conf;
 	struct fpga_manager *mgr;
 	u16 cmd, val;
+	u32 regval;
 	int ret;
 
 	/*
@@ -416,6 +417,14 @@
 		return -ENODEV;
 	}
 
+	pci_read_config_dword(pdev, VSE_CVP_STATUS, &regval);
+	if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
+		dev_err(&pdev->dev,
+			"CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
+			regval);
+		return -ENODEV;
+	}
+
 	conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
 	if (!conf)
 		return -ENOMEM;
@@ -468,18 +477,11 @@
 		goto err_unmap;
 	}
 
-	ret = driver_create_file(&altera_cvp_driver.driver,
-				 &driver_attr_chkcfg);
-	if (ret) {
-		dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
-		fpga_mgr_unregister(mgr);
-		goto err_unmap;
-	}
-
 	return 0;
 
 err_unmap:
-	pci_iounmap(pdev, conf->map);
+	if (conf->map)
+		pci_iounmap(pdev, conf->map);
 	pci_release_region(pdev, CVP_BAR);
 err_disable:
 	cmd &= ~PCI_COMMAND_MEMORY;
@@ -493,16 +495,39 @@
 	struct altera_cvp_conf *conf = mgr->priv;
 	u16 cmd;
 
-	driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
 	fpga_mgr_unregister(mgr);
-	pci_iounmap(pdev, conf->map);
+	if (conf->map)
+		pci_iounmap(pdev, conf->map);
 	pci_release_region(pdev, CVP_BAR);
 	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 	cmd &= ~PCI_COMMAND_MEMORY;
 	pci_write_config_word(pdev, PCI_COMMAND, cmd);
 }
 
-module_pci_driver(altera_cvp_driver);
+static int __init altera_cvp_init(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&altera_cvp_driver);
+	if (ret)
+		return ret;
+
+	ret = driver_create_file(&altera_cvp_driver.driver,
+				 &driver_attr_chkcfg);
+	if (ret)
+		pr_warn("Can't create sysfs chkcfg file\n");
+
+	return 0;
+}
+
+static void __exit altera_cvp_exit(void)
+{
+	driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
+	pci_unregister_driver(&altera_cvp_driver);
+}
+
+module_init(altera_cvp_init);
+module_exit(altera_cvp_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 2c22836..4596fde1 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -310,30 +310,26 @@
 			ret = -ENODEV;
 			goto err_put_device;
 		}
+
+		ret = regulator_enable(data->vcc);
+		if (ret)
+			goto err_put_device;
+
+		/* Wait for chip to boot into hibernate mode. */
+		msleep(SIRF_BOOT_DELAY);
 	}
 
 	if (data->wakeup) {
 		ret = gpiod_to_irq(data->wakeup);
 		if (ret < 0)
-			goto err_put_device;
-
+			goto err_disable_vcc;
 		data->irq = ret;
 
-		ret = devm_request_threaded_irq(dev, data->irq, NULL,
-				sirf_wakeup_handler,
+		ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
 				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
 				"wakeup", data);
 		if (ret)
-			goto err_put_device;
-	}
-
-	if (data->on_off) {
-		ret = regulator_enable(data->vcc);
-		if (ret)
-			goto err_put_device;
-
-		/* Wait for chip to boot into hibernate mode */
-		msleep(SIRF_BOOT_DELAY);
+			goto err_disable_vcc;
 	}
 
 	if (IS_ENABLED(CONFIG_PM)) {
@@ -342,7 +338,7 @@
 	} else {
 		ret = sirf_runtime_resume(dev);
 		if (ret < 0)
-			goto err_disable_vcc;
+			goto err_free_irq;
 	}
 
 	ret = gnss_register_device(gdev);
@@ -356,6 +352,9 @@
 		pm_runtime_disable(dev);
 	else
 		sirf_runtime_suspend(dev);
+err_free_irq:
+	if (data->wakeup)
+		free_irq(data->irq, data);
 err_disable_vcc:
 	if (data->on_off)
 		regulator_disable(data->vcc);
@@ -376,6 +375,9 @@
 	else
 		sirf_runtime_suspend(&serdev->dev);
 
+	if (data->wakeup)
+		free_irq(data->irq, data);
+
 	if (data->on_off)
 		regulator_disable(data->vcc);
 
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 6b11f13..7f9e030 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -66,8 +66,10 @@
 static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
 					    unsigned int nr, int value)
 {
-	if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
+	if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
+		altr_a10sr_gpio_set(gc, nr, value);
 		return 0;
+	}
 	return -EINVAL;
 }
 
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index e0d6a0a..e41223c 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -180,7 +180,18 @@
 
 static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
 {
-	return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
+	struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
+
+	switch (sprd_eic->type) {
+	case SPRD_EIC_DEBOUNCE:
+		return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
+	case SPRD_EIC_ASYNC:
+		return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
+	case SPRD_EIC_SYNC:
+		return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
+	default:
+		return -ENOTSUPP;
+	}
 }
 
 static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -368,6 +379,7 @@
 			irq_set_handler_locked(data, handle_edge_irq);
 			break;
 		case IRQ_TYPE_EDGE_BOTH:
+			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
 			irq_set_handler_locked(data, handle_edge_irq);
 			break;
diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
index 05813fb..647dfbb 100644
--- a/drivers/gpio/gpio-max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -25,7 +25,7 @@
 	struct spi_device *spi = to_spi_device(dev);
 	u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
 
-	return spi_write(spi, (const u8 *)&word, sizeof(word));
+	return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
 }
 
 /* A read from the MAX7301 means two transfers; here, one message each */
@@ -37,14 +37,8 @@
 	struct spi_device *spi = to_spi_device(dev);
 
 	word = 0x8000 | (reg << 8);
-	ret = spi_write(spi, (const u8 *)&word, sizeof(word));
-	if (ret)
-		return ret;
-	/*
-	 * This relies on the fact, that a transfer with NULL tx_buf shifts out
-	 * zero bytes (=NOOP for MAX7301)
-	 */
-	ret = spi_read(spi, (u8 *)&word, sizeof(word));
+	ret = spi_write_then_read(spi, &word, sizeof(word), &word,
+				  sizeof(word));
 	if (ret)
 		return ret;
 	return word & 0xff;
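The max7301 conversion works because spi_write_then_read() copies through an internal DMA-safe bounce buffer, while spi_write()/spi_read() hand the caller's buffer (here a stack variable) straight to the controller, which may DMA. A hedged sketch of the same idiom for a hypothetical 16-bit register device:

	static int regdev_read16(struct spi_device *spi, u8 reg, u16 *out)
	{
		u16 cmd = 0x8000 | (reg << 8);	/* command word on the stack */

		/* bounce-buffered, so stack storage for cmd/out is fine here */
		return spi_write_then_read(spi, &cmd, sizeof(cmd),
					   out, sizeof(*out));
	}
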
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d72af6f..74401e0 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -30,6 +30,7 @@
 #define GPIO_REG_EDGE		0xA0
 
 struct mtk_gc {
+	struct irq_chip irq_chip;
 	struct gpio_chip chip;
 	spinlock_t lock;
 	int bank;
@@ -189,13 +190,6 @@
 	return 0;
 }
 
-static struct irq_chip mediatek_gpio_irq_chip = {
-	.irq_unmask		= mediatek_gpio_irq_unmask,
-	.irq_mask		= mediatek_gpio_irq_mask,
-	.irq_mask_ack		= mediatek_gpio_irq_mask,
-	.irq_set_type		= mediatek_gpio_irq_type,
-};
-
 static int
 mediatek_gpio_xlate(struct gpio_chip *chip,
 		    const struct of_phandle_args *spec, u32 *flags)
@@ -244,6 +238,8 @@
 	rg->chip.of_xlate = mediatek_gpio_xlate;
 	rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
 					dev_name(dev), bank);
+	if (!rg->chip.label)
+		return -ENOMEM;
 
 	ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
 	if (ret < 0) {
@@ -252,6 +248,13 @@
 		return ret;
 	}
 
+	rg->irq_chip.name = dev_name(dev);
+	rg->irq_chip.parent_device = dev;
+	rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
+	rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
+	rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
+	rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
+
 	if (mtk->gpio_irq) {
 		/*
 		 * Manually request the irq here instead of passing
@@ -268,14 +271,14 @@
 			return ret;
 		}
 
-		ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
+		ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
 					   0, handle_simple_irq, IRQ_TYPE_NONE);
 		if (ret) {
 			dev_err(dev, "failed to add gpiochip_irqchip\n");
 			return ret;
 		}
 
-		gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
+		gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
 					     mtk->gpio_irq, NULL);
 	}
 
@@ -295,6 +298,7 @@
 	struct device_node *np = dev->of_node;
 	struct mtk *mtk;
 	int i;
+	int ret;
 
 	mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
 	if (!mtk)
@@ -307,10 +311,12 @@
 	mtk->gpio_irq = irq_of_parse_and_map(np, 0);
 	mtk->dev = dev;
 	platform_set_drvdata(pdev, mtk);
-	mediatek_gpio_irq_chip.name = dev_name(dev);
 
-	for (i = 0; i < MTK_BANK_CNT; i++)
-		mediatek_gpio_bank_probe(dev, np, i);
+	for (i = 0; i < MTK_BANK_CNT; i++) {
+		ret = mediatek_gpio_bank_probe(dev, np, i);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
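Moving the irq_chip into the per-instance structure (done here and, further down, for pcf857x and pl061) keeps one probed device from mutating state shared with another — previously the last probe won the static chip's `.name = dev_name(dev)` assignment. A minimal sketch of the pattern, with hypothetical names (mydrv, mydrv_irq_mask/unmask):

	struct mydrv {
		struct gpio_chip chip;
		struct irq_chip irq_chip;	/* one irq_chip per instance */
	};

	static int mydrv_setup_irq(struct device *dev, struct mydrv *priv)
	{
		priv->irq_chip.name = dev_name(dev);	/* unique per device */
		priv->irq_chip.irq_mask = mydrv_irq_mask;	/* hypothetical ops */
		priv->irq_chip.irq_unmask = mydrv_irq_unmask;

		return gpiochip_irqchip_add(&priv->chip, &priv->irq_chip, 0,
					    handle_simple_irq, IRQ_TYPE_NONE);
	}
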
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 6e02148..adc768f 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -773,9 +773,6 @@
 				     "marvell,armada-370-gpio"))
 		return 0;
 
-	if (IS_ERR(mvchip->clk))
-		return PTR_ERR(mvchip->clk);
-
 	/*
 	 * There are only two sets of PWM configuration registers for
 	 * all the GPIO lines on those SoCs which this driver reserves
@@ -786,6 +783,9 @@
 	if (!res)
 		return 0;
 
+	if (IS_ERR(mvchip->clk))
+		return PTR_ERR(mvchip->clk);
+
 	/*
 	 * Use set A for lines of GPIO chip with id 0, B for GPIO chip
 	 * with id 1. Don't allow further GPIO chips to be used for PWM.
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 995cf0b..2d1dfa1 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -17,6 +17,7 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/syscore_ops.h>
 #include <linux/gpio/driver.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -550,33 +551,38 @@
 	writel(port->gpio_saved_reg.dr, port->base + GPIO_DR);
 }
 
-static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
+static int mxc_gpio_syscore_suspend(void)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct mxc_gpio_port *port = platform_get_drvdata(pdev);
+	struct mxc_gpio_port *port;
 
-	mxc_gpio_save_regs(port);
-	clk_disable_unprepare(port->clk);
+	/* walk through all ports */
+	list_for_each_entry(port, &mxc_gpio_ports, node) {
+		mxc_gpio_save_regs(port);
+		clk_disable_unprepare(port->clk);
+	}
 
 	return 0;
 }
 
-static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
+static void mxc_gpio_syscore_resume(void)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct mxc_gpio_port *port = platform_get_drvdata(pdev);
+	struct mxc_gpio_port *port;
 	int ret;
 
-	ret = clk_prepare_enable(port->clk);
-	if (ret)
-		return ret;
-	mxc_gpio_restore_regs(port);
-
-	return 0;
+	/* walk through all ports */
+	list_for_each_entry(port, &mxc_gpio_ports, node) {
+		ret = clk_prepare_enable(port->clk);
+		if (ret) {
+			pr_err("mxc: failed to enable gpio clock %d\n", ret);
+			return;
+		}
+		mxc_gpio_restore_regs(port);
+	}
 }
 
-static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
+static struct syscore_ops mxc_gpio_syscore_ops = {
+	.suspend = mxc_gpio_syscore_suspend,
+	.resume = mxc_gpio_syscore_resume,
 };
 
 static struct platform_driver mxc_gpio_driver = {
@@ -584,7 +590,6 @@
 		.name	= "gpio-mxc",
 		.of_match_table = mxc_gpio_dt_ids,
 		.suppress_bind_attrs = true,
-		.pm = &mxc_gpio_dev_pm_ops,
 	},
 	.probe		= mxc_gpio_probe,
 	.id_table	= mxc_gpio_devtype,
@@ -592,6 +597,8 @@
 
 static int __init gpio_mxc_init(void)
 {
+	register_syscore_ops(&mxc_gpio_syscore_ops);
+
 	return platform_driver_register(&mxc_gpio_driver);
 }
 subsys_initcall(gpio_mxc_init);
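Unlike dev_pm_ops, syscore callbacks take no device argument and run exactly once, with interrupts disabled, after every device's suspend callbacks (and symmetrically before any device resume callbacks) — hence the driver now walks its own port list itself. A minimal sketch of the mechanism, hypothetical names:

	#include <linux/syscore_ops.h>

	static int mydrv_syscore_suspend(void)
	{
		/* last-stage suspend: all device callbacks already ran, IRQs off */
		return 0;
	}

	static void mydrv_syscore_resume(void)
	{
		/* first-stage resume: runs before any device resume callback */
	}

	static struct syscore_ops mydrv_syscore_ops = {
		.suspend = mydrv_syscore_suspend,
		.resume  = mydrv_syscore_resume,
	};

	static int __init mydrv_init(void)
	{
		register_syscore_ops(&mydrv_syscore_ops);
		return 0;
	}
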
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index adf72dd..68a35b6 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -84,6 +84,7 @@
  */
 struct pcf857x {
 	struct gpio_chip	chip;
+	struct irq_chip		irqchip;
 	struct i2c_client	*client;
 	struct mutex		lock;		/* protect 'out' */
 	unsigned		out;		/* software latch */
@@ -252,18 +253,6 @@
 	mutex_unlock(&gpio->lock);
 }
 
-static struct irq_chip pcf857x_irq_chip = {
-	.name		= "pcf857x",
-	.irq_enable	= pcf857x_irq_enable,
-	.irq_disable	= pcf857x_irq_disable,
-	.irq_ack	= noop,
-	.irq_mask	= noop,
-	.irq_unmask	= noop,
-	.irq_set_wake	= pcf857x_irq_set_wake,
-	.irq_bus_lock		= pcf857x_irq_bus_lock,
-	.irq_bus_sync_unlock	= pcf857x_irq_bus_sync_unlock,
-};
-
 /*-------------------------------------------------------------------------*/
 
 static int pcf857x_probe(struct i2c_client *client,
@@ -376,8 +365,17 @@
 
 	/* Enable irqchip if we have an interrupt */
 	if (client->irq) {
+		gpio->irqchip.name = "pcf857x",
+		gpio->irqchip.irq_enable = pcf857x_irq_enable,
+		gpio->irqchip.irq_disable = pcf857x_irq_disable,
+		gpio->irqchip.irq_ack = noop,
+		gpio->irqchip.irq_mask = noop,
+		gpio->irqchip.irq_unmask = noop,
+		gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
+		gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
+		gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
 		status = gpiochip_irqchip_add_nested(&gpio->chip,
-						     &pcf857x_irq_chip,
+						     &gpio->irqchip,
 						     0, handle_level_irq,
 						     IRQ_TYPE_NONE);
 		if (status) {
@@ -392,7 +390,7 @@
 		if (status)
 			goto fail;
 
-		gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
+		gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
 					    client->irq);
 		gpio->irq_parent = client->irq;
 	}
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 2afd9de..dc42571 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -54,6 +54,7 @@
 
 	void __iomem		*base;
 	struct gpio_chip	gc;
+	struct irq_chip		irq_chip;
 	int			parent_irq;
 
 #ifdef CONFIG_PM
@@ -281,15 +282,6 @@
 	return irq_set_irq_wake(pl061->parent_irq, state);
 }
 
-static struct irq_chip pl061_irqchip = {
-	.name		= "pl061",
-	.irq_ack	= pl061_irq_ack,
-	.irq_mask	= pl061_irq_mask,
-	.irq_unmask	= pl061_irq_unmask,
-	.irq_set_type	= pl061_irq_type,
-	.irq_set_wake	= pl061_irq_set_wake,
-};
-
 static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	struct device *dev = &adev->dev;
@@ -328,6 +320,13 @@
 	/*
 	 * irq_chip support
 	 */
+	pl061->irq_chip.name = dev_name(dev);
+	pl061->irq_chip.irq_ack	= pl061_irq_ack;
+	pl061->irq_chip.irq_mask = pl061_irq_mask;
+	pl061->irq_chip.irq_unmask = pl061_irq_unmask;
+	pl061->irq_chip.irq_set_type = pl061_irq_type;
+	pl061->irq_chip.irq_set_wake = pl061_irq_set_wake;
+
 	writeb(0, pl061->base + GPIOIE); /* disable irqs */
 	irq = adev->irq[0];
 	if (irq < 0) {
@@ -336,14 +335,14 @@
 	}
 	pl061->parent_irq = irq;
 
-	ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip,
+	ret = gpiochip_irqchip_add(&pl061->gc, &pl061->irq_chip,
 				   0, handle_bad_irq,
 				   IRQ_TYPE_NONE);
 	if (ret) {
 		dev_info(&adev->dev, "could not add irqchip\n");
 		return ret;
 	}
-	gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip,
+	gpiochip_set_chained_irqchip(&pl061->gc, &pl061->irq_chip,
 				     irq, pl061_irq_handler);
 
 	amba_set_drvdata(adev, pl061);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 9f3f166..eb27fa7 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -245,6 +245,7 @@
 {
 	switch (gpio_type) {
 	case PXA3XX_GPIO:
+	case MMP2_GPIO:
 		return false;
 
 	default:
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index d4ad6d0..7e09ce7 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -259,6 +259,7 @@
 	struct vf610_gpio_port *port;
 	struct resource *iores;
 	struct gpio_chip *gc;
+	int i;
 	int ret;
 
 	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -298,6 +299,10 @@
 	if (ret < 0)
 		return ret;
 
+	/* Mask all GPIO interrupts */
+	for (i = 0; i < gc->ngpio; i++)
+		vf610_gpio_writel(0, port->base + PORT_PCR(i));
+
 	/* Clear the interrupt status register for all GPIO's */
 	vf610_gpio_writel(~0, port->base + PORT_ISFR);
 
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 8b9d7e4..c5e009f 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -23,11 +23,28 @@
 
 #include "gpiolib.h"
 
+/**
+ * struct acpi_gpio_event - ACPI GPIO event handler data
+ *
+ * @node:	  list-entry of the events list of the struct acpi_gpio_chip
+ * @handle:	  handle of ACPI method to execute when the IRQ triggers
+ * @handler:	  irq_handler to pass to request_irq when requesting the IRQ
+ * @pin:	  GPIO pin number on the gpio_chip
+ * @irq:	  Linux IRQ number for the event, for request_ / free_irq
+ * @irqflags:     flags to pass to request_irq when requesting the IRQ
+ * @irq_is_wake:  If the ACPI flags indicate the IRQ is a wakeup source
+ * @irq_requested: True if request_irq has been done
+ * @desc:	  gpio_desc for the GPIO pin for this event
+ */
 struct acpi_gpio_event {
 	struct list_head node;
 	acpi_handle handle;
+	irq_handler_t handler;
 	unsigned int pin;
 	unsigned int irq;
+	unsigned long irqflags;
+	bool irq_is_wake;
+	bool irq_requested;
 	struct gpio_desc *desc;
 };
 
@@ -53,10 +70,10 @@
 
 /*
  * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
- * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
  * late_initcall_sync handler, so that other builtin drivers can register their
  * OpRegions before the event handlers can run.  This list contains gpiochips
- * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ * for which the acpi_gpiochip_request_irqs() call has been deferred.
  */
 static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
 static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
@@ -137,8 +154,42 @@
 }
 EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
 
-static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
-						   void *context)
+static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+				      struct acpi_gpio_event *event)
+{
+	int ret, value;
+
+	ret = request_threaded_irq(event->irq, NULL, event->handler,
+				   event->irqflags, "ACPI:Event", event);
+	if (ret) {
+		dev_err(acpi_gpio->chip->parent,
+			"Failed to setup interrupt handler for %d\n",
+			event->irq);
+		return;
+	}
+
+	if (event->irq_is_wake)
+		enable_irq_wake(event->irq);
+
+	event->irq_requested = true;
+
+	/* Make sure we trigger the initial state of edge-triggered IRQs */
+	value = gpiod_get_raw_value_cansleep(event->desc);
+	if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+	    ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+		event->handler(event->irq, event);
+}
+
+static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
+{
+	struct acpi_gpio_event *event;
+
+	list_for_each_entry(event, &acpi_gpio->events, node)
+		acpi_gpiochip_request_irq(acpi_gpio, event);
+}
+
+static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
+					     void *context)
 {
 	struct acpi_gpio_chip *acpi_gpio = context;
 	struct gpio_chip *chip = acpi_gpio->chip;
@@ -147,8 +198,7 @@
 	struct acpi_gpio_event *event;
 	irq_handler_t handler = NULL;
 	struct gpio_desc *desc;
-	unsigned long irqflags;
-	int ret, pin, irq, value;
+	int ret, pin, irq;
 
 	if (!acpi_gpio_get_irq_resource(ares, &agpio))
 		return AE_OK;
@@ -179,8 +229,6 @@
 
 	gpiod_direction_input(desc);
 
-	value = gpiod_get_value_cansleep(desc);
-
 	ret = gpiochip_lock_as_irq(chip, pin);
 	if (ret) {
 		dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -193,64 +241,42 @@
 		goto fail_unlock_irq;
 	}
 
-	irqflags = IRQF_ONESHOT;
-	if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
-		if (agpio->polarity == ACPI_ACTIVE_HIGH)
-			irqflags |= IRQF_TRIGGER_HIGH;
-		else
-			irqflags |= IRQF_TRIGGER_LOW;
-	} else {
-		switch (agpio->polarity) {
-		case ACPI_ACTIVE_HIGH:
-			irqflags |= IRQF_TRIGGER_RISING;
-			break;
-		case ACPI_ACTIVE_LOW:
-			irqflags |= IRQF_TRIGGER_FALLING;
-			break;
-		default:
-			irqflags |= IRQF_TRIGGER_RISING |
-				    IRQF_TRIGGER_FALLING;
-			break;
-		}
-	}
-
 	event = kzalloc(sizeof(*event), GFP_KERNEL);
 	if (!event)
 		goto fail_unlock_irq;
 
+	event->irqflags = IRQF_ONESHOT;
+	if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
+		if (agpio->polarity == ACPI_ACTIVE_HIGH)
+			event->irqflags |= IRQF_TRIGGER_HIGH;
+		else
+			event->irqflags |= IRQF_TRIGGER_LOW;
+	} else {
+		switch (agpio->polarity) {
+		case ACPI_ACTIVE_HIGH:
+			event->irqflags |= IRQF_TRIGGER_RISING;
+			break;
+		case ACPI_ACTIVE_LOW:
+			event->irqflags |= IRQF_TRIGGER_FALLING;
+			break;
+		default:
+			event->irqflags |= IRQF_TRIGGER_RISING |
+					   IRQF_TRIGGER_FALLING;
+			break;
+		}
+	}
+
 	event->handle = evt_handle;
+	event->handler = handler;
 	event->irq = irq;
+	event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
 	event->pin = pin;
 	event->desc = desc;
 
-	ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
-				   "ACPI:Event", event);
-	if (ret) {
-		dev_err(chip->parent,
-			"Failed to setup interrupt handler for %d\n",
-			event->irq);
-		goto fail_free_event;
-	}
-
-	if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
-		enable_irq_wake(irq);
-
 	list_add_tail(&event->node, &acpi_gpio->events);
 
-	/*
-	 * Make sure we trigger the initial state of the IRQ when using RISING
-	 * or FALLING.  Note we run the handlers on late_init, the AML code
-	 * may refer to OperationRegions from other (builtin) drivers which
-	 * may be probed after us.
-	 */
-	if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-	    ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
-		handler(event->irq, event);
-
 	return AE_OK;
 
-fail_free_event:
-	kfree(event);
 fail_unlock_irq:
 	gpiochip_unlock_as_irq(chip, pin);
 fail_free_desc:
@@ -287,6 +313,9 @@
 	if (ACPI_FAILURE(status))
 		return;
 
+	acpi_walk_resources(handle, "_AEI",
+			    acpi_gpiochip_alloc_event, acpi_gpio);
+
 	mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
 	defer = !acpi_gpio_deferred_req_irqs_done;
 	if (defer)
@@ -297,8 +326,7 @@
 	if (defer)
 		return;
 
-	acpi_walk_resources(handle, "_AEI",
-			    acpi_gpiochip_request_interrupt, acpi_gpio);
+	acpi_gpiochip_request_irqs(acpi_gpio);
 }
 EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
 
@@ -335,10 +363,13 @@
 	list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
 		struct gpio_desc *desc;
 
-		if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
-			disable_irq_wake(event->irq);
+		if (event->irq_requested) {
+			if (event->irq_is_wake)
+				disable_irq_wake(event->irq);
 
-		free_irq(event->irq, event);
+			free_irq(event->irq, event);
+		}
+
 		desc = event->desc;
 		if (WARN_ON(IS_ERR(desc)))
 			continue;
@@ -1204,23 +1235,16 @@
 	return con_id == NULL;
 }
 
-/* Run deferred acpi_gpiochip_request_interrupts() */
-static int acpi_gpio_handle_deferred_request_interrupts(void)
+/* Run deferred acpi_gpiochip_request_irqs() */
+static int acpi_gpio_handle_deferred_request_irqs(void)
 {
 	struct acpi_gpio_chip *acpi_gpio, *tmp;
 
 	mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
 	list_for_each_entry_safe(acpi_gpio, tmp,
 				 &acpi_gpio_deferred_req_irqs_list,
-				 deferred_req_irqs_list_entry) {
-		acpi_handle handle;
-
-		handle = ACPI_HANDLE(acpi_gpio->chip->parent);
-		acpi_walk_resources(handle, "_AEI",
-				    acpi_gpiochip_request_interrupt, acpi_gpio);
-
-		list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
-	}
+				 deferred_req_irqs_list_entry)
+		acpi_gpiochip_request_irqs(acpi_gpio);
 
 	acpi_gpio_deferred_req_irqs_done = true;
 	mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
@@ -1228,4 +1252,4 @@
 	return 0;
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
+late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
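The restructuring above splits event allocation (acpi_gpiochip_alloc_event(), run at probe time) from IRQ request (acpi_gpiochip_request_irqs(), possibly deferred to the late_initcall_sync) so that builtin drivers can register their OpRegions first. A minimal sketch of the deferral scheme, with a hypothetical request_irqs_now() helper:

	static DEFINE_MUTEX(deferred_lock);
	static LIST_HEAD(deferred_list);
	static bool deferred_done;	/* set once the late_initcall_sync ran */

	static void maybe_request_irqs(struct acpi_gpio_chip *acpi_gpio)
	{
		bool defer;

		mutex_lock(&deferred_lock);
		defer = !deferred_done;
		if (defer)	/* too early: queue for the late_initcall_sync */
			list_add(&acpi_gpio->deferred_req_irqs_list_entry,
				 &deferred_list);
		mutex_unlock(&deferred_lock);

		if (!defer)
			request_irqs_now(acpi_gpio);	/* hypothetical helper */
	}
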
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 976ad91..f845471 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -817,7 +817,15 @@
 	/* Do not leak kernel stack to userspace */
 	memset(&ge, 0, sizeof(ge));
 
-	ge.timestamp = le->timestamp;
+	/*
+	 * We may be running from a nested threaded interrupt in which case
+	 * we didn't get the timestamp from lineevent_irq_handler().
+	 */
+	if (!le->timestamp)
+		ge.timestamp = ktime_get_real_ns();
+	else
+		ge.timestamp = le->timestamp;
+
 	level = gpiod_get_value_cansleep(le->desc);
 
 	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
@@ -2278,6 +2286,12 @@
 	unsigned long		flags;
 	unsigned		offset;
 
+	if (label) {
+		label = kstrdup_const(label, GFP_KERNEL);
+		if (!label)
+			return -ENOMEM;
+	}
+
 	spin_lock_irqsave(&gpio_lock, flags);
 
 	/* NOTE:  gpio_request() can be called in early boot,
@@ -2288,6 +2302,7 @@
 		desc_set_label(desc, label ? : "?");
 		status = 0;
 	} else {
+		kfree_const(label);
 		status = -EBUSY;
 		goto done;
 	}
@@ -2304,6 +2319,7 @@
 
 		if (status < 0) {
 			desc_set_label(desc, NULL);
+			kfree_const(label);
 			clear_bit(FLAG_REQUESTED, &desc->flags);
 			goto done;
 		}
@@ -2399,6 +2415,7 @@
 			chip->free(chip, gpio_chip_hwgpio(desc));
 			spin_lock_irqsave(&gpio_lock, flags);
 		}
+		kfree_const(desc->label);
 		desc_set_label(desc, NULL);
 		clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
 		clear_bit(FLAG_REQUESTED, &desc->flags);
@@ -3220,11 +3237,19 @@
  * @desc: gpio to set the consumer name on
  * @name: the new consumer name
  */
-void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
 {
-	VALIDATE_DESC_VOID(desc);
-	/* Just overwrite whatever the previous name was */
-	desc->label = name;
+	VALIDATE_DESC(desc);
+	if (name) {
+		name = kstrdup_const(name, GFP_KERNEL);
+		if (!name)
+			return -ENOMEM;
+	}
+
+	kfree_const(desc->label);
+	desc_set_label(desc, name);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
 
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 65f3eaf..f1a3e8f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -111,6 +111,26 @@
 	  is 100. Typical values for double buffering will be 200,
 	  triple buffering 300.
 
+config DRM_FBDEV_LEAK_PHYS_SMEM
+	bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
+	depends on DRM_FBDEV_EMULATION && EXPERT
+	default n
+	help
+	  In order to keep user-space compatibility, we want in certain
+	  use-cases to keep leaking the fbdev physical address to the
+	  user-space program handling the fbdev buffer.
+	  This affects not only Amlogic, Allwinner and Rockchip devices
+	  with ARM Mali GPUs using a userspace blob, but others as well.
+	  This option is not supported by upstream developers; it should be
+	  removed as soon as possible and is to be considered broken, legacy
+	  behaviour from a modern fbdev device driver.
+
+	  Please send any bug reports when using this to your proprietary
+	  software vendor that requires this.
+
+	  If in doubt, say "N" or spread the word to your closed source
+	  library vendor.
+
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
 	depends on DRM
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a028661..92b11de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -576,6 +576,7 @@
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b31d121..81001d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -122,14 +122,14 @@
 		goto free_chunk;
 	}
 
+	mutex_lock(&p->ctx->lock);
+
 	/* skip guilty context job */
 	if (atomic_read(&p->ctx->guilty) == 1) {
 		ret = -ECANCELED;
 		goto free_chunk;
 	}
 
-	mutex_lock(&p->ctx->lock);
-
 	/* get chunks */
 	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 39bf2ce..7f6af42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1653,8 +1653,10 @@
 
 	amdgpu_amdkfd_device_init(adev);
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
+		amdgpu_virt_init_data_exchange(adev);
 		amdgpu_virt_release_full_gpu(adev, true);
+	}
 
 	return 0;
 }
@@ -2555,9 +2557,6 @@
 		goto failed;
 	}
 
-	if (amdgpu_sriov_vf(adev))
-		amdgpu_virt_init_data_exchange(adev);
-
 	amdgpu_fbdev_init(adev);
 
 	r = amdgpu_pm_sysfs_init(adev);
@@ -3269,6 +3268,7 @@
 	r = amdgpu_ib_ring_tests(adev);
 
 error:
+	amdgpu_virt_init_data_exchange(adev);
 	amdgpu_virt_release_full_gpu(adev, true);
 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
 		atomic_inc(&adev->vram_lost_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8e26e1c..b40e9c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -753,6 +753,7 @@
 	/* VEGAM */
 	{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
 	{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+	{0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
 	/* Vega 10 */
 	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index fd825d3..c0396e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -159,6 +159,7 @@
 	}
 
 	if (amdgpu_device_is_px(dev)) {
+		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
 		pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7b4e657a..c3df75a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1443,7 +1443,8 @@
 		effective_mode &= ~S_IWUSR;
 
 	if ((adev->flags & AMD_IS_APU) &&
-	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
 	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 1c5d97f..8dcf622 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -37,6 +37,7 @@
 #include "amdgpu_display.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
 
 static const struct dma_buf_ops amdgpu_dmabuf_ops;
 
@@ -188,6 +189,48 @@
 	return ERR_PTR(ret);
 }
 
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+	struct dma_fence **fences;
+	unsigned int count;
+	int r;
+
+	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+		return 0;
+
+	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	if (r)
+		return r;
+
+	if (count == 0) {
+		/* Now that was unexpected. */
+	} else if (count == 1) {
+		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_fence_put(fences[0]);
+		kfree(fences);
+	} else {
+		struct dma_fence_array *array;
+
+		array = dma_fence_array_create(count, fences,
+					       dma_fence_context_alloc(1), 0,
+					       false);
+		if (!array)
+			goto err_fences_put;
+
+		reservation_object_add_excl_fence(obj, &array->base);
+		dma_fence_put(&array->base);
+	}
+
+	return 0;
+
+err_fences_put:
+	while (count--)
+		dma_fence_put(fences[count]);
+	kfree(fences);
+	return -ENOMEM;
+}
+
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: shared DMA buffer
@@ -219,16 +262,16 @@
 
 	if (attach->dev->driver != adev->dev->driver) {
 		/*
-		 * Wait for all shared fences to complete before we switch to future
-		 * use of exclusive fence on this prime shared bo.
+		 * We only create shared fences for internal use, but importers
+		 * of the dmabuf rely on exclusive fences for implicitly
+		 * tracking write hazards. As any of the current fences may
+		 * correspond to a write, we need to convert all existing
+		 * fences on the reservation object into a single exclusive
+		 * fence.
 		 */
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-							true, false,
-							MAX_SCHEDULE_TIMEOUT);
-		if (unlikely(r < 0)) {
-			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		if (r)
 			goto error_unreserve;
-		}
 	}
 
 	/* pin buffer into GTT */
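A usage note on dma_fence_array_create() as used above: on success it takes ownership of the kmalloc'd fences[] array, and its base fence signals only after every member signals (signal_on_any == false) — which is what lets one exclusive fence stand in for N shared ones. A minimal sketch of the core call, assuming fences/count came from reservation_object_get_fences_rcu():

	struct dma_fence_array *array;

	/* base fence completes only when ALL member fences have signaled */
	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1), 0,
				       false /* signal_on_any */);
	if (!array)
		return -ENOMEM;	/* on failure the caller still owns fences[] */

	reservation_object_add_excl_fence(obj, &array->base);
	dma_fence_put(&array->base);	/* the reservation holds its own ref */
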
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6a84526..49fe508 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3011,14 +3011,15 @@
 			 struct amdgpu_task_info *task_info)
 {
 	struct amdgpu_vm *vm;
+	unsigned long flags;
 
-	spin_lock(&adev->vm_manager.pasid_lock);
+	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
 
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
 	if (vm)
 		*task_info = vm->task_info;
 
-	spin_unlock(&adev->vm_manager.pasid_lock);
+	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 }
 
 /**
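This hunk and the kfd_device.c hunk below follow the same locking rule: if a spinlock may also be taken from interrupt context, every acquisition must disable local interrupts, or an IRQ arriving while the lock is held on the same CPU deadlocks against itself. The canonical form, with a hypothetical my_lock:

	static void touch_shared_state(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);	/* local IRQs off while held */
		/* modify state also touched by an interrupt handler */
		spin_unlock_irqrestore(&my_lock, flags); /* restore prior IRQ state */
	}
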
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 078f70f..d06332b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -174,7 +174,7 @@
 			return r;
 		}
 		/* Retrieve checksum from mailbox2 */
-		if (req == IDH_REQ_GPU_INIT_ACCESS) {
+		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
 			adev->virt.fw_reserve.checksum_key =
 				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
 					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7c3b634..de5a689 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -71,7 +71,6 @@
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
-	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -89,6 +88,7 @@
 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
 };
@@ -96,6 +96,7 @@
 static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 8a926d1..2b4199a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,16 +116,16 @@
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 5024805..88c006c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -113,16 +113,16 @@
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 6ae82cc..d407083 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -420,16 +420,16 @@
 		DRM_INFO("UVD ENC is disabled\n");
 	}
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst->ring_enc[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 9b7f846..057151b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -444,10 +444,6 @@
 		DRM_INFO("PSP loading UVD firmware\n");
 	}
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 		if (adev->uvd.harvest_config & (1 << j))
 			continue;
@@ -479,6 +475,10 @@
 		}
 	}
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 1427675..5aba50f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -661,6 +661,7 @@
 {
 	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
 	bool is_patched = false;
+	unsigned long flags;
 
 	if (!kfd->init_complete)
 		return;
@@ -670,7 +671,7 @@
 		return;
 	}
 
-	spin_lock(&kfd->interrupt_lock);
+	spin_lock_irqsave(&kfd->interrupt_lock, flags);
 
 	if (kfd->interrupts_active
 	    && interrupt_is_wanted(kfd, ih_ring_entry,
@@ -679,7 +680,7 @@
 				     is_patched ? patched_ihre : ih_ring_entry))
 		queue_work(kfd->ih_wq, &kfd->interrupt_work);
 
-	spin_unlock(&kfd->interrupt_lock);
+	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 }
 
 int kgd2kfd_quiesce_mm(struct mm_struct *mm)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 80f5db4..0805c42 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1072,8 +1072,6 @@
  *		the GPU device is not already present in the topology device
  *		list then return NULL. This means a new topology device has to
  *		be created for this GPU.
- * TODO: Rather than assiging @gpu to first topology device withtout
- *		gpu attached, it will better to have more stringent check.
  */
 static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
 {
@@ -1081,12 +1079,20 @@
 	struct kfd_topology_device *out_dev = NULL;
 
 	down_write(&topology_lock);
-	list_for_each_entry(dev, &topology_device_list, list)
+	list_for_each_entry(dev, &topology_device_list, list) {
+		/* Discrete GPUs need their own topology device list
+		 * entries. Don't assign them to CPU/APU nodes.
+		 */
+		if (!gpu->device_info->needs_iommu_device &&
+		    dev->node_props.cpu_cores_count)
+			continue;
+
 		if (!dev->gpu && (dev->node_props.simd_count > 0)) {
 			dev->gpu = gpu;
 			out_dev = dev;
 			break;
 		}
+	}
 	up_write(&topology_lock);
 	return out_dev;
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 299def8..c5ba912 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -565,22 +565,36 @@
 {
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_connector *connector;
+	struct drm_dp_mst_topology_mgr *mgr;
+	int ret;
+	bool need_hotplug = false;
 
 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		   aconnector = to_amdgpu_dm_connector(connector);
-		   if (aconnector->dc_link->type == dc_connection_mst_branch &&
-				   !aconnector->mst_port) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    head) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->dc_link->type != dc_connection_mst_branch ||
+		    aconnector->mst_port)
+			continue;
 
-			   if (suspend)
-				   drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
-			   else
-				   drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
-		   }
+		mgr = &aconnector->mst_mgr;
+
+		if (suspend) {
+			drm_dp_mst_topology_mgr_suspend(mgr);
+		} else {
+			ret = drm_dp_mst_topology_mgr_resume(mgr);
+			if (ret < 0) {
+				drm_dp_mst_topology_mgr_set_mst(mgr, false);
+				need_hotplug = true;
+			}
+		}
 	}
 
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+	if (need_hotplug)
+		drm_kms_helper_hotplug_event(dev);
 }
 
 static int dm_hw_init(void *handle)
@@ -610,12 +624,13 @@
 	struct amdgpu_display_manager *dm = &adev->dm;
 	int ret = 0;
 
+	WARN_ON(adev->dm.cached_state);
+	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
 	s3_handle_mst(adev->ddev, true);
 
 	amdgpu_dm_irq_suspend(adev);
 
-	WARN_ON(adev->dm.cached_state);
-	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
 
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
@@ -736,7 +751,6 @@
 	struct drm_plane_state *new_plane_state;
 	struct dm_plane_state *dm_new_plane_state;
 	enum dc_connection_type new_connection_type = dc_connection_none;
-	int ret;
 	int i;
 
 	/* power on hardware */
@@ -809,13 +823,13 @@
 		}
 	}
 
-	ret = drm_atomic_helper_resume(ddev, dm->cached_state);
+	drm_atomic_helper_resume(ddev, dm->cached_state);
 
 	dm->cached_state = NULL;
 
 	amdgpu_dm_irq_resume_late(adev);
 
-	return ret;
+	return 0;
 }
 
 static const struct amd_ip_funcs amdgpu_dm_funcs = {
@@ -2894,6 +2908,7 @@
 		state->underscan_enable = false;
 		state->underscan_hborder = 0;
 		state->underscan_vborder = 0;
+		state->max_bpc = 8;
 
 		__drm_atomic_helper_connector_reset(connector, &state->base);
 	}
@@ -2911,6 +2926,7 @@
 	if (new_state) {
 		__drm_atomic_helper_connector_duplicate_state(connector,
 							      &new_state->base);
+		new_state->max_bpc = state->max_bpc;
 		return &new_state->base;
 	}
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 9bfb040..6a6d977 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -60,6 +60,11 @@
 		return -EINVAL;
 	}
 
+	if (!stream_state) {
+		DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
+		return -EINVAL;
+	}
+
 	/* When enabling CRC, we should also disable dithering. */
 	if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
 		if (dc_stream_configure_crc(stream_state->ctx->dc,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fced3c1..23a7ef9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -324,7 +324,7 @@
 {
 	enum gpio_result gpio_result;
 	uint32_t clock_pin = 0;
-
+	uint8_t retry = 0;
 	struct ddc *ddc;
 
 	enum connector_id connector_id =
@@ -353,11 +353,22 @@
 		return present;
 	}
 
-	/* Read GPIO: DP sink is present if both clock and data pins are zero */
-	/* [anaumov] in DAL2, there was no check for GPIO failure */
-
-	gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
-	ASSERT(gpio_result == GPIO_RESULT_OK);
+	/*
+	 * Read GPIO: DP sink is present if both clock and data pins are zero
+	 *
+	 * [W/A] When plugging/unplugging a DP cable, some customer boards see
+	 * one short pulse on clk_pin (1V, < 1ms), so DP would be configured as
+	 * HDMI/DVI and the monitor could not light up. Retry up to 3 times;
+	 * a real passive dongle needs an additional 3ms to detect.
+	 */
+	do {
+		gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+		ASSERT(gpio_result == GPIO_RESULT_OK);
+		if (clock_pin)
+			udelay(1000);
+		else
+			break;
+	} while (retry++ < 3);
 
 	present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
 
@@ -2457,11 +2468,11 @@
 {
 	struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
 
+	core_dc->hwss.blank_stream(pipe_ctx);
+
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		deallocate_mst_payload(pipe_ctx);
 
-	core_dc->hwss.blank_stream(pipe_ctx);
-
 	core_dc->hwss.disable_stream(pipe_ctx, option);
 
 	disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index a7553b6..05840f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2240,7 +2240,8 @@
 					translate_dpcd_max_bpc(
 						hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
 
-				link->dpcd_caps.dongle_caps.extendedCapValid = true;
+				if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
+					link->dpcd_caps.dongle_caps.extendedCapValid = true;
 			}
 
 			break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ea6becc..87bf422 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1917,6 +1917,8 @@
 		}
 	*/
 
+	calculate_phy_pix_clks(stream);
+
 	/* acquire new resources */
 	pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 0941f3c..53ccacf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1000,7 +1000,7 @@
 
 		pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
 
-		if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+		if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
 			/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
 			pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
 		/* un-mute audio */
@@ -1017,6 +1017,8 @@
 	pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
 			pipe_ctx->stream_res.stream_enc, true);
 	if (pipe_ctx->stream_res.audio) {
+		struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+
 		if (option != KEEP_ACQUIRED_RESOURCE ||
 				!dc->debug.az_endpoint_mute_only) {
 			/*only disable az_endpoint if powering down or freeing*/
@@ -1036,6 +1038,9 @@
 			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
 			pipe_ctx->stream_res.audio = NULL;
 		}
+		if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+			/*apply the PME w/a here as well, in order to wake AZ from D3*/
+			pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
 
 		/* TODO: notify audio driver for if audio modes list changed
 		 * add audio mode list change flag */
@@ -1268,10 +1273,19 @@
 		pipe_ctx->plane_res.scl_data.lb_params.depth,
 		&pipe_ctx->stream->bit_depth_params);
 
-	if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
+	if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
+		/*
+		 * The way 4:2:0 is packed, 2 channels carry the Y component and
+		 * 1 channel alternates between Cb and Cr, so both channels need
+		 * the pixel value for Y
+		 */
+		if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+			color.color_r_cr = color.color_g_y;
+
 		pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
 				pipe_ctx->stream_res.tg,
 				&color);
+	}
 
 	pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
 		&pipe_ctx->plane_res.scl_data);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index cfcc54f..4058b59 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1190,7 +1190,8 @@
 		tf = plane_state->in_transfer_func;
 
 	if (plane_state->gamma_correction &&
-		!plane_state->gamma_correction->is_identity
+		!dpp_base->ctx->dc->debug.always_use_regamma
+		&& !plane_state->gamma_correction->is_identity
 			&& dce_use_lut(plane_state->format))
 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
 
@@ -2120,6 +2121,15 @@
 	color_space = stream->output_color_space;
 	color_space_to_black_color(dc, color_space, &black_color);
 
+	/*
+	 * The way 4:2:0 is packed, 2 channels carry the Y component and
+	 * 1 channel alternates between Cb and Cr, so both channels need
+	 * the pixel value for Y
+	 */
+	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+		black_color.color_r_cr = black_color.color_g_y;
+
+
 	if (stream_res->tg->funcs->set_blank_color)
 		stream_res->tg->funcs->set_blank_color(
 				stream_res->tg,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index a63e006..1546bc4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -984,6 +984,7 @@
 		break;
 	case amd_pp_dpp_clock:
 		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 16b1a9c..743d3c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -32,6 +32,7 @@
 #include "vega10_pptable.h"
 
 #define NUM_DSPCLK_LEVELS 8
+#define VEGA10_ENGINECLOCK_HARDMAX 198000
 
 static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
 		enum phm_platform_caps cap)
@@ -258,7 +259,26 @@
 		struct pp_hwmgr *hwmgr,
 		const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
 {
-	hwmgr->platform_descriptor.overdriveLimit.engineClock =
+	const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
+			(const ATOM_Vega10_GFXCLK_Dependency_Table *)
+			(((unsigned long) powerplay_table) +
+			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
+	bool is_acg_enabled = false;
+	ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
+
+	if (gfxclk_dep_table->ucRevId == 1) {
+		patom_record_v2 =
+			(ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
+		is_acg_enabled =
+			(bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
+	}
+
+	if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
+		!is_acg_enabled)
+		hwmgr->platform_descriptor.overdriveLimit.engineClock =
+			VEGA10_ENGINECLOCK_HARDMAX;
+	else
+		hwmgr->platform_descriptor.overdriveLimit.engineClock =
 			le32_to_cpu(powerplay_table->ulMaxODEngineClock);
 	hwmgr->platform_descriptor.overdriveLimit.memoryClock =
 			le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5b67f57..45629f2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1528,8 +1528,21 @@
 	efuse = efuse >> 24;
 
 	if (hwmgr->chip_id == CHIP_POLARIS10) {
-		min = 1000;
-		max = 2300;
+		if (hwmgr->is_kicker) {
+			min = 1200;
+			max = 2500;
+		} else {
+			min = 1000;
+			max = 2300;
+		}
+	} else if (hwmgr->chip_id == CHIP_POLARIS11) {
+		if (hwmgr->is_kicker) {
+			min = 900;
+			max = 2100;
+		} else {
+			min = 1100;
+			max = 2100;
+		}
 	} else {
 		min = 1100;
 		max = 2100;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8e28e73..3915473 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -98,6 +98,8 @@
 #define DP0_STARTVAL		0x064c
 #define DP0_ACTIVEVAL		0x0650
 #define DP0_SYNCVAL		0x0654
+#define SYNCVAL_HS_POL_ACTIVE_LOW	(1 << 15)
+#define SYNCVAL_VS_POL_ACTIVE_LOW	(1 << 31)
 #define DP0_MISC		0x0658
 #define TU_SIZE_RECOMMENDED		(63) /* LSCLK cycles per TU */
 #define BPC_6				(0 << 5)
@@ -142,6 +144,8 @@
 #define DP0_LTLOOPCTRL		0x06d8
 #define DP0_SNKLTCTRL		0x06e4
 
+#define DP1_SRCCTRL		0x07a0
+
 /* PHY */
 #define DP_PHY_CTRL		0x0800
 #define DP_PHY_RST			BIT(28)  /* DP PHY Global Soft Reset */
@@ -150,6 +154,7 @@
 #define PHY_M1_RST			BIT(12)  /* Reset PHY1 Main Channel */
 #define PHY_RDY				BIT(16)  /* PHY Main Channels Ready */
 #define PHY_M0_RST			BIT(8)   /* Reset PHY0 Main Channel */
+#define PHY_2LANE			BIT(2)   /* PHY Enable 2 lanes */
 #define PHY_A0_EN			BIT(1)   /* PHY Aux Channel0 Enable */
 #define PHY_M0_EN			BIT(0)   /* PHY Main Channel0 Enable */
 
@@ -540,6 +545,7 @@
 	unsigned long rate;
 	u32 value;
 	int ret;
+	u32 dp_phy_ctrl;
 
 	rate = clk_get_rate(tc->refclk);
 	switch (rate) {
@@ -564,7 +570,10 @@
 	value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
 	tc_write(SYS_PLLPARAM, value);
 
-	tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
+	dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
+	if (tc->link.base.num_lanes == 2)
+		dp_phy_ctrl |= PHY_2LANE;
+	tc_write(DP_PHY_CTRL, dp_phy_ctrl);
 
 	/*
 	 * Initially PLLs are in bypass. Force PLL parameter update,
@@ -719,7 +728,9 @@
 
 	tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
 
-	tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
+	tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
+		 ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
+		 ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
 
 	tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
 		 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
@@ -829,12 +840,11 @@
 	if (!tc->mode)
 		return -EINVAL;
 
-	/* from excel file - DP0_SrcCtrl */
-	tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
-		 DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
-		 DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
-	/* from excel file - DP1_SrcCtrl */
-	tc_write(0x07a0, 0x00003083);
+	tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
+	/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
+	tc_write(DP1_SRCCTRL,
+		 (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
+		 ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
 
 	rate = clk_get_rate(tc->refclk);
 	switch (rate) {
@@ -855,8 +865,11 @@
 	}
 	value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
 	tc_write(SYS_PLLPARAM, value);
+
 	/* Setup Main Link */
-	dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN |  PHY_M0_EN;
+	dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
+	if (tc->link.base.num_lanes == 2)
+		dp_phy_ctrl |= PHY_2LANE;
 	tc_write(DP_PHY_CTRL, dp_phy_ctrl);
 	msleep(100);
 
@@ -1105,10 +1118,20 @@
 static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
 				   struct drm_display_mode *mode)
 {
+	struct tc_data *tc = connector_to_tc(connector);
+	u32 req, avail;
+	u32 bits_per_pixel = 24;
+
 	/* DPI interface clock limitation: up to 154 MHz */
 	if (mode->clock > 154000)
 		return MODE_CLOCK_HIGH;
 
+	req = mode->clock * bits_per_pixel / 8;
+	avail = tc->link.base.num_lanes * tc->link.base.rate;
+
+	if (req > avail)
+		return MODE_BAD;
+
 	return MODE_OK;
 }
 
@@ -1195,6 +1218,10 @@
 
 	drm_display_info_set_bus_formats(&tc->connector.display_info,
 					 &bus_format, 1);
+	tc->connector.display_info.bus_flags =
+		DRM_BUS_FLAG_DE_HIGH |
+		DRM_BUS_FLAG_PIXDATA_NEGEDGE |
+		DRM_BUS_FLAG_SYNC_NEGEDGE;
 	drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
 
 	return 0;
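A worked check of the new mode_valid() bandwidth test above, under the assumption that mode->clock and tc->link.base.rate are both in kHz (the DP helpers report 162000/270000 kHz for RBR/HBR): for 1080p60 with a 148,500 kHz pixel clock at the assumed 24 bpp, req = 148500 * 24 / 8 = 445,500, while two HBR lanes give avail = 2 * 270000 = 540,000, so the mode passes; a single RBR lane (avail = 162,000) would return MODE_BAD.
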
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 1bb4c31..94f5c364 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1425,6 +1425,9 @@
 			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
 				  crtc->base.id, crtc->name);
 	}
+
+	if (old_state->fake_commit)
+		complete_all(&old_state->fake_commit->flip_done);
 }
 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
 
@@ -1561,6 +1564,15 @@
 	    old_plane_state->crtc != new_plane_state->crtc)
 		return -EINVAL;
 
+	/*
+	 * FIXME: Since prepare_fb and cleanup_fb are always called on
+	 * the new_plane_state for async updates we need to block framebuffer
+	 * changes. This prevents use of a fb that's been cleaned up and
+	 * double cleanups from occurring.
+	 */
+	if (old_plane_state->fb != new_plane_state->fb)
+		return -EINVAL;
+
 	funcs = plane->helper_private;
 	if (!funcs->atomic_async_update)
 		return -EINVAL;
@@ -3189,7 +3210,7 @@
 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 					      struct drm_modeset_acquire_ctx *ctx)
 {
-	int i;
+	int i, ret;
 	struct drm_plane *plane;
 	struct drm_plane_state *new_plane_state;
 	struct drm_connector *connector;
@@ -3208,7 +3229,11 @@
 	for_each_new_connector_in_state(state, connector, new_conn_state, i)
 		state->connectors[i].old_state = connector->state;
 
-	return drm_atomic_commit(state);
+	ret = drm_atomic_commit(state);
+
+	state->acquire_ctx = NULL;
+
+	return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
 
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index ba8cfe6..e2f775d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -36,6 +36,8 @@
 #include <drm/drmP.h>
 #include "drm_legacy.h"
 
+#include <linux/nospec.h>
+
 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
 						  struct drm_local_map *map)
 {
@@ -1417,6 +1419,7 @@
 				  idx, dma->buf_count - 1);
 			return -EINVAL;
 		}
+		idx = array_index_nospec(idx, dma->buf_count);
 		buf = dma->buflist[idx];
 		if (buf->file_priv != file_priv) {
 			DRM_ERROR("Process %d freeing buffer not owned\n",
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index d708472..6794d60 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -51,10 +51,6 @@
 				     int id,
 				     struct drm_dp_payload *payload);
 
-static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_port *port,
-				  int offset, int size, u8 *bytes);
-
 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 				     struct drm_dp_mst_branch *mstb);
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -439,6 +435,7 @@
 	if (idx > raw->curlen)
 		goto fail_len;
 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+	idx++;
 	if (idx > raw->curlen)
 		goto fail_len;
 
@@ -1402,7 +1399,6 @@
 	return false;
 }
 
-#if 0
 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
 {
 	struct drm_dp_sideband_msg_req_body req;
@@ -1415,7 +1411,6 @@
 
 	return 0;
 }
-#endif
 
 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
 				    bool up, u8 *msg, int len)
@@ -1981,30 +1976,65 @@
 }
 EXPORT_SYMBOL(drm_dp_update_payload_part2);
 
-#if 0 /* unused as of yet */
-static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_mst_port *port,
-				 int offset, int size)
+				 int offset, int size, u8 *bytes)
 {
 	int len;
+	int ret;
 	struct drm_dp_sideband_msg_tx *txmsg;
+	struct drm_dp_mst_branch *mstb;
+
+	memset(bytes, 0, size);
+
+	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+	if (!mstb)
+		return -EINVAL;
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
-	if (!txmsg)
-		return -ENOMEM;
+	if (!txmsg) {
+		ret = -ENOMEM;
+		goto fail_put;
+	}
 
-	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
-	txmsg->dst = port->parent;
+	len = build_dpcd_read(txmsg, port->port_num, offset, size);
+	txmsg->dst = mstb;
 
 	drm_dp_queue_down_tx(mgr, txmsg);
+	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+	if (ret <= 0) {
+		DRM_ERROR("dpcd read failed\n");
+		goto fail_free_msg;
+	}
 
-	return 0;
+	if (txmsg->reply.reply_type == 1) {
+		DRM_ERROR("dpcd read nack received\n");
+		ret = -EINVAL;
+		goto fail_free_msg;
+	}
+
+	if (port->port_num != txmsg->reply.u.remote_dpcd_read_ack.port_number) {
+		DRM_ERROR("got incorrect port in response\n");
+		ret = -EINVAL;
+		goto fail_free_msg;
+	}
+
+	if (size > txmsg->reply.u.remote_dpcd_read_ack.num_bytes)
+		size = txmsg->reply.u.remote_dpcd_read_ack.num_bytes;
+
+	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, size);
+
+fail_free_msg:
+	kfree(txmsg);
+fail_put:
+	drm_dp_put_mst_branch_device(mstb);
+	return ret;
 }
-#endif
+EXPORT_SYMBOL(drm_dp_send_dpcd_read);
 
-static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_port *port,
-				  int offset, int size, u8 *bytes)
+int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+			   struct drm_dp_mst_port *port,
+			   int offset, int size, u8 *bytes)
 {
 	int len;
 	int ret;
@@ -2038,6 +2068,7 @@
 	drm_dp_put_mst_branch_device(mstb);
 	return ret;
 }
+EXPORT_SYMBOL(drm_dp_send_dpcd_write);
 
 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
 {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 9214c8b..1bda809 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -56,6 +56,25 @@
 		 "Overallocation of the fbdev buffer (%) [default="
 		 __MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]");
 
+/*
+ * In order to keep user-space compatibility, we want in certain use-cases
+ * to keep leaking the fbdev physical address to the user-space program
+ * handling the fbdev buffer.
+ * This is a bad habit, essentially kept alive by closed-source OpenGL
+ * drivers, which should really be moved into open-source upstream projects
+ * instead of using legacy physical addresses in user space to communicate
+ * with other out-of-tree kernel modules.
+ *
+ * This module_param *should* be removed as soon as possible and is to be
+ * considered broken, legacy behaviour for a modern fbdev device.
+ */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+static bool drm_leak_fbdev_smem = false;
+module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
+MODULE_PARM_DESC(drm_leak_fbdev_smem,
+		 "Allow unsafe leaking fbdev physical smem address [default=false]");
+#endif
+
 static LIST_HEAD(kernel_fb_helper_list);
 static DEFINE_MUTEX(kernel_fb_helper_lock);
 
@@ -1602,6 +1621,64 @@
 	       var_1->transp.msb_right == var_2->transp.msb_right;
 }
 
+static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
+					 u8 depth)
+{
+	switch (depth) {
+	case 8:
+		var->red.offset = 0;
+		var->green.offset = 0;
+		var->blue.offset = 0;
+		var->red.length = 8; /* 8bit DAC */
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		break;
+	case 15:
+		var->red.offset = 10;
+		var->green.offset = 5;
+		var->blue.offset = 0;
+		var->red.length = 5;
+		var->green.length = 5;
+		var->blue.length = 5;
+		var->transp.offset = 15;
+		var->transp.length = 1;
+		break;
+	case 16:
+		var->red.offset = 11;
+		var->green.offset = 5;
+		var->blue.offset = 0;
+		var->red.length = 5;
+		var->green.length = 6;
+		var->blue.length = 5;
+		var->transp.offset = 0;
+		break;
+	case 24:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		break;
+	case 32:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 24;
+		var->transp.length = 8;
+		break;
+	default:
+		break;
+	}
+}
+
 /**
  * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
  * @var: screeninfo to check
@@ -1613,9 +1690,14 @@
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_framebuffer *fb = fb_helper->fb;
 
-	if (var->pixclock != 0 || in_dbg_master())
+	if (in_dbg_master())
 		return -EINVAL;
 
+	if (var->pixclock != 0) {
+		DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+		var->pixclock = 0;
+	}
+
 	/*
 	 * Changes struct fb_var_screeninfo are currently not pushed back
 	 * to KMS, hence fail if different settings are requested.
@@ -1632,6 +1714,20 @@
 	}
 
 	/*
+	 * Workaround for SDL 1.2, which is known to be setting all pixel format
+	 * fields values to zero in some cases. We treat this situation as a
+	 * kind of "use some reasonable autodetected values".
+	 */
+	if (!var->red.offset     && !var->green.offset    &&
+	    !var->blue.offset    && !var->transp.offset   &&
+	    !var->red.length     && !var->green.length    &&
+	    !var->blue.length    && !var->transp.length   &&
+	    !var->red.msb_right  && !var->green.msb_right &&
+	    !var->blue.msb_right && !var->transp.msb_right) {
+		drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
+	}
+
+	/*
 	 * drm fbdev emulation doesn't support changing the pixel format at all,
 	 * so reject all pixel format changing requests.
 	 */
@@ -1942,59 +2038,7 @@
 	info->var.yoffset = 0;
 	info->var.activate = FB_ACTIVATE_NOW;
 
-	switch (fb->format->depth) {
-	case 8:
-		info->var.red.offset = 0;
-		info->var.green.offset = 0;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8; /* 8bit DAC */
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 0;
-		info->var.transp.length = 0;
-		break;
-	case 15:
-		info->var.red.offset = 10;
-		info->var.green.offset = 5;
-		info->var.blue.offset = 0;
-		info->var.red.length = 5;
-		info->var.green.length = 5;
-		info->var.blue.length = 5;
-		info->var.transp.offset = 15;
-		info->var.transp.length = 1;
-		break;
-	case 16:
-		info->var.red.offset = 11;
-		info->var.green.offset = 5;
-		info->var.blue.offset = 0;
-		info->var.red.length = 5;
-		info->var.green.length = 6;
-		info->var.blue.length = 5;
-		info->var.transp.offset = 0;
-		break;
-	case 24:
-		info->var.red.offset = 16;
-		info->var.green.offset = 8;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8;
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 0;
-		info->var.transp.length = 0;
-		break;
-	case 32:
-		info->var.red.offset = 16;
-		info->var.green.offset = 8;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8;
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 24;
-		info->var.transp.length = 8;
-		break;
-	default:
-		break;
-	}
+	drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
 
 	info->var.xres = fb_width;
 	info->var.yres = fb_height;
@@ -3041,6 +3085,12 @@
 	fbi->screen_size = fb->height * fb->pitches[0];
 	fbi->fix.smem_len = fbi->screen_size;
 	fbi->screen_buffer = buffer->vaddr;
+	/* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+	if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+		fbi->fix.smem_start =
+			page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
 	strcpy(fbi->fix.id, "DRM emulated");
 
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ea10e9a..ba129b6 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -37,6 +37,7 @@
 
 #include <linux/pci.h>
 #include <linux/export.h>
+#include <linux/nospec.h>
 
 /**
  * DOC: getunique and setversion story
@@ -794,13 +795,17 @@
 
 	if (is_driver_ioctl) {
 		/* driver ioctl */
-		if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+		unsigned int index = nr - DRM_COMMAND_BASE;
+
+		if (index >= dev->driver->num_ioctls)
 			goto err_i1;
-		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+		index = array_index_nospec(index, dev->driver->num_ioctls);
+		ioctl = &dev->driver->ioctls[index];
 	} else {
 		/* core ioctl */
 		if (nr >= DRM_CORE_IOCTL_COUNT)
 			goto err_i1;
+		nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
 		ioctl = &drm_ioctls[nr];
 	}
 
@@ -882,6 +887,7 @@
 
 	if (nr >= DRM_CORE_IOCTL_COUNT)
 		return false;
+	nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
 
 	*flags = drm_ioctls[nr].flags;
 	return true;
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index fe6bfaf..086f2ad 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -521,7 +521,8 @@
 
 	object_count = cl->object_count;
 
-	object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
+	object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+			array_size(object_count, sizeof(__u32)));
 	if (IS_ERR(object_ids))
 		return PTR_ERR(object_ids);
 
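Note the overflow-hardening idiom here: a user-controlled count multiplied
by an element size can wrap, so the size is computed with array_size()
from <linux/overflow.h>, which saturates to SIZE_MAX on overflow and makes
the allocation fail cleanly instead of being undersized. A minimal sketch,
assuming a hypothetical user pointer uptr and element count:

	#include <linux/overflow.h>
	#include <linux/string.h>

	/* array_size() saturates to SIZE_MAX; memdup_user() then fails */
	u32 *ids = memdup_user(uptr, array_size(count, sizeof(u32)));
	if (IS_ERR(ids))
		return PTR_ERR(ids);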
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 02db9ac..a3104d7 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -758,7 +758,7 @@
 	if (mode->hsync)
 		return mode->hsync;
 
-	if (mode->htotal < 0)
+	if (mode->htotal <= 0)
 		return 0;
 
 	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 9ad89e3..12e4203 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -996,7 +996,7 @@
 {
 	unsigned int index;
 	u64 virtaddr;
-	unsigned long req_size, pgoff = 0;
+	unsigned long req_size, pgoff, req_start;
 	pgprot_t pg_prot;
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
 
@@ -1014,7 +1014,17 @@
 	pg_prot = vma->vm_page_prot;
 	virtaddr = vma->vm_start;
 	req_size = vma->vm_end - vma->vm_start;
-	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+	pgoff = vma->vm_pgoff &
+		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+	req_start = pgoff << PAGE_SHIFT;
+
+	if (!intel_vgpu_in_aperture(vgpu, req_start))
+		return -EINVAL;
+	if (req_start + req_size >
+	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+		return -EINVAL;
+
+	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
 
 	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 47cc932..280c851 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1821,6 +1821,16 @@
 	return 0;
 }
 
+static inline bool
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
+	      unsigned long addr, unsigned long size)
+{
+	if (vma->vm_file != filp)
+		return false;
+
+	return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+}
+
 /**
  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
  *			 it is mapped to.
@@ -1879,7 +1889,7 @@
 			return -EINTR;
 		}
 		vma = find_vma(mm, addr);
-		if (vma)
+		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
 			vma->vm_page_prot =
 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 		else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5f57f4e..87411a5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2128,6 +2128,7 @@
 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 {
 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+	int err;
 
 	/*
 	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2143,9 +2144,17 @@
 	 * allocator works in address space sizes, so it's multiplied by page
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
-	return i915_vma_pin(ppgtt->vma,
-			    0, GEN6_PD_ALIGN,
-			    PIN_GLOBAL | PIN_HIGH);
+	err = i915_vma_pin(ppgtt->vma,
+			   0, GEN6_PD_ALIGN,
+			   PIN_GLOBAL | PIN_HIGH);
+	if (err)
+		goto unpin;
+
+	return 0;
+
+unpin:
+	ppgtt->pin_count = 0;
+	return err;
 }
 
 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index c9af348..b4b1f9c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1085,7 +1085,7 @@
 			return DDI_CLK_SEL_TBT_810;
 		default:
 			MISSING_CASE(clock);
-			break;
+			return DDI_CLK_SEL_NONE;
 		}
 	case DPLL_ID_ICL_MGPLL1:
 	case DPLL_ID_ICL_MGPLL2:
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8fc61e9..50d5649 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,16 @@
 	unsigned long vma_flags;
 	async_cookie_t cookie;
 	int preferred_bpp;
+
+	/* Whether or not fbdev hpd processing is temporarily suspended */
+	bool hpd_suspended : 1;
+	/* Set when a hotplug was received while HPD processing was
+	 * suspended
+	 */
+	bool hpd_waiting : 1;
+
+	/* Protects hpd_suspended */
+	struct mutex hpd_lock;
 };
 
 struct intel_encoder {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fb2f9fc..6f91634 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -334,8 +334,8 @@
 				    bool *enabled, int width, int height)
 {
 	struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
-	unsigned long conn_configured, conn_seq, mask;
 	unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+	unsigned long conn_configured, conn_seq;
 	int i, j;
 	bool *save_enabled;
 	bool fallback = true, ret = true;
@@ -353,10 +353,9 @@
 		drm_modeset_backoff(&ctx);
 
 	memcpy(save_enabled, enabled, count);
-	mask = GENMASK(count - 1, 0);
+	conn_seq = GENMASK(count - 1, 0);
 	conn_configured = 0;
 retry:
-	conn_seq = conn_configured;
 	for (i = 0; i < count; i++) {
 		struct drm_fb_helper_connector *fb_conn;
 		struct drm_connector *connector;
@@ -369,7 +368,8 @@
 		if (conn_configured & BIT(i))
 			continue;
 
-		if (conn_seq == 0 && !connector->has_tile)
+		/* First pass, only consider tiled connectors */
+		if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
 			continue;
 
 		if (connector->status == connector_status_connected)
@@ -473,8 +473,10 @@
 		conn_configured |= BIT(i);
 	}
 
-	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+	if (conn_configured != conn_seq) { /* repeat until no more are found */
+		conn_seq = conn_configured;
 		goto retry;
+	}
 
 	/*
 	 * If the BIOS didn't enable everything it could, fall back to have the
@@ -677,6 +679,7 @@
 	if (ifbdev == NULL)
 		return -ENOMEM;
 
+	mutex_init(&ifbdev->hpd_lock);
 	drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
 
 	if (!intel_fbdev_init_bios(dev, ifbdev))
@@ -750,6 +753,26 @@
 	intel_fbdev_destroy(ifbdev);
 }
 
+/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
+ * processing, fbdev will perform a full connector reprobe if a hotplug event
+ * was received while HPD was suspended.
+ */
+static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+{
+	bool send_hpd = false;
+
+	mutex_lock(&ifbdev->hpd_lock);
+	ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
+	send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
+	ifbdev->hpd_waiting = false;
+	mutex_unlock(&ifbdev->hpd_lock);
+
+	if (send_hpd) {
+		DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+		drm_fb_helper_hotplug_event(&ifbdev->helper);
+	}
+}
+
 void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -771,6 +794,7 @@
 		 */
 		if (state != FBINFO_STATE_RUNNING)
 			flush_work(&dev_priv->fbdev_suspend_work);
+
 		console_lock();
 	} else {
 		/*
@@ -798,17 +822,26 @@
 
 	drm_fb_helper_set_suspend(&ifbdev->helper, state);
 	console_unlock();
+
+	intel_fbdev_hpd_set_suspend(ifbdev, state);
 }
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+	bool send_hpd;
 
 	if (!ifbdev)
 		return;
 
 	intel_fbdev_sync(ifbdev);
-	if (ifbdev->vma || ifbdev->helper.deferred_setup)
+
+	mutex_lock(&ifbdev->hpd_lock);
+	send_hpd = !ifbdev->hpd_suspended;
+	ifbdev->hpd_waiting = true;
+	mutex_unlock(&ifbdev->hpd_lock);
+
+	if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
 		drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
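The hpd_suspended/hpd_waiting pair implements a latch-and-replay scheme:
hotplug events that arrive while fbdev processing is suspended only set a
flag, and resuming replays a single deferred event. A minimal sketch of
the same idea, with a hypothetical deferred_event structure (the i915
code additionally gates on the vma/deferred_setup state):

	struct deferred_event {
		struct mutex lock;
		bool suspended;
		bool pending;
	};

	static void event_resume(struct deferred_event *ev,
				 void (*handle)(void *), void *arg)
	{
		bool replay;

		mutex_lock(&ev->lock);
		ev->suspended = false;
		replay = ev->pending;
		ev->pending = false;
		mutex_unlock(&ev->lock);

		if (replay)
			handle(arg);	/* replay the one latched event */
	}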
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 191b314..709475d 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -45,7 +45,6 @@
 	struct drm_crtc base;
 	struct drm_pending_vblank_event *event;
 	struct meson_drm *priv;
-	bool enabled;
 };
 #define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
 
@@ -81,7 +80,8 @@
 
 };
 
-static void meson_crtc_enable(struct drm_crtc *crtc)
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_state)
 {
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	struct drm_crtc_state *crtc_state = crtc->state;
@@ -103,20 +103,6 @@
 
 	drm_crtc_vblank_on(crtc);
 
-	meson_crtc->enabled = true;
-}
-
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
-				     struct drm_crtc_state *old_state)
-{
-	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
-	struct meson_drm *priv = meson_crtc->priv;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	if (!meson_crtc->enabled)
-		meson_crtc_enable(crtc);
-
 	priv->viu.osd1_enabled = true;
 }
 
@@ -142,8 +128,6 @@
 
 		crtc->state->event = NULL;
 	}
-
-	meson_crtc->enabled = false;
 }
 
 static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -152,9 +136,6 @@
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	unsigned long flags;
 
-	if (crtc->state->enable && !meson_crtc->enabled)
-		meson_crtc_enable(crtc);
-
 	if (crtc->state->event) {
 		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index d344312..611ac34 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -82,6 +82,10 @@
 	.fb_create           = drm_gem_fb_create,
 };
 
+static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
+	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
 static irqreturn_t meson_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
@@ -246,6 +250,7 @@
 	drm->mode_config.max_width = 3840;
 	drm->mode_config.max_height = 2160;
 	drm->mode_config.funcs = &meson_mode_config_funcs;
+	drm->mode_config.helper_private	= &meson_mode_config_helpers;
 
 	/* Hardware Initialization */
 
@@ -363,8 +368,10 @@
 		remote_node = of_graph_get_remote_port_parent(ep);
 		if (!remote_node ||
 		    remote_node == parent || /* Ignore parent endpoint */
-		    !of_device_is_available(remote_node))
+		    !of_device_is_available(remote_node)) {
+			of_node_put(remote_node);
 			continue;
+		}
 
 		count += meson_probe_remote(pdev, match, remote, remote_node);
 
@@ -383,10 +390,13 @@
 
 	for_each_endpoint_of_node(np, ep) {
 		remote = of_graph_get_remote_port_parent(ep);
-		if (!remote || !of_device_is_available(remote))
+		if (!remote || !of_device_is_available(remote)) {
+			of_node_put(remote);
 			continue;
+		}
 
 		count += meson_probe_remote(pdev, &match, np, remote);
+		of_node_put(remote);
 	}
 
 	if (count && !match)
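Both hunks fix the same leak: of_graph_get_remote_port_parent() returns a
device_node with its refcount raised, so every exit path out of the loop,
including the early "continue", must drop it. of_node_put(NULL) is a
no-op, which keeps the error path simple. A minimal sketch of the pattern:

	#include <linux/of.h>
	#include <linux/of_graph.h>

	for_each_endpoint_of_node(np, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote || !of_device_is_available(remote)) {
			of_node_put(remote);	/* safe even when NULL */
			continue;
		}
		/* ... use remote ... */
		of_node_put(remote);
	}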
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 94093bc..578c43d 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -15,6 +15,7 @@
 	select WANT_DEV_COREDUMP
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	select SYNC_FILE
+	select MSM_EXT_DISPLAY
 	select PM_OPP
 	default y
 	help
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index bbb8126..9acb9df 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -896,7 +896,7 @@
 	np = dev_pm_opp_get_of_node(opp);
 
 	if (np) {
-		of_property_read_u32(np, "qcom,level", &val);
+		of_property_read_u32(np, "opp-level", &val);
 		of_node_put(np);
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
new file mode 100644
index 0000000..879c13f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_trace.h"
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg:		private data of callback handler
+ * @irq_idx:		interrupt index
+ */
+static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
+{
+	struct dpu_kms *dpu_kms = arg;
+	struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
+	struct dpu_irq_callback *cb;
+	unsigned long irq_flags;
+
+	pr_debug("irq_idx=%d\n", irq_idx);
+
+	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+		DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
+			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+	}
+
+	atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+	/*
+	 * Perform registered function callback
+	 */
+	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+		if (cb->func)
+			cb->func(cb->arg, irq_idx);
+	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+	/*
+	 * Clear pending interrupt status in HW.
+	 * NOTE: dpu_core_irq_callback_handler is protected by top-level
+	 *       spinlock, so it is safe to clear any interrupt status here.
+	 */
+	dpu_kms->hw_intr->ops.clear_intr_status_nolock(
+			dpu_kms->hw_intr,
+			irq_idx);
+}
+
+int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
+		enum dpu_intr_type intr_type, u32 instance_idx)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.irq_idx_lookup)
+		return -EINVAL;
+
+	return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+			instance_idx);
+}
+
+/**
+ * _dpu_core_irq_enable - enable core interrupt given by the index
+ * @dpu_kms:		Pointer to dpu kms context
+ * @irq_idx:		interrupt index
+ */
+static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+	unsigned long irq_flags;
+	int ret = 0, enable_count;
+
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->irq_obj.enable_counts ||
+			!dpu_kms->irq_obj.irq_counts) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+	trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
+
+	if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+		ret = dpu_kms->hw_intr->ops.enable_irq(
+				dpu_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+					irq_idx);
+
+		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+		spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+		/* empty callback list but interrupt is enabled */
+		if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
+			DPU_ERROR("irq_idx=%d enabled with no callback\n",
+					irq_idx);
+		spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	}
+
+	return ret;
+}
+
+int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+	int i, ret = 0, counts;
+
+	if (!dpu_kms || !irq_idxs || !irq_count) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+	if (counts)
+		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+	for (i = 0; (i < irq_count) && !ret; i++)
+		ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
+
+	return ret;
+}
+
+/**
+ * _dpu_core_irq_disable - disable core interrupt given by the index
+ * @dpu_kms:		Pointer to dpu kms context
+ * @irq_idx:		interrupt index
+ */
+static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+	int ret = 0, enable_count;
+
+	if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+	trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
+
+	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+		ret = dpu_kms->hw_intr->ops.disable_irq(
+				dpu_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+					irq_idx);
+		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+	}
+
+	return ret;
+}
+
+int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+	int i, ret = 0, counts;
+
+	if (!dpu_kms || !irq_idxs || !irq_count) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+	if (counts == 2)
+		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+	for (i = 0; (i < irq_count) && !ret; i++)
+		ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
+
+	return ret;
+}
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.get_interrupt_status)
+		return 0;
+
+	if (irq_idx < 0) {
+		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+				__builtin_return_address(0), irq_idx);
+		return 0;
+	}
+
+	return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
+			irq_idx, clear);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+		struct dpu_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!register_irq_cb || !register_irq_cb->func) {
+		DPU_ERROR("invalid irq_cb:%d func:%d\n",
+				register_irq_cb != NULL,
+				register_irq_cb ?
+					register_irq_cb->func != NULL : -1);
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	list_add_tail(&register_irq_cb->list,
+			&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+		struct dpu_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!register_irq_cb || !register_irq_cb->func) {
+		DPU_ERROR("invalid irq_cb:%d func:%d\n",
+				register_irq_cb != NULL,
+				register_irq_cb ?
+					register_irq_cb->func != NULL : -1);
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	/* empty callback list but interrupt is still enabled */
+	if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
+		DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.clear_all_irqs)
+		return;
+
+	dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.disable_all_irqs)
+		return;
+
+	dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+	struct dpu_irq *irq_obj = s->private;
+	struct dpu_irq_callback *cb;
+	unsigned long irq_flags;
+	int i, irq_count, enable_count, cb_count;
+
+	if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+		DPU_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	for (i = 0; i < irq_obj->total_irqs; i++) {
+		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+		cb_count = 0;
+		irq_count = atomic_read(&irq_obj->irq_counts[i]);
+		enable_count = atomic_read(&irq_obj->enable_counts[i]);
+		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+			cb_count++;
+		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+		if (irq_count || enable_count || cb_count)
+			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+					i, irq_count, enable_count, cb_count);
+	}
+
+	return 0;
+}
+
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent)
+{
+	dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
+			parent, &dpu_kms->irq_obj,
+			&dpu_debugfs_core_irq_fops);
+
+	return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+	debugfs_remove(dpu_kms->irq_obj.debugfs_file);
+	dpu_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent)
+{
+	return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	} else if (!dpu_kms->dev) {
+		DPU_ERROR("invalid drm device\n");
+		return;
+	} else if (!dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid device private\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	dpu_clear_all_irqs(dpu_kms);
+	dpu_disable_all_irqs(dpu_kms);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	spin_lock_init(&dpu_kms->irq_obj.cb_lock);
+
+	/* Create irq callbacks for all possible irq_idx */
+	dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+	dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
+			sizeof(struct list_head), GFP_KERNEL);
+	dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
+		INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
+		atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
+		atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
+	}
+}
+
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
+{
+	return 0;
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	} else if (!dpu_kms->dev) {
+		DPU_ERROR("invalid drm device\n");
+		return;
+	} else if (!dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid device private\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
+		if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
+				!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+	dpu_clear_all_irqs(dpu_kms);
+	dpu_disable_all_irqs(dpu_kms);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	kfree(dpu_kms->irq_obj.irq_cb_tbl);
+	kfree(dpu_kms->irq_obj.enable_counts);
+	kfree(dpu_kms->irq_obj.irq_counts);
+	dpu_kms->irq_obj.irq_cb_tbl = NULL;
+	dpu_kms->irq_obj.enable_counts = NULL;
+	dpu_kms->irq_obj.irq_counts = NULL;
+	dpu_kms->irq_obj.total_irqs = 0;
+}
+
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+{
+	/*
+	 * Read the interrupt status from all sources. The statuses are
+	 * stored within hw_intr, and the registers are cleared after
+	 * reading. An individual interrupt status bit is only stored
+	 * if that interrupt is enabled.
+	 */
+	dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
+
+	/*
+	 * Dispatch to the HW driver to look up the interrupts that have
+	 * fired. When a matching interrupt is located, the HW driver calls
+	 * dpu_core_irq_callback_handler with the irq_idx from the lookup
+	 * table. dpu_core_irq_callback_handler invokes the registered
+	 * callbacks, then clears the interrupt status once they have
+	 * finished.
+	 */
+	dpu_kms->hw_intr->ops.dispatch_irqs(
+			dpu_kms->hw_intr,
+			dpu_core_irq_callback_handler,
+			dpu_kms);
+
+	return IRQ_HANDLED;
+}
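The enable_counts[] handling above is per-IRQ reference counting: the
hardware line is only touched on the 0->1 and 1->0 transitions, so nested
enable/disable pairs from different clients compose correctly. A minimal
sketch of the idiom:

	static int ref_enable(atomic_t *count, int (*hw_enable)(void))
	{
		if (atomic_inc_return(count) == 1)
			return hw_enable();	/* first user turns the line on */
		return 0;
	}

	static int ref_disable(atomic_t *count, int (*hw_disable)(void))
	{
		if (atomic_dec_return(count) == 0)
			return hw_disable();	/* last user turns it off */
		return 0;
	}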
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 0000000..5e98bba
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		none
+ */
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		0 if success; error code otherwise
+ */
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		none
+ */
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_idx_lookup - IRQ helper function for looking up irq_idx in
+ *                      the HW interrupt mapping table.
+ * @dpu_kms:		DPU handle
+ * @intr_type:		DPU HW interrupt type for lookup
+ * @instance_idx:	DPU HW block instance defined in dpu_hw_mdss.h
+ * @return:		irq_idx, or -EINVAL when the lookup fails
+ */
+int dpu_core_irq_idx_lookup(
+		struct dpu_kms *dpu_kms,
+		enum dpu_intr_type intr_type,
+		uint32_t instance_idx);
+
+/**
+ * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @dpu_kms:		DPU handle
+ * @irq_idxs:		Array of irq index
+ * @irq_count:		Number of irq_idx provided in the array
+ * @return:		0 for success enabling IRQ, otherwise failure
+ *
+ * This function increments the count on each enable and decrements it on
+ * each disable.  The interrupt is enabled if the count is 0 before the
+ * increment.
+ */
+int dpu_core_irq_enable(
+		struct dpu_kms *dpu_kms,
+		int *irq_idxs,
+		uint32_t irq_count);
+
+/**
+ * dpu_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @dpu_kms:		DPU handle
+ * @irq_idxs:		Array of irq index
+ * @irq_count:		Number of irq_idx provided in the array
+ * @return:		0 for success disabling IRQ, otherwise failure
+ *
+ * This function increments the count on each enable and decrements it on
+ * each disable.  The interrupt is disabled if the count is 0 after the
+ * decrement.
+ */
+int dpu_core_irq_disable(
+		struct dpu_kms *dpu_kms,
+		int *irq_idxs,
+		uint32_t irq_count);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		irq index
+ * @clear:		True to clear the irq after read
+ * @return:		non-zero if an irq was detected; zero otherwise
+ */
+u32 dpu_core_irq_read(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		bool clear);
+
+/**
+ * dpu_core_irq_register_callback - register a callback function for an IRQ
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		irq index
+ * @irq_cb:		IRQ callback structure, containing the callback
+ *			function and argument. The structure must remain
+ *			valid until it is unregistered; passing NULL is
+ *			rejected with -EINVAL.
+ * @return:		0 on successful registration, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_core_irq_unregister_callback - unregister a callback function for an IRQ
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		irq index
+ * @irq_cb:		IRQ callback structure, containing the callback
+ *			function and argument. This must match the structure
+ *			that was passed at registration; passing NULL is
+ *			rejected with -EINVAL.
+ * @return:		0 on successful unregistration, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_unregister_callback(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ * @return: 0 on success
+ */
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent);
+
+/**
+ * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
+ * @dpu_kms: pointer to kms
+ */
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
+
+#endif /* __DPU_CORE_IRQ_H__ */
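Typical client usage of this API, as a sketch (error handling trimmed;
the interrupt type DPU_IRQ_TYPE_VBLANK and the callback are hypothetical
stand-ins, not names guaranteed by dpu_hw_interrupts.h):

	static void my_handler(void *arg, int irq_idx)
	{
		/* invoked from dpu_core_irq_callback_handler */
	}

	static struct dpu_irq_callback my_cb = {
		.func = my_handler,
	};

	int irq_idx = dpu_core_irq_idx_lookup(dpu_kms, DPU_IRQ_TYPE_VBLANK, 0);

	if (irq_idx >= 0) {
		/* required: registration does list_del_init() on the node */
		INIT_LIST_HEAD(&my_cb.list);
		dpu_core_irq_register_callback(dpu_kms, irq_idx, &my_cb);
		dpu_core_irq_enable(dpu_kms, &irq_idx, 1);
	}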
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 0000000..41c5191
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,637 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+#define DPU_PERF_MODE_STRING_SIZE	128
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ */
+enum dpu_perf_mode {
+	DPU_PERF_MODE_NORMAL,
+	DPU_PERF_MODE_MINIMUM,
+	DPU_PERF_MODE_FIXED,
+	DPU_PERF_MODE_MAX
+};
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv;
+
+	if (!crtc->dev || !crtc->dev->dev_private) {
+		DPU_ERROR("invalid device\n");
+		return NULL;
+	}
+
+	priv = crtc->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid kms\n");
+		return NULL;
+	}
+
+	return to_dpu_kms(priv->kms);
+}
+
+static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+	return dpu_crtc_is_enabled(crtc);
+}
+
+static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+	struct drm_crtc *tmp_crtc;
+	bool intf_connected = false;
+
+	if (!crtc)
+		goto end;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+				_dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+			DPU_DEBUG("video interface connected crtc:%d\n",
+				tmp_crtc->base.id);
+			intf_connected = true;
+			goto end;
+		}
+	}
+
+end:
+	return intf_connected;
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+		struct drm_crtc *crtc,
+		struct drm_crtc_state *state,
+		struct dpu_core_perf_params *perf)
+{
+	struct dpu_crtc_state *dpu_cstate;
+	int i;
+
+	if (!kms || !kms->catalog || !crtc || !state || !perf) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	dpu_cstate = to_dpu_crtc_state(state);
+	memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+	if (!dpu_cstate->bw_control) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+					1000ULL;
+			perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+		}
+		perf->core_clk_rate = kms->perf.max_core_clk_rate;
+	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = 0;
+			perf->max_per_pipe_ib[i] = 0;
+		}
+		perf->core_clk_rate = 0;
+	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+			perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+		}
+		perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+	}
+
+	DPU_DEBUG(
+		"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
+			crtc->base.id, perf->core_clk_rate,
+			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
+			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	u32 bw, threshold;
+	u64 bw_sum_of_intfs = 0;
+	enum dpu_crtc_client_type curr_client_type;
+	bool is_video_mode;
+	struct dpu_crtc_state *dpu_cstate;
+	struct drm_crtc *tmp_crtc;
+	struct dpu_kms *kms;
+	int i;
+
+	if (!crtc || !state) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	kms = _dpu_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DPU_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	/* we only need bandwidth check on real-time clients (interfaces) */
+	if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+		return 0;
+
+	dpu_cstate = to_dpu_crtc_state(state);
+
+	/* obtain new values */
+	_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+	for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+			i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
+		curr_client_type = dpu_crtc_get_client_type(crtc);
+
+		drm_for_each_crtc(tmp_crtc, crtc->dev) {
+			if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+			    (dpu_crtc_get_client_type(tmp_crtc) ==
+					    curr_client_type) &&
+			    (tmp_crtc != crtc)) {
+				struct dpu_crtc_state *tmp_cstate =
+					to_dpu_crtc_state(tmp_crtc->state);
+
+				DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+					tmp_crtc->base.id,
+					tmp_cstate->new_perf.bw_ctl[i],
+					tmp_cstate->bw_control);
+				/*
+				 * For the bw check, only use the bw if the
+				 * atomic property has already been set
+				 */
+				if (tmp_cstate->bw_control)
+					bw_sum_of_intfs +=
+						tmp_cstate->new_perf.bw_ctl[i];
+			}
+		}
+
+		/* convert bandwidth to kb */
+		bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+		DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+
+		is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+		threshold = (is_video_mode ||
+			_dpu_core_video_mode_intf_connected(crtc)) ?
+			kms->catalog->perf.max_bw_low :
+			kms->catalog->perf.max_bw_high;
+
+		DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+
+		if (!dpu_cstate->bw_control) {
+			DPU_DEBUG("bypass bandwidth check\n");
+		} else if (!threshold) {
+			DPU_ERROR("no bandwidth limits specified\n");
+			return -E2BIG;
+		} else if (bw > threshold) {
+			DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+					threshold);
+			return -E2BIG;
+		}
+	}
+
+	return 0;
+}
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+		struct drm_crtc *crtc, u32 bus_id)
+{
+	struct dpu_core_perf_params perf = { { 0 } };
+	enum dpu_crtc_client_type curr_client_type
+					= dpu_crtc_get_client_type(crtc);
+	struct drm_crtc *tmp_crtc;
+	struct dpu_crtc_state *dpu_cstate;
+	int ret = 0;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+			curr_client_type ==
+				dpu_crtc_get_client_type(tmp_crtc)) {
+			dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+			perf.max_per_pipe_ib[bus_id] =
+				max(perf.max_per_pipe_ib[bus_id],
+				dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
+
+			DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+				tmp_crtc->base.id, bus_id,
+				dpu_cstate->new_perf.bw_ctl[bus_id]);
+		}
+	}
+	return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * Checks a state variable for the crtc: if all pending commit requests
+ * are done, no more bandwidth is needed and the bandwidth request is
+ * released.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+	struct drm_crtc *tmp_crtc;
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *dpu_cstate;
+	struct dpu_kms *kms;
+	int i;
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+
+	kms = _dpu_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DPU_ERROR("invalid kms\n");
+		return;
+	}
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+	/* only do this for command mode rt client */
+	if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+		return;
+
+	/*
+	 * If a video interface is present, the cmd panel's bandwidth
+	 * cannot be released.
+	 */
+	if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
+		drm_for_each_crtc(tmp_crtc, crtc->dev) {
+			if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+				dpu_crtc_get_intf_mode(tmp_crtc) ==
+						INTF_MODE_VIDEO)
+				return;
+		}
+
+	/* Release the bandwidth */
+	if (kms->perf.enable_bw_release) {
+		trace_dpu_cmd_release_bw(crtc->base.id);
+		DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			dpu_crtc->cur_perf.bw_ctl[i] = 0;
+			_dpu_core_perf_crtc_update_bus(kms, crtc, i);
+		}
+	}
+}
+
+static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
+{
+	struct dss_clk *core_clk = kms->perf.core_clk;
+
+	if (core_clk->max_rate && (rate > core_clk->max_rate))
+		rate = core_clk->max_rate;
+
+	core_clk->rate = rate;
+	return msm_dss_clk_set_rate(core_clk, 1);
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+	u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+	struct drm_crtc *crtc;
+	struct dpu_crtc_state *dpu_cstate;
+
+	drm_for_each_crtc(crtc, kms->dev) {
+		if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+			dpu_cstate = to_dpu_crtc_state(crtc->state);
+			clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+							clk_rate);
+			clk_rate = clk_round_rate(kms->perf.core_clk->clk,
+					clk_rate);
+		}
+	}
+
+	if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+		clk_rate = kms->perf.fix_core_clk_rate;
+
+	DPU_DEBUG("clk:%llu\n", clk_rate);
+
+	return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+		int params_changed, bool stop_req)
+{
+	struct dpu_core_perf_params *new, *old;
+	int update_bus = 0, update_clk = 0;
+	u64 clk_rate = 0;
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *dpu_cstate;
+	int i;
+	struct msm_drm_private *priv;
+	struct dpu_kms *kms;
+	int ret;
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	kms = _dpu_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DPU_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+	priv = kms->dev->dev_private;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+	DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+			crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+	old = &dpu_crtc->cur_perf;
+	new = &dpu_cstate->new_perf;
+
+	if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			/*
+			 * Cases for a bus bandwidth update:
+			 * 1. the new "ab or ib" vote is higher than the
+			 *    current vote, for an update request.
+			 * 2. the new "ab or ib" vote is lower than the
+			 *    current vote, at the end of a commit or stop.
+			 */
+			if ((params_changed && ((new->bw_ctl[i] >
+						old->bw_ctl[i]) ||
+				  (new->max_per_pipe_ib[i] >
+						old->max_per_pipe_ib[i]))) ||
+			    (!params_changed && ((new->bw_ctl[i] <
+						old->bw_ctl[i]) ||
+				  (new->max_per_pipe_ib[i] <
+						old->max_per_pipe_ib[i])))) {
+				DPU_DEBUG(
+					"crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+					crtc->base.id, params_changed,
+					new->bw_ctl[i], old->bw_ctl[i]);
+				old->bw_ctl[i] = new->bw_ctl[i];
+				old->max_per_pipe_ib[i] =
+						new->max_per_pipe_ib[i];
+				update_bus |= BIT(i);
+			}
+		}
+
+		if ((params_changed &&
+				(new->core_clk_rate > old->core_clk_rate)) ||
+				(!params_changed &&
+				(new->core_clk_rate < old->core_clk_rate))) {
+			old->core_clk_rate = new->core_clk_rate;
+			update_clk = 1;
+		}
+	} else {
+		DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+		memset(old, 0, sizeof(*old));
+		memset(new, 0, sizeof(*new));
+		update_bus = ~0;
+		update_clk = 1;
+	}
+	trace_dpu_perf_crtc_update(crtc->base.id,
+				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+				new->core_clk_rate, stop_req,
+				update_bus, update_clk);
+
+	for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		if (update_bus & BIT(i)) {
+			ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+			if (ret) {
+				DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
+					  crtc->base.id, i);
+				return ret;
+			}
+		}
+	}
+
+	/*
+	 * Update the clock after bandwidth vote to ensure
+	 * bandwidth is available before clock rate is increased.
+	 */
+	if (update_clk) {
+		clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+		trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+
+		ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
+		if (ret) {
+			DPU_ERROR("failed to set %s clock rate %llu\n",
+					kms->perf.core_clk->clk_name, clk_rate);
+			return ret;
+		}
+
+		kms->perf.core_clk_rate = clk_rate;
+		DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dpu_core_perf *perf = file->private_data;
+	struct dpu_perf_cfg *cfg;
+	u32 perf_mode = 0;
+	char buf[10];
+
+	if (!perf)
+		return -ENODEV;
+
+	cfg = &perf->catalog->perf;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtouint(buf, 0, &perf_mode))
+		return -EFAULT;
+
+	if (perf_mode >= DPU_PERF_MODE_MAX)
+		return -EFAULT;
+
+	if (perf_mode == DPU_PERF_MODE_FIXED) {
+		DRM_INFO("fix performance mode\n");
+	} else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+		/* run the driver with max clk and BW vote */
+		perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+		perf->perf_tune.min_bus_vote =
+				(u64) cfg->max_bw_high * 1000;
+		DRM_INFO("minimum performance mode\n");
+	} else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+		/* reset the perf tune params to 0 */
+		perf->perf_tune.min_core_clk = 0;
+		perf->perf_tune.min_bus_vote = 0;
+		DRM_INFO("normal performance mode\n");
+	}
+	perf->perf_tune.mode = perf_mode;
+
+	return count;
+}
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct dpu_core_perf *perf = file->private_data;
+	int len = 0;
+	char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+
+	if (!perf)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(buf, sizeof(buf),
+			"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+			perf->perf_tune.mode,
+			perf->perf_tune.min_core_clk,
+			perf->perf_tune.min_bus_vote);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+	.open = simple_open,
+	.read = _dpu_core_perf_mode_read,
+	.write = _dpu_core_perf_mode_write,
+};
+
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+	debugfs_remove_recursive(perf->debugfs_root);
+	perf->debugfs_root = NULL;
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+		struct dentry *parent)
+{
+	struct dpu_mdss_cfg *catalog = perf->catalog;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	priv = perf->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+	if (!perf->debugfs_root) {
+		DPU_ERROR("failed to create core perf debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+			&perf->max_core_clk_rate);
+	debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+			&perf->core_clk_rate);
+	debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+			(u32 *)&perf->enable_bw_release);
+	debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.max_bw_low);
+	debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.max_bw_high);
+	debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.min_core_ib);
+	debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.min_llcc_ib);
+	debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.min_dram_ib);
+	debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+			(u32 *)perf, &dpu_core_perf_mode_fops);
+	debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+			&perf->fix_core_clk_rate);
+	debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+			&perf->fix_core_ib_vote);
+	debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+			&perf->fix_core_ab_vote);
+
+	return 0;
+}
+#else
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+		struct dentry *parent)
+{
+	return 0;
+}
+#endif
+
+void dpu_core_perf_destroy(struct dpu_core_perf *perf)
+{
+	if (!perf) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	dpu_core_perf_debugfs_destroy(perf);
+	perf->max_core_clk_rate = 0;
+	perf->core_clk = NULL;
+	perf->phandle = NULL;
+	perf->catalog = NULL;
+	perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+		struct drm_device *dev,
+		struct dpu_mdss_cfg *catalog,
+		struct dpu_power_handle *phandle,
+		struct dss_clk *core_clk)
+{
+	perf->dev = dev;
+	perf->catalog = catalog;
+	perf->phandle = phandle;
+	perf->core_clk = core_clk;
+
+	perf->max_core_clk_rate = core_clk->max_rate;
+	if (!perf->max_core_clk_rate) {
+		DPU_DEBUG("optional max core clk rate, use default\n");
+		perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+	}
+
+	return 0;
+}
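+
+/*
+ * Illustrative call sequence with hypothetical caller names, not part
+ * of this patch; a KMS driver would typically pair init and destroy
+ * around the device lifetime:
+ *
+ *	ret = dpu_core_perf_init(&kms->perf, dev, kms->catalog,
+ *			&kms->phandle, core_clk);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	dpu_core_perf_destroy(&kms->perf);
+ */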
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 0000000..fbcbe0c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+#include "dpu_power_handle.h"
+
+#define	DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE	412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+	u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
+	u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+	u64 core_clk_rate;
+};
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+	u32 mode;
+	u64 min_core_clk;
+	u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+	struct drm_device *dev;
+	struct dentry *debugfs_root;
+	struct dpu_mdss_cfg *catalog;
+	struct dpu_power_handle *phandle;
+	struct dss_clk *core_clk;
+	u64 core_clk_rate;
+	u64 max_core_clk_rate;
+	struct dpu_core_perf_tune perf_tune;
+	u32 enable_bw_release;
+	u64 fix_core_clk_rate;
+	u64 fix_core_ib_vote;
+	u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+		int params_changed, bool stop_req);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @core_clk: pointer to core clock
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+		struct drm_device *dev,
+		struct dpu_mdss_cfg *catalog,
+		struct dpu_power_handle *phandle,
+		struct dss_clk *core_clk);
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @perf: Pointer to core performance context
+ * @debugfs_parent: Pointer to parent debugfs
+ */
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+		struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 02eb073..4752f08 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -280,287 +280,6 @@
 	mutex_unlock(rp_lock);
 }
 
-/**
- * _dpu_crtc_rp_add_no_lock - add given resource to resource pool without lock
- * @rp: Pointer to original resource pool
- * @type: Resource type
- * @tag: Search tag for given resource
- * @val: Resource handle
- * @ops: Resource callback operations
- * return: 0 if success; error code otherwise
- */
-static int _dpu_crtc_rp_add_no_lock(struct dpu_crtc_respool *rp, u32 type,
-		u64 tag, void *val, struct dpu_crtc_res_ops *ops)
-{
-	struct dpu_crtc_res *res;
-	struct drm_crtc *crtc;
-
-	if (!rp || !ops) {
-		DPU_ERROR("invalid resource pool/ops\n");
-		return -EINVAL;
-	}
-
-	crtc = _dpu_crtc_rp_to_crtc(rp);
-	if (!crtc) {
-		DPU_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-
-	list_for_each_entry(res, &rp->res_list, list) {
-		if (res->type != type || res->tag != tag)
-			continue;
-		DPU_ERROR("crtc%d.%u already exist res:0x%x/0x%llx/%pK/%d\n",
-				crtc->base.id, rp->sequence_id,
-				res->type, res->tag, res->val,
-				atomic_read(&res->refcount));
-		return -EEXIST;
-	}
-	res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&res->list);
-	atomic_set(&res->refcount, 1);
-	res->type = type;
-	res->tag = tag;
-	res->val = val;
-	res->ops = *ops;
-	list_add_tail(&res->list, &rp->res_list);
-	DPU_DEBUG("crtc%d.%u added res:0x%x/0x%llx\n",
-			crtc->base.id, rp->sequence_id, type, tag);
-	return 0;
-}
-
-/**
- * _dpu_crtc_rp_add - add given resource to resource pool
- * @rp: Pointer to original resource pool
- * @type: Resource type
- * @tag: Search tag for given resource
- * @val: Resource handle
- * @ops: Resource callback operations
- * return: 0 if success; error code otherwise
- */
-static int _dpu_crtc_rp_add(struct dpu_crtc_respool *rp, u32 type, u64 tag,
-		void *val, struct dpu_crtc_res_ops *ops)
-{
-	int rc;
-
-	if (!rp) {
-		DPU_ERROR("invalid resource pool\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(rp->rp_lock);
-	rc = _dpu_crtc_rp_add_no_lock(rp, type, tag, val, ops);
-	mutex_unlock(rp->rp_lock);
-	return rc;
-}
-
-/**
- * _dpu_crtc_rp_get - lookup the resource from given resource pool and obtain
- *	if available; otherwise, obtain resource from global pool
- * @rp: Pointer to original resource pool
- * @type: Resource type
- * @tag:  Search tag for given resource
- * return: Resource handle if success; pointer error or null otherwise
- */
-static void *_dpu_crtc_rp_get(struct dpu_crtc_respool *rp, u32 type, u64 tag)
-{
-	struct dpu_crtc_respool *old_rp;
-	struct dpu_crtc_res *res;
-	void *val = NULL;
-	int rc;
-	struct drm_crtc *crtc;
-
-	if (!rp) {
-		DPU_ERROR("invalid resource pool\n");
-		return NULL;
-	}
-
-	crtc = _dpu_crtc_rp_to_crtc(rp);
-	if (!crtc) {
-		DPU_ERROR("invalid crtc\n");
-		return NULL;
-	}
-
-	mutex_lock(rp->rp_lock);
-	list_for_each_entry(res, &rp->res_list, list) {
-		if (res->type != type || res->tag != tag)
-			continue;
-		DPU_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
-				crtc->base.id, rp->sequence_id,
-				res->type, res->tag, res->val,
-				atomic_read(&res->refcount));
-		atomic_inc(&res->refcount);
-		res->flags &= ~DPU_CRTC_RES_FLAG_FREE;
-		mutex_unlock(rp->rp_lock);
-		return res->val;
-	}
-	list_for_each_entry(res, &rp->res_list, list) {
-		if (res->type != type || !(res->flags & DPU_CRTC_RES_FLAG_FREE))
-			continue;
-		DPU_DEBUG("crtc%d.%u retag res:0x%x/0x%llx/%pK/%d\n",
-				crtc->base.id, rp->sequence_id,
-				res->type, res->tag, res->val,
-				atomic_read(&res->refcount));
-		atomic_inc(&res->refcount);
-		res->tag = tag;
-		res->flags &= ~DPU_CRTC_RES_FLAG_FREE;
-		mutex_unlock(rp->rp_lock);
-		return res->val;
-	}
-	/* not in this rp, try to grab from global pool */
-	if (rp->ops.get)
-		val = rp->ops.get(NULL, type, -1);
-	if (!IS_ERR_OR_NULL(val))
-		goto add_res;
-	/*
-	 * Search older resource pools for hw blk with matching type,
-	 * necessary when resource is being used by this object,
-	 * but in previous states not yet cleaned up.
-	 *
-	 * This enables searching of all resources currently owned
-	 * by this crtc even though the resource might not be used
-	 * in the current atomic state. This allows those resources
-	 * to be re-acquired by the new atomic state immediately
-	 * without waiting for the resources to be fully released.
-	 */
-	else if (IS_ERR_OR_NULL(val) && (type < DPU_HW_BLK_MAX)) {
-		list_for_each_entry(old_rp, rp->rp_head, rp_list) {
-			if (old_rp == rp)
-				continue;
-
-			list_for_each_entry(res, &old_rp->res_list, list) {
-				if (res->type != type)
-					continue;
-				DRM_DEBUG_KMS("crtc%d.%u in crtc%d.%d\n",
-					      crtc->base.id, rp->sequence_id,
-					      crtc->base.id,
-					      old_rp->sequence_id);
-				if (res->ops.get)
-					res->ops.get(res->val, 0, -1);
-				val = res->val;
-				break;
-			}
-
-			if (!IS_ERR_OR_NULL(val))
-				break;
-		}
-	}
-	if (IS_ERR_OR_NULL(val)) {
-		DPU_DEBUG("crtc%d.%u failed to get res:0x%x//\n",
-				crtc->base.id, rp->sequence_id, type);
-		mutex_unlock(rp->rp_lock);
-		return NULL;
-	}
-add_res:
-	rc = _dpu_crtc_rp_add_no_lock(rp, type, tag, val, &rp->ops);
-	if (rc) {
-		DPU_ERROR("crtc%d.%u failed to add res:0x%x/0x%llx\n",
-				crtc->base.id, rp->sequence_id, type, tag);
-		if (rp->ops.put)
-			rp->ops.put(val);
-		val = NULL;
-	}
-	mutex_unlock(rp->rp_lock);
-	return val;
-}
-
-/**
- * _dpu_crtc_rp_put - return given resource to resource pool
- * @rp: Pointer to original resource pool
- * @type: Resource type
- * @tag: Search tag for given resource
- * return: None
- */
-static void _dpu_crtc_rp_put(struct dpu_crtc_respool *rp, u32 type, u64 tag)
-{
-	struct dpu_crtc_res *res, *next;
-	struct drm_crtc *crtc;
-
-	if (!rp) {
-		DPU_ERROR("invalid resource pool\n");
-		return;
-	}
-
-	crtc = _dpu_crtc_rp_to_crtc(rp);
-	if (!crtc) {
-		DPU_ERROR("invalid crtc\n");
-		return;
-	}
-
-	mutex_lock(rp->rp_lock);
-	list_for_each_entry_safe(res, next, &rp->res_list, list) {
-		if (res->type != type || res->tag != tag)
-			continue;
-		DPU_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
-				crtc->base.id, rp->sequence_id,
-				res->type, res->tag, res->val,
-				atomic_read(&res->refcount));
-		if (res->flags & DPU_CRTC_RES_FLAG_FREE)
-			DPU_ERROR(
-				"crtc%d.%u already free res:0x%x/0x%llx/%pK/%d\n",
-					crtc->base.id, rp->sequence_id,
-					res->type, res->tag, res->val,
-					atomic_read(&res->refcount));
-		else if (atomic_dec_return(&res->refcount) == 0)
-			res->flags |= DPU_CRTC_RES_FLAG_FREE;
-
-		mutex_unlock(rp->rp_lock);
-		return;
-	}
-	DPU_ERROR("crtc%d.%u not found res:0x%x/0x%llx\n",
-			crtc->base.id, rp->sequence_id, type, tag);
-	mutex_unlock(rp->rp_lock);
-}
-
-int dpu_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
-		void *val, struct dpu_crtc_res_ops *ops)
-{
-	struct dpu_crtc_respool *rp;
-
-	if (!state) {
-		DPU_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	rp = &to_dpu_crtc_state(state)->rp;
-	return _dpu_crtc_rp_add(rp, type, tag, val, ops);
-}
-
-void *dpu_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag)
-{
-	struct dpu_crtc_respool *rp;
-	void *val;
-
-	if (!state) {
-		DPU_ERROR("invalid parameters\n");
-		return NULL;
-	}
-
-	rp = &to_dpu_crtc_state(state)->rp;
-	val = _dpu_crtc_rp_get(rp, type, tag);
-	if (IS_ERR(val)) {
-		DPU_ERROR("failed to get res type:0x%x:0x%llx\n",
-				type, tag);
-		return NULL;
-	}
-
-	return val;
-}
-
-void dpu_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag)
-{
-	struct dpu_crtc_respool *rp;
-
-	if (!state) {
-		DPU_ERROR("invalid parameters\n");
-		return;
-	}
-
-	rp = &to_dpu_crtc_state(state)->rp;
-	_dpu_crtc_rp_put(rp, type, tag);
-}
-
 static void dpu_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
@@ -1604,6 +1323,7 @@
 	struct drm_encoder *encoder;
 	struct msm_drm_private *priv;
 	int ret;
+	unsigned long flags;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
 		DPU_ERROR("invalid crtc\n");
@@ -1619,6 +1339,9 @@
 	if (dpu_kms_is_suspend_state(crtc->dev))
 		_dpu_crtc_set_suspend(crtc, true);
 
+	/* Disable/save vblank irq handling */
+	drm_crtc_vblank_off(crtc);
+
 	mutex_lock(&dpu_crtc->crtc_lock);
 
 	/* wait for frame_event_done completion */
@@ -1656,7 +1379,6 @@
 		dpu_power_handle_unregister_event(dpu_crtc->phandle,
 				dpu_crtc->power_event);
 
-
 	memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
 	dpu_crtc->num_mixers = 0;
 	dpu_crtc->mixers_swapped = false;
@@ -1666,6 +1388,13 @@
 	cstate->bw_split_vote = false;
 
 	mutex_unlock(&dpu_crtc->crtc_lock);
+
+	if (crtc->state->event && !crtc->state->active) {
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	}
 }
 
 static void dpu_crtc_enable(struct drm_crtc *crtc,
@@ -1705,6 +1434,9 @@
 
 	mutex_unlock(&dpu_crtc->crtc_lock);
 
+	/* Enable/restore vblank irq handling */
+	drm_crtc_vblank_on(crtc);
+
 	dpu_crtc->power_event = dpu_power_handle_register_event(
 		dpu_crtc->phandle,
 		DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
@@ -1803,8 +1535,7 @@
 		cnt++;
 
 		dst = drm_plane_state_dest(pstate);
-		if (!drm_rect_intersect(&clip, &dst) ||
-		    !drm_rect_equals(&clip, &dst)) {
+		if (!drm_rect_intersect(&clip, &dst)) {
 			DPU_ERROR("invalid vertical/horizontal destination\n");
 			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
 				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
@@ -2349,97 +2080,6 @@
 	.atomic_flush = dpu_crtc_atomic_flush,
 };
 
-static void _dpu_crtc_event_cb(struct kthread_work *work)
-{
-	struct dpu_crtc_event *event;
-	struct dpu_crtc *dpu_crtc;
-	unsigned long irq_flags;
-
-	if (!work) {
-		DPU_ERROR("invalid work item\n");
-		return;
-	}
-
-	event = container_of(work, struct dpu_crtc_event, kt_work);
-
-	/* set dpu_crtc to NULL for static work structures */
-	dpu_crtc = event->dpu_crtc;
-	if (!dpu_crtc)
-		return;
-
-	if (event->cb_func)
-		event->cb_func(&dpu_crtc->base, event->usr);
-
-	spin_lock_irqsave(&dpu_crtc->event_lock, irq_flags);
-	list_add_tail(&event->list, &dpu_crtc->event_free_list);
-	spin_unlock_irqrestore(&dpu_crtc->event_lock, irq_flags);
-}
-
-int dpu_crtc_event_queue(struct drm_crtc *crtc,
-		void (*func)(struct drm_crtc *crtc, void *usr), void *usr)
-{
-	unsigned long irq_flags;
-	struct dpu_crtc *dpu_crtc;
-	struct msm_drm_private *priv;
-	struct dpu_crtc_event *event = NULL;
-	u32 crtc_id;
-
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !func) {
-		DPU_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-	dpu_crtc = to_dpu_crtc(crtc);
-	priv = crtc->dev->dev_private;
-	crtc_id = drm_crtc_index(crtc);
-
-	/*
-	 * Obtain an event struct from the private cache. This event
-	 * queue may be called from ISR contexts, so use a private
-	 * cache to avoid calling any memory allocation functions.
-	 */
-	spin_lock_irqsave(&dpu_crtc->event_lock, irq_flags);
-	if (!list_empty(&dpu_crtc->event_free_list)) {
-		event = list_first_entry(&dpu_crtc->event_free_list,
-				struct dpu_crtc_event, list);
-		list_del_init(&event->list);
-	}
-	spin_unlock_irqrestore(&dpu_crtc->event_lock, irq_flags);
-
-	if (!event)
-		return -ENOMEM;
-
-	/* populate event node */
-	event->dpu_crtc = dpu_crtc;
-	event->cb_func = func;
-	event->usr = usr;
-
-	/* queue new event request */
-	kthread_init_work(&event->kt_work, _dpu_crtc_event_cb);
-	kthread_queue_work(&priv->event_thread[crtc_id].worker,
-			&event->kt_work);
-
-	return 0;
-}
-
-static int _dpu_crtc_init_events(struct dpu_crtc *dpu_crtc)
-{
-	int i, rc = 0;
-
-	if (!dpu_crtc) {
-		DPU_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-
-	spin_lock_init(&dpu_crtc->event_lock);
-
-	INIT_LIST_HEAD(&dpu_crtc->event_free_list);
-	for (i = 0; i < DPU_CRTC_MAX_EVENT_COUNT; ++i)
-		list_add_tail(&dpu_crtc->event_cache[i].list,
-				&dpu_crtc->event_free_list);
-
-	return rc;
-}
-
 /* initialize crtc */
 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
 {
@@ -2447,7 +2087,7 @@
 	struct dpu_crtc *dpu_crtc = NULL;
 	struct msm_drm_private *priv = NULL;
 	struct dpu_kms *kms = NULL;
-	int i, rc;
+	int i;
 
 	priv = dev->dev_private;
 	kms = to_dpu_kms(priv->kms);
@@ -2487,12 +2127,7 @@
 	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
 
 	/* initialize event handling */
-	rc = _dpu_crtc_init_events(dpu_crtc);
-	if (rc) {
-		drm_crtc_cleanup(crtc);
-		kfree(dpu_crtc);
-		return ERR_PTR(rc);
-	}
+	spin_lock_init(&dpu_crtc->event_lock);
 
 	dpu_crtc->phandle = &kms->phandle;
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644
index 0000000..e87109e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+#include "dpu_hw_blk.h"
+
+#define DPU_CRTC_NAME_SIZE	12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE	4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT:	RealTime client like video/cmd mode display
+ *              voting through apps rsc
+ * @NRT_CLIENT:	Non-RealTime client like WB display
+ *              voting through apps rsc
+ */
+enum dpu_crtc_client_type {
+	RT_CLIENT,
+	NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state:	smmu state
+ * @ATTACHED:	 all the context banks are attached.
+ * @DETACHED:	 all the context banks are detached.
+ * @ATTACH_ALL_REQ:	 transient state of attaching context banks.
+ * @DETACH_ALL_REQ:	 transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+	ATTACHED = 0,
+	DETACHED,
+	ATTACH_ALL_REQ,
+	DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum dpu_crtc_smmu_state_transition_type {
+	NONE,
+	PRE_COMMIT,
+	POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether an error occurred while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+	uint32_t state;
+	uint32_t transition_type;
+	uint32_t transition_error;
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm:	LM HW Driver context
+ * @hw_ctl:	CTL Path HW driver context
+ * @encoder:	Encoder attached to this lm & ctl
+ * @mixer_op_mode:	mixer blending operation mode
+ * @flush_mask:	mixer flush mask for ctl, mixer and pipe
+ */
+struct dpu_crtc_mixer {
+	struct dpu_hw_mixer *hw_lm;
+	struct dpu_hw_ctl *hw_ctl;
+	struct drm_encoder *encoder;
+	u32 mixer_op_mode;
+	u32 flush_mask;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work:	base work structure
+ * @crtc:	Pointer to crtc handling this event
+ * @list:	event list
+ * @ts:		timestamp at queue entry
+ * @event:	event identifier
+ */
+struct dpu_crtc_frame_event {
+	struct kthread_work work;
+	struct drm_crtc *crtc;
+	struct list_head list;
+	ktime_t ts;
+	u32 event;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT	16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base          : Base drm crtc structure
+ * @name          : ASCII description of this crtc
+ * @num_ctls      : Number of ctl paths in use
+ * @num_mixers    : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for left/right update
+ *                  especially in the case of DSC Merge.
+ * @mixers        : List of active mixers
+ * @event         : Pointer to last received drm vblank event. If there is a
+ *                  pending vblank event, this will be non-null.
+ * @vsync_count   : Running count of received vsync events
+ * @stage_cfg     : H/w mixer stage configuration
+ * @debugfs_root  : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count    : frame count between crtc enable and disable
+ * @vblank_cb_time  : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend         : whether or not a suspend operation is in progress
+ * @enabled       : whether the DPU CRTC is currently enabled. updated in the
+ *                  commit-thread, not state-swap time which is earlier, so
+ *                  safe to make decisions on during VBLANK on/off work
+ * @feature_list  : list of color processing features supported on a crtc
+ * @active_list   : list of color processing features that are active
+ * @dirty_list    : list of color processing features that are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @crtc_lock     : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events  : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock     : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp    : for frame_event_done synchronization
+ * @event_lock    : Spinlock around event handling code
+ * @misr_enable   : boolean flag indicating misr enable/disable status.
+ * @misr_frame_count  : misr frame count provided by client
+ * @misr_data     : store misr data before turning off the clocks.
+ * @phandle: Pointer to power handler
+ * @power_event   : registered power event handle
+ * @cur_perf      : current performance committed to clock/bandwidth driver
+ * @rp_lock       : serialization lock for resource pool
+ * @rp_head       : list of active resource pool
+ * @scl3_lut_cfg  : qseed3 lut config
+ */
+struct dpu_crtc {
+	struct drm_crtc base;
+	char name[DPU_CRTC_NAME_SIZE];
+
+	/* HW Resources reserved for the crtc */
+	u32 num_ctls;
+	u32 num_mixers;
+	bool mixers_swapped;
+	struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+	struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
+
+	struct drm_pending_vblank_event *event;
+	u32 vsync_count;
+
+	struct dpu_hw_stage_cfg stage_cfg;
+	struct dentry *debugfs_root;
+
+	u32 vblank_cb_count;
+	u64 play_count;
+	ktime_t vblank_cb_time;
+	bool vblank_requested;
+	bool suspend;
+	bool enabled;
+
+	struct list_head feature_list;
+	struct list_head active_list;
+	struct list_head dirty_list;
+	struct list_head ad_dirty;
+	struct list_head ad_active;
+
+	struct mutex crtc_lock;
+
+	atomic_t frame_pending;
+	struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+	struct list_head frame_event_list;
+	spinlock_t spin_lock;
+	struct completion frame_done_comp;
+
+	/* for handling internal event thread */
+	spinlock_t event_lock;
+	bool misr_enable;
+	u32 misr_frame_count;
+	u32 misr_data[CRTC_DUAL_MIXERS];
+
+	struct dpu_power_handle *phandle;
+	struct dpu_power_event *power_event;
+
+	struct dpu_core_perf_params cur_perf;
+
+	struct mutex rp_lock;
+	struct list_head rp_head;
+
+	struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
+
+/**
+ * struct dpu_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct dpu_crtc_res_ops {
+	void *(*get)(void *val, u32 type, u64 tag);
+	void (*put)(void *val);
+};
+
+#define DPU_CRTC_RES_FLAG_FREE		BIT(0)
+
+/**
+ * struct dpu_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct dpu_crtc_res {
+	struct list_head list;
+	u32 type;
+	u64 tag;
+	atomic_t refcount;
+	struct dpu_crtc_res_ops ops;
+	void *val;
+	u32 flags;
+};
+
+/**
+ * struct dpu_crtc_respool - crtc resource pool
+ * @rp_lock: pointer to serialization lock
+ * @rp_head: pointer to head of active resource pools of this crtc
+ * @rp_list: list of crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resource managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct dpu_crtc_respool {
+	struct mutex *rp_lock;
+	struct list_head *rp_head;
+	struct list_head rp_list;
+	u32 sequence_id;
+	struct list_head res_list;
+	struct dpu_crtc_res_ops ops;
+};
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @is_ppsplit    : Whether current topology requires PPSplit special handling
+ * @bw_control    : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
+ *                  Origin top left of CRTC.
+ * @property_state: Local storage for msm_prop properties
+ * @property_values: Current crtc property values
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ */
+struct dpu_crtc_state {
+	struct drm_crtc_state base;
+
+	bool bw_control;
+	bool bw_split_vote;
+
+	bool is_ppsplit;
+	struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+	uint64_t input_fence_timeout_ns;
+
+	struct dpu_core_perf_params new_perf;
+	struct dpu_crtc_respool rp;
+};
+
+#define to_dpu_crtc_state(x) \
+	container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_get_mixer_width - get the mixer width
+ * Mixer width will be the same as the panel width (divided by 2 for split)
+ */
+static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
+	struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+	u32 mixer_width;
+
+	if (!dpu_crtc || !cstate || !mode)
+		return 0;
+
+	mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+			mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
+
+	return mixer_width;
+}
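+
+/*
+ * Example: for a mode with hdisplay == 1920 and num_mixers ==
+ * CRTC_DUAL_MIXERS, each layer mixer covers 1920 / 2 == 960 pixels;
+ * with a single mixer it covers the full 1920.
+ */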
+
+/**
+ * dpu_crtc_get_mixer_height - get the mixer height
+ * Mixer height will be the same as the panel height
+ */
+static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
+		struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+	if (!dpu_crtc || !cstate || !mode)
+		return 0;
+
+	return mode->vdisplay;
+}
+
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+
+	if (!crtc)
+		return -EINVAL;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	return atomic_read(&dpu_crtc->frame_pending);
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * @Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that client is interested
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+		struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc client type (rt, nrt, etc.)
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+						struct drm_crtc *crtc)
+{
+	struct dpu_crtc_state *cstate =
+			crtc ? to_dpu_crtc_state(crtc->state) : NULL;
+
+	if (!cstate)
+		return NRT_CLIENT;
+
+	return RT_CLIENT;
+}
+
+/**
+ * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
+{
+	return crtc ? crtc->enabled : false;
+}
+
+#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
new file mode 100644
index 0000000..e741d26
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
@@ -0,0 +1,2393 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm_runtime.h>
+
+#include "dpu_dbg.h"
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+
+#define DEFAULT_DBGBUS_DPU	DPU_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT	DPU_DBG_DUMP_IN_MEM
+#define REG_BASE_NAME_LEN	80
+
+#define DBGBUS_FLAGS_DSPP	BIT(0)
+#define DBGBUS_DSPP_STATUS	0x34C
+
+#define DBGBUS_NAME_DPU		"dpu"
+#define DBGBUS_NAME_VBIF_RT	"vbif_rt"
+
+/* offsets from dpu top address for the debug buses */
+#define DBGBUS_SSPP0	0x188
+#define DBGBUS_AXI_INTF	0x194
+#define DBGBUS_SSPP1	0x298
+#define DBGBUS_DSPP	0x348
+#define DBGBUS_PERIPH	0x418
+
+#define TEST_MASK(id, tp)	(((id) << 4) | ((tp) << 1) | BIT(0))
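+/*
+ * e.g. TEST_MASK(50, 2) == (50 << 4) | (2 << 1) | BIT(0) == 0x325:
+ * the block id occupies bits 4 and above, the test point bits [3:1],
+ * and bit 0 enables the test point.
+ */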
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON			0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL	0x210
+#define MMSS_VBIF_TEST_BUS_OUT		0x230
+
+/* Vbif error info */
+#define MMSS_VBIF_PND_ERR		0x190
+#define MMSS_VBIF_SRC_ERR		0x194
+#define MMSS_VBIF_XIN_HALT_CTRL1	0x204
+#define MMSS_VBIF_ERR_INFO		0x1a0
+#define MMSS_VBIF_ERR_INFO_1		0x1a4
+#define MMSS_VBIF_CLIENT_NUM		14
+
+/**
+ * struct dpu_dbg_reg_base - register region base.
+ *	a region may have sub-ranges, which are used for dumping;
+ *	otherwise the whole region is dumped from base to max_offset
+ * @reg_base_head: head of this node
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len:  buffer length used for manual register dumping
+ * @cb: callback for external dump function, null if not defined
+ * @cb_ptr: private pointer to callback function
+ */
+struct dpu_dbg_reg_base {
+	struct list_head reg_base_head;
+	char name[REG_BASE_NAME_LEN];
+	void __iomem *base;
+	size_t off;
+	size_t cnt;
+	size_t max_offset;
+	char *buf;
+	size_t buf_len;
+	void (*cb)(void *ptr);
+	void *cb_ptr;
+};
+
+struct dpu_debug_bus_entry {
+	u32 wr_addr;
+	u32 block_id;
+	u32 test_id;
+	void (*analyzer)(void __iomem *mem_base,
+				struct dpu_debug_bus_entry *entry, u32 val);
+};
+
+struct vbif_debug_bus_entry {
+	u32 disable_bus_addr;
+	u32 block_bus_addr;
+	u32 bit_offset;
+	u32 block_cnt;
+	u32 test_pnt_start;
+	u32 test_pnt_cnt;
+};
+
+struct dpu_dbg_debug_bus_common {
+	char *name;
+	u32 enable_mask;
+	bool include_in_deferred_work;
+	u32 flags;
+	u32 entries_size;
+	u32 *dumped_content;
+};
+
+struct dpu_dbg_dpu_debug_bus {
+	struct dpu_dbg_debug_bus_common cmn;
+	struct dpu_debug_bus_entry *entries;
+	u32 top_blk_off;
+};
+
+struct dpu_dbg_vbif_debug_bus {
+	struct dpu_dbg_debug_bus_common cmn;
+	struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct dpu_dbg_base - global dpu debug base structure
+ * @reg_base_list: list of register dumping regions
+ * @dev: device pointer
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @dbgbus_dpu: debug bus structure for the dpu
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ */
+static struct dpu_dbg_base {
+	struct list_head reg_base_list;
+	struct device *dev;
+
+	struct work_struct dump_work;
+
+	struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
+	struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
+} dpu_dbg_base;
+
+static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	if (!(val & 0xFFF000))
+		return;
+
+	dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	if (!(val & BIT(15)))
+		return;
+
+	dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	if (!(val & BIT(15)))
+		return;
+
+	dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
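+
+/*
+ * Entries in the tables below may name one of the analyzer callbacks
+ * above as an optional fourth initializer; entries that omit it are
+ * captured without extra decoding. For example:
+ *
+ *	{ DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+ */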
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
+
+	/* Unpack 0 sspp 0 */
+	{ DBGBUS_SSPP0, 50, 2 },
+	{ DBGBUS_SSPP0, 60, 2 },
+	{ DBGBUS_SSPP0, 70, 2 },
+	{ DBGBUS_SSPP0, 85, 2 },
+
+	/* Unpack 0 sspp 1 */
+	{ DBGBUS_SSPP1, 50, 2 },
+	{ DBGBUS_SSPP1, 60, 2 },
+	{ DBGBUS_SSPP1, 70, 2 },
+	{ DBGBUS_SSPP1, 85, 2 },
+
+	/* scheduler */
+	{ DBGBUS_DSPP, 130, 0 },
+	{ DBGBUS_DSPP, 130, 1 },
+	{ DBGBUS_DSPP, 130, 2 },
+	{ DBGBUS_DSPP, 130, 3 },
+	{ DBGBUS_DSPP, 130, 4 },
+	{ DBGBUS_DSPP, 130, 5 },
+
+	/* qseed */
+	{ DBGBUS_SSPP0, 6, 0},
+	{ DBGBUS_SSPP0, 6, 1},
+	{ DBGBUS_SSPP0, 26, 0},
+	{ DBGBUS_SSPP0, 26, 1},
+	{ DBGBUS_SSPP1, 6, 0},
+	{ DBGBUS_SSPP1, 6, 1},
+	{ DBGBUS_SSPP1, 26, 0},
+	{ DBGBUS_SSPP1, 26, 1},
+
+	/* scale */
+	{ DBGBUS_SSPP0, 16, 0},
+	{ DBGBUS_SSPP0, 16, 1},
+	{ DBGBUS_SSPP0, 36, 0},
+	{ DBGBUS_SSPP0, 36, 1},
+	{ DBGBUS_SSPP1, 16, 0},
+	{ DBGBUS_SSPP1, 16, 1},
+	{ DBGBUS_SSPP1, 36, 0},
+	{ DBGBUS_SSPP1, 36, 1},
+
+	/* fetch sspp0 */
+
+	/* vig 0 */
+	{ DBGBUS_SSPP0, 0, 0 },
+	{ DBGBUS_SSPP0, 0, 1 },
+	{ DBGBUS_SSPP0, 0, 2 },
+	{ DBGBUS_SSPP0, 0, 3 },
+	{ DBGBUS_SSPP0, 0, 4 },
+	{ DBGBUS_SSPP0, 0, 5 },
+	{ DBGBUS_SSPP0, 0, 6 },
+	{ DBGBUS_SSPP0, 0, 7 },
+
+	{ DBGBUS_SSPP0, 1, 0 },
+	{ DBGBUS_SSPP0, 1, 1 },
+	{ DBGBUS_SSPP0, 1, 2 },
+	{ DBGBUS_SSPP0, 1, 3 },
+	{ DBGBUS_SSPP0, 1, 4 },
+	{ DBGBUS_SSPP0, 1, 5 },
+	{ DBGBUS_SSPP0, 1, 6 },
+	{ DBGBUS_SSPP0, 1, 7 },
+
+	{ DBGBUS_SSPP0, 2, 0 },
+	{ DBGBUS_SSPP0, 2, 1 },
+	{ DBGBUS_SSPP0, 2, 2 },
+	{ DBGBUS_SSPP0, 2, 3 },
+	{ DBGBUS_SSPP0, 2, 4 },
+	{ DBGBUS_SSPP0, 2, 5 },
+	{ DBGBUS_SSPP0, 2, 6 },
+	{ DBGBUS_SSPP0, 2, 7 },
+
+	{ DBGBUS_SSPP0, 4, 0 },
+	{ DBGBUS_SSPP0, 4, 1 },
+	{ DBGBUS_SSPP0, 4, 2 },
+	{ DBGBUS_SSPP0, 4, 3 },
+	{ DBGBUS_SSPP0, 4, 4 },
+	{ DBGBUS_SSPP0, 4, 5 },
+	{ DBGBUS_SSPP0, 4, 6 },
+	{ DBGBUS_SSPP0, 4, 7 },
+
+	{ DBGBUS_SSPP0, 5, 0 },
+	{ DBGBUS_SSPP0, 5, 1 },
+	{ DBGBUS_SSPP0, 5, 2 },
+	{ DBGBUS_SSPP0, 5, 3 },
+	{ DBGBUS_SSPP0, 5, 4 },
+	{ DBGBUS_SSPP0, 5, 5 },
+	{ DBGBUS_SSPP0, 5, 6 },
+	{ DBGBUS_SSPP0, 5, 7 },
+
+	/* vig 2 */
+	{ DBGBUS_SSPP0, 20, 0 },
+	{ DBGBUS_SSPP0, 20, 1 },
+	{ DBGBUS_SSPP0, 20, 2 },
+	{ DBGBUS_SSPP0, 20, 3 },
+	{ DBGBUS_SSPP0, 20, 4 },
+	{ DBGBUS_SSPP0, 20, 5 },
+	{ DBGBUS_SSPP0, 20, 6 },
+	{ DBGBUS_SSPP0, 20, 7 },
+
+	{ DBGBUS_SSPP0, 21, 0 },
+	{ DBGBUS_SSPP0, 21, 1 },
+	{ DBGBUS_SSPP0, 21, 2 },
+	{ DBGBUS_SSPP0, 21, 3 },
+	{ DBGBUS_SSPP0, 21, 4 },
+	{ DBGBUS_SSPP0, 21, 5 },
+	{ DBGBUS_SSPP0, 21, 6 },
+	{ DBGBUS_SSPP0, 21, 7 },
+
+	{ DBGBUS_SSPP0, 22, 0 },
+	{ DBGBUS_SSPP0, 22, 1 },
+	{ DBGBUS_SSPP0, 22, 2 },
+	{ DBGBUS_SSPP0, 22, 3 },
+	{ DBGBUS_SSPP0, 22, 4 },
+	{ DBGBUS_SSPP0, 22, 5 },
+	{ DBGBUS_SSPP0, 22, 6 },
+	{ DBGBUS_SSPP0, 22, 7 },
+
+	{ DBGBUS_SSPP0, 24, 0 },
+	{ DBGBUS_SSPP0, 24, 1 },
+	{ DBGBUS_SSPP0, 24, 2 },
+	{ DBGBUS_SSPP0, 24, 3 },
+	{ DBGBUS_SSPP0, 24, 4 },
+	{ DBGBUS_SSPP0, 24, 5 },
+	{ DBGBUS_SSPP0, 24, 6 },
+	{ DBGBUS_SSPP0, 24, 7 },
+
+	{ DBGBUS_SSPP0, 25, 0 },
+	{ DBGBUS_SSPP0, 25, 1 },
+	{ DBGBUS_SSPP0, 25, 2 },
+	{ DBGBUS_SSPP0, 25, 3 },
+	{ DBGBUS_SSPP0, 25, 4 },
+	{ DBGBUS_SSPP0, 25, 5 },
+	{ DBGBUS_SSPP0, 25, 6 },
+	{ DBGBUS_SSPP0, 25, 7 },
+
+	/* dma 2 */
+	{ DBGBUS_SSPP0, 30, 0 },
+	{ DBGBUS_SSPP0, 30, 1 },
+	{ DBGBUS_SSPP0, 30, 2 },
+	{ DBGBUS_SSPP0, 30, 3 },
+	{ DBGBUS_SSPP0, 30, 4 },
+	{ DBGBUS_SSPP0, 30, 5 },
+	{ DBGBUS_SSPP0, 30, 6 },
+	{ DBGBUS_SSPP0, 30, 7 },
+
+	{ DBGBUS_SSPP0, 31, 0 },
+	{ DBGBUS_SSPP0, 31, 1 },
+	{ DBGBUS_SSPP0, 31, 2 },
+	{ DBGBUS_SSPP0, 31, 3 },
+	{ DBGBUS_SSPP0, 31, 4 },
+	{ DBGBUS_SSPP0, 31, 5 },
+	{ DBGBUS_SSPP0, 31, 6 },
+	{ DBGBUS_SSPP0, 31, 7 },
+
+	{ DBGBUS_SSPP0, 32, 0 },
+	{ DBGBUS_SSPP0, 32, 1 },
+	{ DBGBUS_SSPP0, 32, 2 },
+	{ DBGBUS_SSPP0, 32, 3 },
+	{ DBGBUS_SSPP0, 32, 4 },
+	{ DBGBUS_SSPP0, 32, 5 },
+	{ DBGBUS_SSPP0, 32, 6 },
+	{ DBGBUS_SSPP0, 32, 7 },
+
+	{ DBGBUS_SSPP0, 33, 0 },
+	{ DBGBUS_SSPP0, 33, 1 },
+	{ DBGBUS_SSPP0, 33, 2 },
+	{ DBGBUS_SSPP0, 33, 3 },
+	{ DBGBUS_SSPP0, 33, 4 },
+	{ DBGBUS_SSPP0, 33, 5 },
+	{ DBGBUS_SSPP0, 33, 6 },
+	{ DBGBUS_SSPP0, 33, 7 },
+
+	{ DBGBUS_SSPP0, 34, 0 },
+	{ DBGBUS_SSPP0, 34, 1 },
+	{ DBGBUS_SSPP0, 34, 2 },
+	{ DBGBUS_SSPP0, 34, 3 },
+	{ DBGBUS_SSPP0, 34, 4 },
+	{ DBGBUS_SSPP0, 34, 5 },
+	{ DBGBUS_SSPP0, 34, 6 },
+	{ DBGBUS_SSPP0, 34, 7 },
+
+	{ DBGBUS_SSPP0, 35, 0 },
+	{ DBGBUS_SSPP0, 35, 1 },
+	{ DBGBUS_SSPP0, 35, 2 },
+	{ DBGBUS_SSPP0, 35, 3 },
+
+	/* dma 0 */
+	{ DBGBUS_SSPP0, 40, 0 },
+	{ DBGBUS_SSPP0, 40, 1 },
+	{ DBGBUS_SSPP0, 40, 2 },
+	{ DBGBUS_SSPP0, 40, 3 },
+	{ DBGBUS_SSPP0, 40, 4 },
+	{ DBGBUS_SSPP0, 40, 5 },
+	{ DBGBUS_SSPP0, 40, 6 },
+	{ DBGBUS_SSPP0, 40, 7 },
+
+	{ DBGBUS_SSPP0, 41, 0 },
+	{ DBGBUS_SSPP0, 41, 1 },
+	{ DBGBUS_SSPP0, 41, 2 },
+	{ DBGBUS_SSPP0, 41, 3 },
+	{ DBGBUS_SSPP0, 41, 4 },
+	{ DBGBUS_SSPP0, 41, 5 },
+	{ DBGBUS_SSPP0, 41, 6 },
+	{ DBGBUS_SSPP0, 41, 7 },
+
+	{ DBGBUS_SSPP0, 42, 0 },
+	{ DBGBUS_SSPP0, 42, 1 },
+	{ DBGBUS_SSPP0, 42, 2 },
+	{ DBGBUS_SSPP0, 42, 3 },
+	{ DBGBUS_SSPP0, 42, 4 },
+	{ DBGBUS_SSPP0, 42, 5 },
+	{ DBGBUS_SSPP0, 42, 6 },
+	{ DBGBUS_SSPP0, 42, 7 },
+
+	{ DBGBUS_SSPP0, 44, 0 },
+	{ DBGBUS_SSPP0, 44, 1 },
+	{ DBGBUS_SSPP0, 44, 2 },
+	{ DBGBUS_SSPP0, 44, 3 },
+	{ DBGBUS_SSPP0, 44, 4 },
+	{ DBGBUS_SSPP0, 44, 5 },
+	{ DBGBUS_SSPP0, 44, 6 },
+	{ DBGBUS_SSPP0, 44, 7 },
+
+	{ DBGBUS_SSPP0, 45, 0 },
+	{ DBGBUS_SSPP0, 45, 1 },
+	{ DBGBUS_SSPP0, 45, 2 },
+	{ DBGBUS_SSPP0, 45, 3 },
+	{ DBGBUS_SSPP0, 45, 4 },
+	{ DBGBUS_SSPP0, 45, 5 },
+	{ DBGBUS_SSPP0, 45, 6 },
+	{ DBGBUS_SSPP0, 45, 7 },
+
+	/* fetch sspp1 */
+	/* vig 1 */
+	{ DBGBUS_SSPP1, 0, 0 },
+	{ DBGBUS_SSPP1, 0, 1 },
+	{ DBGBUS_SSPP1, 0, 2 },
+	{ DBGBUS_SSPP1, 0, 3 },
+	{ DBGBUS_SSPP1, 0, 4 },
+	{ DBGBUS_SSPP1, 0, 5 },
+	{ DBGBUS_SSPP1, 0, 6 },
+	{ DBGBUS_SSPP1, 0, 7 },
+
+	{ DBGBUS_SSPP1, 1, 0 },
+	{ DBGBUS_SSPP1, 1, 1 },
+	{ DBGBUS_SSPP1, 1, 2 },
+	{ DBGBUS_SSPP1, 1, 3 },
+	{ DBGBUS_SSPP1, 1, 4 },
+	{ DBGBUS_SSPP1, 1, 5 },
+	{ DBGBUS_SSPP1, 1, 6 },
+	{ DBGBUS_SSPP1, 1, 7 },
+
+	{ DBGBUS_SSPP1, 2, 0 },
+	{ DBGBUS_SSPP1, 2, 1 },
+	{ DBGBUS_SSPP1, 2, 2 },
+	{ DBGBUS_SSPP1, 2, 3 },
+	{ DBGBUS_SSPP1, 2, 4 },
+	{ DBGBUS_SSPP1, 2, 5 },
+	{ DBGBUS_SSPP1, 2, 6 },
+	{ DBGBUS_SSPP1, 2, 7 },
+
+	{ DBGBUS_SSPP1, 4, 0 },
+	{ DBGBUS_SSPP1, 4, 1 },
+	{ DBGBUS_SSPP1, 4, 2 },
+	{ DBGBUS_SSPP1, 4, 3 },
+	{ DBGBUS_SSPP1, 4, 4 },
+	{ DBGBUS_SSPP1, 4, 5 },
+	{ DBGBUS_SSPP1, 4, 6 },
+	{ DBGBUS_SSPP1, 4, 7 },
+
+	{ DBGBUS_SSPP1, 5, 0 },
+	{ DBGBUS_SSPP1, 5, 1 },
+	{ DBGBUS_SSPP1, 5, 2 },
+	{ DBGBUS_SSPP1, 5, 3 },
+	{ DBGBUS_SSPP1, 5, 4 },
+	{ DBGBUS_SSPP1, 5, 5 },
+	{ DBGBUS_SSPP1, 5, 6 },
+	{ DBGBUS_SSPP1, 5, 7 },
+
+	/* vig 3 */
+	{ DBGBUS_SSPP1, 20, 0 },
+	{ DBGBUS_SSPP1, 20, 1 },
+	{ DBGBUS_SSPP1, 20, 2 },
+	{ DBGBUS_SSPP1, 20, 3 },
+	{ DBGBUS_SSPP1, 20, 4 },
+	{ DBGBUS_SSPP1, 20, 5 },
+	{ DBGBUS_SSPP1, 20, 6 },
+	{ DBGBUS_SSPP1, 20, 7 },
+
+	{ DBGBUS_SSPP1, 21, 0 },
+	{ DBGBUS_SSPP1, 21, 1 },
+	{ DBGBUS_SSPP1, 21, 2 },
+	{ DBGBUS_SSPP1, 21, 3 },
+	{ DBGBUS_SSPP1, 21, 4 },
+	{ DBGBUS_SSPP1, 21, 5 },
+	{ DBGBUS_SSPP1, 21, 6 },
+	{ DBGBUS_SSPP1, 21, 7 },
+
+	{ DBGBUS_SSPP1, 22, 0 },
+	{ DBGBUS_SSPP1, 22, 1 },
+	{ DBGBUS_SSPP1, 22, 2 },
+	{ DBGBUS_SSPP1, 22, 3 },
+	{ DBGBUS_SSPP1, 22, 4 },
+	{ DBGBUS_SSPP1, 22, 5 },
+	{ DBGBUS_SSPP1, 22, 6 },
+	{ DBGBUS_SSPP1, 22, 7 },
+
+	{ DBGBUS_SSPP1, 24, 0 },
+	{ DBGBUS_SSPP1, 24, 1 },
+	{ DBGBUS_SSPP1, 24, 2 },
+	{ DBGBUS_SSPP1, 24, 3 },
+	{ DBGBUS_SSPP1, 24, 4 },
+	{ DBGBUS_SSPP1, 24, 5 },
+	{ DBGBUS_SSPP1, 24, 6 },
+	{ DBGBUS_SSPP1, 24, 7 },
+
+	{ DBGBUS_SSPP1, 25, 0 },
+	{ DBGBUS_SSPP1, 25, 1 },
+	{ DBGBUS_SSPP1, 25, 2 },
+	{ DBGBUS_SSPP1, 25, 3 },
+	{ DBGBUS_SSPP1, 25, 4 },
+	{ DBGBUS_SSPP1, 25, 5 },
+	{ DBGBUS_SSPP1, 25, 6 },
+	{ DBGBUS_SSPP1, 25, 7 },
+
+	/* dma 3 */
+	{ DBGBUS_SSPP1, 30, 0 },
+	{ DBGBUS_SSPP1, 30, 1 },
+	{ DBGBUS_SSPP1, 30, 2 },
+	{ DBGBUS_SSPP1, 30, 3 },
+	{ DBGBUS_SSPP1, 30, 4 },
+	{ DBGBUS_SSPP1, 30, 5 },
+	{ DBGBUS_SSPP1, 30, 6 },
+	{ DBGBUS_SSPP1, 30, 7 },
+
+	{ DBGBUS_SSPP1, 31, 0 },
+	{ DBGBUS_SSPP1, 31, 1 },
+	{ DBGBUS_SSPP1, 31, 2 },
+	{ DBGBUS_SSPP1, 31, 3 },
+	{ DBGBUS_SSPP1, 31, 4 },
+	{ DBGBUS_SSPP1, 31, 5 },
+	{ DBGBUS_SSPP1, 31, 6 },
+	{ DBGBUS_SSPP1, 31, 7 },
+
+	{ DBGBUS_SSPP1, 32, 0 },
+	{ DBGBUS_SSPP1, 32, 1 },
+	{ DBGBUS_SSPP1, 32, 2 },
+	{ DBGBUS_SSPP1, 32, 3 },
+	{ DBGBUS_SSPP1, 32, 4 },
+	{ DBGBUS_SSPP1, 32, 5 },
+	{ DBGBUS_SSPP1, 32, 6 },
+	{ DBGBUS_SSPP1, 32, 7 },
+
+	{ DBGBUS_SSPP1, 33, 0 },
+	{ DBGBUS_SSPP1, 33, 1 },
+	{ DBGBUS_SSPP1, 33, 2 },
+	{ DBGBUS_SSPP1, 33, 3 },
+	{ DBGBUS_SSPP1, 33, 4 },
+	{ DBGBUS_SSPP1, 33, 5 },
+	{ DBGBUS_SSPP1, 33, 6 },
+	{ DBGBUS_SSPP1, 33, 7 },
+
+	{ DBGBUS_SSPP1, 34, 0 },
+	{ DBGBUS_SSPP1, 34, 1 },
+	{ DBGBUS_SSPP1, 34, 2 },
+	{ DBGBUS_SSPP1, 34, 3 },
+	{ DBGBUS_SSPP1, 34, 4 },
+	{ DBGBUS_SSPP1, 34, 5 },
+	{ DBGBUS_SSPP1, 34, 6 },
+	{ DBGBUS_SSPP1, 34, 7 },
+
+	{ DBGBUS_SSPP1, 35, 0 },
+	{ DBGBUS_SSPP1, 35, 1 },
+	{ DBGBUS_SSPP1, 35, 2 },
+
+	/* dma 1 */
+	{ DBGBUS_SSPP1, 40, 0 },
+	{ DBGBUS_SSPP1, 40, 1 },
+	{ DBGBUS_SSPP1, 40, 2 },
+	{ DBGBUS_SSPP1, 40, 3 },
+	{ DBGBUS_SSPP1, 40, 4 },
+	{ DBGBUS_SSPP1, 40, 5 },
+	{ DBGBUS_SSPP1, 40, 6 },
+	{ DBGBUS_SSPP1, 40, 7 },
+
+	{ DBGBUS_SSPP1, 41, 0 },
+	{ DBGBUS_SSPP1, 41, 1 },
+	{ DBGBUS_SSPP1, 41, 2 },
+	{ DBGBUS_SSPP1, 41, 3 },
+	{ DBGBUS_SSPP1, 41, 4 },
+	{ DBGBUS_SSPP1, 41, 5 },
+	{ DBGBUS_SSPP1, 41, 6 },
+	{ DBGBUS_SSPP1, 41, 7 },
+
+	{ DBGBUS_SSPP1, 42, 0 },
+	{ DBGBUS_SSPP1, 42, 1 },
+	{ DBGBUS_SSPP1, 42, 2 },
+	{ DBGBUS_SSPP1, 42, 3 },
+	{ DBGBUS_SSPP1, 42, 4 },
+	{ DBGBUS_SSPP1, 42, 5 },
+	{ DBGBUS_SSPP1, 42, 6 },
+	{ DBGBUS_SSPP1, 42, 7 },
+
+	{ DBGBUS_SSPP1, 44, 0 },
+	{ DBGBUS_SSPP1, 44, 1 },
+	{ DBGBUS_SSPP1, 44, 2 },
+	{ DBGBUS_SSPP1, 44, 3 },
+	{ DBGBUS_SSPP1, 44, 4 },
+	{ DBGBUS_SSPP1, 44, 5 },
+	{ DBGBUS_SSPP1, 44, 6 },
+	{ DBGBUS_SSPP1, 44, 7 },
+
+	{ DBGBUS_SSPP1, 45, 0 },
+	{ DBGBUS_SSPP1, 45, 1 },
+	{ DBGBUS_SSPP1, 45, 2 },
+	{ DBGBUS_SSPP1, 45, 3 },
+	{ DBGBUS_SSPP1, 45, 4 },
+	{ DBGBUS_SSPP1, 45, 5 },
+	{ DBGBUS_SSPP1, 45, 6 },
+	{ DBGBUS_SSPP1, 45, 7 },
+
+	/* cursor 1 */
+	{ DBGBUS_SSPP1, 80, 0 },
+	{ DBGBUS_SSPP1, 80, 1 },
+	{ DBGBUS_SSPP1, 80, 2 },
+	{ DBGBUS_SSPP1, 80, 3 },
+	{ DBGBUS_SSPP1, 80, 4 },
+	{ DBGBUS_SSPP1, 80, 5 },
+	{ DBGBUS_SSPP1, 80, 6 },
+	{ DBGBUS_SSPP1, 80, 7 },
+
+	{ DBGBUS_SSPP1, 81, 0 },
+	{ DBGBUS_SSPP1, 81, 1 },
+	{ DBGBUS_SSPP1, 81, 2 },
+	{ DBGBUS_SSPP1, 81, 3 },
+	{ DBGBUS_SSPP1, 81, 4 },
+	{ DBGBUS_SSPP1, 81, 5 },
+	{ DBGBUS_SSPP1, 81, 6 },
+	{ DBGBUS_SSPP1, 81, 7 },
+
+	{ DBGBUS_SSPP1, 82, 0 },
+	{ DBGBUS_SSPP1, 82, 1 },
+	{ DBGBUS_SSPP1, 82, 2 },
+	{ DBGBUS_SSPP1, 82, 3 },
+	{ DBGBUS_SSPP1, 82, 4 },
+	{ DBGBUS_SSPP1, 82, 5 },
+	{ DBGBUS_SSPP1, 82, 6 },
+	{ DBGBUS_SSPP1, 82, 7 },
+
+	{ DBGBUS_SSPP1, 83, 0 },
+	{ DBGBUS_SSPP1, 83, 1 },
+	{ DBGBUS_SSPP1, 83, 2 },
+	{ DBGBUS_SSPP1, 83, 3 },
+	{ DBGBUS_SSPP1, 83, 4 },
+	{ DBGBUS_SSPP1, 83, 5 },
+	{ DBGBUS_SSPP1, 83, 6 },
+	{ DBGBUS_SSPP1, 83, 7 },
+
+	{ DBGBUS_SSPP1, 84, 0 },
+	{ DBGBUS_SSPP1, 84, 1 },
+	{ DBGBUS_SSPP1, 84, 2 },
+	{ DBGBUS_SSPP1, 84, 3 },
+	{ DBGBUS_SSPP1, 84, 4 },
+	{ DBGBUS_SSPP1, 84, 5 },
+	{ DBGBUS_SSPP1, 84, 6 },
+	{ DBGBUS_SSPP1, 84, 7 },
+
+	/* dspp */
+	{ DBGBUS_DSPP, 13, 0 },
+	{ DBGBUS_DSPP, 19, 0 },
+	{ DBGBUS_DSPP, 14, 0 },
+	{ DBGBUS_DSPP, 14, 1 },
+	{ DBGBUS_DSPP, 14, 3 },
+	{ DBGBUS_DSPP, 20, 0 },
+	{ DBGBUS_DSPP, 20, 1 },
+	{ DBGBUS_DSPP, 20, 3 },
+
+	/* ppb_0 */
+	{ DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+	/* ppb_1 */
+	{ DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+	/* lm_lut */
+	{ DBGBUS_DSPP, 109, 0 },
+	{ DBGBUS_DSPP, 105, 0 },
+	{ DBGBUS_DSPP, 103, 0 },
+
+	/* tear-check */
+	{ DBGBUS_PERIPH, 63, 0 },
+	{ DBGBUS_PERIPH, 64, 0 },
+	{ DBGBUS_PERIPH, 65, 0 },
+	{ DBGBUS_PERIPH, 73, 0 },
+	{ DBGBUS_PERIPH, 74, 0 },
+
+	/* crossbar */
+	{ DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+	/* rotator */
+	{ DBGBUS_DSPP, 9, 0},
+
+	/* blend */
+	/* LM0 */
+	{ DBGBUS_DSPP, 63, 0},
+	{ DBGBUS_DSPP, 63, 1},
+	{ DBGBUS_DSPP, 63, 2},
+	{ DBGBUS_DSPP, 63, 3},
+	{ DBGBUS_DSPP, 63, 4},
+	{ DBGBUS_DSPP, 63, 5},
+	{ DBGBUS_DSPP, 63, 6},
+	{ DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 64, 0},
+	{ DBGBUS_DSPP, 64, 1},
+	{ DBGBUS_DSPP, 64, 2},
+	{ DBGBUS_DSPP, 64, 3},
+	{ DBGBUS_DSPP, 64, 4},
+	{ DBGBUS_DSPP, 64, 5},
+	{ DBGBUS_DSPP, 64, 6},
+	{ DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 65, 0},
+	{ DBGBUS_DSPP, 65, 1},
+	{ DBGBUS_DSPP, 65, 2},
+	{ DBGBUS_DSPP, 65, 3},
+	{ DBGBUS_DSPP, 65, 4},
+	{ DBGBUS_DSPP, 65, 5},
+	{ DBGBUS_DSPP, 65, 6},
+	{ DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 66, 0},
+	{ DBGBUS_DSPP, 66, 1},
+	{ DBGBUS_DSPP, 66, 2},
+	{ DBGBUS_DSPP, 66, 3},
+	{ DBGBUS_DSPP, 66, 4},
+	{ DBGBUS_DSPP, 66, 5},
+	{ DBGBUS_DSPP, 66, 6},
+	{ DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 67, 0},
+	{ DBGBUS_DSPP, 67, 1},
+	{ DBGBUS_DSPP, 67, 2},
+	{ DBGBUS_DSPP, 67, 3},
+	{ DBGBUS_DSPP, 67, 4},
+	{ DBGBUS_DSPP, 67, 5},
+	{ DBGBUS_DSPP, 67, 6},
+	{ DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 68, 0},
+	{ DBGBUS_DSPP, 68, 1},
+	{ DBGBUS_DSPP, 68, 2},
+	{ DBGBUS_DSPP, 68, 3},
+	{ DBGBUS_DSPP, 68, 4},
+	{ DBGBUS_DSPP, 68, 5},
+	{ DBGBUS_DSPP, 68, 6},
+	{ DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 69, 0},
+	{ DBGBUS_DSPP, 69, 1},
+	{ DBGBUS_DSPP, 69, 2},
+	{ DBGBUS_DSPP, 69, 3},
+	{ DBGBUS_DSPP, 69, 4},
+	{ DBGBUS_DSPP, 69, 5},
+	{ DBGBUS_DSPP, 69, 6},
+	{ DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM1 */
+	{ DBGBUS_DSPP, 70, 0},
+	{ DBGBUS_DSPP, 70, 1},
+	{ DBGBUS_DSPP, 70, 2},
+	{ DBGBUS_DSPP, 70, 3},
+	{ DBGBUS_DSPP, 70, 4},
+	{ DBGBUS_DSPP, 70, 5},
+	{ DBGBUS_DSPP, 70, 6},
+	{ DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 71, 0},
+	{ DBGBUS_DSPP, 71, 1},
+	{ DBGBUS_DSPP, 71, 2},
+	{ DBGBUS_DSPP, 71, 3},
+	{ DBGBUS_DSPP, 71, 4},
+	{ DBGBUS_DSPP, 71, 5},
+	{ DBGBUS_DSPP, 71, 6},
+	{ DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 72, 0},
+	{ DBGBUS_DSPP, 72, 1},
+	{ DBGBUS_DSPP, 72, 2},
+	{ DBGBUS_DSPP, 72, 3},
+	{ DBGBUS_DSPP, 72, 4},
+	{ DBGBUS_DSPP, 72, 5},
+	{ DBGBUS_DSPP, 72, 6},
+	{ DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 73, 0},
+	{ DBGBUS_DSPP, 73, 1},
+	{ DBGBUS_DSPP, 73, 2},
+	{ DBGBUS_DSPP, 73, 3},
+	{ DBGBUS_DSPP, 73, 4},
+	{ DBGBUS_DSPP, 73, 5},
+	{ DBGBUS_DSPP, 73, 6},
+	{ DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 74, 0},
+	{ DBGBUS_DSPP, 74, 1},
+	{ DBGBUS_DSPP, 74, 2},
+	{ DBGBUS_DSPP, 74, 3},
+	{ DBGBUS_DSPP, 74, 4},
+	{ DBGBUS_DSPP, 74, 5},
+	{ DBGBUS_DSPP, 74, 6},
+	{ DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 75, 0},
+	{ DBGBUS_DSPP, 75, 1},
+	{ DBGBUS_DSPP, 75, 2},
+	{ DBGBUS_DSPP, 75, 3},
+	{ DBGBUS_DSPP, 75, 4},
+	{ DBGBUS_DSPP, 75, 5},
+	{ DBGBUS_DSPP, 75, 6},
+	{ DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 76, 0},
+	{ DBGBUS_DSPP, 76, 1},
+	{ DBGBUS_DSPP, 76, 2},
+	{ DBGBUS_DSPP, 76, 3},
+	{ DBGBUS_DSPP, 76, 4},
+	{ DBGBUS_DSPP, 76, 5},
+	{ DBGBUS_DSPP, 76, 6},
+	{ DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM2 */
+	{ DBGBUS_DSPP, 77, 0},
+	{ DBGBUS_DSPP, 77, 1},
+	{ DBGBUS_DSPP, 77, 2},
+	{ DBGBUS_DSPP, 77, 3},
+	{ DBGBUS_DSPP, 77, 4},
+	{ DBGBUS_DSPP, 77, 5},
+	{ DBGBUS_DSPP, 77, 6},
+	{ DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 78, 0},
+	{ DBGBUS_DSPP, 78, 1},
+	{ DBGBUS_DSPP, 78, 2},
+	{ DBGBUS_DSPP, 78, 3},
+	{ DBGBUS_DSPP, 78, 4},
+	{ DBGBUS_DSPP, 78, 5},
+	{ DBGBUS_DSPP, 78, 6},
+	{ DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 79, 0},
+	{ DBGBUS_DSPP, 79, 1},
+	{ DBGBUS_DSPP, 79, 2},
+	{ DBGBUS_DSPP, 79, 3},
+	{ DBGBUS_DSPP, 79, 4},
+	{ DBGBUS_DSPP, 79, 5},
+	{ DBGBUS_DSPP, 79, 6},
+	{ DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 80, 0},
+	{ DBGBUS_DSPP, 80, 1},
+	{ DBGBUS_DSPP, 80, 2},
+	{ DBGBUS_DSPP, 80, 3},
+	{ DBGBUS_DSPP, 80, 4},
+	{ DBGBUS_DSPP, 80, 5},
+	{ DBGBUS_DSPP, 80, 6},
+	{ DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 81, 0},
+	{ DBGBUS_DSPP, 81, 1},
+	{ DBGBUS_DSPP, 81, 2},
+	{ DBGBUS_DSPP, 81, 3},
+	{ DBGBUS_DSPP, 81, 4},
+	{ DBGBUS_DSPP, 81, 5},
+	{ DBGBUS_DSPP, 81, 6},
+	{ DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 82, 0},
+	{ DBGBUS_DSPP, 82, 1},
+	{ DBGBUS_DSPP, 82, 2},
+	{ DBGBUS_DSPP, 82, 3},
+	{ DBGBUS_DSPP, 82, 4},
+	{ DBGBUS_DSPP, 82, 5},
+	{ DBGBUS_DSPP, 82, 6},
+	{ DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 83, 0},
+	{ DBGBUS_DSPP, 83, 1},
+	{ DBGBUS_DSPP, 83, 2},
+	{ DBGBUS_DSPP, 83, 3},
+	{ DBGBUS_DSPP, 83, 4},
+	{ DBGBUS_DSPP, 83, 5},
+	{ DBGBUS_DSPP, 83, 6},
+	{ DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+	/* csc */
+	{ DBGBUS_SSPP0, 7, 0},
+	{ DBGBUS_SSPP0, 7, 1},
+	{ DBGBUS_SSPP0, 27, 0},
+	{ DBGBUS_SSPP0, 27, 1},
+	{ DBGBUS_SSPP1, 7, 0},
+	{ DBGBUS_SSPP1, 7, 1},
+	{ DBGBUS_SSPP1, 27, 0},
+	{ DBGBUS_SSPP1, 27, 1},
+
+	/* pcc */
+	{ DBGBUS_SSPP0, 3,  3},
+	{ DBGBUS_SSPP0, 23, 3},
+	{ DBGBUS_SSPP0, 33, 3},
+	{ DBGBUS_SSPP0, 43, 3},
+	{ DBGBUS_SSPP1, 3,  3},
+	{ DBGBUS_SSPP1, 23, 3},
+	{ DBGBUS_SSPP1, 33, 3},
+	{ DBGBUS_SSPP1, 43, 3},
+
+	/* spa */
+	{ DBGBUS_SSPP0, 8,  0},
+	{ DBGBUS_SSPP0, 28, 0},
+	{ DBGBUS_SSPP1, 8,  0},
+	{ DBGBUS_SSPP1, 28, 0},
+	{ DBGBUS_DSPP, 13, 0},
+	{ DBGBUS_DSPP, 19, 0},
+
+	/* igc */
+	{ DBGBUS_SSPP0, 9,  0},
+	{ DBGBUS_SSPP0, 9,  1},
+	{ DBGBUS_SSPP0, 9,  3},
+	{ DBGBUS_SSPP0, 29, 0},
+	{ DBGBUS_SSPP0, 29, 1},
+	{ DBGBUS_SSPP0, 29, 3},
+	{ DBGBUS_SSPP0, 17, 0},
+	{ DBGBUS_SSPP0, 17, 1},
+	{ DBGBUS_SSPP0, 17, 3},
+	{ DBGBUS_SSPP0, 37, 0},
+	{ DBGBUS_SSPP0, 37, 1},
+	{ DBGBUS_SSPP0, 37, 3},
+	{ DBGBUS_SSPP0, 46, 0},
+	{ DBGBUS_SSPP0, 46, 1},
+	{ DBGBUS_SSPP0, 46, 3},
+
+	{ DBGBUS_SSPP1, 9,  0},
+	{ DBGBUS_SSPP1, 9,  1},
+	{ DBGBUS_SSPP1, 9,  3},
+	{ DBGBUS_SSPP1, 29, 0},
+	{ DBGBUS_SSPP1, 29, 1},
+	{ DBGBUS_SSPP1, 29, 3},
+	{ DBGBUS_SSPP1, 17, 0},
+	{ DBGBUS_SSPP1, 17, 1},
+	{ DBGBUS_SSPP1, 17, 3},
+	{ DBGBUS_SSPP1, 37, 0},
+	{ DBGBUS_SSPP1, 37, 1},
+	{ DBGBUS_SSPP1, 37, 3},
+	{ DBGBUS_SSPP1, 46, 0},
+	{ DBGBUS_SSPP1, 46, 1},
+	{ DBGBUS_SSPP1, 46, 3},
+
+	{ DBGBUS_DSPP, 14, 0},
+	{ DBGBUS_DSPP, 14, 1},
+	{ DBGBUS_DSPP, 14, 3},
+	{ DBGBUS_DSPP, 20, 0},
+	{ DBGBUS_DSPP, 20, 1},
+	{ DBGBUS_DSPP, 20, 3},
+
+	{ DBGBUS_PERIPH, 60, 0},
+};
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
+
+	/* Unpack 0 sspp 0 */
+	{ DBGBUS_SSPP0, 50, 2 },
+	{ DBGBUS_SSPP0, 60, 2 },
+	{ DBGBUS_SSPP0, 70, 2 },
+
+	/* Unpack 0 sspp 1 */
+	{ DBGBUS_SSPP1, 50, 2 },
+	{ DBGBUS_SSPP1, 60, 2 },
+	{ DBGBUS_SSPP1, 70, 2 },
+
+	/* scheduler */
+	{ DBGBUS_DSPP, 130, 0 },
+	{ DBGBUS_DSPP, 130, 1 },
+	{ DBGBUS_DSPP, 130, 2 },
+	{ DBGBUS_DSPP, 130, 3 },
+	{ DBGBUS_DSPP, 130, 4 },
+	{ DBGBUS_DSPP, 130, 5 },
+
+	/* qseed */
+	{ DBGBUS_SSPP0, 6, 0},
+	{ DBGBUS_SSPP0, 6, 1},
+	{ DBGBUS_SSPP0, 26, 0},
+	{ DBGBUS_SSPP0, 26, 1},
+	{ DBGBUS_SSPP1, 6, 0},
+	{ DBGBUS_SSPP1, 6, 1},
+	{ DBGBUS_SSPP1, 26, 0},
+	{ DBGBUS_SSPP1, 26, 1},
+
+	/* scale */
+	{ DBGBUS_SSPP0, 16, 0},
+	{ DBGBUS_SSPP0, 16, 1},
+	{ DBGBUS_SSPP0, 36, 0},
+	{ DBGBUS_SSPP0, 36, 1},
+	{ DBGBUS_SSPP1, 16, 0},
+	{ DBGBUS_SSPP1, 16, 1},
+	{ DBGBUS_SSPP1, 36, 0},
+	{ DBGBUS_SSPP1, 36, 1},
+
+	/* fetch sspp0 */
+
+	/* vig 0 */
+	{ DBGBUS_SSPP0, 0, 0 },
+	{ DBGBUS_SSPP0, 0, 1 },
+	{ DBGBUS_SSPP0, 0, 2 },
+	{ DBGBUS_SSPP0, 0, 3 },
+	{ DBGBUS_SSPP0, 0, 4 },
+	{ DBGBUS_SSPP0, 0, 5 },
+	{ DBGBUS_SSPP0, 0, 6 },
+	{ DBGBUS_SSPP0, 0, 7 },
+
+	{ DBGBUS_SSPP0, 1, 0 },
+	{ DBGBUS_SSPP0, 1, 1 },
+	{ DBGBUS_SSPP0, 1, 2 },
+	{ DBGBUS_SSPP0, 1, 3 },
+	{ DBGBUS_SSPP0, 1, 4 },
+	{ DBGBUS_SSPP0, 1, 5 },
+	{ DBGBUS_SSPP0, 1, 6 },
+	{ DBGBUS_SSPP0, 1, 7 },
+
+	{ DBGBUS_SSPP0, 2, 0 },
+	{ DBGBUS_SSPP0, 2, 1 },
+	{ DBGBUS_SSPP0, 2, 2 },
+	{ DBGBUS_SSPP0, 2, 3 },
+	{ DBGBUS_SSPP0, 2, 4 },
+	{ DBGBUS_SSPP0, 2, 5 },
+	{ DBGBUS_SSPP0, 2, 6 },
+	{ DBGBUS_SSPP0, 2, 7 },
+
+	{ DBGBUS_SSPP0, 4, 0 },
+	{ DBGBUS_SSPP0, 4, 1 },
+	{ DBGBUS_SSPP0, 4, 2 },
+	{ DBGBUS_SSPP0, 4, 3 },
+	{ DBGBUS_SSPP0, 4, 4 },
+	{ DBGBUS_SSPP0, 4, 5 },
+	{ DBGBUS_SSPP0, 4, 6 },
+	{ DBGBUS_SSPP0, 4, 7 },
+
+	{ DBGBUS_SSPP0, 5, 0 },
+	{ DBGBUS_SSPP0, 5, 1 },
+	{ DBGBUS_SSPP0, 5, 2 },
+	{ DBGBUS_SSPP0, 5, 3 },
+	{ DBGBUS_SSPP0, 5, 4 },
+	{ DBGBUS_SSPP0, 5, 5 },
+	{ DBGBUS_SSPP0, 5, 6 },
+	{ DBGBUS_SSPP0, 5, 7 },
+
+	/* vig 2 */
+	{ DBGBUS_SSPP0, 20, 0 },
+	{ DBGBUS_SSPP0, 20, 1 },
+	{ DBGBUS_SSPP0, 20, 2 },
+	{ DBGBUS_SSPP0, 20, 3 },
+	{ DBGBUS_SSPP0, 20, 4 },
+	{ DBGBUS_SSPP0, 20, 5 },
+	{ DBGBUS_SSPP0, 20, 6 },
+	{ DBGBUS_SSPP0, 20, 7 },
+
+	{ DBGBUS_SSPP0, 21, 0 },
+	{ DBGBUS_SSPP0, 21, 1 },
+	{ DBGBUS_SSPP0, 21, 2 },
+	{ DBGBUS_SSPP0, 21, 3 },
+	{ DBGBUS_SSPP0, 21, 4 },
+	{ DBGBUS_SSPP0, 21, 5 },
+	{ DBGBUS_SSPP0, 21, 6 },
+	{ DBGBUS_SSPP0, 21, 7 },
+
+	{ DBGBUS_SSPP0, 22, 0 },
+	{ DBGBUS_SSPP0, 22, 1 },
+	{ DBGBUS_SSPP0, 22, 2 },
+	{ DBGBUS_SSPP0, 22, 3 },
+	{ DBGBUS_SSPP0, 22, 4 },
+	{ DBGBUS_SSPP0, 22, 5 },
+	{ DBGBUS_SSPP0, 22, 6 },
+	{ DBGBUS_SSPP0, 22, 7 },
+
+	{ DBGBUS_SSPP0, 24, 0 },
+	{ DBGBUS_SSPP0, 24, 1 },
+	{ DBGBUS_SSPP0, 24, 2 },
+	{ DBGBUS_SSPP0, 24, 3 },
+	{ DBGBUS_SSPP0, 24, 4 },
+	{ DBGBUS_SSPP0, 24, 5 },
+	{ DBGBUS_SSPP0, 24, 6 },
+	{ DBGBUS_SSPP0, 24, 7 },
+
+	{ DBGBUS_SSPP0, 25, 0 },
+	{ DBGBUS_SSPP0, 25, 1 },
+	{ DBGBUS_SSPP0, 25, 2 },
+	{ DBGBUS_SSPP0, 25, 3 },
+	{ DBGBUS_SSPP0, 25, 4 },
+	{ DBGBUS_SSPP0, 25, 5 },
+	{ DBGBUS_SSPP0, 25, 6 },
+	{ DBGBUS_SSPP0, 25, 7 },
+
+	/* dma 2 */
+	{ DBGBUS_SSPP0, 30, 0 },
+	{ DBGBUS_SSPP0, 30, 1 },
+	{ DBGBUS_SSPP0, 30, 2 },
+	{ DBGBUS_SSPP0, 30, 3 },
+	{ DBGBUS_SSPP0, 30, 4 },
+	{ DBGBUS_SSPP0, 30, 5 },
+	{ DBGBUS_SSPP0, 30, 6 },
+	{ DBGBUS_SSPP0, 30, 7 },
+
+	{ DBGBUS_SSPP0, 31, 0 },
+	{ DBGBUS_SSPP0, 31, 1 },
+	{ DBGBUS_SSPP0, 31, 2 },
+	{ DBGBUS_SSPP0, 31, 3 },
+	{ DBGBUS_SSPP0, 31, 4 },
+	{ DBGBUS_SSPP0, 31, 5 },
+	{ DBGBUS_SSPP0, 31, 6 },
+	{ DBGBUS_SSPP0, 31, 7 },
+
+	{ DBGBUS_SSPP0, 32, 0 },
+	{ DBGBUS_SSPP0, 32, 1 },
+	{ DBGBUS_SSPP0, 32, 2 },
+	{ DBGBUS_SSPP0, 32, 3 },
+	{ DBGBUS_SSPP0, 32, 4 },
+	{ DBGBUS_SSPP0, 32, 5 },
+	{ DBGBUS_SSPP0, 32, 6 },
+	{ DBGBUS_SSPP0, 32, 7 },
+
+	{ DBGBUS_SSPP0, 33, 0 },
+	{ DBGBUS_SSPP0, 33, 1 },
+	{ DBGBUS_SSPP0, 33, 2 },
+	{ DBGBUS_SSPP0, 33, 3 },
+	{ DBGBUS_SSPP0, 33, 4 },
+	{ DBGBUS_SSPP0, 33, 5 },
+	{ DBGBUS_SSPP0, 33, 6 },
+	{ DBGBUS_SSPP0, 33, 7 },
+
+	{ DBGBUS_SSPP0, 34, 0 },
+	{ DBGBUS_SSPP0, 34, 1 },
+	{ DBGBUS_SSPP0, 34, 2 },
+	{ DBGBUS_SSPP0, 34, 3 },
+	{ DBGBUS_SSPP0, 34, 4 },
+	{ DBGBUS_SSPP0, 34, 5 },
+	{ DBGBUS_SSPP0, 34, 6 },
+	{ DBGBUS_SSPP0, 34, 7 },
+
+	{ DBGBUS_SSPP0, 35, 0 },
+	{ DBGBUS_SSPP0, 35, 1 },
+	{ DBGBUS_SSPP0, 35, 2 },
+	{ DBGBUS_SSPP0, 35, 3 },
+
+	/* dma 0 */
+	{ DBGBUS_SSPP0, 40, 0 },
+	{ DBGBUS_SSPP0, 40, 1 },
+	{ DBGBUS_SSPP0, 40, 2 },
+	{ DBGBUS_SSPP0, 40, 3 },
+	{ DBGBUS_SSPP0, 40, 4 },
+	{ DBGBUS_SSPP0, 40, 5 },
+	{ DBGBUS_SSPP0, 40, 6 },
+	{ DBGBUS_SSPP0, 40, 7 },
+
+	{ DBGBUS_SSPP0, 41, 0 },
+	{ DBGBUS_SSPP0, 41, 1 },
+	{ DBGBUS_SSPP0, 41, 2 },
+	{ DBGBUS_SSPP0, 41, 3 },
+	{ DBGBUS_SSPP0, 41, 4 },
+	{ DBGBUS_SSPP0, 41, 5 },
+	{ DBGBUS_SSPP0, 41, 6 },
+	{ DBGBUS_SSPP0, 41, 7 },
+
+	{ DBGBUS_SSPP0, 42, 0 },
+	{ DBGBUS_SSPP0, 42, 1 },
+	{ DBGBUS_SSPP0, 42, 2 },
+	{ DBGBUS_SSPP0, 42, 3 },
+	{ DBGBUS_SSPP0, 42, 4 },
+	{ DBGBUS_SSPP0, 42, 5 },
+	{ DBGBUS_SSPP0, 42, 6 },
+	{ DBGBUS_SSPP0, 42, 7 },
+
+	{ DBGBUS_SSPP0, 44, 0 },
+	{ DBGBUS_SSPP0, 44, 1 },
+	{ DBGBUS_SSPP0, 44, 2 },
+	{ DBGBUS_SSPP0, 44, 3 },
+	{ DBGBUS_SSPP0, 44, 4 },
+	{ DBGBUS_SSPP0, 44, 5 },
+	{ DBGBUS_SSPP0, 44, 6 },
+	{ DBGBUS_SSPP0, 44, 7 },
+
+	{ DBGBUS_SSPP0, 45, 0 },
+	{ DBGBUS_SSPP0, 45, 1 },
+	{ DBGBUS_SSPP0, 45, 2 },
+	{ DBGBUS_SSPP0, 45, 3 },
+	{ DBGBUS_SSPP0, 45, 4 },
+	{ DBGBUS_SSPP0, 45, 5 },
+	{ DBGBUS_SSPP0, 45, 6 },
+	{ DBGBUS_SSPP0, 45, 7 },
+
+	/* fetch sspp1 */
+	/* vig 1 */
+	{ DBGBUS_SSPP1, 0, 0 },
+	{ DBGBUS_SSPP1, 0, 1 },
+	{ DBGBUS_SSPP1, 0, 2 },
+	{ DBGBUS_SSPP1, 0, 3 },
+	{ DBGBUS_SSPP1, 0, 4 },
+	{ DBGBUS_SSPP1, 0, 5 },
+	{ DBGBUS_SSPP1, 0, 6 },
+	{ DBGBUS_SSPP1, 0, 7 },
+
+	{ DBGBUS_SSPP1, 1, 0 },
+	{ DBGBUS_SSPP1, 1, 1 },
+	{ DBGBUS_SSPP1, 1, 2 },
+	{ DBGBUS_SSPP1, 1, 3 },
+	{ DBGBUS_SSPP1, 1, 4 },
+	{ DBGBUS_SSPP1, 1, 5 },
+	{ DBGBUS_SSPP1, 1, 6 },
+	{ DBGBUS_SSPP1, 1, 7 },
+
+	{ DBGBUS_SSPP1, 2, 0 },
+	{ DBGBUS_SSPP1, 2, 1 },
+	{ DBGBUS_SSPP1, 2, 2 },
+	{ DBGBUS_SSPP1, 2, 3 },
+	{ DBGBUS_SSPP1, 2, 4 },
+	{ DBGBUS_SSPP1, 2, 5 },
+	{ DBGBUS_SSPP1, 2, 6 },
+	{ DBGBUS_SSPP1, 2, 7 },
+
+	{ DBGBUS_SSPP1, 4, 0 },
+	{ DBGBUS_SSPP1, 4, 1 },
+	{ DBGBUS_SSPP1, 4, 2 },
+	{ DBGBUS_SSPP1, 4, 3 },
+	{ DBGBUS_SSPP1, 4, 4 },
+	{ DBGBUS_SSPP1, 4, 5 },
+	{ DBGBUS_SSPP1, 4, 6 },
+	{ DBGBUS_SSPP1, 4, 7 },
+
+	{ DBGBUS_SSPP1, 5, 0 },
+	{ DBGBUS_SSPP1, 5, 1 },
+	{ DBGBUS_SSPP1, 5, 2 },
+	{ DBGBUS_SSPP1, 5, 3 },
+	{ DBGBUS_SSPP1, 5, 4 },
+	{ DBGBUS_SSPP1, 5, 5 },
+	{ DBGBUS_SSPP1, 5, 6 },
+	{ DBGBUS_SSPP1, 5, 7 },
+
+	/* vig 3 */
+	{ DBGBUS_SSPP1, 20, 0 },
+	{ DBGBUS_SSPP1, 20, 1 },
+	{ DBGBUS_SSPP1, 20, 2 },
+	{ DBGBUS_SSPP1, 20, 3 },
+	{ DBGBUS_SSPP1, 20, 4 },
+	{ DBGBUS_SSPP1, 20, 5 },
+	{ DBGBUS_SSPP1, 20, 6 },
+	{ DBGBUS_SSPP1, 20, 7 },
+
+	{ DBGBUS_SSPP1, 21, 0 },
+	{ DBGBUS_SSPP1, 21, 1 },
+	{ DBGBUS_SSPP1, 21, 2 },
+	{ DBGBUS_SSPP1, 21, 3 },
+	{ DBGBUS_SSPP1, 21, 4 },
+	{ DBGBUS_SSPP1, 21, 5 },
+	{ DBGBUS_SSPP1, 21, 6 },
+	{ DBGBUS_SSPP1, 21, 7 },
+
+	{ DBGBUS_SSPP1, 22, 0 },
+	{ DBGBUS_SSPP1, 22, 1 },
+	{ DBGBUS_SSPP1, 22, 2 },
+	{ DBGBUS_SSPP1, 22, 3 },
+	{ DBGBUS_SSPP1, 22, 4 },
+	{ DBGBUS_SSPP1, 22, 5 },
+	{ DBGBUS_SSPP1, 22, 6 },
+	{ DBGBUS_SSPP1, 22, 7 },
+
+	{ DBGBUS_SSPP1, 24, 0 },
+	{ DBGBUS_SSPP1, 24, 1 },
+	{ DBGBUS_SSPP1, 24, 2 },
+	{ DBGBUS_SSPP1, 24, 3 },
+	{ DBGBUS_SSPP1, 24, 4 },
+	{ DBGBUS_SSPP1, 24, 5 },
+	{ DBGBUS_SSPP1, 24, 6 },
+	{ DBGBUS_SSPP1, 24, 7 },
+
+	{ DBGBUS_SSPP1, 25, 0 },
+	{ DBGBUS_SSPP1, 25, 1 },
+	{ DBGBUS_SSPP1, 25, 2 },
+	{ DBGBUS_SSPP1, 25, 3 },
+	{ DBGBUS_SSPP1, 25, 4 },
+	{ DBGBUS_SSPP1, 25, 5 },
+	{ DBGBUS_SSPP1, 25, 6 },
+	{ DBGBUS_SSPP1, 25, 7 },
+
+	/* dma 3 */
+	{ DBGBUS_SSPP1, 30, 0 },
+	{ DBGBUS_SSPP1, 30, 1 },
+	{ DBGBUS_SSPP1, 30, 2 },
+	{ DBGBUS_SSPP1, 30, 3 },
+	{ DBGBUS_SSPP1, 30, 4 },
+	{ DBGBUS_SSPP1, 30, 5 },
+	{ DBGBUS_SSPP1, 30, 6 },
+	{ DBGBUS_SSPP1, 30, 7 },
+
+	{ DBGBUS_SSPP1, 31, 0 },
+	{ DBGBUS_SSPP1, 31, 1 },
+	{ DBGBUS_SSPP1, 31, 2 },
+	{ DBGBUS_SSPP1, 31, 3 },
+	{ DBGBUS_SSPP1, 31, 4 },
+	{ DBGBUS_SSPP1, 31, 5 },
+	{ DBGBUS_SSPP1, 31, 6 },
+	{ DBGBUS_SSPP1, 31, 7 },
+
+	{ DBGBUS_SSPP1, 32, 0 },
+	{ DBGBUS_SSPP1, 32, 1 },
+	{ DBGBUS_SSPP1, 32, 2 },
+	{ DBGBUS_SSPP1, 32, 3 },
+	{ DBGBUS_SSPP1, 32, 4 },
+	{ DBGBUS_SSPP1, 32, 5 },
+	{ DBGBUS_SSPP1, 32, 6 },
+	{ DBGBUS_SSPP1, 32, 7 },
+
+	{ DBGBUS_SSPP1, 33, 0 },
+	{ DBGBUS_SSPP1, 33, 1 },
+	{ DBGBUS_SSPP1, 33, 2 },
+	{ DBGBUS_SSPP1, 33, 3 },
+	{ DBGBUS_SSPP1, 33, 4 },
+	{ DBGBUS_SSPP1, 33, 5 },
+	{ DBGBUS_SSPP1, 33, 6 },
+	{ DBGBUS_SSPP1, 33, 7 },
+
+	{ DBGBUS_SSPP1, 34, 0 },
+	{ DBGBUS_SSPP1, 34, 1 },
+	{ DBGBUS_SSPP1, 34, 2 },
+	{ DBGBUS_SSPP1, 34, 3 },
+	{ DBGBUS_SSPP1, 34, 4 },
+	{ DBGBUS_SSPP1, 34, 5 },
+	{ DBGBUS_SSPP1, 34, 6 },
+	{ DBGBUS_SSPP1, 34, 7 },
+
+	{ DBGBUS_SSPP1, 35, 0 },
+	{ DBGBUS_SSPP1, 35, 1 },
+	{ DBGBUS_SSPP1, 35, 2 },
+
+	/* dma 1 */
+	{ DBGBUS_SSPP1, 40, 0 },
+	{ DBGBUS_SSPP1, 40, 1 },
+	{ DBGBUS_SSPP1, 40, 2 },
+	{ DBGBUS_SSPP1, 40, 3 },
+	{ DBGBUS_SSPP1, 40, 4 },
+	{ DBGBUS_SSPP1, 40, 5 },
+	{ DBGBUS_SSPP1, 40, 6 },
+	{ DBGBUS_SSPP1, 40, 7 },
+
+	{ DBGBUS_SSPP1, 41, 0 },
+	{ DBGBUS_SSPP1, 41, 1 },
+	{ DBGBUS_SSPP1, 41, 2 },
+	{ DBGBUS_SSPP1, 41, 3 },
+	{ DBGBUS_SSPP1, 41, 4 },
+	{ DBGBUS_SSPP1, 41, 5 },
+	{ DBGBUS_SSPP1, 41, 6 },
+	{ DBGBUS_SSPP1, 41, 7 },
+
+	{ DBGBUS_SSPP1, 42, 0 },
+	{ DBGBUS_SSPP1, 42, 1 },
+	{ DBGBUS_SSPP1, 42, 2 },
+	{ DBGBUS_SSPP1, 42, 3 },
+	{ DBGBUS_SSPP1, 42, 4 },
+	{ DBGBUS_SSPP1, 42, 5 },
+	{ DBGBUS_SSPP1, 42, 6 },
+	{ DBGBUS_SSPP1, 42, 7 },
+
+	{ DBGBUS_SSPP1, 44, 0 },
+	{ DBGBUS_SSPP1, 44, 1 },
+	{ DBGBUS_SSPP1, 44, 2 },
+	{ DBGBUS_SSPP1, 44, 3 },
+	{ DBGBUS_SSPP1, 44, 4 },
+	{ DBGBUS_SSPP1, 44, 5 },
+	{ DBGBUS_SSPP1, 44, 6 },
+	{ DBGBUS_SSPP1, 44, 7 },
+
+	{ DBGBUS_SSPP1, 45, 0 },
+	{ DBGBUS_SSPP1, 45, 1 },
+	{ DBGBUS_SSPP1, 45, 2 },
+	{ DBGBUS_SSPP1, 45, 3 },
+	{ DBGBUS_SSPP1, 45, 4 },
+	{ DBGBUS_SSPP1, 45, 5 },
+	{ DBGBUS_SSPP1, 45, 6 },
+	{ DBGBUS_SSPP1, 45, 7 },
+
+	/* dspp */
+	{ DBGBUS_DSPP, 13, 0 },
+	{ DBGBUS_DSPP, 19, 0 },
+	{ DBGBUS_DSPP, 14, 0 },
+	{ DBGBUS_DSPP, 14, 1 },
+	{ DBGBUS_DSPP, 14, 3 },
+	{ DBGBUS_DSPP, 20, 0 },
+	{ DBGBUS_DSPP, 20, 1 },
+	{ DBGBUS_DSPP, 20, 3 },
+
+	/* ppb_0 */
+	{ DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+	/* ppb_1 */
+	{ DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+	/* lm_lut */
+	{ DBGBUS_DSPP, 109, 0 },
+	{ DBGBUS_DSPP, 105, 0 },
+	{ DBGBUS_DSPP, 103, 0 },
+
+	/* crossbar */
+	{ DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+	/* rotator */
+	{ DBGBUS_DSPP, 9, 0},
+
+	/* blend */
+	/* LM0 */
+	{ DBGBUS_DSPP, 63, 1},
+	{ DBGBUS_DSPP, 63, 2},
+	{ DBGBUS_DSPP, 63, 3},
+	{ DBGBUS_DSPP, 63, 4},
+	{ DBGBUS_DSPP, 63, 5},
+	{ DBGBUS_DSPP, 63, 6},
+	{ DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 64, 1},
+	{ DBGBUS_DSPP, 64, 2},
+	{ DBGBUS_DSPP, 64, 3},
+	{ DBGBUS_DSPP, 64, 4},
+	{ DBGBUS_DSPP, 64, 5},
+	{ DBGBUS_DSPP, 64, 6},
+	{ DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 65, 1},
+	{ DBGBUS_DSPP, 65, 2},
+	{ DBGBUS_DSPP, 65, 3},
+	{ DBGBUS_DSPP, 65, 4},
+	{ DBGBUS_DSPP, 65, 5},
+	{ DBGBUS_DSPP, 65, 6},
+	{ DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 66, 1},
+	{ DBGBUS_DSPP, 66, 2},
+	{ DBGBUS_DSPP, 66, 3},
+	{ DBGBUS_DSPP, 66, 4},
+	{ DBGBUS_DSPP, 66, 5},
+	{ DBGBUS_DSPP, 66, 6},
+	{ DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 67, 1},
+	{ DBGBUS_DSPP, 67, 2},
+	{ DBGBUS_DSPP, 67, 3},
+	{ DBGBUS_DSPP, 67, 4},
+	{ DBGBUS_DSPP, 67, 5},
+	{ DBGBUS_DSPP, 67, 6},
+	{ DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 68, 1},
+	{ DBGBUS_DSPP, 68, 2},
+	{ DBGBUS_DSPP, 68, 3},
+	{ DBGBUS_DSPP, 68, 4},
+	{ DBGBUS_DSPP, 68, 5},
+	{ DBGBUS_DSPP, 68, 6},
+	{ DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 69, 1},
+	{ DBGBUS_DSPP, 69, 2},
+	{ DBGBUS_DSPP, 69, 3},
+	{ DBGBUS_DSPP, 69, 4},
+	{ DBGBUS_DSPP, 69, 5},
+	{ DBGBUS_DSPP, 69, 6},
+	{ DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 84, 1},
+	{ DBGBUS_DSPP, 84, 2},
+	{ DBGBUS_DSPP, 84, 3},
+	{ DBGBUS_DSPP, 84, 4},
+	{ DBGBUS_DSPP, 84, 5},
+	{ DBGBUS_DSPP, 84, 6},
+	{ DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 85, 1},
+	{ DBGBUS_DSPP, 85, 2},
+	{ DBGBUS_DSPP, 85, 3},
+	{ DBGBUS_DSPP, 85, 4},
+	{ DBGBUS_DSPP, 85, 5},
+	{ DBGBUS_DSPP, 85, 6},
+	{ DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 86, 1},
+	{ DBGBUS_DSPP, 86, 2},
+	{ DBGBUS_DSPP, 86, 3},
+	{ DBGBUS_DSPP, 86, 4},
+	{ DBGBUS_DSPP, 86, 5},
+	{ DBGBUS_DSPP, 86, 6},
+	{ DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 87, 1},
+	{ DBGBUS_DSPP, 87, 2},
+	{ DBGBUS_DSPP, 87, 3},
+	{ DBGBUS_DSPP, 87, 4},
+	{ DBGBUS_DSPP, 87, 5},
+	{ DBGBUS_DSPP, 87, 6},
+	{ DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM1 */
+	{ DBGBUS_DSPP, 70, 1},
+	{ DBGBUS_DSPP, 70, 2},
+	{ DBGBUS_DSPP, 70, 3},
+	{ DBGBUS_DSPP, 70, 4},
+	{ DBGBUS_DSPP, 70, 5},
+	{ DBGBUS_DSPP, 70, 6},
+	{ DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 71, 1},
+	{ DBGBUS_DSPP, 71, 2},
+	{ DBGBUS_DSPP, 71, 3},
+	{ DBGBUS_DSPP, 71, 4},
+	{ DBGBUS_DSPP, 71, 5},
+	{ DBGBUS_DSPP, 71, 6},
+	{ DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 72, 1},
+	{ DBGBUS_DSPP, 72, 2},
+	{ DBGBUS_DSPP, 72, 3},
+	{ DBGBUS_DSPP, 72, 4},
+	{ DBGBUS_DSPP, 72, 5},
+	{ DBGBUS_DSPP, 72, 6},
+	{ DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 73, 1},
+	{ DBGBUS_DSPP, 73, 2},
+	{ DBGBUS_DSPP, 73, 3},
+	{ DBGBUS_DSPP, 73, 4},
+	{ DBGBUS_DSPP, 73, 5},
+	{ DBGBUS_DSPP, 73, 6},
+	{ DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 74, 1},
+	{ DBGBUS_DSPP, 74, 2},
+	{ DBGBUS_DSPP, 74, 3},
+	{ DBGBUS_DSPP, 74, 4},
+	{ DBGBUS_DSPP, 74, 5},
+	{ DBGBUS_DSPP, 74, 6},
+	{ DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 75, 1},
+	{ DBGBUS_DSPP, 75, 2},
+	{ DBGBUS_DSPP, 75, 3},
+	{ DBGBUS_DSPP, 75, 4},
+	{ DBGBUS_DSPP, 75, 5},
+	{ DBGBUS_DSPP, 75, 6},
+	{ DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 76, 1},
+	{ DBGBUS_DSPP, 76, 2},
+	{ DBGBUS_DSPP, 76, 3},
+	{ DBGBUS_DSPP, 76, 4},
+	{ DBGBUS_DSPP, 76, 5},
+	{ DBGBUS_DSPP, 76, 6},
+	{ DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 88, 1},
+	{ DBGBUS_DSPP, 88, 2},
+	{ DBGBUS_DSPP, 88, 3},
+	{ DBGBUS_DSPP, 88, 4},
+	{ DBGBUS_DSPP, 88, 5},
+	{ DBGBUS_DSPP, 88, 6},
+	{ DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 89, 1},
+	{ DBGBUS_DSPP, 89, 2},
+	{ DBGBUS_DSPP, 89, 3},
+	{ DBGBUS_DSPP, 89, 4},
+	{ DBGBUS_DSPP, 89, 5},
+	{ DBGBUS_DSPP, 89, 6},
+	{ DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 90, 1},
+	{ DBGBUS_DSPP, 90, 2},
+	{ DBGBUS_DSPP, 90, 3},
+	{ DBGBUS_DSPP, 90, 4},
+	{ DBGBUS_DSPP, 90, 5},
+	{ DBGBUS_DSPP, 90, 6},
+	{ DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 91, 1},
+	{ DBGBUS_DSPP, 91, 2},
+	{ DBGBUS_DSPP, 91, 3},
+	{ DBGBUS_DSPP, 91, 4},
+	{ DBGBUS_DSPP, 91, 5},
+	{ DBGBUS_DSPP, 91, 6},
+	{ DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM2 */
+	{ DBGBUS_DSPP, 77, 0},
+	{ DBGBUS_DSPP, 77, 1},
+	{ DBGBUS_DSPP, 77, 2},
+	{ DBGBUS_DSPP, 77, 3},
+	{ DBGBUS_DSPP, 77, 4},
+	{ DBGBUS_DSPP, 77, 5},
+	{ DBGBUS_DSPP, 77, 6},
+	{ DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 78, 0},
+	{ DBGBUS_DSPP, 78, 1},
+	{ DBGBUS_DSPP, 78, 2},
+	{ DBGBUS_DSPP, 78, 3},
+	{ DBGBUS_DSPP, 78, 4},
+	{ DBGBUS_DSPP, 78, 5},
+	{ DBGBUS_DSPP, 78, 6},
+	{ DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 79, 0},
+	{ DBGBUS_DSPP, 79, 1},
+	{ DBGBUS_DSPP, 79, 2},
+	{ DBGBUS_DSPP, 79, 3},
+	{ DBGBUS_DSPP, 79, 4},
+	{ DBGBUS_DSPP, 79, 5},
+	{ DBGBUS_DSPP, 79, 6},
+	{ DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 80, 0},
+	{ DBGBUS_DSPP, 80, 1},
+	{ DBGBUS_DSPP, 80, 2},
+	{ DBGBUS_DSPP, 80, 3},
+	{ DBGBUS_DSPP, 80, 4},
+	{ DBGBUS_DSPP, 80, 5},
+	{ DBGBUS_DSPP, 80, 6},
+	{ DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 81, 0},
+	{ DBGBUS_DSPP, 81, 1},
+	{ DBGBUS_DSPP, 81, 2},
+	{ DBGBUS_DSPP, 81, 3},
+	{ DBGBUS_DSPP, 81, 4},
+	{ DBGBUS_DSPP, 81, 5},
+	{ DBGBUS_DSPP, 81, 6},
+	{ DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 82, 0},
+	{ DBGBUS_DSPP, 82, 1},
+	{ DBGBUS_DSPP, 82, 2},
+	{ DBGBUS_DSPP, 82, 3},
+	{ DBGBUS_DSPP, 82, 4},
+	{ DBGBUS_DSPP, 82, 5},
+	{ DBGBUS_DSPP, 82, 6},
+	{ DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 83, 0},
+	{ DBGBUS_DSPP, 83, 1},
+	{ DBGBUS_DSPP, 83, 2},
+	{ DBGBUS_DSPP, 83, 3},
+	{ DBGBUS_DSPP, 83, 4},
+	{ DBGBUS_DSPP, 83, 5},
+	{ DBGBUS_DSPP, 83, 6},
+	{ DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 92, 1},
+	{ DBGBUS_DSPP, 92, 2},
+	{ DBGBUS_DSPP, 92, 3},
+	{ DBGBUS_DSPP, 92, 4},
+	{ DBGBUS_DSPP, 92, 5},
+	{ DBGBUS_DSPP, 92, 6},
+	{ DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 93, 1},
+	{ DBGBUS_DSPP, 93, 2},
+	{ DBGBUS_DSPP, 93, 3},
+	{ DBGBUS_DSPP, 93, 4},
+	{ DBGBUS_DSPP, 93, 5},
+	{ DBGBUS_DSPP, 93, 6},
+	{ DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 94, 1},
+	{ DBGBUS_DSPP, 94, 2},
+	{ DBGBUS_DSPP, 94, 3},
+	{ DBGBUS_DSPP, 94, 4},
+	{ DBGBUS_DSPP, 94, 5},
+	{ DBGBUS_DSPP, 94, 6},
+	{ DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 95, 1},
+	{ DBGBUS_DSPP, 95, 2},
+	{ DBGBUS_DSPP, 95, 3},
+	{ DBGBUS_DSPP, 95, 4},
+	{ DBGBUS_DSPP, 95, 5},
+	{ DBGBUS_DSPP, 95, 6},
+	{ DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM5 */
+	{ DBGBUS_DSPP, 110, 1},
+	{ DBGBUS_DSPP, 110, 2},
+	{ DBGBUS_DSPP, 110, 3},
+	{ DBGBUS_DSPP, 110, 4},
+	{ DBGBUS_DSPP, 110, 5},
+	{ DBGBUS_DSPP, 110, 6},
+	{ DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 111, 1},
+	{ DBGBUS_DSPP, 111, 2},
+	{ DBGBUS_DSPP, 111, 3},
+	{ DBGBUS_DSPP, 111, 4},
+	{ DBGBUS_DSPP, 111, 5},
+	{ DBGBUS_DSPP, 111, 6},
+	{ DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 112, 1},
+	{ DBGBUS_DSPP, 112, 2},
+	{ DBGBUS_DSPP, 112, 3},
+	{ DBGBUS_DSPP, 112, 4},
+	{ DBGBUS_DSPP, 112, 5},
+	{ DBGBUS_DSPP, 112, 6},
+	{ DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 113, 1},
+	{ DBGBUS_DSPP, 113, 2},
+	{ DBGBUS_DSPP, 113, 3},
+	{ DBGBUS_DSPP, 113, 4},
+	{ DBGBUS_DSPP, 113, 5},
+	{ DBGBUS_DSPP, 113, 6},
+	{ DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 114, 1},
+	{ DBGBUS_DSPP, 114, 2},
+	{ DBGBUS_DSPP, 114, 3},
+	{ DBGBUS_DSPP, 114, 4},
+	{ DBGBUS_DSPP, 114, 5},
+	{ DBGBUS_DSPP, 114, 6},
+	{ DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 115, 1},
+	{ DBGBUS_DSPP, 115, 2},
+	{ DBGBUS_DSPP, 115, 3},
+	{ DBGBUS_DSPP, 115, 4},
+	{ DBGBUS_DSPP, 115, 5},
+	{ DBGBUS_DSPP, 115, 6},
+	{ DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 116, 1},
+	{ DBGBUS_DSPP, 116, 2},
+	{ DBGBUS_DSPP, 116, 3},
+	{ DBGBUS_DSPP, 116, 4},
+	{ DBGBUS_DSPP, 116, 5},
+	{ DBGBUS_DSPP, 116, 6},
+	{ DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 117, 1},
+	{ DBGBUS_DSPP, 117, 2},
+	{ DBGBUS_DSPP, 117, 3},
+	{ DBGBUS_DSPP, 117, 4},
+	{ DBGBUS_DSPP, 117, 5},
+	{ DBGBUS_DSPP, 117, 6},
+	{ DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 118, 1},
+	{ DBGBUS_DSPP, 118, 2},
+	{ DBGBUS_DSPP, 118, 3},
+	{ DBGBUS_DSPP, 118, 4},
+	{ DBGBUS_DSPP, 118, 5},
+	{ DBGBUS_DSPP, 118, 6},
+	{ DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 119, 1},
+	{ DBGBUS_DSPP, 119, 2},
+	{ DBGBUS_DSPP, 119, 3},
+	{ DBGBUS_DSPP, 119, 4},
+	{ DBGBUS_DSPP, 119, 5},
+	{ DBGBUS_DSPP, 119, 6},
+	{ DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 120, 1},
+	{ DBGBUS_DSPP, 120, 2},
+	{ DBGBUS_DSPP, 120, 3},
+	{ DBGBUS_DSPP, 120, 4},
+	{ DBGBUS_DSPP, 120, 5},
+	{ DBGBUS_DSPP, 120, 6},
+	{ DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
+
+	/* csc */
+	{ DBGBUS_SSPP0, 7, 0},
+	{ DBGBUS_SSPP0, 7, 1},
+	{ DBGBUS_SSPP0, 27, 0},
+	{ DBGBUS_SSPP0, 27, 1},
+	{ DBGBUS_SSPP1, 7, 0},
+	{ DBGBUS_SSPP1, 7, 1},
+	{ DBGBUS_SSPP1, 27, 0},
+	{ DBGBUS_SSPP1, 27, 1},
+
+	/* pcc */
+	{ DBGBUS_SSPP0, 3,  3},
+	{ DBGBUS_SSPP0, 23, 3},
+	{ DBGBUS_SSPP0, 33, 3},
+	{ DBGBUS_SSPP0, 43, 3},
+	{ DBGBUS_SSPP1, 3,  3},
+	{ DBGBUS_SSPP1, 23, 3},
+	{ DBGBUS_SSPP1, 33, 3},
+	{ DBGBUS_SSPP1, 43, 3},
+
+	/* spa */
+	{ DBGBUS_SSPP0, 8,  0},
+	{ DBGBUS_SSPP0, 28, 0},
+	{ DBGBUS_SSPP1, 8,  0},
+	{ DBGBUS_SSPP1, 28, 0},
+	{ DBGBUS_DSPP, 13, 0},
+	{ DBGBUS_DSPP, 19, 0},
+
+	/* igc */
+	{ DBGBUS_SSPP0, 17, 0},
+	{ DBGBUS_SSPP0, 17, 1},
+	{ DBGBUS_SSPP0, 17, 3},
+	{ DBGBUS_SSPP0, 37, 0},
+	{ DBGBUS_SSPP0, 37, 1},
+	{ DBGBUS_SSPP0, 37, 3},
+	{ DBGBUS_SSPP0, 46, 0},
+	{ DBGBUS_SSPP0, 46, 1},
+	{ DBGBUS_SSPP0, 46, 3},
+
+	{ DBGBUS_SSPP1, 17, 0},
+	{ DBGBUS_SSPP1, 17, 1},
+	{ DBGBUS_SSPP1, 17, 3},
+	{ DBGBUS_SSPP1, 37, 0},
+	{ DBGBUS_SSPP1, 37, 1},
+	{ DBGBUS_SSPP1, 37, 3},
+	{ DBGBUS_SSPP1, 46, 0},
+	{ DBGBUS_SSPP1, 46, 1},
+	{ DBGBUS_SSPP1, 46, 3},
+
+	{ DBGBUS_DSPP, 14, 0},
+	{ DBGBUS_DSPP, 14, 1},
+	{ DBGBUS_DSPP, 14, 3},
+	{ DBGBUS_DSPP, 20, 0},
+	{ DBGBUS_DSPP, 20, 1},
+	{ DBGBUS_DSPP, 20, 3},
+
+	/* intf0-3 */
+	{ DBGBUS_PERIPH, 0, 0},
+	{ DBGBUS_PERIPH, 1, 0},
+	{ DBGBUS_PERIPH, 2, 0},
+	{ DBGBUS_PERIPH, 3, 0},
+
+	/* te counter wrapper */
+	{ DBGBUS_PERIPH, 60, 0},
+
+	/* dsc0 */
+	{ DBGBUS_PERIPH, 47, 0},
+	{ DBGBUS_PERIPH, 47, 1},
+	{ DBGBUS_PERIPH, 47, 2},
+	{ DBGBUS_PERIPH, 47, 3},
+	{ DBGBUS_PERIPH, 47, 4},
+	{ DBGBUS_PERIPH, 47, 5},
+	{ DBGBUS_PERIPH, 47, 6},
+	{ DBGBUS_PERIPH, 47, 7},
+
+	/* dsc1 */
+	{ DBGBUS_PERIPH, 48, 0},
+	{ DBGBUS_PERIPH, 48, 1},
+	{ DBGBUS_PERIPH, 48, 2},
+	{ DBGBUS_PERIPH, 48, 3},
+	{ DBGBUS_PERIPH, 48, 4},
+	{ DBGBUS_PERIPH, 48, 5},
+	{ DBGBUS_PERIPH, 48, 6},
+	{ DBGBUS_PERIPH, 48, 7},
+
+	/* dsc2 */
+	{ DBGBUS_PERIPH, 51, 0},
+	{ DBGBUS_PERIPH, 51, 1},
+	{ DBGBUS_PERIPH, 51, 2},
+	{ DBGBUS_PERIPH, 51, 3},
+	{ DBGBUS_PERIPH, 51, 4},
+	{ DBGBUS_PERIPH, 51, 5},
+	{ DBGBUS_PERIPH, 51, 6},
+	{ DBGBUS_PERIPH, 51, 7},
+
+	/* dsc3 */
+	{ DBGBUS_PERIPH, 52, 0},
+	{ DBGBUS_PERIPH, 52, 1},
+	{ DBGBUS_PERIPH, 52, 2},
+	{ DBGBUS_PERIPH, 52, 3},
+	{ DBGBUS_PERIPH, 52, 4},
+	{ DBGBUS_PERIPH, 52, 5},
+	{ DBGBUS_PERIPH, 52, 6},
+	{ DBGBUS_PERIPH, 52, 7},
+
+	/* tear-check */
+	{ DBGBUS_PERIPH, 63, 0 },
+	{ DBGBUS_PERIPH, 64, 0 },
+	{ DBGBUS_PERIPH, 65, 0 },
+	{ DBGBUS_PERIPH, 73, 0 },
+	{ DBGBUS_PERIPH, 74, 0 },
+
+	/* cdwn */
+	{ DBGBUS_PERIPH, 80, 0},
+	{ DBGBUS_PERIPH, 80, 1},
+	{ DBGBUS_PERIPH, 80, 2},
+
+	{ DBGBUS_PERIPH, 81, 0},
+	{ DBGBUS_PERIPH, 81, 1},
+	{ DBGBUS_PERIPH, 81, 2},
+
+	{ DBGBUS_PERIPH, 82, 0},
+	{ DBGBUS_PERIPH, 82, 1},
+	{ DBGBUS_PERIPH, 82, 2},
+	{ DBGBUS_PERIPH, 82, 3},
+	{ DBGBUS_PERIPH, 82, 4},
+	{ DBGBUS_PERIPH, 82, 5},
+	{ DBGBUS_PERIPH, 82, 6},
+	{ DBGBUS_PERIPH, 82, 7},
+
+	/* hdmi */
+	{ DBGBUS_PERIPH, 68, 0},
+	{ DBGBUS_PERIPH, 68, 1},
+	{ DBGBUS_PERIPH, 68, 2},
+	{ DBGBUS_PERIPH, 68, 3},
+	{ DBGBUS_PERIPH, 68, 4},
+	{ DBGBUS_PERIPH, 68, 5},
+
+	/* edp */
+	{ DBGBUS_PERIPH, 69, 0},
+	{ DBGBUS_PERIPH, 69, 1},
+	{ DBGBUS_PERIPH, 69, 2},
+	{ DBGBUS_PERIPH, 69, 3},
+	{ DBGBUS_PERIPH, 69, 4},
+	{ DBGBUS_PERIPH, 69, 5},
+
+	/* dsi0 */
+	{ DBGBUS_PERIPH, 70, 0},
+	{ DBGBUS_PERIPH, 70, 1},
+	{ DBGBUS_PERIPH, 70, 2},
+	{ DBGBUS_PERIPH, 70, 3},
+	{ DBGBUS_PERIPH, 70, 4},
+	{ DBGBUS_PERIPH, 70, 5},
+
+	/* dsi1 */
+	{ DBGBUS_PERIPH, 71, 0},
+	{ DBGBUS_PERIPH, 71, 1},
+	{ DBGBUS_PERIPH, 71, 2},
+	{ DBGBUS_PERIPH, 71, 3},
+	{ DBGBUS_PERIPH, 71, 4},
+	{ DBGBUS_PERIPH, 71, 5},
+};
+
+static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
+	{0x214, 0x21c, 16, 2, 0x0, 0xd},     /* arb clients */
+	{0x214, 0x21c, 16, 2, 0x80, 0xc0},   /* arb clients */
+	{0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
+	{0x214, 0x21c, 0, 16, 0x0, 0xf},     /* xin blocks - axi side */
+	{0x214, 0x21c, 0, 16, 0x80, 0xa4},   /* xin blocks - axi side */
+	{0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
+	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
+};
+
+/**
+ * _dpu_dbg_enable_power - use callback to turn power on for hw register access
+ * @enable: whether to turn power on or off
+ */
+static inline void _dpu_dbg_enable_power(int enable)
+{
+	if (enable)
+		pm_runtime_get_sync(dpu_dbg_base.dev);
+	else
+		pm_runtime_put_sync(dpu_dbg_base.dev);
+}
+
+static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+{
+	bool in_log, in_mem;
+	u32 **dump_mem = NULL;
+	u32 *dump_addr = NULL;
+	u32 status = 0;
+	struct dpu_debug_bus_entry *head;
+	dma_addr_t dma = 0;
+	int list_size;
+	int i;
+	u32 offset;
+	void __iomem *mem_base = NULL;
+	struct dpu_dbg_reg_base *reg_base;
+
+	if (!bus || !bus->cmn.entries_size)
+		return;
+
+	list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+			reg_base_head)
+		if (strlen(reg_base->name) &&
+			!strcmp(reg_base->name, bus->cmn.name))
+			mem_base = reg_base->base + bus->top_blk_off;
+
+	if (!mem_base) {
+		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+		return;
+	}
+
+	dump_mem = &bus->cmn.dumped_content;
+
+	/* keep 4 entries of 4 bytes each in memory for every test point */
+	list_size = (bus->cmn.entries_size * 4 * 4);
+
+	in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+	in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+	if (!in_log && !in_mem)
+		return;
+
+	dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+				list_size, &dma, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			dev_info(dpu_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, list_size);
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	_dpu_dbg_enable_power(true);
+	for (i = 0; i < bus->cmn.entries_size; i++) {
+		head = bus->entries + i;
+		writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+				mem_base + head->wr_addr);
+		wmb(); /* make sure test bits were written */
+
+		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
+			offset = DBGBUS_DSPP_STATUS;
+			/* keep DSPP test point enabled */
+			if (head->wr_addr != DBGBUS_DSPP)
+				writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
+		} else {
+			offset = head->wr_addr + 0x4;
+		}
+
+		status = readl_relaxed(mem_base + offset);
+
+		if (in_log)
+			dev_info(dpu_dbg_base.dev,
+					"waddr=0x%x blk=%d tst=%d val=0x%x\n",
+					head->wr_addr, head->block_id,
+					head->test_id, status);
+
+		if (dump_addr && in_mem) {
+			dump_addr[i*4]     = head->wr_addr;
+			dump_addr[i*4 + 1] = head->block_id;
+			dump_addr[i*4 + 2] = head->test_id;
+			dump_addr[i*4 + 3] = status;
+		}
+
+		if (head->analyzer)
+			head->analyzer(mem_base, head, status);
+
+		/* Disable debug bus once we are done */
+		writel_relaxed(0, mem_base + head->wr_addr);
+		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
+						head->wr_addr != DBGBUS_DSPP)
+			writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
+	}
+	_dpu_dbg_enable_power(false);
+
+	dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
+}
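
The in-memory dump written above is a flat array of four-word records, one
per test point: wr_addr, block_id, test_id, status. A minimal sketch of a
consumer, assuming only that record layout (the function name is
hypothetical, not part of this patch):

	/* Decode the debug-bus dump records produced by the loop above. */
	static void decode_dbg_bus_dump(const u32 *dump, u32 num_entries)
	{
		u32 i;

		for (i = 0; i < num_entries; i++) {
			const u32 *rec = &dump[i * 4];

			pr_info("waddr=0x%x blk=%u tst=%u val=0x%x\n",
				rec[0], rec[1], rec[2], rec[3]);
		}
	}
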
+
+static void _dpu_dbg_dump_vbif_debug_bus_entry(
+		struct vbif_debug_bus_entry *head, void __iomem *mem_base,
+		u32 *dump_addr, bool in_log)
+{
+	int i, j;
+	u32 val;
+
+	if (!dump_addr && !in_log)
+		return;
+
+	for (i = 0; i < head->block_cnt; i++) {
+		writel_relaxed(1 << (i + head->bit_offset),
+				mem_base + head->block_bus_addr);
+		/* make sure that the current bus block is enabled */
+		wmb();
+		for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
+			writel_relaxed(j, mem_base + head->block_bus_addr + 4);
+			/* make sure that test point is enabled */
+			wmb();
+			val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
+			if (dump_addr) {
+				*dump_addr++ = head->block_bus_addr;
+				*dump_addr++ = i;
+				*dump_addr++ = j;
+				*dump_addr++ = val;
+			}
+			if (in_log)
+				dev_info(dpu_dbg_base.dev,
+					"testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+					head->block_bus_addr, i, j, val);
+		}
+	}
+}
+
+static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+{
+	bool in_log, in_mem;
+	u32 **dump_mem = NULL;
+	u32 *dump_addr = NULL;
+	u32 value, d0, d1;
+	unsigned long reg, reg1, reg2;
+	struct vbif_debug_bus_entry *head;
+	dma_addr_t dma = 0;
+	int i, list_size = 0;
+	void __iomem *mem_base = NULL;
+	struct vbif_debug_bus_entry *dbg_bus;
+	u32 bus_size;
+	struct dpu_dbg_reg_base *reg_base;
+
+	if (!bus || !bus->cmn.entries_size)
+		return;
+
+	list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+			reg_base_head)
+		if (strlen(reg_base->name) &&
+			!strcmp(reg_base->name, bus->cmn.name))
+			mem_base = reg_base->base;
+
+	if (!mem_base) {
+		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+		return;
+	}
+
+	dbg_bus = bus->entries;
+	bus_size = bus->cmn.entries_size;
+	list_size = bus->cmn.entries_size;
+	dump_mem = &bus->cmn.dumped_content;
+
+	dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
+
+	if (!dump_mem || !dbg_bus || !bus_size || !list_size)
+		return;
+
+	/* account for the dump records needed by each test point */
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+		list_size += (head->block_cnt * head->test_pnt_cnt);
+	}
+
+	/* 4 bytes * 4 entries for each test point */
+	list_size *= 16;
+
+	in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+	in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+	if (!in_log && !in_mem)
+		return;
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+				list_size, &dma, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			dev_info(dpu_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, list_size);
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	_dpu_dbg_enable_power(true);
+
+	value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
+	writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
+
+	/* make sure that vbif core is on */
+	wmb();
+
+	/*
+	 * Extract VBIF error info based on XIN halt and error status.
+	 * If the XIN client is not in HALT state, or an error is detected,
+	 * then retrieve the VBIF error info for it.
+	 */
+	reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+	reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+	reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+	dev_err(dpu_dbg_base.dev,
+			"XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+			reg, reg1, reg2);
+	reg >>= 16;
+	reg &= ~(reg1 | reg2);
+	for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+		if (!test_bit(0, &reg)) {
+			writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+			/* make sure reg write goes through */
+			wmb();
+
+			d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+			d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+			dev_err(dpu_dbg_base.dev,
+					"Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+					i, d0, d1);
+		}
+		reg >>= 1;
+	}
+
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+
+		writel_relaxed(0, mem_base + head->disable_bus_addr);
+		writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+		/* make sure that other bus is off */
+		wmb();
+
+		_dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
+				in_log);
+		if (dump_addr)
+			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+	}
+
+	_dpu_dbg_enable_power(false);
+
+	dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
+}
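
Note how list_size is computed above: each (block, test point) pair
contributes one four-word record, and the initial seed adds a further 16
bytes per table entry. A standalone restatement of that sizing, assuming
the block_cnt/test_pnt_cnt members dereferenced above (the helper name is
hypothetical):

	/* Recompute the VBIF dump buffer size used by the dump function. */
	static int example_vbif_dump_bytes(struct vbif_debug_bus_entry *bus,
					   u32 nentries)
	{
		int size = nentries;	/* seed, as in the function above */
		u32 i;

		for (i = 0; i < nentries; i++)
			size += bus[i].block_cnt * bus[i].test_pnt_cnt;

		return size * 16;	/* 4 u32s of 4 bytes per record */
	}
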
+
+/**
+ * _dpu_dump_array - dump array of register bases
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
+			    bool dump_dbgbus_vbif_rt)
+{
+	if (dump_dbgbus_dpu)
+		_dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
+
+	if (dump_dbgbus_vbif_rt)
+		_dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
+}
+
+/**
+ * _dpu_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _dpu_dump_work(struct work_struct *work)
+{
+	_dpu_dump_array("dpudump_workitem",
+		dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
+		dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
+}
+
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+		  bool dump_dbgbus_vbif_rt)
+{
+	if (queue_work && work_pending(&dpu_dbg_base.dump_work))
+		return;
+
+	if (!queue_work) {
+		_dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
+		return;
+	}
+
+	/* schedule work to dump later */
+	dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
+	dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+			dump_dbgbus_vbif_rt;
+	schedule_work(&dpu_dbg_base.dump_work);
+}
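
dpu_dbg_dump() thus supports both an immediate and a deferred mode. A short
usage sketch; the call sites and origin strings below are illustrative,
only the function itself is defined in this patch:

	/* Process context: dump both buses synchronously. */
	dpu_dbg_dump(false, "example_commit_timeout", true, true);

	/* Atomic/IRQ context: defer register access to the work item. */
	dpu_dbg_dump(true, "example_underrun_irq", true, true);
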
+
+/**
+ * dpu_dbg_debugfs_open - debugfs open handler for debug dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/**
+ * dpu_dbg_dump_write - debugfs write handler for debug dump
+ * @file: file handle
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t dpu_dbg_dump_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	_dpu_dump_array("dump_debugfs", true, true);
+	return count;
+}
+
+static const struct file_operations dpu_dbg_dump_fops = {
+	.open = dpu_dbg_debugfs_open,
+	.write = dpu_dbg_dump_write,
+};
+
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+	static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+	char debug_name[80] = "";
+
+	if (!debugfs_root)
+		return -EINVAL;
+
+	debugfs_create_file("dump", 0600, debugfs_root, NULL,
+			&dpu_dbg_dump_fops);
+
+	if (dbg->dbgbus_dpu.entries) {
+		dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
+		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+				dbg->dbgbus_dpu.cmn.name);
+		dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
+		debugfs_create_u32(debug_name, 0600, debugfs_root,
+				&dbg->dbgbus_dpu.cmn.enable_mask);
+	}
+
+	if (dbg->dbgbus_vbif_rt.entries) {
+		dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+				dbg->dbgbus_vbif_rt.cmn.name);
+		dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+		debugfs_create_u32(debug_name, 0600, debugfs_root,
+				&dbg->dbgbus_vbif_rt.cmn.enable_mask);
+	}
+
+	return 0;
+}
+
+static void _dpu_dbg_debugfs_destroy(void)
+{
+}
+
+void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+	static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+
+	memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
+	memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+	if (IS_MSM8998_TARGET(hwversion)) {
+		dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
+		dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
+		dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+		dbg->dbgbus_vbif_rt.cmn.entries_size =
+				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+	} else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
+		dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
+		dbg->dbgbus_dpu.cmn.entries_size =
+				ARRAY_SIZE(dbg_bus_dpu_sdm845);
+		dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+		/* vbif is unchanged vs 8998 */
+		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+		dbg->dbgbus_vbif_rt.cmn.entries_size =
+				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+	} else {
+		pr_err("unsupported chipset id %X\n", hwversion);
+	}
+}
+
+int dpu_dbg_init(struct device *dev)
+{
+	if (!dev) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
+	dpu_dbg_base.dev = dev;
+
+	INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
+
+	return 0;
+}
+
+/**
+ * dpu_dbg_destroy - destroy dpu debug facilities
+ */
+void dpu_dbg_destroy(void)
+{
+	_dpu_dbg_debugfs_destroy();
+}
+
+void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+	dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
+}
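
Taken together, a probe path would wire up these entry points roughly as
follows; the hwversion, top-block offset and debugfs root are placeholders,
not values taken from this patch:

	static int example_dpu_debug_setup(struct device *dev, u32 hwversion,
					   struct dentry *debugfs_root)
	{
		int ret;

		ret = dpu_dbg_init(dev);
		if (ret)
			return ret;

		/* Select the per-chipset debug bus tables (8998/sdm845/sdm670) */
		dpu_dbg_init_dbg_buses(hwversion);

		/* Offset of the top block from the mdss base (placeholder) */
		dpu_dbg_set_dpu_top_offset(0x0);

		return dpu_dbg_debugfs_register(debugfs_root);
	}
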
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
new file mode 100644
index 0000000..1e6fa94
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DPU_DBG_H_
+#define DPU_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+enum dpu_dbg_dump_flag {
+	DPU_DBG_DUMP_IN_LOG = BIT(0),
+	DPU_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion:		Chipset revision
+ */
+void dpu_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * dpu_dbg_init - initialize global dpu debug facilities: regdump
+ * @dev:		device handle
+ * Returns:		0 or -ERROR
+ */
+int dpu_dbg_init(struct device *dev);
+
+/**
+ * dpu_dbg_debugfs_register - register entries at the given debugfs dir
+ * @debugfs_root:	debugfs root in which to create dpu debug entries
+ * Returns:	0 or -ERROR
+ */
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
+
+/**
+ * dpu_dbg_destroy - destroy the global dpu debug facilities
+ * Returns:	none
+ */
+void dpu_dbg_destroy(void);
+
+/**
+ * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
+ * @queue_work:	  whether to queue the dumping work to the work_struct
+ * @name:	  string indicating origin of dump
+ * @dump_dbgbus_dpu:      whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt:  whether to dump the vbif rt debug bus
+ * Returns:	none
+ */
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+		  bool dump_dbgbus_vbif_rt);
+
+/**
+ * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
+ *	address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void dpu_dbg_set_dpu_top_offset(u32 blk_off);
+
+#else
+
+static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int dpu_dbg_init(struct device *dev)
+{
+	return 0;
+}
+
+static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+	return 0;
+}
+
+static inline void dpu_dbg_destroy(void)
+{
+}
+
+static inline void dpu_dbg_dump(bool queue_work, const char *name,
+				bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
+{
+}
+
+static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 9a401ed..ec3fd67 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -264,6 +264,9 @@
 				DPU_ENCODER_FRAME_EVENT_ERROR);
 }
 
+static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+		int32_t hw_id, struct dpu_encoder_wait_info *info);
+
 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 		enum dpu_intr_idx intr_idx,
 		struct dpu_encoder_wait_info *wait_info)
@@ -467,7 +470,7 @@
 	}
 }
 
-void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
 {
 	struct dpu_encoder_virt *dpu_enc = NULL;
 	int i = 0;
@@ -1514,7 +1517,7 @@
 	}
 }
 
-int dpu_encoder_helper_wait_event_timeout(
+static int dpu_encoder_helper_wait_event_timeout(
 		int32_t drm_id,
 		int32_t hw_id,
 		struct dpu_encoder_wait_info *info)
@@ -1625,22 +1628,6 @@
 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
 }
 
-bool dpu_encoder_check_mode(struct drm_encoder *drm_enc, u32 mode)
-{
-	struct dpu_encoder_virt *dpu_enc;
-	struct msm_display_info *disp_info;
-
-	if (!drm_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return false;
-	}
-
-	dpu_enc = to_dpu_encoder_virt(drm_enc);
-	disp_info = &dpu_enc->disp_info;
-
-	return (disp_info->capabilities & mode);
-}
-
 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
 {
 	struct dpu_encoder_virt *dpu_enc;
@@ -1901,70 +1888,6 @@
 	DPU_ATRACE_END("encoder_kickoff");
 }
 
-int dpu_encoder_helper_hw_release(struct dpu_encoder_phys *phys_enc,
-		struct drm_framebuffer *fb)
-{
-	struct drm_encoder *drm_enc;
-	struct dpu_hw_mixer_cfg mixer;
-	struct dpu_rm_hw_iter lm_iter;
-	bool lm_valid = false;
-
-	if (!phys_enc || !phys_enc->parent) {
-		DPU_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	drm_enc = phys_enc->parent;
-	memset(&mixer, 0, sizeof(mixer));
-
-	/* reset associated CTL/LMs */
-	if (phys_enc->hw_ctl->ops.clear_pending_flush)
-		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
-	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
-		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
-
-	dpu_rm_init_hw_iter(&lm_iter, drm_enc->base.id, DPU_HW_BLK_LM);
-	while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &lm_iter)) {
-		struct dpu_hw_mixer *hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
-
-		if (!hw_lm)
-			continue;
-
-		/* need to flush LM to remove it */
-		if (phys_enc->hw_ctl->ops.get_bitmask_mixer &&
-				phys_enc->hw_ctl->ops.update_pending_flush)
-			phys_enc->hw_ctl->ops.update_pending_flush(
-					phys_enc->hw_ctl,
-					phys_enc->hw_ctl->ops.get_bitmask_mixer(
-					phys_enc->hw_ctl, hw_lm->idx));
-
-		if (fb) {
-			/* assume a single LM if targeting a frame buffer */
-			if (lm_valid)
-				continue;
-
-			mixer.out_height = fb->height;
-			mixer.out_width = fb->width;
-
-			if (hw_lm->ops.setup_mixer_out)
-				hw_lm->ops.setup_mixer_out(hw_lm, &mixer);
-		}
-
-		lm_valid = true;
-
-		/* only enable border color on LM */
-		if (phys_enc->hw_ctl->ops.setup_blendstage)
-			phys_enc->hw_ctl->ops.setup_blendstage(
-					phys_enc->hw_ctl, hw_lm->idx, NULL);
-	}
-
-	if (!lm_valid) {
-		DPU_DEBUG_ENC(to_dpu_encoder_virt(drm_enc), "lm not found\n");
-		return -EFAULT;
-	}
-	return 0;
-}
-
 void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
 {
 	struct dpu_encoder_virt *dpu_enc;
@@ -2519,6 +2442,8 @@
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+		if (!phys)
+			continue;
 
 		switch (event) {
 		case MSM_ENC_COMMIT_DONE:
@@ -2536,7 +2461,7 @@
 			return -EINVAL;
 		};
 
-		if (phys && fn_wait) {
+		if (fn_wait) {
 			DPU_ATRACE_BEGIN("wait_for_completion_event");
 			ret = fn_wait(phys);
 			DPU_ATRACE_END("wait_for_completion_event");
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644
index 0000000..60f809f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE			BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR			BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD		BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE			BIT(3)
+
+#define IDLE_TIMEOUT	(66 - 16/2)
+
+/**
+ * struct dpu_encoder_hw_resources - hardware resources used by an encoder
+ * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ *                          interface
+ */
+struct dpu_encoder_hw_resources {
+	enum dpu_intf_mode intfs[INTF_MAX];
+	bool needs_cdm;
+	u32 display_num_of_h_tiles;
+};
+
+/**
+ * dpu_encoder_kickoff_params - info encoder requires at kickoff
+ * @affected_displays:  bitmask, bit set means the ROI of the commit lies within
+ *                      the bounds of the physical display at the bit index
+ */
+struct dpu_encoder_kickoff_params {
+	unsigned long affected_displays;
+};
+
+/**
+ * dpu_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder:	encoder pointer
+ * @hw_res:	resource table to populate with encoder required resources
+ * @conn_state:	report hw reqs based on this proposed connector state
+ */
+void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
+		struct dpu_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state);
+
+/**
+ * dpu_encoder_register_vblank_callback - provide callback to encoder that
+ *	will be called on the next vblank.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister and disable IRQs
+ * @data:	user data provided to callback
+ */
+void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
+		void (*cb)(void *), void *data);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ *	will be called after the request is complete, or other events.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister
+ * @data:	user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+		void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ *	path (i.e. ctl flush and start) at next appropriate time.
+ *	Immediately: if no previous commit is outstanding.
+ *	Delayed: Block until next trigger can be issued.
+ * @encoder:	encoder pointer
+ * @params:	kickoff time parameters
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+		struct dpu_encoder_kickoff_params *params);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ *        kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder:	encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ *	(i.e. ctl flush and start) immediately.
+ * @encoder:	encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @drm_encoder:	encoder pointer
+ * @event:      event to wait for
+ * MSM_ENC_COMMIT_DONE -  Wait for hardware to have flushed the current pending
+ *                        frames to hardware at a vblank or ctl_start
+ *                        Encoders will map this differently depending on the
+ *                        panel type.
+ *	                  vid mode -> vsync_irq
+ *                        cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE -  Wait for the hardware to transfer all the pixels to
+ *                        the panel. Encoders will map this differently
+ *                        depending on the panel type.
+ *                        vid mode -> vsync_irq
+ *                        cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+						enum msm_event_wait event);
+
+/**
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_restore - restore the encoder configs
+ * @encoder:	encoder pointer
+ */
+void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev:        Pointer to drm device structure
+ * @drm_enc_mode: Corresponding DRM encoder mode (DRM_MODE_ENCODER_*)
+ * Returns:     Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(
+		struct drm_device *dev,
+		int drm_enc_mode);
+
+/**
+ * dpu_encoder_setup - setup dpu_encoder for the display probed
+ * @dev:		Pointer to drm device structure
+ * @enc:		Pointer to the drm_encoder
+ * @disp_info:	Pointer to the display info
+ */
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+		struct msm_display_info *disp_info);
+
+/**
+ * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
+ *	atomic commit, before any registers are written
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ *                    and command mode encoders.
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ * @idle_timeout:    idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+							u32 idle_timeout);
+
+#endif /* __DPU_ENCODER_H__ */
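
Per the kerneldoc above, encoder creation is a two-step sequence. A hedged
sketch, assuming dpu_encoder_init() follows the usual ERR_PTR convention on
failure (the wrapper name is hypothetical):

	static int example_create_dsi_encoder(struct drm_device *dev,
					      struct msm_display_info *info)
	{
		struct drm_encoder *enc;

		enc = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
		if (IS_ERR(enc))
			return PTR_ERR(enc);

		/* Bind the new encoder to the probed display */
		return dpu_encoder_setup(dev, enc, info);
	}
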
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644
index 0000000..c7df8aa
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_encoder.h"
+
+#define DPU_ENCODER_NAME_MAX	16
+
+/* wait for at most 2 vsync for lowest refresh rate (24hz) */
+#define KICKOFF_TIMEOUT_MS		84
+#define KICKOFF_TIMEOUT_JIFFIES		msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ *	split-panel configuration, where one panel is master, and others slaves.
+ *	Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO:	This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER:	This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE:	This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+	ENC_ROLE_SOLO,
+	ENC_ROLE_MASTER,
+	ENC_ROLE_SLAVE,
+};
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING:	Encoder transitioning to disable state
+ *			Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED:	Encoder is disabled
+ * @DPU_ENC_ENABLING:	Encoder transitioning to enabled
+ *			Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED:	Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET:	Encoder is enabled, but requires a hw_reset
+ *				to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+	DPU_ENC_DISABLING,
+	DPU_ENC_DISABLED,
+	DPU_ENC_ENABLING,
+	DPU_ENC_ENABLED,
+	DPU_ENC_ERR_NEEDS_HW_RESET
+};
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
+ *	provides for the physical encoders to use to callback.
+ * @handle_vblank_virt:	Notify virtual encoder of vblank IRQ reception
+ *			Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ *			Note: This is called from IRQ handler context.
+ * @handle_frame_done:	Notify virtual encoder that this phys encoder
+ *			has completed the last requested frame.
+ */
+struct dpu_encoder_virt_ops {
+	void (*handle_vblank_virt)(struct drm_encoder *,
+			struct dpu_encoder_phys *phys);
+	void (*handle_underrun_virt)(struct drm_encoder *,
+			struct dpu_encoder_phys *phys);
+	void (*handle_frame_done)(struct drm_encoder *,
+			struct dpu_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ *	the containing virtual encoder.
+ * @late_register:		DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit:		MSM Atomic Call, start of atomic commit sequence
+ * @is_master:			Whether this phys_enc is the current master
+ *				encoder. Can be switched at enable time. Based
+ *				on split_role and current mode (CMD/VID).
+ * @mode_fixup:			DRM Call. Fixup a DRM mode.
+ * @mode_set:			DRM Call. Set a DRM mode.
+ *				This likely caches the mode, for use at enable.
+ * @enable:			DRM Call. Enable a DRM mode.
+ * @disable:			DRM Call. Disable mode.
+ * @atomic_check:		DRM Call. Atomic check new DRM state.
+ * @destroy:			DRM Call. Destroy and release resources.
+ * @get_hw_resources:		Populate the structure with the hardware
+ *				resources that this phys_enc is using.
+ *				Expect no overlap between phys_encs.
+ * @control_vblank_irq:	Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done:	Wait for hardware to have flushed the
+ *				current pending frames to hardware
+ * @wait_for_tx_complete:	Wait for hardware to transfer the pixels
+ *				to the panel
+ * @wait_for_vblank:		Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff:	Do any work necessary prior to a kickoff
+ *				For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff:	Do any work necessary after a kickoff
+ * @trigger_start:		Process start event on physical encoder
+ * @needs_single_flush:		Whether encoder slaves need to be flushed
+ * @setup_misr:		Sets up MISR, enables and disables it based on sysfs
+ * @collect_misr:		Collects MISR data on frame update
+ * @hw_reset:			Issue HW recovery such as CTL reset and clear
+ *				DPU_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control:		Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc:		phys encoder can update the vsync_enable status
+ *                              on idle power collapse prepare
+ * @restore:			Restore all the encoder configs.
+ * @get_line_count:		Obtain current vertical line count
+ */
+
+struct dpu_encoder_phys_ops {
+	int (*late_register)(struct dpu_encoder_phys *encoder,
+			struct dentry *debugfs_root);
+	void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+	bool (*is_master)(struct dpu_encoder_phys *encoder);
+	bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
+			const struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*mode_set)(struct dpu_encoder_phys *encoder,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*enable)(struct dpu_encoder_phys *encoder);
+	void (*disable)(struct dpu_encoder_phys *encoder);
+	int (*atomic_check)(struct dpu_encoder_phys *encoder,
+			    struct drm_crtc_state *crtc_state,
+			    struct drm_connector_state *conn_state);
+	void (*destroy)(struct dpu_encoder_phys *encoder);
+	void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
+			struct dpu_encoder_hw_resources *hw_res,
+			struct drm_connector_state *conn_state);
+	int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+	int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+	int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+	int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+	void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
+			struct dpu_encoder_kickoff_params *params);
+	void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+	void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+	bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+
+	void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
+				bool enable, u32 frame_count);
+	u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
+	void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
+	void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+	void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+	void (*restore)(struct dpu_encoder_phys *phys);
+	int (*get_line_count)(struct dpu_encoder_phys *phys);
+};
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC:    Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
+ * @INTR_IDX_RDPTR:    Readpointer done interrupt for cmd mode panel
+ */
+enum dpu_intr_idx {
+	INTR_IDX_VSYNC,
+	INTR_IDX_PINGPONG,
+	INTR_IDX_UNDERRUN,
+	INTR_IDX_CTL_START,
+	INTR_IDX_RDPTR,
+	INTR_IDX_MAX,
+};
+
+/**
+ * dpu_encoder_irq - tracking structure for interrupts
+ * @name:		string name of interrupt
+ * @intr_type:		Encoder interrupt type
+ * @intr_idx:		Encoder interrupt enumeration
+ * @hw_idx:		HW Block ID
+ * @irq_idx:		IRQ interface lookup index from DPU IRQ framework
+ *			will be -EINVAL if IRQ is not registered
+ * @cb:			interrupt callback
+ */
+struct dpu_encoder_irq {
+	const char *name;
+	enum dpu_intr_type intr_type;
+	enum dpu_intr_idx intr_idx;
+	int hw_idx;
+	int irq_idx;
+	struct dpu_irq_callback cb;
+};
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ *	tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ *	phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent:		Pointer to the containing virtual encoder
+ * @connector:		If a mode is set, cached pointer to the active connector
+ * @ops:		Operations exposed to the virtual encoder
+ * @parent_ops:		Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop:		Hardware interface to the top registers
+ * @hw_ctl:		Hardware interface to the ctl registers
+ * @hw_cdm:		Hardware interface to the cdm registers
+ * @cdm_cfg:		Chroma-down hardware configuration
+ * @hw_pp:		Hardware interface to the ping pong registers
+ * @dpu_kms:		Pointer to the dpu_kms top level
+ * @cached_mode:	DRM mode cached at mode_set time, acted on in enable
+ * @split_role:		Role to play in a split-panel configuration
+ * @intf_mode:		Interface mode
+ * @intf_idx:		Interface index on dpu hardware
+ * @topology_name:	topology selected for the display
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state:	Enable state tracking
+ * @vblank_refcount:	Reference count of vblank request
+ * @vsync_cnt:		Vsync count for the physical encoder
+ * @underrun_cnt:	Underrun count for the physical encoder
+ * @pending_kickoff_cnt:	Atomic counter tracking the number of kickoffs
+ *				vs. the number of done/vblank irqs. Should hover
+ *				between 0-2. Incremented when a new kickoff is
+ *				scheduled, decremented in the irq handler.
+ * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl start
+ *                              pending.
+ * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
+ * @irq:			IRQ tracking structures
+ */
+struct dpu_encoder_phys {
+	struct drm_encoder *parent;
+	struct drm_connector *connector;
+	struct dpu_encoder_phys_ops ops;
+	const struct dpu_encoder_virt_ops *parent_ops;
+	struct dpu_hw_mdp *hw_mdptop;
+	struct dpu_hw_ctl *hw_ctl;
+	struct dpu_hw_cdm *hw_cdm;
+	struct dpu_hw_cdm_cfg cdm_cfg;
+	struct dpu_hw_pingpong *hw_pp;
+	struct dpu_kms *dpu_kms;
+	struct drm_display_mode cached_mode;
+	enum dpu_enc_split_role split_role;
+	enum dpu_intf_mode intf_mode;
+	enum dpu_intf intf_idx;
+	enum dpu_rm_topology_name topology_name;
+	spinlock_t *enc_spinlock;
+	enum dpu_enc_enable_state enable_state;
+	atomic_t vblank_refcount;
+	atomic_t vsync_cnt;
+	atomic_t underrun_cnt;
+	atomic_t pending_ctlstart_cnt;
+	atomic_t pending_kickoff_cnt;
+	wait_queue_head_t pending_kickoff_wq;
+	struct dpu_encoder_irq irq[INTR_IDX_MAX];
+};
+
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+	atomic_inc_return(&phys->pending_ctlstart_cnt);
+	return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
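+
+/*
+ * Note: the two counters incremented above are decremented on the IRQ
+ * path: pending_ctlstart_cnt in the ctl_start handler and
+ * pending_kickoff_cnt in the pp_done/vblank handlers, both of which
+ * then wake up pending_kickoff_wq.
+ */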
+
+/**
+ * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
+ *	mode specific operations
+ * @base:	Baseclass physical encoder structure
+ * @hw_intf:	Hardware interface to the intf registers
+ * @timing_params: Current timing parameter
+ */
+struct dpu_encoder_phys_vid {
+	struct dpu_encoder_phys base;
+	struct dpu_hw_intf *hw_intf;
+	struct intf_timing_params timing_params;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ *	mode specific operations
+ * @base:	Baseclass physical encoder structure
+ * @stream_sel:	Stream selection for multi-stream interfaces
+ * @serialize_wait4pp:	when set, wait for the pp_done interrupt right after
+ *			ctl_start instead of before the next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+	struct dpu_encoder_phys base;
+	int stream_sel;
+	bool serialize_wait4pp;
+	int pp_timeout_report_cnt;
+	atomic_t pending_vblank_cnt;
+	wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms:		Pointer to the dpu_kms top level
+ * @parent:		Pointer to the containing virtual encoder
+ * @parent_ops:		Callbacks exposed by the parent to the phys_enc
+ * @split_role:		Role to play in a split-panel configuration
+ * @intf_idx:		Interface index this phys_enc will control
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+	struct dpu_kms *dpu_kms;
+	struct drm_encoder *parent;
+	const struct dpu_encoder_virt_ops *parent_ops;
+	enum dpu_enc_split_role split_role;
+	enum dpu_intf intf_idx;
+	spinlock_t *enc_spinlock;
+};
+
+/**
+ * struct dpu_encoder_wait_info - container for passing arguments to irq wait functions
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+	wait_queue_head_t *wq;
+	atomic_t *atomic_cnt;
+	s64 timeout_ms;
+};
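+
+/*
+ * Illustrative sketch only (not part of the driver API): a wait helper
+ * is expected to block on @wq until @atomic_cnt drops to zero, roughly:
+ *
+ *	rc = wait_event_timeout(*wait_info->wq,
+ *			atomic_read(wait_info->atomic_cnt) == 0,
+ *			msecs_to_jiffies(wait_info->timeout_ms));
+ *
+ * dpu_encoder_helper_wait_for_irq() below wraps this pattern; on
+ * timeout it may additionally consult the IRQ status before giving up.
+ */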
+
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p:	Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+		struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p:	Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+		struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ *	This helper function may be optionally specified by physical
+ *	encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_hw_reset - issue ctl hw reset
+ *	This helper function may be optionally specified by physical
+ *	encoders if they require ctl hw reset. If state is currently
+ *	DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+		struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+		return BLEND_3D_NONE;
+
+	if (phys_enc->split_role == ENC_ROLE_SOLO &&
+	    phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+		return BLEND_3D_H_ROW_INT;
+
+	return BLEND_3D_NONE;
+}
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ *	This helper function may be used by physical encoders to configure
+ *	the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+		struct dpu_encoder_phys *phys_enc,
+		enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ *	timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ *	note: callers typically invoke dpu_encoder_helper_report_irq_timeout() on timeout
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @wait_info: wait info struct
+ * @Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx,
+		struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_register_irq - register and enable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_unregister_irq - unregister and disable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx);
+
+#endif /* __dpu_encoder_phys_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644
index 0000000..3084675
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+	container_of(x, struct dpu_encoder_phys_cmd, base)
+
+#define PP_TIMEOUT_MAX_TRIALS	10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
+		struct dpu_encoder_phys_cmd *cmd_enc)
+{
+	return KICKOFF_TIMEOUT_MS;
+}
+
+static inline bool dpu_encoder_phys_cmd_is_master(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static bool dpu_encoder_phys_cmd_mode_fixup(
+		struct dpu_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	if (phys_enc)
+		DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+	return true;
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_hw_ctl *ctl;
+	struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+	if (!phys_enc)
+		return;
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.setup_intf_cfg)
+		return;
+
+	intf_cfg.intf = phys_enc->intf_idx;
+	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+	intf_cfg.stream_sel = cmd_enc->stream_sel;
+	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	unsigned long lock_flags;
+	int new_cnt;
+	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return;
+
+	DPU_ATRACE_BEGIN("pp_done_irq");
+	/* notify all synchronous clients first, then asynchronous clients */
+	if (phys_enc->parent_ops->handle_frame_done)
+		phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+				phys_enc, event);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+					  phys_enc->hw_pp->idx - PINGPONG_0,
+					  new_cnt, event);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+	DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return;
+
+	DPU_ATRACE_BEGIN("rd_ptr_irq");
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (phys_enc->parent_ops->handle_vblank_virt)
+		phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+			phys_enc);
+
+	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+	wake_up_all(&cmd_enc->pending_vblank_wq);
+	DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc || !phys_enc->hw_ctl)
+		return;
+
+	DPU_ATRACE_BEGIN("ctl_start_irq");
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+	/* Signal any waiting ctl start interrupt */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+	DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+
+	if (!phys_enc)
+		return;
+
+	if (phys_enc->parent_ops->handle_underrun_virt)
+		phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_irq *irq;
+
+	irq = &phys_enc->irq[INTR_IDX_CTL_START];
+	irq->hw_idx = phys_enc->hw_ctl->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+	irq->hw_idx = phys_enc->hw_pp->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_RDPTR];
+	irq->hw_idx = phys_enc->hw_pp->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->hw_idx = phys_enc->intf_idx;
+	irq->irq_idx = -EINVAL;
+}
+
+static void dpu_encoder_phys_cmd_mode_set(
+		struct dpu_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
+	struct dpu_rm_hw_iter iter;
+	int i, instance;
+
+	if (!phys_enc || !mode || !adj_mode) {
+		DPU_ERROR("invalid args\n");
+		return;
+	}
+	phys_enc->cached_mode = *adj_mode;
+	DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+	drm_mode_debug_printmodeline(adj_mode);
+
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW Resources. Shouldn't fail */
+	dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		if (dpu_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+	}
+
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	_dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+	bool do_log = false;
+
+	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+		return -EINVAL;
+
+	cmd_enc->pp_timeout_report_cnt++;
+	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+		do_log = true;
+	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
+		do_log = true;
+	}
+
+	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
+		     phys_enc->hw_pp->idx - PINGPONG_0,
+		     cmd_enc->pp_timeout_report_cnt,
+		     atomic_read(&phys_enc->pending_kickoff_cnt),
+		     frame_event);
+
+	/* to avoid flooding, only log first time, and "dead" time */
+	if (do_log) {
+		DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
+			  DRMID(phys_enc->parent),
+			  phys_enc->hw_pp->idx - PINGPONG_0,
+			  phys_enc->hw_ctl->idx - CTL_0,
+			  cmd_enc->pp_timeout_report_cnt,
+			  atomic_read(&phys_enc->pending_kickoff_cnt));
+
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+		dpu_dbg_dump(false, __func__, true, true);
+	}
+
+	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+	/* request a ctl reset before the next kickoff */
+	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+	if (phys_enc->parent_ops->handle_frame_done)
+		phys_enc->parent_ops->handle_frame_done(
+				phys_enc->parent, phys_enc, frame_event);
+
+	return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_encoder_wait_info wait_info;
+	int ret;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+			&wait_info);
+	if (ret == -ETIMEDOUT)
+		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+	else if (!ret)
+		cmd_enc->pp_timeout_report_cnt = 0;
+
+	return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+		struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	int ret = 0;
+	int refcount;
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	refcount = atomic_read(&phys_enc->vblank_refcount);
+
+	/* Slave encoders don't report vblank */
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		goto end;
+
+	/* protect against negative */
+	if (!enable && refcount == 0) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+		      phys_enc->hw_pp->idx - PINGPONG_0,
+		      enable ? "true" : "false", refcount);
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = dpu_encoder_helper_unregister_irq(phys_enc,
+				INTR_IDX_RDPTR);
+
+end:
+	if (ret) {
+		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+			  DRMID(phys_enc->parent),
+			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
+			  enable ? "true" : "false", refcount);
+	}
+
+	return ret;
+}
+
+static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc)
+		return;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable) {
+		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
+		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+		if (dpu_encoder_phys_cmd_is_master(phys_enc))
+			dpu_encoder_helper_register_irq(phys_enc,
+					INTR_IDX_CTL_START);
+	} else {
+		if (dpu_encoder_phys_cmd_is_master(phys_enc))
+			dpu_encoder_helper_unregister_irq(phys_enc,
+					INTR_IDX_CTL_START);
+
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+	}
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_hw_tear_check tc_cfg = { 0 };
+	struct drm_display_mode *mode;
+	bool tc_enable = true;
+	u32 vsync_hz;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	mode = &phys_enc->cached_mode;
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+		!phys_enc->hw_pp->ops.enable_tearcheck) {
+		DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+		return;
+	}
+
+	dpu_kms = phys_enc->dpu_kms;
+	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid device\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	/*
+	 * TE default: dsi byte clock calculated based on 70 fps;
+	 * around 14 ms to complete a kickoff cycle if te disabled;
+	 * vclk_line based on 60 fps; write is faster than read;
+	 * init == start == rdptr;
+	 *
+	 * vsync_count is the ratio of the MDP VSYNC clock frequency to the
+	 * LCD panel frequency, divided by the no. of rows (lines) in the
+	 * LCD panel.
+	 */
+	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
+	if (!vsync_hz) {
+		DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
+				 vsync_hz);
+		return;
+	}
+
+	tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
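+
+	/*
+	 * Worked example (hypothetical numbers): with a 19.2 MHz vsync
+	 * clock and a panel running vtotal = 2000 lines at vrefresh = 60,
+	 * vsync_count = 19200000 / (2000 * 60) = 160 clocks per line.
+	 */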
+
+	/* enable external TE after kickoff to avoid premature autorefresh */
+	tc_cfg.hw_vsync_mode = 0;
+
+	/*
+	 * By setting sync_cfg_height to near max register value, we essentially
+	 * disable dpu hw generated TE signal, since hw TE will arrive first.
+	 * The only caveat is that, due to an error, we may hit wrap-around.
+	 */
+	tc_cfg.sync_cfg_height = 0xFFF0;
+	tc_cfg.vsync_init_val = mode->vdisplay;
+	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+	tc_cfg.start_pos = mode->vdisplay;
+	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+		mode->vtotal, mode->vrefresh);
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+		tc_cfg.rd_ptr_irq);
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+		tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+	phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
+static void _dpu_encoder_phys_cmd_pingpong_config(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+			|| !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+		return;
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+			phys_enc->hw_pp->idx - PINGPONG_0);
+	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+		struct dpu_encoder_phys *phys_enc)
+{
+	/*
+	 * we do separate flush for each CTL and let
+	 * CTL_START synchronize them
+	 */
+	return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_hw_ctl *ctl;
+	u32 flush_mask = 0;
+
+	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		goto skip_flush;
+
+	ctl = phys_enc->hw_ctl;
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+	return;
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid phys encoder\n");
+		return;
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+		DPU_ERROR("already enabled\n");
+		return;
+	}
+
+	dpu_encoder_phys_cmd_enable_helper(phys_enc);
+	phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+		struct dpu_encoder_phys *phys_enc, bool enable)
+{
+	if (!phys_enc || !phys_enc->hw_pp ||
+			!phys_enc->hw_pp->ops.connect_external_te)
+		return;
+
+	trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+	phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+		struct dpu_encoder_phys *phys_enc)
+{
+	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_hw_pingpong *hw_pp;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return -EINVAL;
+
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		return -EINVAL;
+
+	hw_pp = phys_enc->hw_pp;
+	if (!hw_pp->ops.get_line_count)
+		return -EINVAL;
+
+	return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+		      phys_enc->hw_pp->idx - PINGPONG_0,
+		      phys_enc->enable_state);
+
+	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+		return;
+	}
+
+	if (phys_enc->hw_pp->ops.enable_tearcheck)
+		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_get_hw_resources(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+		DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+		return;
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "\n");
+	hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_kickoff_params *params)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	int ret;
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+		      phys_enc->hw_pp->idx - PINGPONG_0,
+		      atomic_read(&phys_enc->pending_kickoff_cnt));
+
+	/*
+	 * Mark kickoff request as outstanding. If there is more than one
+	 * outstanding, then we have to wait for the previous one to complete.
+	 */
+	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+	if (ret) {
+		/* force pending_kickoff_cnt 0 to discard failed kickoff */
+		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+			  DRMID(phys_enc->parent), ret,
+			  phys_enc->hw_pp->idx - PINGPONG_0);
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_encoder_wait_info wait_info;
+	int ret;
+
+	if (!phys_enc || !phys_enc->hw_ctl) {
+		DPU_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+			&wait_info);
+	if (ret == -ETIMEDOUT) {
+		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+		struct dpu_encoder_phys *phys_enc)
+{
+	int rc;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+	if (rc) {
+		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+			  DRMID(phys_enc->parent), rc,
+			  phys_enc->intf_idx - INTF_0);
+	}
+
+	return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+		struct dpu_encoder_phys *phys_enc)
+{
+	int rc = 0;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	/* only required for master controller */
+	if (dpu_encoder_phys_cmd_is_master(phys_enc))
+		rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+	/* required for both controllers */
+	if (!rc && cmd_enc->serialize_wait4pp)
+		dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+
+	return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+		struct dpu_encoder_phys *phys_enc)
+{
+	int rc = 0;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+	struct dpu_encoder_wait_info wait_info;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	/* only required for master controller */
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		return rc;
+
+	wait_info.wq = &cmd_enc->pending_vblank_wq;
+	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+	wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+	atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+	rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+			&wait_info);
+
+	return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+		struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return;
+
+	/*
+	 * re-enable external TE, either for the first time after enabling
+	 * or if disabled for Autorefresh
+	 */
+	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+		struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return;
+
+	dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+		struct dpu_encoder_phys_ops *ops)
+{
+	ops->is_master = dpu_encoder_phys_cmd_is_master;
+	ops->mode_set = dpu_encoder_phys_cmd_mode_set;
+	ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
+	ops->enable = dpu_encoder_phys_cmd_enable;
+	ops->disable = dpu_encoder_phys_cmd_disable;
+	ops->destroy = dpu_encoder_phys_cmd_destroy;
+	ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
+	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+	ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+	ops->hw_reset = dpu_encoder_helper_hw_reset;
+	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+	ops->restore = dpu_encoder_phys_cmd_enable_helper;
+	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+		struct dpu_enc_phys_init_params *p)
+{
+	struct dpu_encoder_phys *phys_enc = NULL;
+	struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+	struct dpu_hw_mdp *hw_mdp;
+	struct dpu_encoder_irq *irq;
+	int i, ret = 0;
+
+	DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+	if (!cmd_enc) {
+		ret = -ENOMEM;
+		DPU_ERROR("failed to allocate\n");
+		goto fail;
+	}
+	phys_enc = &cmd_enc->base;
+
+	hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		/* avoid returning ERR_PTR(0) when hw_mdp is NULL */
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		DPU_ERROR("failed to get mdptop\n");
+		goto fail_mdp_init;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+	phys_enc->intf_idx = p->intf_idx;
+
+	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->dpu_kms = p->dpu_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_CMD;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	cmd_enc->stream_sel = 0;
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+	for (i = 0; i < INTR_IDX_MAX; i++) {
+		irq = &phys_enc->irq[i];
+		INIT_LIST_HEAD(&irq->cb.list);
+		irq->irq_idx = -EINVAL;
+		irq->hw_idx = -EINVAL;
+		irq->cb.arg = phys_enc;
+	}
+
+	irq = &phys_enc->irq[INTR_IDX_CTL_START];
+	irq->name = "ctl_start";
+	irq->intr_type = DPU_IRQ_TYPE_CTL_START;
+	irq->intr_idx = INTR_IDX_CTL_START;
+	irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+	irq->name = "pp_done";
+	irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
+	irq->intr_idx = INTR_IDX_PINGPONG;
+	irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_RDPTR];
+	irq->name = "pp_rd_ptr";
+	irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
+	irq->intr_idx = INTR_IDX_RDPTR;
+	irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->name = "underrun";
+	irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+	irq->intr_idx = INTR_IDX_UNDERRUN;
+	irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+	DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+	return phys_enc;
+
+fail_mdp_init:
+	kfree(cmd_enc);
+fail:
+	return ERR_PTR(ret);
+}
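+
+/*
+ * Usage sketch (hypothetical caller, for illustration only): the virtual
+ * encoder would construct one phys encoder per hardware interface, e.g.:
+ *
+ *	struct dpu_enc_phys_init_params params = {
+ *		.dpu_kms = dpu_kms,
+ *		.parent = drm_enc,
+ *		.parent_ops = parent_ops,
+ *		.split_role = ENC_ROLE_SOLO,
+ *		.intf_idx = INTF_1,
+ *		.enc_spinlock = &enc_lock,
+ *	};
+ *	struct dpu_encoder_phys *phys = dpu_encoder_phys_cmd_init(&params);
+ *
+ *	if (IS_ERR(phys))
+ *		return PTR_ERR(phys);
+ *
+ * Here dpu_kms, drm_enc, parent_ops and enc_lock are assumed to come
+ * from the caller's context.
+ */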
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644
index 0000000..c9962a3
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+	container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static void drm_mode_to_intf_timing_params(
+		const struct dpu_encoder_phys_vid *vid_enc,
+		const struct drm_display_mode *mode,
+		struct intf_timing_params *timing)
+{
+	memset(timing, 0, sizeof(*timing));
+
+	if ((mode->htotal < mode->hsync_end)
+			|| (mode->hsync_start < mode->hdisplay)
+			|| (mode->vtotal < mode->vsync_end)
+			|| (mode->vsync_start < mode->vdisplay)
+			|| (mode->hsync_end < mode->hsync_start)
+			|| (mode->vsync_end < mode->vsync_start)) {
+		DPU_ERROR(
+		    "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+				mode->hsync_start, mode->hsync_end,
+				mode->htotal, mode->hdisplay);
+		DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+				mode->vsync_start, mode->vsync_end,
+				mode->vtotal, mode->vdisplay);
+		return;
+	}
+
+	/*
+	 * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+	 *  Active Region      Front Porch   Sync   Back Porch
+	 * <-----------------><------------><-----><----------->
+	 * <- [hv]display --->
+	 * <--------- [hv]sync_start ------>
+	 * <----------------- [hv]sync_end ------->
+	 * <---------------------------- [hv]total ------------->
+	 */
+	timing->width = mode->hdisplay;	/* active width */
+	timing->height = mode->vdisplay;	/* active height */
+	timing->xres = timing->width;
+	timing->yres = timing->height;
+	timing->h_back_porch = mode->htotal - mode->hsync_end;
+	timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+	timing->v_back_porch = mode->vtotal - mode->vsync_end;
+	timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+	timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+	timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+	timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+	timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+	timing->border_clr = 0;
+	timing->underflow_clr = 0xff;
+	timing->hsync_skew = mode->hskew;
+
+	/* DSI controller cannot handle active-low sync signals. */
+	if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+		timing->hsync_polarity = 0;
+		timing->vsync_polarity = 0;
+	}
+
+	/*
+	 * For edp only:
+	 * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+	 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+	 */
+	/*
+	 * if (vid_enc->hw->cap->type == INTF_EDP) {
+	 * display_v_start += mode->htotal - mode->hsync_start;
+	 * display_v_end -= mode->hsync_start - mode->hdisplay;
+	 * }
+	 */
+}
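+
+/*
+ * Worked example (standard CEA 1080p60 timings, for illustration): with
+ * hdisplay = 1920, hsync_start = 2008, hsync_end = 2052 and
+ * htotal = 2200, the math above gives h_front_porch = 2008 - 1920 = 88,
+ * hsync_pulse_width = 2052 - 2008 = 44 and
+ * h_back_porch = 2200 - 2052 = 148 pixels.
+ */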
+
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+	u32 active = timing->xres;
+	u32 inactive =
+	    timing->h_back_porch + timing->h_front_porch +
+	    timing->hsync_pulse_width;
+	return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+	u32 active = timing->yres;
+	u32 inactive =
+	    timing->v_back_porch + timing->v_front_porch +
+	    timing->vsync_pulse_width;
+	return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ *	Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have a very large VFP; however, we only need the total
+ * number of lines dictated by the chip's worst-case latencies.
+ */
+static u32 programmable_fetch_get_num_lines(
+		struct dpu_encoder_phys_vid *vid_enc,
+		const struct intf_timing_params *timing)
+{
+	u32 worst_case_needed_lines =
+	    vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+	u32 start_of_frame_lines =
+	    timing->v_back_porch + timing->vsync_pulse_width;
+	u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+	u32 actual_vfp_lines = 0;
+
+	/* Fetch must be outside active lines, otherwise undefined. */
+	if (start_of_frame_lines >= worst_case_needed_lines) {
+		DPU_DEBUG_VIDENC(vid_enc,
+				"prog fetch is not needed, large vbp+vsw\n");
+		actual_vfp_lines = 0;
+	} else if (timing->v_front_porch < needed_vfp_lines) {
+		/* Warn fetch needed, but not enough porch in panel config */
+		pr_warn_once
+			("low vbp+vfp may lead to perf issues in some cases\n");
+		DPU_DEBUG_VIDENC(vid_enc,
+				"less vfp than fetch req, using entire vfp\n");
+		actual_vfp_lines = timing->v_front_porch;
+	} else {
+		DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+		actual_vfp_lines = needed_vfp_lines;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc,
+		"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+		timing->v_front_porch, timing->v_back_porch,
+		timing->vsync_pulse_width);
+	DPU_DEBUG_VIDENC(vid_enc,
+		"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+		worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+	return actual_vfp_lines;
+}
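+
+/*
+ * Worked example (hypothetical numbers): if the chip needs
+ * prog_fetch_lines_worst_case = 25 and the mode provides
+ * v_back_porch = 3 plus vsync_pulse_width = 2 (5 start-of-frame lines),
+ * then needed_vfp_lines = 25 - 5 = 20. With v_front_porch = 36 the
+ * fetch starts 20 lines into the VFP; with v_front_porch = 10 the
+ * entire 10-line VFP is used instead.
+ */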
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ *	the start of fetch into the vertical front porch for cases where the
+ *	vsync pulse width and vertical back porch time are insufficient.
+ *
+ *	Gets the number of lines to prefetch, then calculates the VSYNC
+ *	counter value.
+ *	HW layer requires VSYNC counter of first pixel of tgt VFP line.
+ *
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+				      const struct intf_timing_params *timing)
+{
+	struct dpu_encoder_phys_vid *vid_enc =
+		to_dpu_encoder_phys_vid(phys_enc);
+	struct intf_prog_fetch f = { 0 };
+	u32 vfp_fetch_lines = 0;
+	u32 horiz_total = 0;
+	u32 vert_total = 0;
+	u32 vfp_fetch_start_vsync_counter = 0;
+	unsigned long lock_flags;
+
+	if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+		return;
+
+	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+	if (vfp_fetch_lines) {
+		vert_total = get_vertical_total(timing);
+		horiz_total = get_horizontal_total(timing);
+		vfp_fetch_start_vsync_counter =
+		    (vert_total - vfp_fetch_lines) * horiz_total + 1;
+		f.enable = 1;
+		f.fetch_start = vfp_fetch_start_vsync_counter;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc,
+		"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+		vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
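+
+/*
+ * Continuing the hypothetical example above: with vfp_fetch_lines = 20,
+ * vert_total = 1125 and horiz_total = 2200, the fetch start programmed
+ * into the hardware is (1125 - 20) * 2200 + 1 = 2431001, i.e. the vsync
+ * counter value of the first pixel of the target VFP line.
+ */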
+
+static bool dpu_encoder_phys_vid_mode_fixup(
+		struct dpu_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	if (phys_enc)
+		DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+
+	/*
+	 * Modifying mode has consequences when the mode comes back to us
+	 */
+	return true;
+}
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+	struct drm_display_mode mode;
+	struct intf_timing_params timing_params = { 0 };
+	const struct dpu_format *fmt = NULL;
+	u32 fmt_fourcc = DRM_FORMAT_RGB888;
+	unsigned long lock_flags;
+	struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+	if (!phys_enc || !phys_enc->hw_ctl ||
+			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	mode = phys_enc->cached_mode;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+		DPU_ERROR("timing engine setup is not supported\n");
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+	drm_mode_debug_printmodeline(&mode);
+
+	if (phys_enc->split_role != ENC_ROLE_SOLO) {
+		mode.hdisplay >>= 1;
+		mode.htotal >>= 1;
+		mode.hsync_start >>= 1;
+		mode.hsync_end >>= 1;
+
+		DPU_DEBUG_VIDENC(vid_enc,
+			"split_role %d, halve horizontal %d %d %d %d\n",
+			phys_enc->split_role,
+			mode.hdisplay, mode.htotal,
+			mode.hsync_start, mode.hsync_end);
+	}
+
+	drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+	fmt = dpu_get_dpu_format(fmt_fourcc);
+	DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+	intf_cfg.intf = vid_enc->hw_intf->idx;
+	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+			&timing_params, fmt);
+	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	programmable_fetch_config(phys_enc, &timing_params);
+
+	vid_enc->timing_params = timing_params;
+}
+
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	struct dpu_hw_ctl *hw_ctl;
+	unsigned long lock_flags;
+	u32 flush_register = 0;
+	int new_cnt = -1, old_cnt = -1;
+
+	if (!phys_enc)
+		return;
+
+	hw_ctl = phys_enc->hw_ctl;
+	if (!hw_ctl)
+		return;
+
+	DPU_ATRACE_BEGIN("vblank_irq");
+
+	if (phys_enc->parent_ops->handle_vblank_virt)
+		phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
+	old_cnt  = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+	/*
+	 * only decrement the pending flush count if we've actually flushed
+	 * hardware. due to sw irq latency, vblank may have already happened
+	 * so we need to double-check with hw that it accepted the flush bits
+	 */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	if (hw_ctl && hw_ctl->ops.get_flush_register)
+		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+	if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
+		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+				-1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+	DPU_ATRACE_END("vblank_irq");
+}
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+
+	if (!phys_enc)
+		return;
+
+	if (phys_enc->parent_ops->handle_underrun_virt)
+		phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return false;
+
+	return phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE;
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+}
+
+static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_irq *irq;
+
+	/*
+	 * Initialize irq->hw_idx only when irq is not registered.
+	 * Prevent invalidating irq->irq_idx as modeset may be
+	 * called many times during dfps.
+	 */
+
+	irq = &phys_enc->irq[INTR_IDX_VSYNC];
+	if (irq->irq_idx < 0)
+		irq->hw_idx = phys_enc->intf_idx;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	if (irq->irq_idx < 0)
+		irq->hw_idx = phys_enc->intf_idx;
+}
+
+static void dpu_encoder_phys_vid_mode_set(
+		struct dpu_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct dpu_rm *rm;
+	struct dpu_rm_hw_iter iter;
+	int i, instance;
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc || !phys_enc->dpu_kms) {
+		DPU_ERROR("invalid encoder/kms\n");
+		return;
+	}
+
+	rm = &phys_enc->dpu_kms->rm;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	if (adj_mode) {
+		phys_enc->cached_mode = *adj_mode;
+		drm_mode_debug_printmodeline(adj_mode);
+		DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+	}
+
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW Resources. Shouldn't fail */
+	dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		if (dpu_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+	}
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+		struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	int ret = 0;
+	struct dpu_encoder_phys_vid *vid_enc;
+	int refcount;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	refcount = atomic_read(&phys_enc->vblank_refcount);
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	/* Slave encoders don't report vblank */
+	if (!dpu_encoder_phys_vid_is_master(phys_enc))
+		goto end;
+
+	/* protect against negative */
+	if (!enable && refcount == 0) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+		      atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = dpu_encoder_helper_unregister_irq(phys_enc,
+				INTR_IDX_VSYNC);
+
+end:
+	if (ret) {
+		DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+			  DRMID(phys_enc->parent),
+			  vid_enc->hw_intf->idx - INTF_0, ret, enable,
+			  refcount);
+	}
+	return ret;
+}
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct dpu_encoder_phys_vid *vid_enc;
+	struct dpu_hw_intf *intf;
+	struct dpu_hw_ctl *ctl;
+	u32 flush_mask = 0;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		DPU_ERROR("invalid encoder/device\n");
+		return;
+	}
+	priv = phys_enc->parent->dev->dev_private;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	intf = vid_enc->hw_intf;
+	ctl = phys_enc->hw_ctl;
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+	dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+	/*
+	 * For single flush cases (dual-ctl or pp-split), skip setting the
+	 * flush bit for the slave intf, since both intfs use same ctl
+	 * and HW will only flush the master.
+	 */
+	if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+		!dpu_encoder_phys_vid_is_master(phys_enc))
+		goto skip_flush;
+
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+	DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+		ctl->idx - CTL_0, flush_mask);
+
+	/* ctl_flush & timing engine enable will be triggered by framework */
+	if (phys_enc->enable_state == DPU_ENC_DISABLED)
+		phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+	kfree(vid_enc);
+}
+
+static void dpu_encoder_phys_vid_get_hw_resources(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc || !hw_res) {
+		DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+				phys_enc != 0, hw_res != 0, conn_state != 0);
+		return;
+	}
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf) {
+		DPU_ERROR("invalid arg(s), hw_intf\n");
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+	hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+}
+
+static int _dpu_encoder_phys_vid_wait_for_vblank(
+		struct dpu_encoder_phys *phys_enc, bool notify)
+{
+	struct dpu_encoder_wait_info wait_info;
+	int ret;
+
+	if (!phys_enc) {
+		pr_err("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
+		if (notify && phys_enc->parent_ops->handle_frame_done)
+			phys_enc->parent_ops->handle_frame_done(
+					phys_enc->parent, phys_enc,
+					DPU_ENCODER_FRAME_EVENT_DONE);
+		return 0;
+	}
+
+	/* Wait for kickoff to complete */
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+			&wait_info);
+
+	if (ret == -ETIMEDOUT) {
+		dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+	} else if (!ret && notify && phys_enc->parent_ops->handle_frame_done)
+		phys_enc->parent_ops->handle_frame_done(
+				phys_enc->parent, phys_enc,
+				DPU_ENCODER_FRAME_EVENT_DONE);
+
+	return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_kickoff_params *params)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+	struct dpu_hw_ctl *ctl;
+	int rc;
+
+	if (!phys_enc || !params) {
+		DPU_ERROR("invalid encoder/parameters\n");
+		return;
+	}
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.wait_reset_status)
+		return;
+
+	/*
+	 * hw supports hardware initiated ctl reset, so before we kickoff a new
+	 * frame, need to check and wait for hw initiated ctl reset completion
+	 */
+	rc = ctl->ops.wait_reset_status(ctl);
+	if (rc) {
+		DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+				ctl->idx, rc);
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+		dpu_dbg_dump(false, __func__, true, true);
+	}
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct dpu_encoder_phys_vid *vid_enc;
+	unsigned long lock_flags;
+	int ret;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		DPU_ERROR("invalid encoder/device\n");
+		return;
+	}
+	priv = phys_enc->parent->dev->dev_private;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+		DPU_ERROR("already disabled\n");
+		return;
+	}
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+	if (dpu_encoder_phys_vid_is_master(phys_enc))
+		dpu_encoder_phys_inc_pending(phys_enc);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/*
+	 * Wait for a vsync so we know the ENABLE=0 latched before
+	 * the (connector) source of the vsyncs gets disabled,
+	 * otherwise we end up in a funny state if we re-enable
+	 * before the disable latches, with the result that some of
+	 * the setting changes for the new modeset (like the new
+	 * scanout buffer) don't latch properly.
+	 */
+	if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+		ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+		if (ret) {
+			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+				  DRMID(phys_enc->parent),
+				  vid_enc->hw_intf->idx - INTF_0, ret);
+		}
+	}
+
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+		struct dpu_encoder_phys *phys_enc)
+{
+	unsigned long lock_flags;
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+	/*
+	 * In video mode, the CTL must be flushed before the timing engine is
+	 * enabled, so video encoders turn on their interfaces now.
+	 */
+	if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+		trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+				    vid_enc->hw_intf->idx - INTF_0);
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+		vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+		phys_enc->enable_state = DPU_ENC_ENABLED;
+	}
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+	int ret;
+
+	if (!phys_enc)
+		return;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+			    vid_enc->hw_intf->idx - INTF_0,
+			    enable,
+			    atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable) {
+		ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+		if (ret)
+			return;
+
+		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+	} else {
+		dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+	}
+}
+
+static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
+						bool enable, u32 frame_count)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+		vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+							enable, frame_count);
+}
+
+static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return 0;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+		vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	if (!dpu_encoder_phys_vid_is_master(phys_enc))
+		return -EINVAL;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+		return -EINVAL;
+
+	return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+	ops->is_master = dpu_encoder_phys_vid_is_master;
+	ops->mode_set = dpu_encoder_phys_vid_mode_set;
+	ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
+	ops->enable = dpu_encoder_phys_vid_enable;
+	ops->disable = dpu_encoder_phys_vid_disable;
+	ops->destroy = dpu_encoder_phys_vid_destroy;
+	ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
+	ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+	ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->irq_control = dpu_encoder_phys_vid_irq_control;
+	ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+	ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+	ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+	ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
+	ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
+	ops->hw_reset = dpu_encoder_helper_hw_reset;
+	ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+		struct dpu_enc_phys_init_params *p)
+{
+	struct dpu_encoder_phys *phys_enc = NULL;
+	struct dpu_encoder_phys_vid *vid_enc = NULL;
+	struct dpu_rm_hw_iter iter;
+	struct dpu_hw_mdp *hw_mdp;
+	struct dpu_encoder_irq *irq;
+	int i, ret = 0;
+
+	if (!p) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+	if (!vid_enc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phys_enc = &vid_enc->base;
+
+	hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		/* PTR_ERR(NULL) would be 0, so make sure a real error escapes */
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		DPU_ERROR("failed to get mdptop\n");
+		goto fail;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+	phys_enc->intf_idx = p->intf_idx;
+
+	/*
+	 * The hw_intf resource is permanently assigned to this encoder;
+	 * other resources are allocated at atomic-commit time per use case.
+	 */
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
+	while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
+		struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+		if (hw_intf->idx == p->intf_idx) {
+			vid_enc->hw_intf = hw_intf;
+			break;
+		}
+	}
+
+	if (!vid_enc->hw_intf) {
+		ret = -EINVAL;
+		DPU_ERROR("failed to get hw_intf\n");
+		goto fail;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+	dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->dpu_kms = p->dpu_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_VIDEO;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	for (i = 0; i < INTR_IDX_MAX; i++) {
+		irq = &phys_enc->irq[i];
+		INIT_LIST_HEAD(&irq->cb.list);
+		irq->irq_idx = -EINVAL;
+		irq->hw_idx = -EINVAL;
+		irq->cb.arg = phys_enc;
+	}
+
+	irq = &phys_enc->irq[INTR_IDX_VSYNC];
+	irq->name = "vsync_irq";
+	irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
+	irq->intr_idx = INTR_IDX_VSYNC;
+	irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->name = "underrun";
+	irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+	irq->intr_idx = INTR_IDX_UNDERRUN;
+	irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+
+	DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+	return phys_enc;
+
+fail:
+	DPU_ERROR("failed to create encoder\n");
+	if (vid_enc)
+		dpu_encoder_phys_vid_destroy(phys_enc);
+
+	return ERR_PTR(ret);
+}
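+
+/*
+ * Usage sketch (illustrative only, not part of this patch): the parent
+ * encoder is expected to IS_ERR()-check the result, e.g.:
+ *
+ *	struct dpu_encoder_phys *phys = dpu_encoder_phys_vid_init(&params);
+ *
+ *	if (IS_ERR(phys))
+ *		return PTR_ERR(phys);
+ */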
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 8189539..bfcd165 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -846,7 +846,7 @@
 	return 0;
 }
 
-int dpu_format_get_plane_sizes(
+static int dpu_format_get_plane_sizes(
 		const struct dpu_format *fmt,
 		const uint32_t w,
 		const uint32_t h,
@@ -869,47 +869,6 @@
 	return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
 }
 
-int dpu_format_get_block_size(const struct dpu_format *fmt,
-		uint32_t *w, uint32_t *h)
-{
-	if (!fmt || !w || !h) {
-		DRM_ERROR("invalid pointer\n");
-		return -EINVAL;
-	}
-
-	/* TP10 is 96x96 and all others are 128x128 */
-	if (DPU_FORMAT_IS_YUV(fmt) && DPU_FORMAT_IS_DX(fmt) &&
-			(fmt->num_planes == 2) && fmt->unpack_tight)
-		*w = *h = 96;
-	else
-		*w = *h = 128;
-
-	return 0;
-}
-
-uint32_t dpu_format_get_framebuffer_size(
-		const uint32_t format,
-		const uint32_t width,
-		const uint32_t height,
-		const uint32_t *pitches,
-		const uint64_t modifiers)
-{
-	const struct dpu_format *fmt;
-	struct dpu_hw_fmt_layout layout;
-
-	fmt = dpu_get_dpu_format_ext(format, modifiers);
-	if (!fmt)
-		return 0;
-
-	if (!pitches)
-		return -EINVAL;
-
-	if (dpu_format_get_plane_sizes(fmt, width, height, &layout, pitches))
-		layout.total_size = 0;
-
-	return layout.total_size;
-}
-
 static int _dpu_format_populate_addrs_ubwc(
 		struct msm_gem_address_space *aspace,
 		struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index b55bfd1..a54451d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -55,36 +55,6 @@
 		uint32_t pixel_formats_max);
 
 /**
- * dpu_format_get_plane_sizes - calculate size and layout of given buffer format
- * @fmt:             pointer to dpu_format
- * @w:               width of the buffer
- * @h:               height of the buffer
- * @layout:          layout of the buffer
- * @pitches:         array of size [DPU_MAX_PLANES] to populate
- *		     pitch for each plane
- *
- * Return: size of the buffer
- */
-int dpu_format_get_plane_sizes(
-		const struct dpu_format *fmt,
-		const uint32_t w,
-		const uint32_t h,
-		struct dpu_hw_fmt_layout *layout,
-		const uint32_t *pitches);
-
-/**
- * dpu_format_get_block_size - get block size of given format when
- *	operating in block mode
- * @fmt:             pointer to dpu_format
- * @w:               pointer to width of the block
- * @h:               pointer to height of the block
- *
- * Return: 0 if success; error oode otherwise
- */
-int dpu_format_get_block_size(const struct dpu_format *fmt,
-		uint32_t *w, uint32_t *h);
-
-/**
  * dpu_format_check_modified_format - validate format and buffers for
  *                   dpu non-standard, i.e. modified format
  * @kms:             kms driver
@@ -115,22 +85,4 @@
 		struct drm_framebuffer *fb,
 		struct dpu_hw_fmt_layout *fmtl);
 
-/**
- * dpu_format_get_framebuffer_size - get framebuffer memory size
- * @format:            DRM pixel format
- * @width:             pixel width
- * @height:            pixel height
- * @pitches:           array of size [DPU_MAX_PLANES] to populate
- *		       pitch for each plane
- * @modifiers:         drm modifier
- *
- * Return: memory size required for frame buffer
- */
-uint32_t dpu_format_get_framebuffer_size(
-		const uint32_t format,
-		const uint32_t width,
-		const uint32_t height,
-		const uint32_t *pitches,
-		const uint64_t modifiers);
-
 #endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
new file mode 100644
index 0000000..58d29e4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_blk.h"
+
+/* Serialization lock for dpu_hw_blk_list */
+static DEFINE_MUTEX(dpu_hw_blk_lock);
+
+/* List of all hw block objects */
+static LIST_HEAD(dpu_hw_blk_list);
+
+/**
+ * dpu_hw_blk_init - initialize hw block object
+ * @hw_blk: pointer to hw block object to initialize
+ * @type: hw block type - enum dpu_hw_blk_type
+ * @id: instance id of the hw block
+ * @ops: Pointer to block operations
+ * return: 0 if success; error code otherwise
+ */
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+		struct dpu_hw_blk_ops *ops)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&hw_blk->list);
+	hw_blk->type = type;
+	hw_blk->id = id;
+	atomic_set(&hw_blk->refcount, 0);
+
+	if (ops)
+		hw_blk->ops = *ops;
+
+	mutex_lock(&dpu_hw_blk_lock);
+	list_add(&hw_blk->list, &dpu_hw_blk_list);
+	mutex_unlock(&dpu_hw_blk_lock);
+
+	return 0;
+}
+
+/**
+ * dpu_hw_blk_destroy - destroy hw block object.
+ * @hw_blk:  pointer to hw block object
+ * return: none
+ */
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return;
+	}
+
+	if (atomic_read(&hw_blk->refcount))
+		pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
+				hw_blk->id);
+
+	mutex_lock(&dpu_hw_blk_lock);
+	list_del(&hw_blk->list);
+	mutex_unlock(&dpu_hw_blk_lock);
+}
+
+/**
+ * dpu_hw_blk_get - get hw_blk from free pool
+ * @hw_blk: if specified, increment reference count only
+ * @type: if hw_blk is not specified, allocate the next available of this type
+ * @id: if specified (>= 0), allocate the given instance of the above type
+ * return: pointer to hw block object on success; NULL if no matching block
+ *	is available; ERR_PTR() if the block's start operation fails
+ */
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
+{
+	struct dpu_hw_blk *curr;
+	int rc, refcount;
+
+	if (!hw_blk) {
+		mutex_lock(&dpu_hw_blk_lock);
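+		/*
+		 * Match on type; an explicit id (>= 0) must match exactly,
+		 * while id < 0 takes the first unused (refcount == 0)
+		 * instance of the requested type.
+		 */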
+		list_for_each_entry(curr, &dpu_hw_blk_list, list) {
+			if ((curr->type != type) ||
+					(id >= 0 && curr->id != id) ||
+					(id < 0 &&
+						atomic_read(&curr->refcount)))
+				continue;
+
+			hw_blk = curr;
+			break;
+		}
+		mutex_unlock(&dpu_hw_blk_lock);
+	}
+
+	if (!hw_blk) {
+		pr_debug("no hw_blk:%d\n", type);
+		return NULL;
+	}
+
+	refcount = atomic_inc_return(&hw_blk->refcount);
+
+	if (refcount == 1 && hw_blk->ops.start) {
+		rc = hw_blk->ops.start(hw_blk);
+		if (rc) {
+			pr_err("failed to start  hw_blk:%d rc:%d\n", type, rc);
+			goto error_start;
+		}
+	}
+
+	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+			hw_blk->id, refcount);
+	return hw_blk;
+
+error_start:
+	dpu_hw_blk_put(hw_blk);
+	return ERR_PTR(rc);
+}
+
+/**
+ * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
+ * @hw_blk: hw block to be freed
+ */
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return;
+	}
+
+	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
+			atomic_read(&hw_blk->refcount));
+
+	if (!atomic_read(&hw_blk->refcount)) {
+		pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
+		return;
+	}
+
+	if (atomic_dec_return(&hw_blk->refcount))
+		return;
+
+	if (hw_blk->ops.stop)
+		hw_blk->ops.stop(hw_blk);
+}
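+
+/*
+ * Lifecycle sketch (illustrative only; "foo" is a hypothetical client block
+ * embedding a struct dpu_hw_blk as member "base"):
+ *
+ *	dpu_hw_blk_init(&foo->base, type, idx, &ops);
+ *	blk = dpu_hw_blk_get(NULL, type, -1);	(first get calls ops.start)
+ *	...
+ *	dpu_hw_blk_put(blk);			(last put calls ops.stop)
+ *	dpu_hw_blk_destroy(&foo->base);
+ */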
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
new file mode 100644
index 0000000..0f4ca8af
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_BLK_H
+#define _DPU_HW_BLK_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+struct dpu_hw_blk;
+
+/**
+ * struct dpu_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
+ */
+struct dpu_hw_blk_ops {
+	int (*start)(struct dpu_hw_blk *);
+	void (*stop)(struct dpu_hw_blk *);
+};
+
+/**
+ * struct dpu_hw_blk - definition of hardware block object
+ * @list: list of hardware blocks
+ * @type: hardware block type
+ * @id: instance id
+ * @refcount: reference/usage count
+ */
+struct dpu_hw_blk {
+	struct list_head list;
+	u32 type;
+	int id;
+	atomic_t refcount;
+	struct dpu_hw_blk_ops ops;
+};
+
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+		struct dpu_hw_blk_ops *ops);
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
+
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
+#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 0000000..44ee063
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_kms.h"
+
+#define VIG_SDM845_MASK \
+	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+	BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_SDM845_MASK \
+	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+	BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define MIXER_SDM845_MASK \
+	(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+	(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define DEFAULT_PIXEL_RAM_SIZE		(50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH		2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH	2560
+
+#define MAX_HORZ_DECIMATION	4
+#define MAX_VERT_DECIMATION	4
+
+#define MAX_UPSCALE_RATIO	20
+#define MAX_DOWNSCALE_RATIO	4
+#define SSPP_UNITY_SCALE	1
+
+#define STRCAT(X, Y) (X Y)
+
+/*************************************************************
+ * DPU sub blocks config
+ *************************************************************/
+/* DPU top level caps */
+static const struct dpu_caps sdm845_dpu_caps = {
+	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.max_mixer_blendstages = 0xb,
+	.qseed_type = DPU_SSPP_SCALER_QSEED3,
+	.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+	.ubwc_version = DPU_HW_UBWC_VER_20,
+	.has_src_split = true,
+	.has_dim_layer = true,
+	.has_idle_pc = true,
+};
+
+static struct dpu_mdp_cfg sdm845_mdp[] = {
+	{
+	.name = "top_0", .id = MDP_TOP,
+	.base = 0x0, .len = 0x45C,
+	.features = 0,
+	.highest_bank_bit = 0x2,
+	.has_dest_scaler = true,
+	.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+			.reg_off = 0x2AC, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+			.reg_off = 0x2B4, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+			.reg_off = 0x2BC, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+			.reg_off = 0x2C4, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+			.reg_off = 0x2AC, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+			.reg_off = 0x2B4, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+			.reg_off = 0x2BC, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+			.reg_off = 0x2C4, .bit_off = 8},
+	},
+};
+
+/*************************************************************
+ * CTL sub blocks config
+ *************************************************************/
+static struct dpu_ctl_cfg sdm845_ctl[] = {
+	{
+	.name = "ctl_0", .id = CTL_0,
+	.base = 0x1000, .len = 0xE4,
+	.features = BIT(DPU_CTL_SPLIT_DISPLAY)
+	},
+	{
+	.name = "ctl_1", .id = CTL_1,
+	.base = 0x1200, .len = 0xE4,
+	.features = BIT(DPU_CTL_SPLIT_DISPLAY)
+	},
+	{
+	.name = "ctl_2", .id = CTL_2,
+	.base = 0x1400, .len = 0xE4,
+	.features = 0
+	},
+	{
+	.name = "ctl_3", .id = CTL_3,
+	.base = 0x1600, .len = 0xE4,
+	.features = 0
+	},
+	{
+	.name = "ctl_4", .id = CTL_4,
+	.base = 0x1800, .len = 0xE4,
+	.features = 0
+	},
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+static const struct dpu_sspp_blks_common sdm845_sspp_common = {
+	.maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+	.maxhdeciexp = MAX_HORZ_DECIMATION,
+	.maxvdeciexp = MAX_VERT_DECIMATION,
+};
+
+#define _VIG_SBLK(num, sdma_pri) \
+	{ \
+	.common = &sdm845_sspp_common, \
+	.maxdwnscale = MAX_DOWNSCALE_RATIO, \
+	.maxupscale = MAX_UPSCALE_RATIO, \
+	.smart_dma_priority = sdma_pri, \
+	.src_blk = {.name = STRCAT("sspp_src_", num), \
+		.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+	.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+		.id = DPU_SSPP_SCALER_QSEED3, \
+		.base = 0xa00, .len = 0xa0,}, \
+	.csc_blk = {.name = STRCAT("sspp_csc", num), \
+		.id = DPU_SSPP_CSC_10BIT, \
+		.base = 0x1a00, .len = 0x100,}, \
+	.format_list = plane_formats_yuv, \
+	.virt_format_list = plane_formats, \
+	}
+
+#define _DMA_SBLK(num, sdma_pri) \
+	{ \
+	.common = &sdm845_sspp_common, \
+	.maxdwnscale = SSPP_UNITY_SCALE, \
+	.maxupscale = SSPP_UNITY_SCALE, \
+	.smart_dma_priority = sdma_pri, \
+	.src_blk = {.name = STRCAT("sspp_src_", num), \
+		.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+	.format_list = plane_formats, \
+	.virt_format_list = plane_formats, \
+	}
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
+
+#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+	{ \
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x1c8, \
+	.features = VIG_SDM845_MASK, \
+	.sblk = &_sblk, \
+	.xin_id = _xinid, \
+	.type = SSPP_TYPE_VIG, \
+	.clk_ctrl = _clkctrl \
+	}
+
+#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+	{ \
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x1c8, \
+	.features = DMA_SDM845_MASK, \
+	.sblk = &_sblk, \
+	.xin_id = _xinid, \
+	.type = SSPP_TYPE_DMA, \
+	.clk_ctrl = _clkctrl \
+	}
+
+static struct dpu_sspp_cfg sdm845_sspp[] = {
+	SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
+		sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
+	SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
+		sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
+	SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
+		sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
+	SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
+		sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
+	SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
+		sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
+	SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
+		sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
+	SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
+		sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
+	SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
+		sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+};
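+
+/*
+ * Note: per the table above, the last two DMA pipes (sspp_10/sspp_11) are
+ * clock-gated through the cursor clock controls (DPU_CLK_CTRL_CURSOR0/1).
+ */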
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+	.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.maxblendstages = 11, /* excluding base layer */
+	.blendstage_base = { /* offsets relative to mixer base */
+		0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+		0xb0, 0xc8, 0xe0, 0xf8, 0x110
+	},
+};
+
+#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+	{ \
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x320, \
+	.features = MIXER_SDM845_MASK, \
+	.sblk = &sdm845_lm_sblk, \
+	.ds = _ds, \
+	.pingpong = _pp, \
+	.lm_pair_mask = (1 << _lmpair) \
+	}
+
+static struct dpu_lm_cfg sdm845_lm[] = {
+	LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
+	LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
+	LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
+	LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
+	LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
+	LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
+};
+
+/*************************************************************
+ * DS sub blocks config
+ *************************************************************/
+static const struct dpu_ds_top_cfg sdm845_ds_top = {
+	.name = "ds_top_0", .id = DS_TOP,
+	.base = 0x60000, .len = 0xc,
+	.maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
+	.maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.maxupscale = MAX_UPSCALE_RATIO,
+};
+
+#define DS_BLK(_name, _id, _base) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x800, \
+	.features = DPU_SSPP_SCALER_QSEED3, \
+	.top = &sdm845_ds_top \
+	}
+
+static struct dpu_ds_cfg sdm845_ds[] = {
+	DS_BLK("ds_0", DS_0, 0x800),
+	DS_BLK("ds_1", DS_1, 0x1000),
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+	.te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+		.version = 0x1},
+	.dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+		.len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+	.dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+		.len = 0x20, .version = 0x10000},
+};
+
+#define PP_BLK_TE(_name, _id, _base) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0xd4, \
+	.features = PINGPONG_SDM845_SPLIT_MASK, \
+	.sblk = &sdm845_pp_sblk_te \
+	}
+#define PP_BLK(_name, _id, _base) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0xd4, \
+	.features = PINGPONG_SDM845_MASK, \
+	.sblk = &sdm845_pp_sblk \
+	}
+
+static struct dpu_pingpong_cfg sdm845_pp[] = {
+	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+	PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+	PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+};
+
+/*************************************************************
+ * INTF sub blocks config
+ *************************************************************/
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x280, \
+	.type = _type, \
+	.controller_id = _ctrl_id, \
+	.prog_fetch_lines_worst_case = 24 \
+	}
+
+static struct dpu_intf_cfg sdm845_intf[] = {
+	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
+	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+};
+
+/*************************************************************
+ * CDM sub blocks config
+ *************************************************************/
+static struct dpu_cdm_cfg sdm845_cdm[] = {
+	{
+	.name = "cdm_0", .id = CDM_0,
+	.base = 0x79200, .len = 0x224,
+	.features = 0,
+	.intf_connect = BIT(INTF_3),
+	},
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static struct dpu_vbif_cfg sdm845_vbif[] = {
+	{
+	.name = "vbif_0", .id = VBIF_0,
+	.base = 0, .len = 0x1040,
+	.features = BIT(DPU_VBIF_QOS_REMAP),
+	.xin_halt_timeout = 0x4000,
+	.qos_rt_tbl = {
+		.npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+		.priority_lvl = sdm845_rt_pri_lvl,
+		},
+	.qos_nrt_tbl = {
+		.npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+		.priority_lvl = sdm845_nrt_pri_lvl,
+		},
+	.memtype_count = 14,
+	.memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+	},
+};
+
+static struct dpu_reg_dma_cfg sdm845_regdma = {
+	.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+	{.fl = 4, .lut = 0x357},
+	{.fl = 5, .lut = 0x3357},
+	{.fl = 6, .lut = 0x23357},
+	{.fl = 7, .lut = 0x223357},
+	{.fl = 8, .lut = 0x2223357},
+	{.fl = 9, .lut = 0x22223357},
+	{.fl = 10, .lut = 0x222223357},
+	{.fl = 11, .lut = 0x2222223357},
+	{.fl = 12, .lut = 0x22222223357},
+	{.fl = 13, .lut = 0x222222223357},
+	{.fl = 14, .lut = 0x1222222223357},
+	{.fl = 0, .lut = 0x11222222223357}
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+	{.fl = 10, .lut = 0x344556677},
+	{.fl = 11, .lut = 0x3344556677},
+	{.fl = 12, .lut = 0x23344556677},
+	{.fl = 13, .lut = 0x223344556677},
+	{.fl = 14, .lut = 0x1223344556677},
+	{.fl = 0, .lut = 0x112233344556677},
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+	{.fl = 0, .lut = 0x0},
+};
+
+static struct dpu_perf_cfg sdm845_perf_data = {
+	.max_bw_low = 6800000,
+	.max_bw_high = 6800000,
+	.min_core_ib = 2400000,
+	.min_llcc_ib = 800000,
+	.min_dram_ib = 800000,
+	.core_ib_ff = "6.0",
+	.core_clk_ff = "1.0",
+	.comp_ratio_rt =
+	"NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
+	.comp_ratio_nrt =
+	"NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
+	.undersized_prefill_lines = 2,
+	.xtra_prefill_lines = 2,
+	.dest_scale_prefill_lines = 3,
+	.macrotile_prefill_lines = 4,
+	.yuv_nv12_prefill_lines = 8,
+	.linear_prefill_lines = 1,
+	.downscaling_prefill_lines = 1,
+	.amortizable_threshold = 25,
+	.min_prefill_lines = 24,
+	.danger_lut_tbl = {0xf, 0xffff, 0x0},
+	.qos_lut_tbl = {
+		{.nentry = ARRAY_SIZE(sdm845_qos_linear),
+		.entries = sdm845_qos_linear
+		},
+		{.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+		.entries = sdm845_qos_macrotile
+		},
+		{.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+		.entries = sdm845_qos_nrt
+		},
+	},
+	.cdp_cfg = {
+		{.rd_enable = 1, .wr_enable = 1},
+		{.rd_enable = 1, .wr_enable = 0}
+	},
+};
+
+/*************************************************************
+ * Hardware catalog init
+ *************************************************************/
+
+/*
+ * sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+	*dpu_cfg = (struct dpu_mdss_cfg){
+		.caps = &sdm845_dpu_caps,
+		.mdp_count = ARRAY_SIZE(sdm845_mdp),
+		.mdp = sdm845_mdp,
+		.ctl_count = ARRAY_SIZE(sdm845_ctl),
+		.ctl = sdm845_ctl,
+		.sspp_count = ARRAY_SIZE(sdm845_sspp),
+		.sspp = sdm845_sspp,
+		.mixer_count = ARRAY_SIZE(sdm845_lm),
+		.mixer = sdm845_lm,
+		.ds_count = ARRAY_SIZE(sdm845_ds),
+		.ds = sdm845_ds,
+		.pingpong_count = ARRAY_SIZE(sdm845_pp),
+		.pingpong = sdm845_pp,
+		.cdm_count = ARRAY_SIZE(sdm845_cdm),
+		.cdm = sdm845_cdm,
+		.intf_count = ARRAY_SIZE(sdm845_intf),
+		.intf = sdm845_intf,
+		.vbif_count = ARRAY_SIZE(sdm845_vbif),
+		.vbif = sdm845_vbif,
+		.reg_dma_count = 1,
+		.dma_cfg = sdm845_regdma,
+		.perf = sdm845_perf_data,
+	};
+}
+
+static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+	{ .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
+	{ .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+};
+
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
+{
+	kfree(dpu_cfg);
+}
+
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+	int i;
+	struct dpu_mdss_cfg *dpu_cfg;
+
+	dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
+	if (!dpu_cfg)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+		if (cfg_handler[i].hw_rev == hw_rev) {
+			cfg_handler[i].cfg_init(dpu_cfg);
+			dpu_cfg->hwversion = hw_rev;
+			return dpu_cfg;
+		}
+	}
+
+	DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+	dpu_hw_catalog_deinit(dpu_cfg);
+	return ERR_PTR(-ENODEV);
+}
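+
+/*
+ * Usage sketch (assumed caller, e.g. kms init code; hw_rev would be read
+ * from the hardware revision register):
+ *
+ *	struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(hw_rev);
+ *
+ *	if (IS_ERR(cfg))
+ *		return PTR_ERR(cfg);
+ *	...
+ *	dpu_hw_catalog_deinit(cfg);
+ */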
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644
index 0000000..f0cb0d4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -0,0 +1,804 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <drm/drmP.h>
+
+/**
+ * Maximum hardware block count: for example, at most 12 SSPP pipes or
+ * 5 CTL paths. In all cases the current design supports at most 12
+ * hardware blocks of any one type.
+ */
+#define MAX_BLOCKS    12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28)    |\
+		((MINOR & 0xFFF) << 16)  |\
+		(STEP & 0xFFFF))
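+/* For example, DPU_HW_VER(4, 0, 1) == 0x40000001 (sdm845 v2.0 below). */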
+
+#define DPU_HW_MAJOR(rev)		((rev) >> 28)
+#define DPU_HW_MINOR(rev)		(((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev)		((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev)		((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2)   \
+	(DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
+
+#define DPU_HW_VER_170	DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171	DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172	DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300	DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301	DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400	DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401	DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410	DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500	DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
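+/*
+ * For example, IS_SDM845_TARGET(DPU_HW_VER_401) is true: v4.0.1 and v4.0.0
+ * share major.minor 4.0 and differ only in the step field.
+ */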
+
+
+#define DPU_HW_BLK_NAME_LEN	16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS	2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+	DPU_HW_UBWC_VER_10 = 0x100,
+	DPU_HW_UBWC_VER_20 = 0x200,
+	DPU_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev)       ((rev) >= DPU_HW_UBWC_VER_20)
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC,           MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0,      This chipset supports Universal Bandwidth
+ *                         compression initial revision
+ * @DPU_MDP_UBWC_1_5,      Universal Bandwidth compression version 1.5
+ * @DPU_MDP_MAX            Maximum value
+ */
+enum {
+	DPU_MDP_PANIC_PER_PIPE = 0x1,
+	DPU_MDP_10BIT_SUPPORT,
+	DPU_MDP_BWC,
+	DPU_MDP_UBWC_1_0,
+	DPU_MDP_UBWC_1_5,
+	DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC             Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2,  QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3,  QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC,            Support of Color space conversion
+ * @DPU_SSPP_CSC_10BIT,      Support of 10-bit Color space conversion
+ * @DPU_SSPP_CURSOR,         SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS,            SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL,       SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT,      SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL      Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP             Supports client driven prefetch
+ * @DPU_SSPP_MAX             maximum value
+ */
+enum {
+	DPU_SSPP_SRC = 0x1,
+	DPU_SSPP_SCALER_QSEED2,
+	DPU_SSPP_SCALER_QSEED3,
+	DPU_SSPP_SCALER_RGB,
+	DPU_SSPP_CSC,
+	DPU_SSPP_CSC_10BIT,
+	DPU_SSPP_CURSOR,
+	DPU_SSPP_QOS,
+	DPU_SSPP_QOS_8LVL,
+	DPU_SSPP_EXCL_RECT,
+	DPU_SSPP_SMART_DMA_V1,
+	DPU_SSPP_SMART_DMA_V2,
+	DPU_SSPP_TS_PREFILL,
+	DPU_SSPP_TS_PREFILL_REC1,
+	DPU_SSPP_CDP,
+	DPU_SSPP_MAX
+};
+
+/**
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER           Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT     Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC              Gamma correction block
+ * @DPU_DIM_LAYER             Layer mixer supports dim layer
+ * @DPU_MIXER_MAX             maximum value
+ */
+enum {
+	DPU_MIXER_LAYER = 0x1,
+	DPU_MIXER_SOURCESPLIT,
+	DPU_MIXER_GC,
+	DPU_DIM_LAYER,
+	DPU_MIXER_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE         Tear check block
+ * @DPU_PINGPONG_TE2        Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT      PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE      PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER,    Dither blocks
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+	DPU_PINGPONG_TE = 0x1,
+	DPU_PINGPONG_TE2,
+	DPU_PINGPONG_SPLIT,
+	DPU_PINGPONG_SLAVE,
+	DPU_PINGPONG_DITHER,
+	DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY       CTL supports video mode split display
+ * @DPU_CTL_MAX
+ */
+enum {
+	DPU_CTL_SPLIT_DISPLAY = 0x1,
+	DPU_CTL_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM        VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP        VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX              maximum value
+ */
+enum {
+	DPU_VBIF_QOS_OTLIM = 0x1,
+	DPU_VBIF_QOS_REMAP,
+	DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name:              string name for debug purposes
+ * @id:                enum identifying this block
+ * @base:              register base offset to mdss
+ * @len:               length of hardware block
+ * @features           bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+	char name[DPU_HW_BLK_NAME_LEN]; \
+	u32 id; \
+	u32 base; \
+	u32 len; \
+	unsigned long features
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name:              string name for debug purposes
+ * @id:                enum identifying this sub-block
+ * @base:              offset of this sub-block relative to the block
+ *                     offset
+ * @len:               register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+	char name[DPU_HW_BLK_NAME_LEN]; \
+	u32 id; \
+	u32 base; \
+	u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info:   HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+	DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info:   HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+	DPU_HW_SUBBLK_INFO;
+	u32 version;
+};
+
+struct dpu_csc_blk {
+	DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info:   HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+	DPU_HW_SUBBLK_INFO;
+	u32 version;
+};
+
+/**
+ * struct dpu_format_extended - define dpu specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ *            framebuffer planes
+ */
+struct dpu_format_extended {
+	uint32_t fourcc_format;
+	uint64_t modifier;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+	DPU_QOS_LUT_USAGE_LINEAR,
+	DPU_QOS_LUT_USAGE_MACROTILE,
+	DPU_QOS_LUT_USAGE_NRT,
+	DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct dpu_qos_lut_entry {
+	u32 fl;
+	u64 lut;
+};
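+/*
+ * Example (from the sdm845 tables earlier in this patch):
+ * {.fl = 4, .lut = 0x357} selects lut 0x357 for fill levels up to 4, and
+ * the terminating {.fl = 0, ...} entry supplies the default lut.
+ */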
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entry in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+	u32 nentry;
+	struct dpu_qos_lut_entry *entries;
+};
+
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width    max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ *                       supported z order
+ * @qseed_type         qseed2 or qseed3 support.
+ * @smart_dma_rev      Supported version of SmartDMA feature.
+ * @ubwc_version       UBWC feature version (0x0 for not supported)
+ * @has_src_split      source split feature status
+ * @has_dim_layer      dim layer feature status
+ * @has_idle_pc        indicates if the idle power collapse feature is supported
+ */
+struct dpu_caps {
+	u32 max_mixer_width;
+	u32 max_mixer_blendstages;
+	u32 qseed_type;
+	u32 smart_dma_rev;
+	u32 ubwc_version;
+	bool has_src_split;
+	bool has_dim_layer;
+	bool has_idle_pc;
+};
+
+/**
+ * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
+ * @maxlinewidth: max line width in pixels supported by this pipe
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @maxhdeciexp: max horizontal decimation supported by this pipe
+ *				(max is 2^value)
+ * @maxvdeciexp: max vertical decimation supported by this pipe
+ *				(max is 2^value)
+ */
+struct dpu_sspp_blks_common {
+	u32 maxlinewidth;
+	u32 pixel_ram_size;
+	u32 maxhdeciexp;
+	u32 maxvdeciexp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @common: Pointer to common configurations shared by sub blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported (without decimation)
+ * @maxupscale:  max upscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @src_blk:
+ * @scaler_blk:
+ * @csc_blk:
+ * @hsic:
+ * @memcolor:
+ * @pcc_blk:
+ * @igc_blk:
+ * @format_list: Pointer to list of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ */
+struct dpu_sspp_sub_blks {
+	const struct dpu_sspp_blks_common *common;
+	u32 creq_vblank;
+	u32 danger_vblank;
+	u32 maxdwnscale;
+	u32 maxupscale;
+	u32 smart_dma_priority;
+	u32 max_per_pipe_bw;
+	struct dpu_src_blk src_blk;
+	struct dpu_scaler_blk scaler_blk;
+	struct dpu_pp_blk csc_blk;
+	struct dpu_pp_blk hsic_blk;
+	struct dpu_pp_blk memcolor_blk;
+	struct dpu_pp_blk pcc_blk;
+	struct dpu_pp_blk igc_blk;
+
+	const struct dpu_format_extended *format_list;
+	const struct dpu_format_extended *virt_format_list;
+};
+
+/**
+ * struct dpu_lm_sub_blks:      information of mixer block
+ * @maxwidth:               Max pixel width supported by this mixer
+ * @maxblendstages:         Max number of blend-stages supported
+ * @blendstage_base:        Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+	u32 maxwidth;
+	u32 maxblendstages;
+	u32 blendstage_base[MAX_BLOCKS];
+	struct dpu_pp_blk gc;
+};
+
+struct dpu_pingpong_sub_blks {
+	struct dpu_pp_blk te;
+	struct dpu_pp_blk te2;
+	struct dpu_pp_blk dither;
+};
+
+/**
+ * dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+	DPU_CLK_CTRL_NONE,
+	DPU_CLK_CTRL_VIG0,
+	DPU_CLK_CTRL_VIG1,
+	DPU_CLK_CTRL_VIG2,
+	DPU_CLK_CTRL_VIG3,
+	DPU_CLK_CTRL_VIG4,
+	DPU_CLK_CTRL_RGB0,
+	DPU_CLK_CTRL_RGB1,
+	DPU_CLK_CTRL_RGB2,
+	DPU_CLK_CTRL_RGB3,
+	DPU_CLK_CTRL_DMA0,
+	DPU_CLK_CTRL_DMA1,
+	DPU_CLK_CTRL_CURSOR0,
+	DPU_CLK_CTRL_CURSOR1,
+	DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+	DPU_CLK_CTRL_MAX,
+};
+
+/* struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off:           register offset
+ * @bit_off:           bit offset
+ */
+struct dpu_clk_ctrl_reg {
+	u32 reg_off;
+	u32 bit_off;
+};
+
+/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id:                index identifying this block
+ * @base:              register base offset to mdss
+ * @features           bit mask identifying sub-blocks/features
+ * @highest_bank_bit:  UBWC parameter
+ * @ubwc_static:       ubwc static configuration
+ * @ubwc_swizzle:      ubwc default swizzle setting
+ * @has_dest_scaler:   indicates support of destination scaler
+ * @clk_ctrls          clock control register definition
+ */
+struct dpu_mdp_cfg {
+	DPU_HW_BLK_INFO;
+	u32 highest_bank_bit;
+	u32 ubwc_static;
+	u32 ubwc_swizzle;
+	bool has_dest_scaler;
+	struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/* struct dpu_ctl_cfg : CTL PATH instance info
+ * @id:                index identifying this block
+ * @base:              register base offset to mdss
+ * @features           bit mask identifying sub-blocks/features
+ */
+struct dpu_ctl_cfg {
+	DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id:                index identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk:              SSPP sub-blocks information
+ * @xin_id:            bus client identifier
+ * @clk_ctrl           clock control identifier
+ * @type               sspp type identifier
+ */
+struct dpu_sspp_cfg {
+	DPU_HW_BLK_INFO;
+	const struct dpu_sspp_sub_blks *sblk;
+	u32 xin_id;
+	enum dpu_clk_ctrl_type clk_ctrl;
+	u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id:                index identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk:              LM Sub-blocks information
+ * @pingpong:          ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @ds:                ID of connected DS, DS_MAX if unsupported
+ * @lm_pair_mask:      Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+	DPU_HW_BLK_INFO;
+	const struct dpu_lm_sub_blks *sblk;
+	u32 pingpong;
+	u32 ds;
+	unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_ds_top_cfg - information of dest scaler top
+ * @id               enum identifying this block
+ * @base             register offset of this block
+ * @features         bit mask identifying features
+ * @version          hw version of dest scaler
+ * @maxinputwidth    maximum input line width
+ * @maxoutputwidth   maximum output line width
+ * @maxupscale       maximum upscale ratio
+ */
+struct dpu_ds_top_cfg {
+	DPU_HW_BLK_INFO;
+	u32 version;
+	u32 maxinputwidth;
+	u32 maxoutputwidth;
+	u32 maxupscale;
+};
+
+/**
+ * struct dpu_ds_cfg - information of dest scaler blocks
+ * @id          enum identifying this block
+ * @base        register offset wrt DS top offset
+ * @features    bit mask identifying features
+ * @version     hw version of the qseed block
+ * @top         DS top information
+ */
+struct dpu_ds_cfg {
+	DPU_HW_BLK_INFO;
+	u32 version;
+	const struct dpu_ds_top_cfg *top;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk               sub-blocks information
+ */
+struct dpu_pingpong_cfg  {
+	DPU_HW_BLK_INFO;
+	const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @intf_connect       Bitmask of INTF IDs this CDM can connect to
+ */
+struct dpu_cdm_cfg   {
+	DPU_HW_BLK_INFO;
+	unsigned long intf_connect;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @type:              Interface type (DSI, DP, HDMI)
+ * @controller_id:     Controller instance ID, for when there are multiple
+ *                     instances of one intf type
+ * @prog_fetch_lines_worst_case	Worst-case number of latency lines
+ *                     needed to prefetch
+ */
+struct dpu_intf_cfg  {
+	DPU_HW_BLK_INFO;
+	u32 type;   /* interface type */
+	u32 controller_id;
+	u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps                pixel per seconds
+ * @ot_limit           OT limit to use up to specified pixel per second
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+	u64 pps;
+	u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count              length of cfg
+ * @cfg                pointer to array of configuration settings with
+ *                     ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+	u32 count;
+	struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl      num of priority level
+ * @priority_lvl       pointer to array of priority level in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+	u32 npriority_lvl;
+	u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @ot_rd_limit        default OT read limit
+ * @ot_wr_limit        default OT write limit
+ * @xin_halt_timeout   maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl  dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl  dynamic OT write configuration table
+ * @qos_rt_tbl         real-time QoS priority table
+ * @qos_nrt_tbl        non-real-time QoS priority table
+ * @memtype_count      number of defined memtypes
+ * @memtype            array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+	DPU_HW_BLK_INFO;
+	u32 default_ot_rd_limit;
+	u32 default_ot_wr_limit;
+	u32 xin_halt_timeout;
+	struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+	struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+	struct dpu_vbif_qos_tbl qos_rt_tbl;
+	struct dpu_vbif_qos_tbl qos_nrt_tbl;
+	u32 memtype_count;
+	u32 memtype[MAX_XIN_COUNT];
+};
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @version            version of lutdma hw block
+ * @trigger_sel_off    offset to trigger select registers of lutdma
+ */
+struct dpu_reg_dma_cfg {
+	DPU_HW_BLK_INFO;
+	u32 version;
+	u32 trigger_sel_off;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+	DPU_PERF_CDP_USAGE_RT,
+	DPU_PERF_CDP_USAGE_NRT,
+	DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+	bool rd_enable;
+	bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low         low threshold of maximum bandwidth (kbps)
+ * @max_bw_high        high threshold of maximum bandwidth (kbps)
+ * @min_core_ib        minimum core (mnoc) ib vote in kbps
+ * @min_llcc_ib        minimum llcc ib vote in kbps
+ * @min_dram_ib        minimum dram ib vote in kbps
+ * @core_ib_ff         core instantaneous bandwidth fudge factor
+ * @core_clk_ff        core clock fudge factor
+ * @comp_ratio_rt      string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @comp_ratio_nrt     string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @undersized_prefill_lines   undersized prefill in lines
+ * @xtra_prefill_lines         extra prefill latency in lines
+ * @dest_scale_prefill_lines   destination scaler latency in lines
+ * @macrotile_prefill_lines    macrotile latency in lines
+ * @yuv_nv12_prefill_lines     yuv_nv12 latency in lines
+ * @linear_prefill_lines       linear latency in lines
+ * @downscaling_prefill_lines  downscaling latency in lines
+ * @amortizable_threshold minimum y position for traffic shaping prefill
+ * @min_prefill_lines  minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg            cdp use case configurations
+ */
+struct dpu_perf_cfg {
+	u32 max_bw_low;
+	u32 max_bw_high;
+	u32 min_core_ib;
+	u32 min_llcc_ib;
+	u32 min_dram_ib;
+	const char *core_ib_ff;
+	const char *core_clk_ff;
+	const char *comp_ratio_rt;
+	const char *comp_ratio_nrt;
+	u32 undersized_prefill_lines;
+	u32 xtra_prefill_lines;
+	u32 dest_scale_prefill_lines;
+	u32 macrotile_prefill_lines;
+	u32 yuv_nv12_prefill_lines;
+	u32 linear_prefill_lines;
+	u32 downscaling_prefill_lines;
+	u32 amortizable_threshold;
+	u32 min_prefill_lines;
+	u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+	u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+	struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+	struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, and capabilities of all the MDSS HW sub-blocks.
+ *
+ * @dma_formats        Supported formats for dma pipe
+ * @cursor_formats     Supported formats for cursor pipe
+ * @vig_formats        Supported formats for vig pipe
+ */
+struct dpu_mdss_cfg {
+	u32 hwversion;
+
+	const struct dpu_caps *caps;
+
+	u32 mdp_count;
+	struct dpu_mdp_cfg *mdp;
+
+	u32 ctl_count;
+	struct dpu_ctl_cfg *ctl;
+
+	u32 sspp_count;
+	struct dpu_sspp_cfg *sspp;
+
+	u32 mixer_count;
+	struct dpu_lm_cfg *mixer;
+
+	u32 ds_count;
+	struct dpu_ds_cfg *ds;
+
+	u32 pingpong_count;
+	struct dpu_pingpong_cfg *pingpong;
+
+	u32 cdm_count;
+	struct dpu_cdm_cfg *cdm;
+
+	u32 intf_count;
+	struct dpu_intf_cfg *intf;
+
+	u32 vbif_count;
+	struct dpu_vbif_cfg *vbif;
+
+	u32 reg_dma_count;
+	struct dpu_reg_dma_cfg dma_cfg;
+
+	u32 ad_count;
+
+	/* Add additional block data structures here */
+
+	struct dpu_perf_cfg perf;
+	struct dpu_format_extended *dma_formats;
+	struct dpu_format_extended *cursor_formats;
+	struct dpu_format_extended *vig_formats;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+	u32 hw_rev;
+	void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
+};
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DS(s) ((s)->ds)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API; retrieves the
+ * hardcoded, target-specific catalog information in a config structure
+ * @hw_rev:       caller must provide the hardware revision.
+ *
+ * Return: dpu config structure
+ */
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+/**
+ * dpu_hw_catalog_deinit - dpu hardware catalog cleanup
+ * @dpu_cfg:      pointer returned from init function
+ */
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
+
+/**
+ * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
+ * @cfg:          pointer to sspp cfg
+ */
+static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
+{
+	return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
+			 test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
+}
+#endif /* _DPU_HW_CATALOG_H */
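For context, a minimal sketch of how a consumer would drive the catalog API
declared above. This is illustrative only: error handling is abbreviated, and
example_catalog_usage is a hypothetical helper, not part of this patch.

static int example_catalog_usage(u32 hw_rev)
{
	struct dpu_mdss_cfg *cat;
	int i;

	cat = dpu_hw_catalog_init(hw_rev);
	if (IS_ERR_OR_NULL(cat))
		return cat ? PTR_ERR(cat) : -EINVAL;

	/* e.g. walk the SSPP instances and check for multirect support */
	for (i = 0; i < cat->sspp_count; i++)
		if (dpu_hw_sspp_multirect_enabled(&cat->sspp[i]))
			pr_debug("sspp %d supports multirect\n", i);

	dpu_hw_catalog_deinit(cat);
	return 0;
}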
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
new file mode 100644
index 0000000..3c9f028
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+
+static const struct dpu_format_extended plane_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{0, 0},
+};
+
+static const struct dpu_format_extended plane_formats_yuv[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV21, 0},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_NV61, 0},
+	{DRM_FORMAT_VYUY, 0},
+	{DRM_FORMAT_UYVY, 0},
+	{DRM_FORMAT_YUYV, 0},
+	{DRM_FORMAT_YVYU, 0},
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_YVU420, 0},
+	{0, 0},
+};
+
+static const struct dpu_format_extended cursor_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{0, 0},
+};
+
+static const struct dpu_format_extended wb2_formats[] = {
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_YUYV, 0},
+
+	{0, 0},
+};
+
+static const struct dpu_format_extended rgb_10bit_formats[] = {
+	{DRM_FORMAT_BGRA1010102, 0},
+	{DRM_FORMAT_BGRX1010102, 0},
+	{DRM_FORMAT_RGBA1010102, 0},
+	{DRM_FORMAT_RGBX1010102, 0},
+	{DRM_FORMAT_ABGR2101010, 0},
+	{DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XBGR2101010, 0},
+	{DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ARGB2101010, 0},
+	{DRM_FORMAT_XRGB2101010, 0},
+};
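Most of the tables above are terminated by a {0, 0} sentinel, so consumers can
walk them without a separate length. A hedged sketch (the fourcc_format field
name is assumed from dpu_hw_mdss.h, which is not part of this hunk):

/* Sketch: count entries in a sentinel-terminated format table such as
 * plane_formats[]. Note that rgb_10bit_formats[] above carries no
 * {0, 0} sentinel, so it must be sized with ARRAY_SIZE() instead.
 */
static u32 example_count_formats(const struct dpu_format_extended *tbl)
{
	u32 n = 0;

	while (tbl && tbl[n].fourcc_format)
		n++;

	return n;
}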
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644
index 0000000..554874b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE                  0x000
+#define CDM_CSC_10_BASE                    0x004
+
+#define CDM_CDWN2_OP_MODE                  0x100
+#define CDM_CDWN2_CLAMP_OUT                0x104
+#define CDM_CDWN2_PARAMS_3D_0              0x108
+#define CDM_CDWN2_PARAMS_3D_1              0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0         0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1         0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2         0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0        0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1        0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2        0x124
+#define CDM_CDWN2_COEFF_COSITE_V           0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V          0x12C
+#define CDM_CDWN2_OUT_SIZE                 0x130
+
+#define CDM_HDMI_PACK_OP_MODE              0x200
+#define CDM_CSC_10_MATRIX_COEFF_0          0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct dpu_csc_cfg rgb2yuv_cfg = {
+	{
+		0x0083, 0x0102, 0x0032,
+		0x1fb5, 0x1f6c, 0x00e1,
+		0x00e1, 0x1f45, 0x1fdc
+	},
+	{ 0x00, 0x00, 0x00 },
+	{ 0x0040, 0x0200, 0x0200 },
+	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
+static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->cdm_count; i++) {
+		if (cdm == m->cdm[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->cdm[i].base;
+			b->length = m->cdm[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_CDM;
+			return &m->cdm[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
+		struct dpu_csc_cfg *data)
+{
+	dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+	return 0;
+}
+
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
+		struct dpu_hw_cdm_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 opmode = 0;
+	u32 out_size = 0;
+
+	if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+		opmode &= ~BIT(7);
+	else
+		opmode |= BIT(7);
+
+	/* ENABLE DWNS_H bit */
+	opmode |= BIT(1);
+
+	switch (cfg->h_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_H field */
+		opmode &= ~(0x18);
+		/* CLEAR DWNS_H bit */
+		opmode &= ~BIT(1);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_H field (pixel drop is 0) */
+		opmode &= ~(0x18);
+		break;
+	case CDM_CDWN_AVG:
+		/* Clear METHOD_H field (Average is 0x1) */
+		opmode &= ~(0x18);
+		opmode |= (0x1 << 0x3);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Clear METHOD_H field (Average is 0x2) */
+		opmode &= ~(0x18);
+		opmode |= (0x2 << 0x3);
+		/* Co-site horizontal coefficients */
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+				cosite_h_coeff[0]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+				cosite_h_coeff[1]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+				cosite_h_coeff[2]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Clear METHOD_H field (Average is 0x3) */
+		opmode &= ~(0x18);
+		opmode |= (0x3 << 0x3);
+
+		/* Off-site horizontal coefficients */
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+				offsite_h_coeff[0]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+				offsite_h_coeff[1]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+				offsite_h_coeff[2]);
+		break;
+	default:
+		pr_err("%s invalid horz down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* ENABLE DWNS_V bit */
+	opmode |= BIT(2);
+
+	switch (cfg->v_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_V field */
+		opmode &= ~(0x60);
+		/* CLEAR DWNS_V bit */
+		opmode &= ~BIT(2);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_V field (pixel drop is 0) */
+		opmode &= ~(0x60);
+		break;
+	case CDM_CDWN_AVG:
+		/* Clear METHOD_V field (Average is 0x1) */
+		opmode &= ~(0x60);
+		opmode |= (0x1 << 0x5);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Clear METHOD_V field (Average is 0x2) */
+		opmode &= ~(0x60);
+		opmode |= (0x2 << 0x5);
+		/* Co-site vertical coefficients */
+		DPU_REG_WRITE(c,
+				CDM_CDWN2_COEFF_COSITE_V,
+				cosite_v_coeff[0]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Clear METHOD_V field (Average is 0x3) */
+		opmode &= ~(0x60);
+		opmode |= (0x3 << 0x5);
+
+		/* Off-site vertical coefficients */
+		DPU_REG_WRITE(c,
+				CDM_CDWN2_COEFF_OFFSITE_V,
+				offsite_v_coeff[0]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+		opmode |= BIT(0); /* EN CDWN module */
+	else
+		opmode &= ~BIT(0);
+
+	out_size = (cfg->output_width & 0xFFFF) |
+		((cfg->output_height & 0xFFFF) << 16);
+	DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+	DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+	DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+			((0x3FF << 16) | 0x0));
+
+	return 0;
+}
+
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
+		struct dpu_hw_cdm_cfg *cdm)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	const struct dpu_format *fmt = cdm->output_fmt;
+	struct cdm_output_cfg cdm_cfg = { 0 };
+	u32 opmode = 0;
+	u32 csc = 0;
+
+	if (!DPU_FORMAT_IS_YUV(fmt))
+		return -EINVAL;
+
+	if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+		if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+			return -EINVAL; /* unsupported format */
+		opmode = BIT(0);
+		opmode |= (fmt->chroma_sample << 1);
+		cdm_cfg.intf_en = true;
+	}
+
+	csc |= BIT(2);
+	csc &= ~BIT(1);
+	csc |= BIT(0);
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+	DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+	DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+	return 0;
+}
+
+static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
+{
+	struct cdm_output_cfg cdm_cfg = { 0 };
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
+	unsigned long features)
+{
+	ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
+	ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
+	ops->enable = dpu_hw_cdm_enable;
+	ops->disable = dpu_hw_cdm_disable;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m,
+		struct dpu_hw_mdp *hw_mdp)
+{
+	struct dpu_hw_cdm *c;
+	struct dpu_cdm_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _cdm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_cdm_ops(&c->ops, c->caps->features);
+	c->hw_mdp = hw_mdp;
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	/*
+	 * Perform any default initialization for the chroma down module:
+	 * set up the default csc coefficients
+	 */
+	dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
+{
+	if (cdm)
+		dpu_hw_blk_destroy(&cdm->base);
+	kfree(cdm);
+}
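A hedged bring-up sketch for the CDM driver above, e.g. for a YUV HDMI path.
The mmio/cat/top arguments are assumed to come from the kms init code, the
geometry is illustrative, and dpu_hw_cdm_enable() will reject any fmt that is
not YUV with DPU_CHROMA_H1V2 chroma sampling:

static struct dpu_hw_cdm *example_cdm_bringup(void __iomem *mmio,
					      struct dpu_mdss_cfg *cat,
					      struct dpu_hw_mdp *top,
					      const struct dpu_format *fmt)
{
	/* cfg layout follows struct dpu_hw_cdm_cfg in dpu_hw_cdm.h below */
	struct dpu_hw_cdm_cfg cfg = {
		.output_width = 1920,
		.output_height = 1080,
		.output_bit_depth = CDM_CDWN_OUTPUT_10BIT,
		.h_cdwn_type = CDM_CDWN_COSITE,
		.v_cdwn_type = CDM_CDWN_COSITE,
		.output_fmt = fmt,
		.output_type = CDM_CDWN_OUTPUT_HDMI,
	};
	struct dpu_hw_cdm *cdm;
	int rc;

	cdm = dpu_hw_cdm_init(CDM_0, mmio, cat, top);
	if (IS_ERR(cdm))
		return cdm;

	rc = cdm->ops.setup_cdwn(cdm, &cfg);
	if (!rc)
		rc = cdm->ops.enable(cdm, &cfg);
	if (rc) {
		dpu_hw_cdm_destroy(cdm);
		return ERR_PTR(rc);
	}

	return cdm;
}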
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644
index 0000000..5cceb1e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_cdm;
+
+struct dpu_hw_cdm_cfg {
+	u32 output_width;
+	u32 output_height;
+	u32 output_bit_depth;
+	u32 h_cdwn_type;
+	u32 v_cdwn_type;
+	const struct dpu_format *output_fmt;
+	u32 output_type;
+	int flags;
+};
+
+enum dpu_hw_cdwn_type {
+	CDM_CDWN_DISABLE,
+	CDM_CDWN_PIXEL_DROP,
+	CDM_CDWN_AVG,
+	CDM_CDWN_COSITE,
+	CDM_CDWN_OFFSITE,
+};
+
+enum dpu_hw_cdwn_output_type {
+	CDM_CDWN_OUTPUT_HDMI,
+	CDM_CDWN_OUTPUT_WB,
+};
+
+enum dpu_hw_cdwn_output_bit_depth {
+	CDM_CDWN_OUTPUT_8BIT,
+	CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions
+ *                         Assumption is these functions will be called after
+ *                         clocks are enabled
+ *  @setup_csc_data:       Programs the csc matrix
+ *  @setup_cdwn:           Sets up the chroma down sub module
+ *  @enable:               Enables the output to interface and programs the
+ *                         output packer
+ *  @disable:              Puts the cdm in bypass mode
+ */
+struct dpu_hw_cdm_ops {
+	/**
+	 * Programs the CSC matrix for conversion from RGB space to YUV space.
+	 * Calling this function is optional, as the matrix is automatically
+	 * set during initialization; call it only to program a matrix other
+	 * than the default.
+	 * @cdm:          Pointer to the chroma down context structure
+	 * @data:         Pointer to CSC configuration data
+	 * return:        0 if success; error code otherwise
+	 */
+	int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
+			struct dpu_csc_cfg *data);
+
+	/**
+	 * Programs the Chroma downsample part.
+	 * @cdm         Pointer to chroma down context
+	 */
+	int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
+			struct dpu_hw_cdm_cfg *cfg);
+
+	/**
+	 * Enable the CDM module
+	 * @cdm         Pointer to chroma down context
+	 */
+	int (*enable)(struct dpu_hw_cdm *cdm,
+			struct dpu_hw_cdm_cfg *cfg);
+
+	/**
+	 * Disable the CDM module
+	 * @cdm         Pointer to chroma down context
+	 */
+	void (*disable)(struct dpu_hw_cdm *cdm);
+};
+
+struct dpu_hw_cdm {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* chroma down */
+	const struct dpu_cdm_cfg *caps;
+	enum  dpu_cdm  idx;
+
+	/* mdp top hw driver */
+	struct dpu_hw_mdp *hw_mdp;
+
+	/* ops */
+	struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * to_dpu_hw_cdm - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * Should be called once before accessing each cdm instance.
+ * @idx:  cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ * @hw_mdp:  pointer to mdp top hw driver object
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m,
+		struct dpu_hw_mdp *hw_mdp);
+
+/**
+ * dpu_hw_cdm_destroy - destroys CDM driver context
+ * @cdm:   pointer to CDM driver context
+ */
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
+
+#endif /*_DPU_HW_CDM_H */
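Because dpu_hw_cdm_init() already programs the default limited-range rgb2yuv
matrix (see dpu_hw_cdm.c above), setup_csc_data only needs to be called to
override it. A minimal hedged sketch; the csc argument is assumed to be filled
in by the caller following the struct dpu_csc_cfg layout used by rgb2yuv_cfg:

/* Sketch: override the CSC matrix programmed at init time. */
static int example_cdm_override_csc(struct dpu_hw_cdm *cdm,
				    struct dpu_csc_cfg *csc)
{
	if (!cdm || !cdm->ops.setup_csc_data)
		return -EINVAL;

	return cdm->ops.setup_csc_data(cdm, csc);
}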
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 0000000..06be7cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,540 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define   CTL_LAYER(lm)                 \
+	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT(lm)             \
+	(0x40 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT2(lm)             \
+	(0x70 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT3(lm)             \
+	(0xA0 + (((lm) - LM_0) * 0x004))
+#define   CTL_TOP                       0x014
+#define   CTL_FLUSH                     0x018
+#define   CTL_START                     0x01C
+#define   CTL_PREPARE                   0x0d0
+#define   CTL_SW_RESET                  0x030
+#define   CTL_LAYER_EXTN_OFFSET         0x40
+
+#define CTL_MIXER_BORDER_OUT            BIT(24)
+#define CTL_FLUSH_MASK_CTL              BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US        2000
+
+static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->ctl_count; i++) {
+		if (ctl == m->ctl[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->ctl[i].base;
+			b->length = m->ctl[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_CTL;
+			return &m->ctl[i];
+		}
+	}
+	return ERR_PTR(-ENOMEM);
+}
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+		enum dpu_lm lm)
+{
+	int i;
+	int stages = -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		if (lm == mixer[i].id) {
+			stages = mixer[i].sblk->maxblendstages;
+			break;
+		}
+	}
+
+	return stages;
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+	ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+		u32 flushbits)
+{
+	ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+	if (!ctx)
+		return 0x0;
+
+	return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	return DPU_REG_READ(c, CTL_FLUSH);
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+	enum dpu_sspp sspp)
+{
+	uint32_t flushbits = 0;
+
+	switch (sspp) {
+	case SSPP_VIG0:
+		flushbits =  BIT(0);
+		break;
+	case SSPP_VIG1:
+		flushbits = BIT(1);
+		break;
+	case SSPP_VIG2:
+		flushbits = BIT(2);
+		break;
+	case SSPP_VIG3:
+		flushbits = BIT(18);
+		break;
+	case SSPP_RGB0:
+		flushbits = BIT(3);
+		break;
+	case SSPP_RGB1:
+		flushbits = BIT(4);
+		break;
+	case SSPP_RGB2:
+		flushbits = BIT(5);
+		break;
+	case SSPP_RGB3:
+		flushbits = BIT(19);
+		break;
+	case SSPP_DMA0:
+		flushbits = BIT(11);
+		break;
+	case SSPP_DMA1:
+		flushbits = BIT(12);
+		break;
+	case SSPP_DMA2:
+		flushbits = BIT(24);
+		break;
+	case SSPP_DMA3:
+		flushbits = BIT(25);
+		break;
+	case SSPP_CURSOR0:
+		flushbits = BIT(22);
+		break;
+	case SSPP_CURSOR1:
+		flushbits = BIT(23);
+		break;
+	default:
+		break;
+	}
+
+	return flushbits;
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+	enum dpu_lm lm)
+{
+	uint32_t flushbits = 0;
+
+	switch (lm) {
+	case LM_0:
+		flushbits = BIT(6);
+		break;
+	case LM_1:
+		flushbits = BIT(7);
+		break;
+	case LM_2:
+		flushbits = BIT(8);
+		break;
+	case LM_3:
+		flushbits = BIT(9);
+		break;
+	case LM_4:
+		flushbits = BIT(10);
+		break;
+	case LM_5:
+		flushbits = BIT(20);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	flushbits |= CTL_FLUSH_MASK_CTL;
+
+	return flushbits;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+		u32 *flushbits, enum dpu_intf intf)
+{
+	switch (intf) {
+	case INTF_0:
+		*flushbits |= BIT(31);
+		break;
+	case INTF_1:
+		*flushbits |= BIT(30);
+		break;
+	case INTF_2:
+		*flushbits |= BIT(29);
+		break;
+	case INTF_3:
+		*flushbits |= BIT(28);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
+		u32 *flushbits, enum dpu_cdm cdm)
+{
+	switch (cdm) {
+	case CDM_0:
+		*flushbits |= BIT(26);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	ktime_t timeout;
+	u32 status;
+
+	timeout = ktime_add_us(ktime_get(), timeout_us);
+
+	/*
+	 * it takes around 30us for the mdp to finish resetting its ctl path;
+	 * poll every 20-50us so that the reset should be complete by the
+	 * first poll
+	 */
+	do {
+		status = DPU_REG_READ(c, CTL_SW_RESET);
+		status &= 0x1;
+		if (status)
+			usleep_range(20, 50);
+	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+	return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 status;
+
+	status = DPU_REG_READ(c, CTL_SW_RESET);
+	status &= 0x01;
+	if (!status)
+		return 0;
+
+	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int i;
+
+	for (i = 0; i < ctx->mixer_count; i++) {
+		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+	}
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+	int i, j;
+	int stages;
+	int pipes_per_stage;
+
+	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+	if (stages < 0)
+		return;
+
+	if (test_bit(DPU_MIXER_SOURCESPLIT,
+		&ctx->mixer_hw_caps->features))
+		pipes_per_stage = PIPES_PER_STAGE;
+	else
+		pipes_per_stage = 1;
+
+	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+	if (!stage_cfg)
+		goto exit;
+
+	for (i = 0; i <= stages; i++) {
+		/* overflow to ext register if 'i + 1 > 7' */
+		mix = (i + 1) & 0x7;
+		ext = i >= 7;
+
+		for (j = 0 ; j < pipes_per_stage; j++) {
+			enum dpu_sspp_multirect_index rect_index =
+				stage_cfg->multirect_index[i][j];
+
+			switch (stage_cfg->stage[i][j]) {
+			case SSPP_VIG0:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+				} else {
+					mixercfg |= mix << 0;
+					mixercfg_ext |= ext << 0;
+				}
+				break;
+			case SSPP_VIG1:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+				} else {
+					mixercfg |= mix << 3;
+					mixercfg_ext |= ext << 2;
+				}
+				break;
+			case SSPP_VIG2:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+				} else {
+					mixercfg |= mix << 6;
+					mixercfg_ext |= ext << 4;
+				}
+				break;
+			case SSPP_VIG3:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+				} else {
+					mixercfg |= mix << 26;
+					mixercfg_ext |= ext << 6;
+				}
+				break;
+			case SSPP_RGB0:
+				mixercfg |= mix << 9;
+				mixercfg_ext |= ext << 8;
+				break;
+			case SSPP_RGB1:
+				mixercfg |= mix << 12;
+				mixercfg_ext |= ext << 10;
+				break;
+			case SSPP_RGB2:
+				mixercfg |= mix << 15;
+				mixercfg_ext |= ext << 12;
+				break;
+			case SSPP_RGB3:
+				mixercfg |= mix << 29;
+				mixercfg_ext |= ext << 14;
+				break;
+			case SSPP_DMA0:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+				} else {
+					mixercfg |= mix << 18;
+					mixercfg_ext |= ext << 16;
+				}
+				break;
+			case SSPP_DMA1:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+				} else {
+					mixercfg |= mix << 21;
+					mixercfg_ext |= ext << 18;
+				}
+				break;
+			case SSPP_DMA2:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+				} else {
+					mix |= (i + 1) & 0xF;
+					mixercfg_ext2 |= mix << 0;
+				}
+				break;
+			case SSPP_DMA3:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+				} else {
+					mix |= (i + 1) & 0xF;
+					mixercfg_ext2 |= mix << 4;
+				}
+				break;
+			case SSPP_CURSOR0:
+				mixercfg_ext |= ((i + 1) & 0xF) << 20;
+				break;
+			case SSPP_CURSOR1:
+				mixercfg_ext |= ((i + 1) & 0xF) << 26;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+exit:
+	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+}
+
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+		struct dpu_hw_intf_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 intf_cfg = 0;
+
+	intf_cfg |= (cfg->intf & 0xF) << 4;
+
+	if (cfg->mode_3d) {
+		intf_cfg |= BIT(19);
+		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+	}
+
+	switch (cfg->intf_mode_sel) {
+	case DPU_CTL_MODE_SEL_VID:
+		intf_cfg &= ~BIT(17);
+		intf_cfg &= ~(0x3 << 15);
+		break;
+	case DPU_CTL_MODE_SEL_CMD:
+		intf_cfg |= BIT(17);
+		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+		break;
+	default:
+		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+		return;
+	}
+
+	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+		unsigned long cap)
+{
+	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+	ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+	ops->trigger_start = dpu_hw_ctl_trigger_start;
+	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+	ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+	ops->reset = dpu_hw_ctl_reset_control;
+	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
+	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+	ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+	ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_ctl *c;
+	struct dpu_ctl_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _ctl_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		pr_err("failed to create dpu_hw_ctl %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->caps = cfg;
+	_setup_ctl_ops(&c->ops, c->caps->features);
+	c->idx = idx;
+	c->mixer_count = m->mixer_count;
+	c->mixer_hw_caps = m->mixer;
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+	if (ctx)
+		dpu_hw_blk_destroy(&ctx->base);
+	kfree(ctx);
+}
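The pending-flush ops above follow an accumulate-then-commit pattern. A hedged
sketch of the typical sequence; SSPP_VIG0, LM_0 and INTF_1 are illustrative,
and a real caller derives the bits from its committed state:

static void example_ctl_commit(struct dpu_hw_ctl *ctl)
{
	u32 flush = 0;

	ctl->ops.clear_pending_flush(ctl);

	/* pipe and mixer contributions are returned as masks */
	flush |= ctl->ops.get_bitmask_sspp(ctl, SSPP_VIG0);
	flush |= ctl->ops.get_bitmask_mixer(ctl, LM_0);

	/* interface bits are OR'd into the caller-supplied mask */
	ctl->ops.get_bitmask_intf(ctl, &flush, INTF_1);

	ctl->ops.update_pending_flush(ctl, flush);
	ctl->ops.trigger_flush(ctl);	/* writes CTL_FLUSH */
	ctl->ops.trigger_start(ctl);	/* kicks SW-controlled interfaces */
}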
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 0000000..c66a71f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_blk.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID:    Video mode interface
+ * DPU_CTL_MODE_SEL_CMD:    Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+	DPU_CTL_MODE_SEL_VID = 0,
+	DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+	enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+	enum dpu_sspp_multirect_index multirect_index
+					[DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct dpu_hw_intf_cfg - Describes how the DPU writes data to the output interface
+ * @intf :                 Interface id
+ * @mode_3d:               3d mux configuration
+ * @intf_mode_sel:         Interface mode, cmd / vid
+ * @stream_sel:            Stream selection for multi-stream interfaces
+ */
+struct dpu_hw_intf_cfg {
+	enum dpu_intf intf;
+	enum dpu_3d_blend_mode mode_3d;
+	enum dpu_ctl_mode_sel intf_mode_sel;
+	int stream_sel;
+};
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the ctl path Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+	/**
+	 * kickoff hw operation for SW controlled interfaces;
+	 * DSI cmd mode and the WB interface are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * indicate that a kickoff prepare is in progress, for SW
+	 * controlled interfaces: DSI cmd mode and the WB interface
+	 * are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Clear the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Query the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * OR in the given flushbits to the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 * @flushbits : module flushmask
+	 */
+	void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+		u32 flushbits);
+
+	/**
+	 * Write the value of the pending_flush_mask to hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Read the value of the flush register
+	 * @ctx       : ctl path ctx pointer
+	 * Return: value of the ctl flush register.
+	 */
+	u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Setup ctl_path interface config
+	 * @ctx    : ctl path ctx pointer
+	 * @cfg    : interface config structure pointer
+	 */
+	void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+		struct dpu_hw_intf_cfg *cfg);
+
+	int (*reset)(struct dpu_hw_ctl *c);
+
+	/*
+	 * wait_reset_status - checks ctl reset status
+	 * @ctx       : ctl path ctx pointer
+	 *
+	 * This function checks the ctl reset status bit.
+	 * If the reset bit is set, it keeps polling the status till the hw
+	 * reset is complete.
+	 * Returns: 0 on success or -error if reset incomplete within interval
+	 */
+	int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+	uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
+		enum dpu_sspp blk);
+
+	uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
+		enum dpu_lm blk);
+
+	int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
+		u32 *flushbits,
+		enum dpu_intf blk);
+
+	int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
+		u32 *flushbits,
+		enum dpu_cdm blk);
+
+	/**
+	 * Set all blend stages to disabled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Configure the layer-mixer-to-pipe (blend stage) configuration
+	 * @ctx       : ctl path ctx pointer
+	 * @lm        : layer mixer enumeration
+	 * @cfg       : blend stage configuration
+	 */
+	void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+		enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* ctl path */
+	int idx;
+	const struct dpu_ctl_cfg *caps;
+	int mixer_count;
+	const struct dpu_lm_cfg *mixer_hw_caps;
+	u32 pending_flush_mask;
+
+	/* ops */
+	struct dpu_hw_ctl_ops ops;
+};
+
+/**
+ * to_dpu_hw_ctl - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl path registers.
+ * @idx:  ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_ctl_destroy(): Destroys the ctl driver context;
+ * should be called to free the context
+ * @ctx:   pointer to ctl driver context
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /*_DPU_HW_CTL_H */
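A hedged sketch of driving setup_blendstage() with the dpu_hw_stage_cfg above.
The stage/pipe placement is illustrative, and the zeroed multirect_index
entries assume that 0 encodes the default (non-RECT_1) rectangle:

static void example_blend_setup(struct dpu_hw_ctl *ctl)
{
	struct dpu_hw_stage_cfg stage_cfg = { 0 };

	/* VIG0 at blend stage 0, DMA0 at stage 1, both on mixer LM_0 */
	stage_cfg.stage[0][0] = SSPP_VIG0;
	stage_cfg.stage[1][0] = SSPP_DMA0;

	ctl->ops.clear_all_blendstages(ctl);
	ctl->ops.setup_blendstage(ctl, LM_0, &stage_cfg);
}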
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644
index 0000000..c0b7f00
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -0,0 +1,1183 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * Register offsets in the MDSS register file for the interrupt registers,
+ * relative to the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF		0x0
+#define MDP_INTF_0_OFF			0x6A000
+#define MDP_INTF_1_OFF			0x6A800
+#define MDP_INTF_2_OFF			0x6B000
+#define MDP_INTF_3_OFF			0x6B800
+#define MDP_INTF_4_OFF			0x6C000
+#define MDP_AD4_0_OFF			0x7C000
+#define MDP_AD4_1_OFF			0x7D000
+#define MDP_AD4_INTR_EN_OFF		0x41c
+#define MDP_AD4_INTR_CLEAR_OFF		0x424
+#define MDP_AD4_INTR_STATUS_OFF		0x420
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define DPU_INTR_WB_0_DONE BIT(0)
+#define DPU_INTR_WB_1_DONE BIT(1)
+#define DPU_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define DPU_INTR_WD_TIMER_0_DONE BIT(2)
+#define DPU_INTR_WD_TIMER_1_DONE BIT(3)
+#define DPU_INTR_WD_TIMER_2_DONE BIT(5)
+#define DPU_INTR_WD_TIMER_3_DONE BIT(6)
+#define DPU_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_DONE BIT(8)
+#define DPU_INTR_PING_PONG_1_DONE BIT(9)
+#define DPU_INTR_PING_PONG_2_DONE BIT(10)
+#define DPU_INTR_PING_PONG_3_DONE BIT(11)
+#define DPU_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define DPU_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define DPU_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define DPU_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define DPU_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define DPU_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define DPU_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define DPU_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define DPU_INTR_INTF_0_UNDERRUN BIT(24)
+#define DPU_INTR_INTF_1_UNDERRUN BIT(26)
+#define DPU_INTR_INTF_2_UNDERRUN BIT(28)
+#define DPU_INTR_INTF_3_UNDERRUN BIT(30)
+#define DPU_INTR_INTF_0_VSYNC BIT(25)
+#define DPU_INTR_INTF_1_VSYNC BIT(27)
+#define DPU_INTR_INTF_2_VSYNC BIT(29)
+#define DPU_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define DPU_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define DPU_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define DPU_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define DPU_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define DPU_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define DPU_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define DPU_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define DPU_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define DPU_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define DPU_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Ctl start interrupt status bit definitions
+ */
+#define DPU_INTR_CTL_0_START BIT(9)
+#define DPU_INTR_CTL_1_START BIT(10)
+#define DPU_INTR_CTL_2_START BIT(11)
+#define DPU_INTR_CTL_3_START BIT(12)
+#define DPU_INTR_CTL_4_START BIT(13)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define DPU_INTR_CWB_2_OVERFLOW BIT(14)
+#define DPU_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_DONE BIT(0)
+#define DPU_INTR_HIST_VIG_1_DONE BIT(4)
+#define DPU_INTR_HIST_VIG_2_DONE BIT(8)
+#define DPU_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define DPU_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define DPU_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define DPU_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_DONE BIT(12)
+#define DPU_INTR_HIST_DSPP_1_DONE BIT(16)
+#define DPU_INTR_HIST_DSPP_2_DONE BIT(20)
+#define DPU_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define DPU_INTR_VIDEO_INTO_STATIC BIT(0)
+#define DPU_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define DPU_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define DPU_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define DPU_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define DPU_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define DPU_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define DPU_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define DPU_INTR_PROG_LINE BIT(8)
+
+/**
+ * AD4 interrupt status bit definitions
+ */
+#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
+#define DPU_INTR_DARKENH_UPDATED BIT(3)
+#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
+#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
+
+/**
+ * struct dpu_intr_reg - array of DPU register sets
+ * @clr_off:	offset to CLEAR reg
+ * @en_off:	offset to ENABLE reg
+ * @status_off:	offset to STATUS reg
+ */
+struct dpu_intr_reg {
+	u32 clr_off;
+	u32 en_off;
+	u32 status_off;
+};
+
+/**
+ * struct dpu_irq_type - maps each irq to its interrupt source i/f
+ * @intr_type:		type of interrupt listed in dpu_intr_type
+ * @instance_idx:	instance index of the associated HW block in DPU
+ * @irq_mask:		corresponding bit in the interrupt status reg
+ * @reg_idx:		which reg set to use
+ */
+struct dpu_irq_type {
+	u32 intr_type;
+	u32 instance_idx;
+	u32 irq_mask;
+	u32 reg_idx;
+};
+
+/**
+ * List of DPU interrupt registers
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+	{
+		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+		MDP_SSPP_TOP0_OFF+INTR_EN,
+		MDP_SSPP_TOP0_OFF+INTR_STATUS
+	},
+	{
+		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+		MDP_SSPP_TOP0_OFF+INTR2_EN,
+		MDP_SSPP_TOP0_OFF+INTR2_STATUS
+	},
+	{
+		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+	},
+	{
+		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_0_OFF+INTF_INTR_EN,
+		MDP_INTF_0_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_1_OFF+INTF_INTR_EN,
+		MDP_INTF_1_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_2_OFF+INTF_INTR_EN,
+		MDP_INTF_2_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_3_OFF+INTF_INTR_EN,
+		MDP_INTF_3_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_4_OFF+INTF_INTR_EN,
+		MDP_INTF_4_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+	},
+	{
+		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+	}
+};
+
+/**
+ * IRQ mapping table - used to look up an irq_idx in this table that has
+ *                     a matching interrupt type and instance index.
+ */
+static const struct dpu_irq_type dpu_irq_map[] = {
+	/* BEGIN MAP_RANGE: 0-31, INTR */
+	/* irq_idx: 0-3 */
+	{ DPU_IRQ_TYPE_WB_ROT_COMP, WB_0, DPU_INTR_WB_0_DONE, 0},
+	{ DPU_IRQ_TYPE_WB_ROT_COMP, WB_1, DPU_INTR_WB_1_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_0, DPU_INTR_WD_TIMER_0_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_1, DPU_INTR_WD_TIMER_1_DONE, 0},
+	/* irq_idx: 4-7 */
+	{ DPU_IRQ_TYPE_WB_WFD_COMP, WB_2, DPU_INTR_WB_2_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_2, DPU_INTR_WD_TIMER_2_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_3, DPU_INTR_WD_TIMER_3_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_4, DPU_INTR_WD_TIMER_4_DONE, 0},
+	/* irq_idx: 8-11 */
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_DONE, 0},
+	/* irq_idx: 12-15 */
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_RD_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_RD_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_RD_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_RD_PTR, 0},
+	/* irq_idx: 16-19 */
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_WR_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_WR_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_WR_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_WR_PTR, 0},
+	/* irq_idx: 20-23 */
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+	/* irq_idx: 24-27 */
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, DPU_INTR_INTF_0_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_0, DPU_INTR_INTF_0_VSYNC, 0},
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, DPU_INTR_INTF_1_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_1, DPU_INTR_INTF_1_VSYNC, 0},
+	/* irq_idx: 28-31 */
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, DPU_INTR_INTF_2_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
+
+	/* BEGIN MAP_RANGE: 32-64, INTR2 */
+	/* irq_idx: 32-35 */
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 36-39 */
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_WR_PTR, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 40 */
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_RD_PTR, 1},
+	/* irq_idx: 41-45 */
+	{ DPU_IRQ_TYPE_CTL_START, CTL_0,
+		DPU_INTR_CTL_0_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_1,
+		DPU_INTR_CTL_1_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_2,
+		DPU_INTR_CTL_2_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_3,
+		DPU_INTR_CTL_3_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_4,
+		DPU_INTR_CTL_4_START, 1},
+	/* irq_idx: 46-47 */
+	{ DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
+	{ DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
+	/* irq_idx: 48-51 */
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+	/* irq_idx: 52-55 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 56-59 */
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
+	/* irq_idx: 60-63 */
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+	/* BEGIN MAP_RANGE: 64-95 HIST */
+	/* irq_idx: 64-67 */
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+		DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 68-71 */
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+		DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 72-75 */
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+		DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+		DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+	/* irq_idx: 76-79 */
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+		DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 80-83 */
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+		DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 84-87 */
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+		DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+		DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+	/* irq_idx: 88-91 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 92-95 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+	/* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+	/* irq_idx: 96-99 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+		DPU_INTR_VIDEO_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
+	/* irq_idx: 100-103 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
+	/* irq_idx: 104-107 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 108-111 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 112-115 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 116-119 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 120-123 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 124-127 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+	/* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+	/* irq_idx: 128-131 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+		DPU_INTR_VIDEO_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
+	/* irq_idx: 132-135 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
+	/* irq_idx: 136-139 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 140-143 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 144-147 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 148-151 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 152-155 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 156-159 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+	/* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+	/* irq_idx: 160-163 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+		DPU_INTR_VIDEO_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
+	/* irq_idx: 164-167 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
+	/* irq_idx: 168-171 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 172-175 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 176-179 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 180-183 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 184-187 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 188-191 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+	/* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+	/* irq_idx: 192-195 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+		DPU_INTR_VIDEO_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
+	/* irq_idx: 196-199 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
+	/* irq_idx: 200-203 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 204-207 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 208-211 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 212-215 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 216-219 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 220-223 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+	/* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+	/* irq_idx: 224-227 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+		DPU_INTR_VIDEO_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
+	/* irq_idx: 228-231 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
+	/* irq_idx: 232-235 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 236-239 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 240-243 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 244-247 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 248-251 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 252-255 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+	/* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
+	/* irq_idx: 256-259 */
+	{ DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 260-263 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 264-267 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 268-271 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 272-275 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 276-279 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 280-283 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 284-287 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+
+	/* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
+	/* irq_idx: 288-291 */
+	{ DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 292-295 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 296-299 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 300-303 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 304-307 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 308-311 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 312-315 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 316-319 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+};
+
+static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
+		u32 instance_idx)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
+		if (intr_type == dpu_irq_map[i].intr_type &&
+			instance_idx == dpu_irq_map[i].instance_idx)
+			return i;
+	}
+
+	pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
+			intr_type, instance_idx);
+	return -EINVAL;
+}
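+
+/*
+ * Illustrative usage of the lookup (hypothetical caller, not part of this
+ * patch): to arm the programmable-line interrupt on INTF_1, a client would
+ * first resolve the table index and then enable it through the ops vector:
+ *
+ *	int irq_idx = intr->ops.irq_idx_lookup(DPU_IRQ_TYPE_PROG_LINE, INTF_1);
+ *	if (irq_idx >= 0)
+ *		intr->ops.enable_irq(intr, irq_idx);
+ */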
+
+static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
+		uint32_t mask)
+{
+	if (!intr)
+		return;
+
+	DPU_REG_WRITE(&intr->hw, reg_off, mask);
+
+	/* ensure register writes go through */
+	wmb();
+}
+
+static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
+		void (*cbfunc)(void *, int),
+		void *arg)
+{
+	int reg_idx;
+	int irq_idx;
+	int start_idx;
+	int end_idx;
+	u32 irq_status;
+	unsigned long irq_flags;
+
+	if (!intr)
+		return;
+
+	/*
+	 * The dispatcher will have saved the IRQ status before calling
+	 * here. Now walk each saved status word and find the matching
+	 * irq lookup index.
+	 */
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+		irq_status = intr->save_irq_status[reg_idx];
+
+		/*
+		 * Each interrupt status register covers a fixed range of
+		 * 32 indexes in dpu_irq_map.
+		 */
+		start_idx = reg_idx * 32;
+		end_idx = start_idx + 32;
+
+		if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
+				end_idx > ARRAY_SIZE(dpu_irq_map))
+			continue;
+
+		/*
+		 * Search for a matching intr status in the irq map;
+		 * start_idx and end_idx define the search range within
+		 * dpu_irq_map.
+		 */
+		for (irq_idx = start_idx;
+				(irq_idx < end_idx) && irq_status;
+				irq_idx++)
+			if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
+				(dpu_irq_map[irq_idx].reg_idx == reg_idx)) {
+				/*
+				 * On an irq mask match, invoke the given
+				 * cbfunc. cbfunc is expected to take care of
+				 * clearing the interrupt status. If no cbfunc
+				 * is provided, the interrupt is cleared here
+				 * instead.
+				 */
+				if (cbfunc)
+					cbfunc(arg, irq_idx);
+				else
+					intr->ops.clear_intr_status_nolock(
+							intr, irq_idx);
+
+				/*
+				 * When the callback finishes, clear the
+				 * matching mask from irq_status. Once
+				 * irq_status is fully cleared, the search
+				 * can stop early.
+				 */
+				irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
+			}
+	}
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
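+
+/*
+ * Illustrative dispatch flow (sketch only; my_irq_cb and priv are
+ * hypothetical names, not part of this patch). A top-level ISR would
+ * typically latch the status registers first and then walk them:
+ *
+ *	static void my_irq_cb(void *arg, int irq_idx)
+ *	{
+ *		... client bookkeeping; the callback owns clearing the
+ *		status, e.g. via ops.clear_intr_status_nolock() ...
+ *	}
+ *
+ *	intr->ops.get_interrupt_statuses(intr);
+ *	intr->ops.dispatch_irqs(intr, my_irq_cb, priv);
+ */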
+
+static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	const struct dpu_intr_reg *reg;
+	const struct dpu_irq_type *irq;
+	const char *dbgstr = NULL;
+	uint32_t cache_irq_mask;
+
+	if (!intr)
+		return -EINVAL;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	irq = &dpu_irq_map[irq_idx];
+	reg_idx = irq->reg_idx;
+	reg = &dpu_intr_set[reg_idx];
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	cache_irq_mask = intr->cache_irq_mask[reg_idx];
+	if (cache_irq_mask & irq->irq_mask) {
+		dbgstr = "DPU IRQ already set:";
+	} else {
+		dbgstr = "DPU IRQ enabled:";
+
+		cache_irq_mask |= irq->irq_mask;
+		/* Clear any pending interrupts */
+		DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+		/* Enable interrupts with the new mask */
+		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+		/* ensure register write goes through */
+		wmb();
+
+		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+	}
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+			irq->irq_mask, cache_irq_mask);
+
+	return 0;
+}
+
+static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
+{
+	int reg_idx;
+	const struct dpu_intr_reg *reg;
+	const struct dpu_irq_type *irq;
+	const char *dbgstr = NULL;
+	uint32_t cache_irq_mask;
+
+	if (!intr)
+		return -EINVAL;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	irq = &dpu_irq_map[irq_idx];
+	reg_idx = irq->reg_idx;
+	reg = &dpu_intr_set[reg_idx];
+
+	cache_irq_mask = intr->cache_irq_mask[reg_idx];
+	if ((cache_irq_mask & irq->irq_mask) == 0) {
+		dbgstr = "DPU IRQ is already cleared:";
+	} else {
+		dbgstr = "DPU IRQ mask disable:";
+
+		cache_irq_mask &= ~irq->irq_mask;
+		/* Disable interrupts based on the new mask */
+		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+		/* Clear any pending interrupts */
+		DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+		/* ensure register write goes through */
+		wmb();
+
+		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+	}
+
+	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+			irq->irq_mask, cache_irq_mask);
+
+	return 0;
+}
+
+static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+	unsigned long irq_flags;
+
+	if (!intr)
+		return -EINVAL;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	dpu_hw_intr_disable_irq_nolock(intr, irq_idx);
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+	return 0;
+}
+
+static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+{
+	int i;
+
+	if (!intr)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+		DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+
+	/* ensure register writes go through */
+	wmb();
+
+	return 0;
+}
+
+static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+{
+	int i;
+
+	if (!intr)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+		DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+
+	/* ensure register writes go through */
+	wmb();
+
+	return 0;
+}
+
+static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
+		uint32_t *mask)
+{
+	if (!intr || !mask)
+		return -EINVAL;
+
+	*mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+		| IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
+	return 0;
+}
+
+static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
+{
+	int i;
+	u32 enable_mask;
+	unsigned long irq_flags;
+
+	if (!intr)
+		return;
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+		/* Read interrupt status */
+		intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
+				dpu_intr_set[i].status_off);
+
+		/* Read enable mask */
+		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[i].en_off);
+
+		/* and clear the interrupt */
+		if (intr->save_irq_status[i])
+			DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off,
+					intr->save_irq_status[i]);
+
+		/* Finally update IRQ status based on enable mask */
+		intr->save_irq_status[i] &= enable_mask;
+	}
+
+	/* ensure register writes go through */
+	wmb();
+
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+		int irq_idx)
+{
+	int reg_idx;
+
+	if (!intr)
+		return;
+
+	reg_idx = dpu_irq_map[irq_idx].reg_idx;
+	DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+			dpu_irq_map[irq_idx].irq_mask);
+
+	/* ensure register writes go through */
+	wmb();
+}
+
+static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
+		int irq_idx)
+{
+	unsigned long irq_flags;
+
+	if (!intr)
+		return;
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
+		int irq_idx, bool clear)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	u32 intr_status;
+
+	if (!intr)
+		return 0;
+
+	if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return 0;
+	}
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+	reg_idx = dpu_irq_map[irq_idx].reg_idx;
+	intr_status = DPU_REG_READ(&intr->hw,
+			dpu_intr_set[reg_idx].status_off) &
+					dpu_irq_map[irq_idx].irq_mask;
+	if (intr_status && clear)
+		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+				intr_status);
+
+	/* ensure register writes go through */
+	wmb();
+
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+	return intr_status;
+}
+
+static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
+{
+	ops->set_mask = dpu_hw_intr_set_mask;
+	ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
+	ops->enable_irq = dpu_hw_intr_enable_irq;
+	ops->disable_irq = dpu_hw_intr_disable_irq;
+	ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
+	ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
+	ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
+	ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
+	ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
+	ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
+	ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
+	ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
+}
+
+static void __intr_offset(struct dpu_mdss_cfg *m,
+		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
+{
+	hw->base_off = addr;
+	hw->blk_off = m->mdp[0].base;
+	hw->hwversion = m->hwversion;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_intr *intr;
+
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
+	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+	if (!intr)
+		return ERR_PTR(-ENOMEM);
+
+	__intr_offset(m, addr, &intr->hw);
+	__setup_intr_ops(&intr->ops);
+
+	intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+
+	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+			GFP_KERNEL);
+	if (intr->cache_irq_mask == NULL) {
+		kfree(intr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	intr->save_irq_status = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+			GFP_KERNEL);
+	if (intr->save_irq_status == NULL) {
+		kfree(intr->cache_irq_mask);
+		kfree(intr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&intr->irq_lock);
+
+	return intr;
+}
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+	if (intr) {
+		kfree(intr->cache_irq_mask);
+		kfree(intr->save_irq_status);
+		kfree(intr);
+	}
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 0000000..61e4cba
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP		BIT(0)
+#define IRQ_SOURCE_DSI0		BIT(4)
+#define IRQ_SOURCE_DSI1		BIT(5)
+#define IRQ_SOURCE_HDMI		BIT(8)
+#define IRQ_SOURCE_EDP		BIT(12)
+#define IRQ_SOURCE_MHL		BIT(16)
+
+/**
+ * enum dpu_intr_type - HW interrupt type
+ * @DPU_IRQ_TYPE_WB_ROT_COMP:		WB rotator done
+ * @DPU_IRQ_TYPE_WB_WFD_COMP:		WB WFD done
+ * @DPU_IRQ_TYPE_PING_PONG_COMP:	PingPong done
+ * @DPU_IRQ_TYPE_PING_PONG_RD_PTR:	PingPong read pointer
+ * @DPU_IRQ_TYPE_PING_PONG_WR_PTR:	PingPong write pointer
+ * @DPU_IRQ_TYPE_PING_PONG_AUTO_REF:	PingPong auto refresh
+ * @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK:	PingPong Tear check
+ * @DPU_IRQ_TYPE_PING_PONG_TE_CHECK:	PingPong TE detection
+ * @DPU_IRQ_TYPE_INTF_UNDER_RUN:	INTF underrun
+ * @DPU_IRQ_TYPE_INTF_VSYNC:		INTF VSYNC
+ * @DPU_IRQ_TYPE_CWB_OVERFLOW:		Concurrent WB overflow
+ * @DPU_IRQ_TYPE_HIST_VIG_DONE:		VIG Histogram done
+ * @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ:	VIG Histogram reset
+ * @DPU_IRQ_TYPE_HIST_DSPP_DONE:	DSPP Histogram done
+ * @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ:	DSPP Histogram reset
+ * @DPU_IRQ_TYPE_WD_TIMER:		Watchdog timer
+ * @DPU_IRQ_TYPE_SFI_VIDEO_IN:		Video static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_VIDEO_OUT:		Video static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_IN:		DSI CMD0 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_OUT:		DSI CMD0 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_IN:		DSI CMD1 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_OUT:		DSI CMD1 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_IN:		DSI CMD2 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_OUT:		DSI CMD2 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_PROG_LINE:		Programmable Line interrupt
+ * @DPU_IRQ_TYPE_AD4_BL_DONE:		AD4 backlight
+ * @DPU_IRQ_TYPE_CTL_START:		Control start
+ * @DPU_IRQ_TYPE_RESERVED:		Reserved for expansion
+ */
+enum dpu_intr_type {
+	DPU_IRQ_TYPE_WB_ROT_COMP,
+	DPU_IRQ_TYPE_WB_WFD_COMP,
+	DPU_IRQ_TYPE_PING_PONG_COMP,
+	DPU_IRQ_TYPE_PING_PONG_RD_PTR,
+	DPU_IRQ_TYPE_PING_PONG_WR_PTR,
+	DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
+	DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+	DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
+	DPU_IRQ_TYPE_INTF_UNDER_RUN,
+	DPU_IRQ_TYPE_INTF_VSYNC,
+	DPU_IRQ_TYPE_CWB_OVERFLOW,
+	DPU_IRQ_TYPE_HIST_VIG_DONE,
+	DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
+	DPU_IRQ_TYPE_HIST_DSPP_DONE,
+	DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+	DPU_IRQ_TYPE_WD_TIMER,
+	DPU_IRQ_TYPE_SFI_VIDEO_IN,
+	DPU_IRQ_TYPE_SFI_VIDEO_OUT,
+	DPU_IRQ_TYPE_SFI_CMD_0_IN,
+	DPU_IRQ_TYPE_SFI_CMD_0_OUT,
+	DPU_IRQ_TYPE_SFI_CMD_1_IN,
+	DPU_IRQ_TYPE_SFI_CMD_1_OUT,
+	DPU_IRQ_TYPE_SFI_CMD_2_IN,
+	DPU_IRQ_TYPE_SFI_CMD_2_OUT,
+	DPU_IRQ_TYPE_PROG_LINE,
+	DPU_IRQ_TYPE_AD4_BL_DONE,
+	DPU_IRQ_TYPE_CTL_START,
+	DPU_IRQ_TYPE_RESERVED,
+};
+
+struct dpu_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct dpu_hw_intr_ops {
+	/**
+	 * set_mask - Programs the given interrupt register with the
+	 *            given interrupt mask. Register value will get overwritten.
+	 * @intr:	HW interrupt handle
+	 * @reg_off:	MDSS HW register offset
+	 * @irqmask:	IRQ mask value
+	 */
+	void (*set_mask)(
+			struct dpu_hw_intr *intr,
+			uint32_t reg_off,
+			uint32_t irqmask);
+
+	/**
+	 * irq_idx_lookup - Look up the IRQ index for a HW interrupt type.
+	 *                 Used for all irq-related ops.
+	 * @intr_type:		Interrupt type defined in dpu_intr_type
+	 * @instance_idx:	HW interrupt block instance
+	 * @return:		irq_idx, or -EINVAL on lookup failure
+	 */
+	int (*irq_idx_lookup)(
+			enum dpu_intr_type intr_type,
+			u32 instance_idx);
+
+	/**
+	 * enable_irq - Enable IRQ based on lookup IRQ index
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*enable_irq)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * disable_irq - Disable IRQ based on lookup IRQ index
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*disable_irq)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+	 *                  any asserted IRQs). Useful during reset.
+	 * @intr:	HW interrupt handle
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*clear_all_irqs)(
+			struct dpu_hw_intr *intr);
+
+	/**
+	 * disable_all_irqs - Disables all the interrupts. Useful during reset.
+	 * @intr:	HW interrupt handle
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*disable_all_irqs)(
+			struct dpu_hw_intr *intr);
+
+	/**
+	 * dispatch_irqs - IRQ dispatcher will call the given callback
+	 *                 function when a matching interrupt status bit is
+	 *                 found in the irq mapping table.
+	 * @intr:	HW interrupt handle
+	 * @cbfunc:	Callback function pointer
+	 * @arg:	Argument to pass back during callback
+	 */
+	void (*dispatch_irqs)(
+			struct dpu_hw_intr *intr,
+			void (*cbfunc)(void *arg, int irq_idx),
+			void *arg);
+
+	/**
+	 * get_interrupt_statuses - Gets and stores the values of all
+	 *                          interrupt status registers that are
+	 *                          currently fired.
+	 * @intr:	HW interrupt handle
+	 */
+	void (*get_interrupt_statuses)(
+			struct dpu_hw_intr *intr);
+
+	/**
+	 * clear_interrupt_status - Clears HW interrupt status based on given
+	 *                          lookup IRQ index.
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 */
+	void (*clear_interrupt_status)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * clear_intr_status_nolock() - clears the HW interrupts without lock
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 */
+	void (*clear_intr_status_nolock)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * get_interrupt_status - Gets the HW interrupt status, and clears it
+	 *                        if set, based on a given lookup IRQ index.
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 * @clear:	True to clear irq after read
+	 */
+	u32 (*get_interrupt_status)(
+			struct dpu_hw_intr *intr,
+			int irq_idx,
+			bool clear);
+
+	/**
+	 * get_valid_interrupts - Gets a mask of all valid interrupt sources
+	 *                        within DPU. These are actually status bits
+	 *                        within interrupt registers that identify the
+	 *                        source of the interrupt. For example, valid
+	 *                        interrupt sources can be MDP, DSI, HDMI etc.
+	 * @intr:	HW interrupt handle
+	 * @mask:	Output pointer filled with the interrupt source mask
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*get_valid_interrupts)(
+			struct dpu_hw_intr *intr,
+			uint32_t *mask);
+};
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw:               virtual address mapping
+ * @ops:              function pointer mapping for IRQ handling
+ * @cache_irq_mask:   array of IRQ enable masks reg storage created during init
+ * @save_irq_status:  array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @irq_lock:         spinlock for accessing IRQ resources
+ */
+struct dpu_hw_intr {
+	struct dpu_hw_blk_reg_map hw;
+	struct dpu_hw_intr_ops ops;
+	u32 *cache_irq_mask;
+	u32 *save_irq_status;
+	u32 irq_idx_tbl_size;
+	spinlock_t irq_lock;
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
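+/*
+ * Typical bring-up sequence (illustrative sketch; mmio and catalog are
+ * assumed caller-provided names, and error handling is elided):
+ *
+ *	struct dpu_hw_intr *intr = dpu_hw_intr_init(mmio, catalog);
+ *	if (!IS_ERR(intr)) {
+ *		intr->ops.clear_all_irqs(intr);
+ *		intr->ops.disable_all_irqs(intr);
+ *	}
+ */
+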
+/**
+ * dpu_hw_intr_destroy(): Cleans up the interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 0000000..d280df5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,349 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define INTF_TIMING_ENGINE_EN           0x000
+#define INTF_CONFIG                     0x004
+#define INTF_HSYNC_CTL                  0x008
+#define INTF_VSYNC_PERIOD_F0            0x00C
+#define INTF_VSYNC_PERIOD_F1            0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0       0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1       0x018
+#define INTF_DISPLAY_V_START_F0         0x01C
+#define INTF_DISPLAY_V_START_F1         0x020
+#define INTF_DISPLAY_V_END_F0           0x024
+#define INTF_DISPLAY_V_END_F1           0x028
+#define INTF_ACTIVE_V_START_F0          0x02C
+#define INTF_ACTIVE_V_START_F1          0x030
+#define INTF_ACTIVE_V_END_F0            0x034
+#define INTF_ACTIVE_V_END_F1            0x038
+#define INTF_DISPLAY_HCTL               0x03C
+#define INTF_ACTIVE_HCTL                0x040
+#define INTF_BORDER_COLOR               0x044
+#define INTF_UNDERFLOW_COLOR            0x048
+#define INTF_HSYNC_SKEW                 0x04C
+#define INTF_POLARITY_CTL               0x050
+#define INTF_TEST_CTL                   0x054
+#define INTF_TP_COLOR0                  0x058
+#define INTF_TP_COLOR1                  0x05C
+#define INTF_FRAME_LINE_COUNT_EN        0x0A8
+#define INTF_FRAME_COUNT                0x0AC
+#define INTF_LINE_COUNT                 0x0B0
+
+#define INTF_DEFLICKER_CONFIG           0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF      0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF       0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN    0x084
+#define INTF_PANEL_FORMAT               0x090
+#define INTF_TPG_ENABLE                 0x100
+#define INTF_TPG_MAIN_CONTROL           0x104
+#define INTF_TPG_VIDEO_CONFIG           0x108
+#define INTF_TPG_COMPONENT_LIMITS       0x10C
+#define INTF_TPG_RECTANGLE              0x110
+#define INTF_TPG_INITIAL_VALUE          0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING            0x11C
+#define INTF_PROG_FETCH_START           0x170
+#define INTF_PROG_ROT_START             0x174
+
+#define INTF_MISR_CTRL			0x180
+#define INTF_MISR_SIGNATURE		0x184
+
+static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->intf_count; i++) {
+		if ((intf == m->intf[i].id) &&
+		(m->intf[i].type != INTF_NONE)) {
+			b->base_off = addr;
+			b->blk_off = m->intf[i].base;
+			b->length = m->intf[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_INTF;
+			return &m->intf[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+		const struct intf_timing_params *p,
+		const struct dpu_format *fmt)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 hsync_period, vsync_period;
+	u32 display_v_start, display_v_end;
+	u32 hsync_start_x, hsync_end_x;
+	u32 active_h_start, active_h_end;
+	u32 active_v_start, active_v_end;
+	u32 active_hctl, display_hctl, hsync_ctl;
+	u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+	u32 panel_format;
+	u32 intf_cfg;
+
+	/* read interface_cfg */
+	intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+	hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+	p->h_front_porch;
+	vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+	p->v_front_porch;
+
+	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+	hsync_period) + p->hsync_skew;
+	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+	p->hsync_skew - 1;
+
+	if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+		display_v_start += p->hsync_pulse_width + p->h_back_porch;
+		display_v_end -= p->h_front_porch;
+	}
+
+	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+	hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+	if (p->width != p->xres) {
+		active_h_start = hsync_start_x;
+		active_h_end = active_h_start + p->xres - 1;
+	} else {
+		active_h_start = 0;
+		active_h_end = 0;
+	}
+
+	if (p->height != p->yres) {
+		active_v_start = display_v_start;
+		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+	if (active_h_end) {
+		active_hctl = (active_h_end << 16) | active_h_start;
+		intf_cfg |= BIT(29);	/* ACTIVE_H_ENABLE */
+	} else {
+		active_hctl = 0;
+	}
+
+	if (active_v_end)
+		intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	den_polarity = 0;
+	if (ctx->cap->type == INTF_HDMI) {
+		hsync_polarity = p->yres >= 720 ? 0 : 1;
+		vsync_polarity = p->yres >= 720 ? 0 : 1;
+	} else {
+		hsync_polarity = 0;
+		vsync_polarity = 0;
+	}
+	polarity_ctl = (den_polarity << 2) | /*  DEN Polarity  */
+		(vsync_polarity << 1) | /* VSYNC Polarity */
+		(hsync_polarity << 0);  /* HSYNC Polarity */
+
+	if (!DPU_FORMAT_IS_YUV(fmt))
+		panel_format = (fmt->bits[C0_G_Y] |
+				(fmt->bits[C1_B_Cb] << 2) |
+				(fmt->bits[C2_R_Cr] << 4) |
+				(0x21 << 8));
+	else
+		/* Interface treats all the pixel data in RGB888 format */
+		panel_format = (COLOR_8BIT |
+				(COLOR_8BIT << 2) |
+				(COLOR_8BIT << 4) |
+				(0x21 << 8));
+
+	DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+	DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+	DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+			p->vsync_pulse_width * hsync_period);
+	DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+	DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+	DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+	DPU_REG_WRITE(c, INTF_ACTIVE_HCTL,  active_hctl);
+	DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+	DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+	DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+	DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+	DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+	DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+	DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+	DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+	DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
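+
+/*
+ * Worked example for the timing math above (illustrative numbers only):
+ * for a 1920x1080 mode with h-timings hsync_pulse_width=32,
+ * h_back_porch=80, h_front_porch=48 and v-timings vsync_pulse_width=5,
+ * v_back_porch=23, v_front_porch=3:
+ *
+ *	hsync_period = 32 + 80 + 1920 + 48 = 2080 pixels
+ *	vsync_period = 5 + 23 + 1080 + 3  = 1111 lines
+ *
+ * so INTF_VSYNC_PERIOD_F0 is programmed with 2080 * 1111 = 2310880.
+ */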
+
+static void dpu_hw_intf_enable_timing_engine(
+		struct dpu_hw_intf *intf,
+		u8 enable)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+	/* Note: Display interface select is handled in top block hw layer */
+	DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+		struct dpu_hw_intf *intf,
+		const struct intf_prog_fetch *fetch)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+	int fetch_enable;
+
+	/*
+	 * Fetch should always be outside the active lines. If the fetch
+	 * is programmed within the active region, hardware behavior is
+	 * unknown.
+	 */
+
+	fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+	if (fetch->enable) {
+		fetch_enable |= BIT(31);
+		DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+				fetch->fetch_start);
+	} else {
+		fetch_enable &= ~BIT(31);
+	}
+
+	DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void dpu_hw_intf_get_status(
+		struct dpu_hw_intf *intf,
+		struct intf_status *s)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+	s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+	if (s->is_en) {
+		s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+		s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+	} else {
+		s->line_count = 0;
+		s->frame_count = 0;
+	}
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
+						bool enable, u32 frame_count)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+	u32 config = 0;
+
+	DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* clear misr data */
+	wmb();
+
+	if (enable)
+		config = (frame_count & MISR_FRAME_COUNT_MASK) |
+			MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+	DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+	return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
+}
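+
+/*
+ * Illustrative MISR usage (sketch only): arm the signature generator for
+ * one frame, then read the signature back once the frame has completed:
+ *
+ *	intf->ops.setup_misr(intf, true, 1);
+ *	... wait for the frame to complete ...
+ *	u32 signature = intf->ops.collect_misr(intf);
+ */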
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+	struct dpu_hw_blk_reg_map *c;
+
+	if (!intf)
+		return 0;
+
+	c = &intf->hw;
+
+	return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+	ops->setup_prg_fetch  = dpu_hw_intf_setup_prg_fetch;
+	ops->get_status = dpu_hw_intf_get_status;
+	ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+	ops->setup_misr = dpu_hw_intf_setup_misr;
+	ops->collect_misr = dpu_hw_intf_collect_misr;
+	ops->get_line_count = dpu_hw_intf_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_intf *c;
+	struct dpu_intf_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _intf_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		pr_err("failed to create dpu_hw_intf %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	c->mdss = m;
+	_setup_intf_ops(&c->ops, c->cap->features);
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+	if (intf)
+		dpu_hw_blk_destroy(&intf->base);
+	kfree(intf);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644
index 0000000..a79d735
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+	u32 width;		/* active width */
+	u32 height;		/* active height */
+	u32 xres;		/* Display panel width */
+	u32 yres;		/* Display panel height */
+
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 hsync_pulse_width;
+	u32 vsync_pulse_width;
+	u32 hsync_polarity;
+	u32 vsync_polarity;
+	u32 border_clr;
+	u32 underflow_clr;
+	u32 hsync_skew;
+};
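+
+/*
+ * Illustrative mapping from a DRM display mode (sketch only; "mode" and
+ * "timing" are assumed caller-side names). For a simple panel the active
+ * and panel sizes match and the porches come straight from the mode:
+ *
+ *	timing.width  = timing.xres = mode->hdisplay;
+ *	timing.height = timing.yres = mode->vdisplay;
+ *	timing.h_front_porch = mode->hsync_start - mode->hdisplay;
+ *	timing.hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+ *	timing.h_back_porch = mode->htotal - mode->hsync_end;
+ */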
+
+struct intf_prog_fetch {
+	u8 enable;
+	/* vsync counter for the front porch pixel line */
+	u32 fetch_start;
+};
+
+struct intf_status {
+	u8 is_en;		/* interface timing engine is enabled or not */
+	u32 frame_count;	/* frame count since timing engine enabled */
+	u32 line_count;		/* current line count including blanking */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ * @setup_timing_gen: programs the timing engine
+ * @setup_prg_fetch:  enables/disables the programmable fetch logic
+ * @enable_timing:    enables/disables the timing engine
+ * @get_status:       returns whether the timing engine is enabled
+ * @setup_misr:       enables/disables MISR in HW register
+ * @collect_misr:     reads and stores MISR data from HW register
+ * @get_line_count:   reads the current vertical line counter
+ */
+struct dpu_hw_intf_ops {
+	void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+			const struct intf_timing_params *p,
+			const struct dpu_format *fmt);
+
+	void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+			const struct intf_prog_fetch *fetch);
+
+	void (*enable_timing)(struct dpu_hw_intf *intf,
+			u8 enable);
+
+	void (*get_status)(struct dpu_hw_intf *intf,
+			struct intf_status *status);
+
+	void (*setup_misr)(struct dpu_hw_intf *intf,
+			bool enable, u32 frame_count);
+
+	u32 (*collect_misr)(struct dpu_hw_intf *intf);
+
+	u32 (*get_line_count)(struct dpu_hw_intf *intf);
+};
+
+struct dpu_hw_intf {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* intf */
+	enum dpu_intf idx;
+	const struct dpu_intf_cfg *cap;
+	const struct dpu_mdss_cfg *mdss;
+
+	/* ops */
+	struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * to_dpu_hw_intf - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
+ * dpu_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx:  interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf:   Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
+#endif /*_DPU_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644
index 0000000..4ab72b0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -0,0 +1,261 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define LM_OP_MODE                        0x00
+#define LM_OUT_SIZE                       0x04
+#define LM_BORDER_COLOR_0                 0x08
+#define LM_BORDER_COLOR_1                 0x010
+
+/* These register are offset to mixer base + stage base */
+#define LM_BLEND0_OP                     0x00
+#define LM_BLEND0_CONST_ALPHA            0x04
+#define LM_FG_COLOR_FILL_COLOR_0         0x08
+#define LM_FG_COLOR_FILL_COLOR_1         0x0C
+#define LM_FG_COLOR_FILL_SIZE            0x10
+#define LM_FG_COLOR_FILL_XY              0x14
+
+#define LM_BLEND0_FG_ALPHA               0x04
+#define LM_BLEND0_BG_ALPHA               0x08
+
+#define LM_MISR_CTRL			0x310
+#define LM_MISR_SIGNATURE		0x314
+
+static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->mixer_count; i++) {
+		if (mixer == m->mixer[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mixer[i].base;
+			b->length = m->mixer[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_LM;
+			return &m->mixer[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be set up
+ * @ctx:   mixer ctx containing the mixer to be programmed
+ * @stage: stage index to set up
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+	const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+	int rc;
+
+	if (stage == DPU_STAGE_BASE)
+		rc = -EINVAL;
+	else if (stage <= sblk->maxblendstages)
+		rc = sblk->blendstage_base[stage - DPU_STAGE_0];
+	else
+		rc = -EINVAL;
+
+	return rc;
+}
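+
+/*
+ * Example (illustrative): for blend stage DPU_STAGE_0 the offset resolves
+ * to sblk->blendstage_base[0], so that stage's LM_BLEND0_OP register sits
+ * at mixer base + blendstage_base[0] + 0x00.
+ */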
+
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+		struct dpu_hw_mixer_cfg *mixer)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 outsize;
+	u32 op_mode;
+
+	op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+	outsize = mixer->out_height << 16 | mixer->out_width;
+	DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+	/* SPLIT_LEFT_RIGHT */
+	if (mixer->right_mixer)
+		op_mode |= BIT(31);
+	else
+		op_mode &= ~BIT(31);
+	DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+		struct dpu_mdss_color *color,
+		u8 border_en)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	if (border_en) {
+		DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+			(color->color_0 & 0xFFF) |
+			((color->color_1 & 0xFFF) << 0x10));
+		DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+			(color->color_2 & 0xFFF) |
+			((color->color_3 & 0xFFF) << 0x10));
+	}
+}
+
+static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+	u32 const_alpha;
+
+	if (stage == DPU_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
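+	/* BG alpha occupies bits [7:0], FG alpha bits [23:16] */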
+	const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+	DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+	DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+
+	if (stage == DPU_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
+	DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+	DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+	DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+	uint32_t mixer_op_mode)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int op_mode;
+
+	/* read the existing op_mode configuration */
+	op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+	op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+	DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
+			void *cfg)
+{
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
+				bool enable, u32 frame_count)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 config = 0;
+
+	DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* clear misr data */
+	wmb();
+
+	if (enable)
+		config = (frame_count & MISR_FRAME_COUNT_MASK) |
+			MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+	DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	return DPU_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
+static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+		struct dpu_hw_lm_ops *ops,
+		unsigned long features)
+{
+	ops->setup_mixer_out = dpu_hw_lm_setup_out;
+	if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+		ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
+	else
+		ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+	ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+	ops->setup_border_color = dpu_hw_lm_setup_border_color;
+	ops->setup_gc = dpu_hw_lm_gc;
+	ops->setup_misr = dpu_hw_lm_setup_misr;
+	ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_mixer *c;
+	struct dpu_lm_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _lm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_mixer_ops(m, &c->ops, c->cap->features);
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+	if (lm)
+		dpu_hw_blk_destroy(&lm->base);
+	kfree(lm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644
index 0000000..e29e5da
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+	u32 out_width;
+	u32 out_height;
+	bool right_mixer;
+	int flags;
+};
+
+struct dpu_hw_color3_cfg {
+	u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ * struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_lm_ops {
+	/*
+	 * Sets up mixer output width and height
+	 * and border color if enabled
+	 */
+	void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+		struct dpu_hw_mixer_cfg *cfg);
+
+	/*
+	 * Alpha blending configuration
+	 * for the specified stage
+	 */
+	void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+		uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+	/*
+	 * Alpha color component selection from either fg or bg
+	 */
+	void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+	/**
+	 * setup_border_color : enable/disable border color
+	 */
+	void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+		struct dpu_mdss_color *color,
+		u8 border_en);
+	/**
+	 * setup_gc : enable/disable gamma correction feature
+	 */
+	void (*setup_gc)(struct dpu_hw_mixer *mixer,
+			void *cfg);
+
+	/* setup_misr: enables/disables MISR in HW register */
+	void (*setup_misr)(struct dpu_hw_mixer *ctx,
+			bool enable, u32 frame_count);
+
+	/* collect_misr: reads and stores MISR data from HW register */
+	u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
+};
+
+struct dpu_hw_mixer {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* lm */
+	enum dpu_lm  idx;
+	const struct dpu_lm_cfg   *cap;
+	const struct dpu_mdp_cfg  *mdp;
+	const struct dpu_ctl_cfg  *ctl;
+
+	/* ops */
+	struct dpu_hw_lm_ops ops;
+
+	/* store mixer info specific to display */
+	struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init(): Initializes the mixer hw driver object.
+ * Should be called once before accessing every mixer.
+ * @idx:  mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm:   Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
+#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644
index 0000000..35e6bf9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -0,0 +1,465 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME			"dpu"
+
+#define DPU_NONE                        0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE	9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE		6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE		3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES			4
+#endif
+
+#define PIPES_PER_STAGE			2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES		3
+#endif
+
+enum dpu_format_flags {
+	DPU_FORMAT_FLAG_YUV_BIT,
+	DPU_FORMAT_FLAG_DX_BIT,
+	DPU_FORMAT_FLAG_COMPRESSED_BIT,
+	DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV		BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX		BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED	BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X)		\
+	(test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X)		\
+	(test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X)		((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+	(((X)->fetch_mode == DPU_FETCH_UBWC) && \
+			!test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+	(((X)->fetch_mode == DPU_FETCH_UBWC) && \
+			test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
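+
+/*
+ * Note: DPU_FETCH_UBWC covers both macro-tile and compressed layouts; the
+ * COMPRESSED flag is what distinguishes DPU_FORMAT_IS_TILE from
+ * DPU_FORMAT_IS_UBWC above.
+ */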
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST	(0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST	(1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL	(2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL	(3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA		(1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA		(1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA	(1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN		(1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST	(0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST	(1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL	(2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL	(3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA		(1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA		(1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA	(1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN		(1 << 13)
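+
+/*
+ * Example (illustrative): classic "coverage" alpha blending, where the FG
+ * pixel alpha weights the foreground and its inverse weights the
+ * background, combines the flags above as:
+ *
+ *	u32 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
+ *		       DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ *		       DPU_BLEND_BG_INV_ALPHA;
+ */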
+
+#define DPU_VSYNC0_SOURCE_GPIO		0
+#define DPU_VSYNC1_SOURCE_GPIO		1
+#define DPU_VSYNC2_SOURCE_GPIO		2
+#define DPU_VSYNC_SOURCE_INTF_0		3
+#define DPU_VSYNC_SOURCE_INTF_1		4
+#define DPU_VSYNC_SOURCE_INTF_2		5
+#define DPU_VSYNC_SOURCE_INTF_3		6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4	11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3	12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2	13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1	14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0	15
+
+enum dpu_hw_blk_type {
+	DPU_HW_BLK_TOP = 0,
+	DPU_HW_BLK_SSPP,
+	DPU_HW_BLK_LM,
+	DPU_HW_BLK_CTL,
+	DPU_HW_BLK_CDM,
+	DPU_HW_BLK_PINGPONG,
+	DPU_HW_BLK_INTF,
+	DPU_HW_BLK_WB,
+	DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+	MDP_TOP = 0x1,
+	MDP_MAX,
+};
+
+enum dpu_sspp {
+	SSPP_NONE,
+	SSPP_VIG0,
+	SSPP_VIG1,
+	SSPP_VIG2,
+	SSPP_VIG3,
+	SSPP_RGB0,
+	SSPP_RGB1,
+	SSPP_RGB2,
+	SSPP_RGB3,
+	SSPP_DMA0,
+	SSPP_DMA1,
+	SSPP_DMA2,
+	SSPP_DMA3,
+	SSPP_CURSOR0,
+	SSPP_CURSOR1,
+	SSPP_MAX
+};
+
+enum dpu_sspp_type {
+	SSPP_TYPE_VIG,
+	SSPP_TYPE_RGB,
+	SSPP_TYPE_DMA,
+	SSPP_TYPE_CURSOR,
+	SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+	LM_0 = 1,
+	LM_1,
+	LM_2,
+	LM_3,
+	LM_4,
+	LM_5,
+	LM_6,
+	LM_MAX
+};
+
+enum dpu_stage {
+	DPU_STAGE_BASE = 0,
+	DPU_STAGE_0,
+	DPU_STAGE_1,
+	DPU_STAGE_2,
+	DPU_STAGE_3,
+	DPU_STAGE_4,
+	DPU_STAGE_5,
+	DPU_STAGE_6,
+	DPU_STAGE_7,
+	DPU_STAGE_8,
+	DPU_STAGE_9,
+	DPU_STAGE_10,
+	DPU_STAGE_MAX
+};
+enum dpu_dspp {
+	DSPP_0 = 1,
+	DSPP_1,
+	DSPP_2,
+	DSPP_3,
+	DSPP_MAX
+};
+
+enum dpu_ds {
+	DS_TOP,
+	DS_0,
+	DS_1,
+	DS_MAX
+};
+
+enum dpu_ctl {
+	CTL_0 = 1,
+	CTL_1,
+	CTL_2,
+	CTL_3,
+	CTL_4,
+	CTL_MAX
+};
+
+enum dpu_cdm {
+	CDM_0 = 1,
+	CDM_1,
+	CDM_MAX
+};
+
+enum dpu_pingpong {
+	PINGPONG_0 = 1,
+	PINGPONG_1,
+	PINGPONG_2,
+	PINGPONG_3,
+	PINGPONG_4,
+	PINGPONG_S0,
+	PINGPONG_MAX
+};
+
+enum dpu_intf {
+	INTF_0 = 1,
+	INTF_1,
+	INTF_2,
+	INTF_3,
+	INTF_4,
+	INTF_5,
+	INTF_6,
+	INTF_MAX
+};
+
+enum dpu_intf_type {
+	INTF_NONE = 0x0,
+	INTF_DSI = 0x1,
+	INTF_HDMI = 0x3,
+	INTF_LCDC = 0x5,
+	INTF_EDP = 0x9,
+	INTF_DP = 0xa,
+	INTF_TYPE_MAX,
+
+	/* virtual interfaces */
+	INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+	INTF_MODE_NONE = 0,
+	INTF_MODE_CMD,
+	INTF_MODE_VIDEO,
+	INTF_MODE_WB_BLOCK,
+	INTF_MODE_WB_LINE,
+	INTF_MODE_MAX
+};
+
+enum dpu_wb {
+	WB_0 = 1,
+	WB_1,
+	WB_2,
+	WB_3,
+	WB_MAX
+};
+
+enum dpu_ad {
+	AD_0 = 0x1,
+	AD_1,
+	AD_MAX
+};
+
+enum dpu_cwb {
+	CWB_0 = 0x1,
+	CWB_1,
+	CWB_2,
+	CWB_3,
+	CWB_MAX
+};
+
+enum dpu_wd_timer {
+	WD_TIMER_0 = 0x1,
+	WD_TIMER_1,
+	WD_TIMER_2,
+	WD_TIMER_3,
+	WD_TIMER_4,
+	WD_TIMER_5,
+	WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+	VBIF_0,
+	VBIF_1,
+	VBIF_MAX,
+	VBIF_RT = VBIF_0,
+	VBIF_NRT = VBIF_1
+};
+
+enum dpu_iommu_domain {
+	DPU_IOMMU_DOMAIN_UNSECURE,
+	DPU_IOMMU_DOMAIN_SECURE,
+	DPU_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * DPU HW component order color map
+ */
+enum {
+	C0_G_Y = 0,
+	C1_B_Cb = 1,
+	C2_R_Cr = 2,
+	C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines the color component pixel packing
+ * @DPU_PLANE_INTERLEAVED   : Color components in single plane
+ * @DPU_PLANE_PLANAR        : Color component in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum dpu_plane_type {
+	DPU_PLANE_INTERLEAVED,
+	DPU_PLANE_PLANAR,
+	DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB   : No chroma subsampling
+ * @DPU_CHROMA_H2V1  : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2  : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420   : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+	DPU_CHROMA_RGB,
+	DPU_CHROMA_H2V1,
+	DPU_CHROMA_H1V2,
+	DPU_CHROMA_420
+};
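+
+/* e.g. NV12 is DPU_PLANE_PSEUDO_PLANAR data with DPU_CHROMA_420 sampling */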
+
+/**
+ * enum dpu_fetch_type - defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR   : fetch is line by line
+ * @DPU_FETCH_TILE     : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC     : fetch and decompress data
+ */
+enum dpu_fetch_type {
+	DPU_FETCH_LINEAR,
+	DPU_FETCH_TILE,
+	DPU_FETCH_UBWC
+};
+
+/**
+ * Values of this enum are chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+	COLOR_ALPHA_1BIT = 0,
+	COLOR_ALPHA_4BIT = 1,
+	COLOR_4BIT = 0,
+	COLOR_5BIT = 1, /* No 5-bit Alpha */
+	COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+	COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE      : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : vertical row interleaving
+ * @BLEND_3D_COL_INT   : column interleaving
+ * @BLEND_3D_MAX       :
+ */
+enum dpu_3d_blend_mode {
+	BLEND_3D_NONE = 0,
+	BLEND_3D_FRAME_INT,
+	BLEND_3D_H_ROW_INT,
+	BLEND_3D_V_ROW_INT,
+	BLEND_3D_COL_INT,
+	BLEND_3D_MAX
+};
+
+/** struct dpu_format - defines the format configuration which
+ * allows DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 component
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+	struct msm_format base;
+	enum dpu_plane_type fetch_planes;
+	u8 element[DPU_MAX_PLANES];
+	u8 bits[DPU_MAX_PLANES];
+	enum dpu_chroma_samp_type chroma_sample;
+	u8 unpack_align_msb;
+	u8 unpack_tight;
+	u8 unpack_count;
+	u8 bpp;
+	u8 alpha_enable;
+	u8 num_planes;
+	enum dpu_fetch_type fetch_mode;
+	DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+	u16 tile_width;
+	u16 tile_height;
+};
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+	const struct dpu_format *format;
+	uint32_t num_planes;
+	uint32_t width;
+	uint32_t height;
+	uint32_t total_size;
+	uint32_t plane_addr[DPU_MAX_PLANES];
+	uint32_t plane_size[DPU_MAX_PLANES];
+	uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+	/* matrix coefficients in S15.16 format */
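+	/* e.g. 1.0 is encoded as 0x00010000 and 1.5 as 0x00018000 */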
+	uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+	uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+	uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+	uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+	uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
+
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+	u32 color_0;
+	u32 color_1;
+	u32 color_2;
+	u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE     (1 << 0)
+#define DPU_DBG_MASK_CDM      (1 << 1)
+#define DPU_DBG_MASK_INTF     (1 << 2)
+#define DPU_DBG_MASK_LM       (1 << 3)
+#define DPU_DBG_MASK_CTL      (1 << 4)
+#define DPU_DBG_MASK_PINGPONG (1 << 5)
+#define DPU_DBG_MASK_SSPP     (1 << 6)
+#define DPU_DBG_MASK_WB       (1 << 7)
+#define DPU_DBG_MASK_TOP      (1 << 8)
+#define DPU_DBG_MASK_VBIF     (1 << 9)
+#define DPU_DBG_MASK_ROT      (1 << 10)
+
+#endif  /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644
index 0000000..cc3a623
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -0,0 +1,250 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN                0x000
+#define PP_SYNC_CONFIG_VSYNC            0x004
+#define PP_SYNC_CONFIG_HEIGHT           0x008
+#define PP_SYNC_WRCOUNT                 0x00C
+#define PP_VSYNC_INIT_VAL               0x010
+#define PP_INT_COUNT_VAL                0x014
+#define PP_SYNC_THRESH                  0x018
+#define PP_START_POS                    0x01C
+#define PP_RD_PTR_IRQ                   0x020
+#define PP_WR_PTR_IRQ                   0x024
+#define PP_OUT_LINE_COUNT               0x028
+#define PP_LINE_COUNT                   0x02C
+
+#define PP_FBC_MODE                     0x034
+#define PP_FBC_BUDGET_CTL               0x038
+#define PP_FBC_LOSSY_MODE               0x03C
+
+static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->pingpong_count; i++) {
+		if (pp == m->pingpong[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->pingpong[i].base;
+			b->length = m->pingpong[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_PINGPONG;
+			return &m->pingpong[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
+		struct dpu_hw_tear_check *te)
+{
+	struct dpu_hw_blk_reg_map *c;
+	int cfg;
+
+	if (!pp || !te)
+		return -EINVAL;
+	c = &pp->hw;
+
+	cfg = BIT(19); /* VSYNC_COUNTER_EN */
+	if (te->hw_vsync_mode)
+		cfg |= BIT(20);
+
+	cfg |= te->vsync_count;
+
+	DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+	DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+	DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+	DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+	DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+	DPU_REG_WRITE(c, PP_SYNC_THRESH,
+			((te->sync_threshold_continue << 16) |
+			 te->sync_threshold_start));
+	DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+			(te->start_pos + te->sync_threshold_start + 1));
+
+	return 0;
+}
+
+static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
+		u32 timeout_us)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 val;
+	int rc;
+
+	if (!pp)
+		return -EINVAL;
+
+	c = &pp->hw;
+	rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+			val, (val & 0xffff) >= 1, 10, timeout_us);
+
+	return rc;
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
+{
+	struct dpu_hw_blk_reg_map *c;
+
+	if (!pp)
+		return -EINVAL;
+	c = &pp->hw;
+
+	DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+	return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+		bool enable_external_te)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 cfg;
+	int orig;
+
+	if (!pp)
+		return -EINVAL;
+
+	c = &pp->hw;
+	cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+	orig = (bool)(cfg & BIT(20));
+	if (enable_external_te)
+		cfg |= BIT(20);
+	else
+		cfg &= ~BIT(20);
+	DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+	trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+	return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+		struct dpu_hw_pp_vsync_info *info)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 val;
+
+	if (!pp || !info)
+		return -EINVAL;
+	c = &pp->hw;
+
+	val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+	info->rd_ptr_init_val = val & 0xffff;
+
+	val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+	info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+	info->rd_ptr_line_count = val & 0xffff;
+
+	val = DPU_REG_READ(c, PP_LINE_COUNT);
+	info->wr_ptr_line_count = val & 0xffff;
+
+	return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 height, init;
+	u32 line = 0xFFFF;
+
+	if (!pp)
+		return 0;
+	c = &pp->hw;
+
+	init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+	height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+	if (height < init)
+		goto line_count_exit;
+
+	line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
+	if (line < init)
+		line += (0xFFFF - init);
+	else
+		line -= init;
+
+line_count_exit:
+	return line;
+}
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
+	const struct dpu_pingpong_cfg *hw_cap)
+{
+	ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
+	ops->enable_tearcheck = dpu_hw_pp_enable_te;
+	ops->connect_external_te = dpu_hw_pp_connect_external_te;
+	ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
+	ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+	ops->get_line_count = dpu_hw_pp_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_pingpong *c;
+	struct dpu_pingpong_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _pingpong_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_pingpong_ops(&c->ops, c->caps);
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+	if (pp)
+		dpu_hw_blk_destroy(&pp->base);
+	kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644
index 0000000..3caccd7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_pingpong;
+
+struct dpu_hw_tear_check {
+	/*
+	 * This is the ratio of the MDP vsync clock frequency (Hz) to the
+	 * refresh rate, divided by the number of lines.
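+	 *
+	 * Illustrative numbers (not from this patch): a 19.2 MHz vsync
+	 * clock driving a 60 fps panel of ~2200 total lines gives
+	 * 19200000 / (60 * 2200) ~= 145.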
+	 */
+	u32 vsync_count;
+	u32 sync_cfg_height;
+	u32 vsync_init_val;
+	u32 sync_threshold_start;
+	u32 sync_threshold_continue;
+	u32 start_pos;
+	u32 rd_ptr_irq;
+	u8 hw_vsync_mode;
+};
+
+struct dpu_hw_pp_vsync_info {
+	u32 rd_ptr_init_val;	/* value of rd pointer at vsync edge */
+	u32 rd_ptr_frame_count;	/* num frames sent since enabling interface */
+	u32 rd_ptr_line_count;	/* current line on panel (rd ptr) */
+	u32 wr_ptr_line_count;	/* current line within pp fifo (wr ptr) */
+};
+
+/**
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong HW driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ *  @setup_tearcheck : program tear check values
+ *  @enable_tearcheck : enables tear check
+ *  @connect_external_te : set or clear listening to an external TE source
+ *  @get_vsync_info : retrieves timing info of the panel
+ *  @poll_timeout_wr_ptr : poll until write pointer transmission starts
+ *  @get_line_count: obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+	/**
+	 * enables vsync generation, sets up the init value of the
+	 * read pointer and programs the tear check configuration
+	 */
+	int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
+			struct dpu_hw_tear_check *cfg);
+
+	/**
+	 * enables tear check block
+	 */
+	int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+			bool enable);
+
+	/**
+	 * read, modify, write to either set or clear listening to external TE
+	 * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+	 */
+	int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+			bool enable_external_te);
+
+	/**
+	 * provides the programmed and current
+	 * line_count
+	 */
+	int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
+			struct dpu_hw_pp_vsync_info  *info);
+
+	/**
+	 * poll until write pointer transmission starts
+	 * @Return: 0 on success, -ETIMEDOUT on timeout
+	 */
+	int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
+
+	/**
+	 * Obtain current vertical line counter
+	 */
+	u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+};
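+
+/*
+ * Typical command-mode bring-up (sketch only; real call sites may differ):
+ *
+ *	pp->ops.setup_tearcheck(pp, &te_cfg);
+ *	pp->ops.enable_tearcheck(pp, true);
+ */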
+
+struct dpu_hw_pingpong {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* pingpong */
+	enum dpu_pingpong idx;
+	const struct dpu_pingpong_cfg *caps;
+
+	/* ops */
+	struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
+ *	pingpong idx.
+ * @idx:  Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context;
+ *	should be called to free the context
+ * @pp:   Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
+#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644
index 0000000..c25b52a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define DPU_FETCH_CONFIG_RESET_VALUE   0x00000087
+
+/* DPU_SSPP_SRC */
+#define SSPP_SRC_SIZE                      0x00
+#define SSPP_SRC_XY                        0x08
+#define SSPP_OUT_SIZE                      0x0c
+#define SSPP_OUT_XY                        0x10
+#define SSPP_SRC0_ADDR                     0x14
+#define SSPP_SRC1_ADDR                     0x18
+#define SSPP_SRC2_ADDR                     0x1C
+#define SSPP_SRC3_ADDR                     0x20
+#define SSPP_SRC_YSTRIDE0                  0x24
+#define SSPP_SRC_YSTRIDE1                  0x28
+#define SSPP_SRC_FORMAT                    0x30
+#define SSPP_SRC_UNPACK_PATTERN            0x34
+#define SSPP_SRC_OP_MODE                   0x38
+
+/* SSPP_MULTIRECT*/
+#define SSPP_SRC_SIZE_REC1                 0x16C
+#define SSPP_SRC_XY_REC1                   0x168
+#define SSPP_OUT_SIZE_REC1                 0x160
+#define SSPP_OUT_XY_REC1                   0x164
+#define SSPP_SRC_FORMAT_REC1               0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1       0x178
+#define SSPP_SRC_OP_MODE_REC1              0x17C
+#define SSPP_MULTIRECT_OPMODE              0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1       0x180
+#define SSPP_EXCL_REC_SIZE_REC1            0x184
+#define SSPP_EXCL_REC_XY_REC1              0x188
+
+#define MDSS_MDP_OP_DEINTERLACE            BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD        BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1              BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0              BIT(17)
+#define MDSS_MDP_OP_IGC_EN                 BIT(16)
+#define MDSS_MDP_OP_FLIP_UD                BIT(14)
+#define MDSS_MDP_OP_FLIP_LR                BIT(13)
+#define MDSS_MDP_OP_BWC_EN                 BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE            BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS           (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH             (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED              (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR            0x3c
+#define SSPP_EXCL_REC_CTL                  0x40
+#define SSPP_UBWC_STATIC_CTRL              0x44
+#define SSPP_FETCH_CONFIG                  0x048
+#define SSPP_DANGER_LUT                    0x60
+#define SSPP_SAFE_LUT                      0x64
+#define SSPP_CREQ_LUT                      0x68
+#define SSPP_QOS_CTRL                      0x6C
+#define SSPP_DECIMATION_CONFIG             0xB4
+#define SSPP_SRC_ADDR_SW_STATUS            0x70
+#define SSPP_CREQ_LUT_0                    0x74
+#define SSPP_CREQ_LUT_1                    0x78
+#define SSPP_SW_PIX_EXT_C0_LR              0x100
+#define SSPP_SW_PIX_EXT_C0_TB              0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS      0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR            0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB            0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS    0x118
+#define SSPP_SW_PIX_EXT_C3_LR              0x120
+#define SSPP_SW_PIX_EXT_C3_TB              0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
+#define SSPP_TRAFFIC_SHAPER                0x130
+#define SSPP_CDP_CNTL                      0x134
+#define SSPP_UBWC_ERROR_STATUS             0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
+#define SSPP_TRAFFIC_SHAPER_REC1           0x158
+#define SSPP_EXCL_REC_SIZE                 0x1B4
+#define SSPP_EXCL_REC_XY                   0x1B8
+#define SSPP_VIG_OP_MODE                   0x0
+#define SSPP_VIG_CSC_10_OP_MODE            0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX        0xFF
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN            BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN       BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK   0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF    4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK     0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF      20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG                       0x04
+#define COMP0_3_PHASE_STEP_X               0x10
+#define COMP0_3_PHASE_STEP_Y               0x14
+#define COMP1_2_PHASE_STEP_X               0x18
+#define COMP1_2_PHASE_STEP_Y               0x1c
+#define COMP0_3_INIT_PHASE_X               0x20
+#define COMP0_3_INIT_PHASE_Y               0x24
+#define COMP1_2_INIT_PHASE_X               0x28
+#define COMP1_2_INIT_PHASE_Y               0x2C
+#define VIG_0_QSEED2_SHARP                 0x30
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN          BIT(17)
+#define VIG_OP_MEM_PROT_CONT   BIT(15)
+#define VIG_OP_MEM_PROT_VAL    BIT(14)
+#define VIG_OP_MEM_PROT_SAT    BIT(13)
+#define VIG_OP_MEM_PROT_HUE    BIT(12)
+#define VIG_OP_HIST            BIT(8)
+#define VIG_OP_SKY_COL         BIT(7)
+#define VIG_OP_FOIL            BIT(6)
+#define VIG_OP_SKIN_COL        BIT(5)
+#define VIG_OP_PA_EN           BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND  BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN          BIT(0)
+#define CSC_10BIT_OFFSET       4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK			19200000
+
+static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+		int s_id,
+		u32 *idx)
+{
+	int rc = 0;
+	const struct dpu_sspp_sub_blks *sblk;
+
+	if (!ctx)
+		return -EINVAL;
+
+	sblk = ctx->cap->sblk;
+
+	switch (s_id) {
+	case DPU_SSPP_SRC:
+		*idx = sblk->src_blk.base;
+		break;
+	case DPU_SSPP_SCALER_QSEED2:
+	case DPU_SSPP_SCALER_QSEED3:
+	case DPU_SSPP_SCALER_RGB:
+		*idx = sblk->scaler_blk.base;
+		break;
+	case DPU_SSPP_CSC:
+	case DPU_SSPP_CSC_10BIT:
+		*idx = sblk->csc_blk.base;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
+		enum dpu_sspp_multirect_index index,
+		enum dpu_sspp_multirect_mode mode)
+{
+	u32 mode_mask;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (index == DPU_SSPP_RECT_SOLO) {
+		/*
+		 * if rect index is RECT_SOLO, we cannot expect a
+		 * virtual plane sharing the same SSPP id. So we go
+		 * and disable multirect
+		 */
+		mode_mask = 0;
+	} else {
+		mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+		mode_mask |= index;
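+		/* BIT(2) selects time-multiplexed fetch; clear it for parallel */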
+		if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
+			mode_mask |= BIT(2);
+		else
+			mode_mask &= ~BIT(2);
+	}
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
+	if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+		_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
+		!test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+		return;
+
+	opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
+		return;
+
+	opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/*
+ * Setup source pixel format and flip
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
+		const struct dpu_format *fmt, u32 flags,
+		enum dpu_sspp_multirect_index rect_mode)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 chroma_samp, unpack, src_format;
+	u32 opmode = 0;
+	u32 fast_clear = 0;
+	u32 op_mode_off, unpack_pat_off, format_off;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
+		return;
+
+	if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
+		op_mode_off = SSPP_SRC_OP_MODE;
+		unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+		format_off = SSPP_SRC_FORMAT;
+	} else {
+		op_mode_off = SSPP_SRC_OP_MODE_REC1;
+		unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+		format_off = SSPP_SRC_FORMAT_REC1;
+	}
+
+	c = &ctx->hw;
+	opmode = DPU_REG_READ(c, op_mode_off + idx);
+	opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+			MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+	if (flags & DPU_SSPP_FLIP_LR)
+		opmode |= MDSS_MDP_OP_FLIP_LR;
+	if (flags & DPU_SSPP_FLIP_UD)
+		opmode |= MDSS_MDP_OP_FLIP_UD;
+
+	chroma_samp = fmt->chroma_sample;
+	if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+		if (chroma_samp == DPU_CHROMA_H2V1)
+			chroma_samp = DPU_CHROMA_H1V2;
+		else if (chroma_samp == DPU_CHROMA_H1V2)
+			chroma_samp = DPU_CHROMA_H2V1;
+	}
+
+	src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+		(fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+		(fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+	if (flags & DPU_SSPP_ROT_90)
+		src_format |= BIT(11); /* ROT90 */
+
+	if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+		src_format |= BIT(8); /* SRCC3_EN */
+
+	if (flags & DPU_SSPP_SOLID_FILL)
+		src_format |= BIT(22);
+
+	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+		(fmt->element[1] << 8) | (fmt->element[0] << 0);
+	src_format |= ((fmt->unpack_count - 1) << 12) |
+		(fmt->unpack_tight << 17) |
+		(fmt->unpack_align_msb << 18) |
+		((fmt->bpp - 1) << 9);
+
+	if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+		if (DPU_FORMAT_IS_UBWC(fmt))
+			opmode |= MDSS_MDP_OP_BWC_EN;
+		src_format |= (fmt->fetch_mode & 3) << 30; /* FRAME_FORMAT */
+		DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+			DPU_FETCH_CONFIG_RESET_VALUE |
+			ctx->mdp->highest_bank_bit << 18);
+		if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+			fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+			DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+					fast_clear | (ctx->mdp->ubwc_swizzle) |
+					(ctx->mdp->highest_bank_bit << 4));
+		}
+	}
+
+	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+	/* if this is YUV pixel format, enable CSC */
+	if (DPU_FORMAT_IS_YUV(fmt))
+		src_format |= BIT(15);
+
+	if (DPU_FORMAT_IS_DX(fmt))
+		src_format |= BIT(14);
+
+	/* update scaler opmode, if appropriate */
+	if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+		_sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+			DPU_FORMAT_IS_YUV(fmt));
+	else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+		_sspp_setup_csc10_opmode(ctx,
+			VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+			DPU_FORMAT_IS_YUV(fmt));
+
+	DPU_REG_WRITE(c, format_off + idx, src_format);
+	DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
+	DPU_REG_WRITE(c, op_mode_off + idx, opmode);
+
+	/* clear previous UBWC error */
+	DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pixel_ext *pe_ext)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u8 color;
+	u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+	const u32 bytemask = 0xff;
+	const u32 shortmask = 0xffff;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
+		return;
+
+	c = &ctx->hw;
+
+	/* program SW pixel extension override for all color components */
+	for (color = 0; color < DPU_MAX_PLANES; color++) {
+		/* color 2 has the same set of registers as color 1 */
+		if (color == 2)
+			continue;
+
+		lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+			((pe_ext->right_rpt[color] & bytemask) << 16)|
+			((pe_ext->left_ftch[color] & bytemask) << 8)|
+			(pe_ext->left_rpt[color] & bytemask);
+
+		tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+			((pe_ext->btm_rpt[color] & bytemask) << 16)|
+			((pe_ext->top_ftch[color] & bytemask) << 8)|
+			(pe_ext->top_rpt[color] & bytemask);
+
+		tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+			pe_ext->num_ext_pxls_top[color] +
+			pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+			((pe_ext->roi_w[color] +
+			pe_ext->num_ext_pxls_left[color] +
+			pe_ext->num_ext_pxls_right[color]) & shortmask);
+	}
+
+	/* color 0 */
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+			tot_req_pixels[0]);
+
+	/* color 1 and color 2 */
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+			tot_req_pixels[1]);
+
+	/* color 3 */
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+			tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *sspp,
+		struct dpu_hw_pixel_ext *pe,
+		void *scaler_cfg)
+{
+	u32 idx;
+	struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+	(void)pe;
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
+		|| !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+		return;
+
+	dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+			ctx->cap->sblk->scaler_blk.version,
+			sspp->layout.format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
+{
+	u32 idx;
+
+	if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+		return 0;
+
+	return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
+}
+
+/*
+ * dpu_hw_sspp_setup_rects - program source/destination rectangles and strides
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *cfg,
+		enum dpu_sspp_multirect_index rect_index)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+	u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
+		return;
+
+	c = &ctx->hw;
+
+	if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+		src_size_off = SSPP_SRC_SIZE;
+		src_xy_off = SSPP_SRC_XY;
+		out_size_off = SSPP_OUT_SIZE;
+		out_xy_off = SSPP_OUT_XY;
+	} else {
+		src_size_off = SSPP_SRC_SIZE_REC1;
+		src_xy_off = SSPP_SRC_XY_REC1;
+		out_size_off = SSPP_OUT_SIZE_REC1;
+		out_xy_off = SSPP_OUT_XY_REC1;
+	}
+
+	/* src and dest rect programming */
+	src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+	src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+		   drm_rect_width(&cfg->src_rect);
+	dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+	dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+		drm_rect_width(&cfg->dst_rect);
+
+	if (rect_index == DPU_SSPP_RECT_SOLO) {
+		ystride0 = (cfg->layout.plane_pitch[0]) |
+			(cfg->layout.plane_pitch[1] << 16);
+		ystride1 = (cfg->layout.plane_pitch[2]) |
+			(cfg->layout.plane_pitch[3] << 16);
+	} else {
+		ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+		ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
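+		/*
+		 * Each ystride register carries both rects: rect0 owns the
+		 * lower 16 bits, rect1 the upper 16, so only the other
+		 * rect's half is preserved here.
+		 */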
+		if (rect_index == DPU_SSPP_RECT_0) {
+			ystride0 = (ystride0 & 0xFFFF0000) |
+				(cfg->layout.plane_pitch[0] & 0x0000FFFF);
+			ystride1 = (ystride1 & 0xFFFF0000)|
+				(cfg->layout.plane_pitch[2] & 0x0000FFFF);
+		} else {
+			ystride0 = (ystride0 & 0x0000FFFF) |
+				((cfg->layout.plane_pitch[0] << 16) &
+				 0xFFFF0000);
+			ystride1 = (ystride1 & 0x0000FFFF) |
+				((cfg->layout.plane_pitch[2] << 16) &
+				 0xFFFF0000);
+		}
+	}
+
+	/* rectangle register programming */
+	DPU_REG_WRITE(c, src_size_off + idx, src_size);
+	DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
+	DPU_REG_WRITE(c, out_size_off + idx, dst_size);
+	DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
+
+	DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+	DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+}
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *cfg,
+		enum dpu_sspp_multirect_index rect_mode)
+{
+	int i;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (rect_mode == DPU_SSPP_RECT_SOLO) {
+		for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+			DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+					cfg->layout.plane_addr[i]);
+	} else if (rect_mode == DPU_SSPP_RECT_0) {
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+				cfg->layout.plane_addr[0]);
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+				cfg->layout.plane_addr[2]);
+	} else {
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+				cfg->layout.plane_addr[0]);
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+				cfg->layout.plane_addr[2]);
+	}
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
+		struct dpu_csc_cfg *data)
+{
+	u32 idx;
+	bool csc10 = false;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
+		return;
+
+	if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+		idx += CSC_10BIT_OFFSET;
+		csc10 = true;
+	}
+
+	dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color, enum
+		dpu_sspp_multirect_index rect_index)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+	else
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+				color);
+}
+
+static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+	DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
+		DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+		DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+				cfg->creq_lut >> 32);
+	} else {
+		DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+	}
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+	u32 qos_ctrl = 0;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (cfg->vblank_en) {
+		qos_ctrl |= ((cfg->creq_vblank &
+				SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+				SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+		qos_ctrl |= ((cfg->danger_vblank &
+				SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+				SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+		qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+	}
+
+	if (cfg->danger_safe_en)
+		qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cdp_cfg *cfg)
+{
+	u32 idx;
+	u32 cdp_cntl = 0;
+
+	if (!ctx || !cfg)
+		return;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (cfg->enable)
+		cdp_cntl |= BIT(0);
+	if (cfg->ubwc_meta_enable)
+		cdp_cntl |= BIT(1);
+	if (cfg->tile_amortize_enable)
+		cdp_cntl |= BIT(2);
+	if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
+		cdp_cntl |= BIT(3);
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
+static void _setup_layer_ops(struct dpu_hw_pipe *c,
+		unsigned long features)
+{
+	if (test_bit(DPU_SSPP_SRC, &features)) {
+		c->ops.setup_format = dpu_hw_sspp_setup_format;
+		c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+		c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+		c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+		c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+	}
+
+	if (test_bit(DPU_SSPP_QOS, &features)) {
+		c->ops.setup_danger_safe_lut =
+			dpu_hw_sspp_setup_danger_safe_lut;
+		c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
+		c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+	}
+
+	if (test_bit(DPU_SSPP_CSC, &features) ||
+		test_bit(DPU_SSPP_CSC_10BIT, &features))
+		c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+	if (dpu_hw_sspp_multirect_enabled(c->cap))
+		c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+	if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+		c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+		c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+	}
+
+	if (test_bit(DPU_SSPP_CDP, &features))
+		c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *catalog,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	if ((sspp < SSPP_MAX) && catalog && addr && b) {
+		for (i = 0; i < catalog->sspp_count; i++) {
+			if (sspp == catalog->sspp[i].id) {
+				b->base_off = addr;
+				b->blk_off = catalog->sspp[i].base;
+				b->length = catalog->sspp[i].len;
+				b->hwversion = catalog->hwversion;
+				b->log_mask = DPU_DBG_MASK_SSPP;
+				return &catalog->sspp[i];
+			}
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+		void __iomem *addr, struct dpu_mdss_cfg *catalog,
+		bool is_virtual_pipe)
+{
+	struct dpu_hw_pipe *hw_pipe;
+	struct dpu_sspp_cfg *cfg;
+	int rc;
+
+	if (!addr || !catalog)
+		return ERR_PTR(-EINVAL);
+
+	hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+	if (!hw_pipe)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(hw_pipe);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	hw_pipe->catalog = catalog;
+	hw_pipe->mdp = &catalog->mdp[0];
+	hw_pipe->idx = idx;
+	hw_pipe->cap = cfg;
+	_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+	rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return hw_pipe;
+
+blk_init_error:
+	kfree(hw_pipe);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
+{
+	if (ctx)
+		dpu_hw_blk_destroy(&ctx->base);
+	kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644
index 0000000..4d81e5f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -0,0 +1,424 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_pipe;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR		BIT(0)
+#define DPU_SSPP_FLIP_UD		BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90	BIT(2)
+#define DPU_SSPP_ROT_90			BIT(3)
+#define DPU_SSPP_SOLID_FILL		BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
+	(1UL << DPU_SSPP_SCALER_QSEED2) | \
+	(1UL << DPU_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+	DPU_SSPP_COMP_0,
+	DPU_SSPP_COMP_1_2,
+	DPU_SSPP_COMP_2,
+	DPU_SSPP_COMP_3,
+
+	DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Considering no benefit of such configs over
+ * SOLO mode and to keep the plane management simple,
+ * we dont support single rect multirect configs.
+ */
+enum dpu_sspp_multirect_index {
+	DPU_SSPP_RECT_SOLO = 0,
+	DPU_SSPP_RECT_0,
+	DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+	DPU_SSPP_MULTIRECT_NONE = 0,
+	DPU_SSPP_MULTIRECT_PARALLEL,
+	DPU_SSPP_MULTIRECT_TIME_MX,
+};
+
+enum {
+	DPU_FRAME_LINEAR,
+	DPU_FRAME_TILE_A4X,
+	DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+	DPU_SCALE_FILTER_NEAREST = 0,
+	DPU_SCALE_FILTER_BIL,
+	DPU_SCALE_FILTER_PCMN,
+	DPU_SCALE_FILTER_CA,
+	DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+	DPU_SCALE_ALPHA_PIXEL_REP,
+	DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+	DPU_SCALE_2D_4X4,
+	DPU_SCALE_2D_CIR,
+	DPU_SCALE_1D_SEP,
+	DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+	u32 strength;
+	u32 edge_thr;
+	u32 smooth_thr;
+	u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+	/* scaling factors are enabled for this input layer */
+	uint8_t enable_pxl_ext;
+
+	int init_phase_x[DPU_MAX_PLANES];
+	int phase_step_x[DPU_MAX_PLANES];
+	int init_phase_y[DPU_MAX_PLANES];
+	int phase_step_y[DPU_MAX_PLANES];
+
+	/*
+	 * Number of pixels of extension in the left, right, top and bottom
+	 * directions for all color components. The value for each color
+	 * component should be the sum of the fetch and repeat pixels.
+	 */
+	int num_ext_pxls_left[DPU_MAX_PLANES];
+	int num_ext_pxls_right[DPU_MAX_PLANES];
+	int num_ext_pxls_top[DPU_MAX_PLANES];
+	int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+	/*
+	 * Number of pixels that need to be overfetched from the source image
+	 * in the left, right, top and bottom directions for scaling.
+	 */
+	int left_ftch[DPU_MAX_PLANES];
+	int right_ftch[DPU_MAX_PLANES];
+	int top_ftch[DPU_MAX_PLANES];
+	int btm_ftch[DPU_MAX_PLANES];
+
+	/*
+	 * Number of pixels that need to be repeated in the left, right, top
+	 * and bottom directions for scaling.
+	 */
+	int left_rpt[DPU_MAX_PLANES];
+	int right_rpt[DPU_MAX_PLANES];
+	int top_rpt[DPU_MAX_PLANES];
+	int btm_rpt[DPU_MAX_PLANES];
+
+	uint32_t roi_w[DPU_MAX_PLANES];
+	uint32_t roi_h[DPU_MAX_PLANES];
+
+	/*
+	 * Filter type to be used for scaling in horizontal and vertical
+	 * directions
+	 */
+	enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+	enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_hw_pipe_cfg : Pipe description
+ * @layout:    format layout information for programming buffer to hardware
+ * @src_rect:  src ROI, caller takes into account the different operations
+ *             such as decimation, flip etc to program this field
+ * @dst_rect:  destination ROI.
+ * @index:     index of the rectangle of SSPP
+ * @mode:      parallel or time multiplex multirect mode
+ */
+struct dpu_hw_pipe_cfg {
+	struct dpu_hw_fmt_layout layout;
+	struct drm_rect src_rect;
+	struct drm_rect dst_rect;
+	enum dpu_sspp_multirect_index index;
+	enum dpu_sspp_multirect_mode mode;
+};
+
+/**
+ * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generating the danger level based on fill level
+ * @safe_lut: LUT for generating the safe level based on fill level
+ * @creq_lut: LUT for generating the creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_pipe_qos_cfg {
+	u32 danger_lut;
+	u32 safe_lut;
+	u64 creq_lut;
+	u32 creq_vblank;
+	u32 danger_vblank;
+	bool vblank_en;
+	bool danger_safe_en;
+};
+
+/**
+ * CDP preload ahead address sizes
+ */
+enum {
+	DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+	DPU_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of request to preload ahead
+ *	DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ *	DPU_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_pipe_cdp_cfg {
+	bool enable;
+	bool ubwc_meta_enable;
+	bool tile_amortize_enable;
+	u32 preload_ahead;
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+	u64 size;
+	u64 time;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe.
+ * Assumption is these functions will be called after clocks are enabled.
+ */
+struct dpu_hw_sspp_ops {
+	/**
+	 * setup_format - setup pixel format cropping rectangle, flip
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 * @flags: Extra flags for format config
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_format)(struct dpu_hw_pipe *ctx,
+			const struct dpu_format *fmt, u32 flags,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_rects - setup pipe ROI rectangles
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_rects)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_cfg *cfg,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_pe - setup pipe pixel extension
+	 * @ctx: Pointer to pipe context
+	 * @pe_ext: Pointer to pixel ext settings
+	 */
+	void (*setup_pe)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pixel_ext *pe_ext);
+
+	/**
+	 * setup_sourceaddress - setup pipe source addresses
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_cfg *cfg,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_csc - setup color space conversion
+	 * @ctx: Pointer to pipe context
+	 * @data: Pointer to config structure
+	 */
+	void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+
+	/**
+	 * setup_solidfill - enable/disable colorfill
+	 * @ctx: Pointer to pipe context
+	 * @const_color: Fill color value
+	 * @flags: Pipe flags
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_multirect - setup multirect configuration
+	 * @ctx: Pointer to pipe context
+	 * @index: rectangle index in multirect
+	 * @mode: parallel fetch / time multiplex multirect mode
+	 */
+	void (*setup_multirect)(struct dpu_hw_pipe *ctx,
+			enum dpu_sspp_multirect_index index,
+			enum dpu_sspp_multirect_mode mode);
+
+	/**
+	 * setup_sharpening - setup sharpening
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to config structure
+	 */
+	void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_sharp_cfg *cfg);
+
+	/**
+	 * setup_danger_safe_lut - setup danger safe LUTs
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_creq_lut - setup CREQ LUT
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_qos_ctrl - setup QoS control
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_histogram - setup histograms
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to histogram configuration
+	 */
+	void (*setup_histogram)(struct dpu_hw_pipe *ctx,
+			void *cfg);
+
+	/**
+	 * setup_scaler - setup scaler
+	 * @ctx: Pointer to pipe context
+	 * @pipe_cfg: Pointer to pipe configuration
+	 * @pe_cfg: Pointer to pixel extension configuration
+	 * @scaler_cfg: Pointer to scaler configuration
+	 */
+	void (*setup_scaler)(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *pipe_cfg,
+		struct dpu_hw_pixel_ext *pe_cfg,
+		void *scaler_cfg);
+
+	/**
+	 * get_scaler_ver - get scaler h/w version
+	 * @ctx: Pointer to pipe context
+	 */
+	u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
+
+	/**
+	 * setup_cdp - setup client driven prefetch
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to cdp configuration
+	 */
+	void (*setup_cdp)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_cdp_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_pipe - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_pipe {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+	struct dpu_mdss_cfg *catalog;
+	struct dpu_mdp_cfg *mdp;
+
+	/* Pipe */
+	enum dpu_sspp idx;
+	const struct dpu_sspp_cfg *cap;
+
+	/* Ops */
+	struct dpu_hw_sspp_ops ops;
+};
+
+/**
+ * to_dpu_hw_pipe - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_pipe, base);
+}
+
+/**
+ * dpu_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once before accessing every pipe.
+ * @idx:  Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ * @is_virtual_pipe: is this a virtual pipe
+ */
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+		void __iomem *addr, struct dpu_mdss_cfg *catalog,
+		bool is_virtual_pipe);
+
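+/*
+ * Usage sketch (illustrative, not part of this patch): the return value
+ * is an ERR_PTR on failure, so callers are expected to do e.g.
+ *
+ *	struct dpu_hw_pipe *pipe =
+ *		dpu_hw_sspp_init(SSPP_VIG0, mmio, catalog, false);
+ *	if (IS_ERR(pipe))
+ *		return PTR_ERR(pipe);
+ */
+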
+/**
+ * dpu_hw_sspp_destroy(): Destroys SSPP driver context;
+ * should be called during HW pipe cleanup.
+ * @ctx:  Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+
+#endif /*_DPU_HW_SSPP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644
index 0000000..db2798e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -0,0 +1,398 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define SSPP_SPARE                        0x28
+#define UBWC_STATIC                       0x144
+
+#define FLD_SPLIT_DISPLAY_CMD             BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN          BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX             BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX             BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS                     0x360
+#define SAFE_STATUS                       0x364
+
+#define TE_LINE_INTERVAL                  0x3F4
+
+#define TRAFFIC_SHAPER_EN                 BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num)     (0x030 + (num * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + (num * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
+
+#define MDP_WD_TIMER_0_CTL                0x380
+#define MDP_WD_TIMER_0_CTL2               0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE         0x388
+#define MDP_WD_TIMER_1_CTL                0x390
+#define MDP_WD_TIMER_1_CTL2               0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE         0x398
+#define MDP_WD_TIMER_2_CTL                0x420
+#define MDP_WD_TIMER_2_CTL2               0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE         0x428
+#define MDP_WD_TIMER_3_CTL                0x430
+#define MDP_WD_TIMER_3_CTL2               0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE         0x438
+#define MDP_WD_TIMER_4_CTL                0x440
+#define MDP_WD_TIMER_4_CTL2               0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE         0x448
+
+#define MDP_TICK_COUNT                    16
+#define XO_CLK_RATE                       19200
+#define MS_TICKS_IN_SEC                   1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+	((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
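+/* e.g. for a 60 fps display: (1000 * 19200) / (16 * 60) = 20000 */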
+
+#define DCE_SEL                           0x450
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+		struct split_pipe_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 upper_pipe = 0;
+	u32 lower_pipe = 0;
+
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+
+	if (cfg->en) {
+		if (cfg->mode == INTF_MODE_CMD) {
+			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+			/* interface controlling sw trigger */
+			if (cfg->intf == INTF_2)
+				lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+			else
+				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+			upper_pipe = lower_pipe;
+		} else {
+			if (cfg->intf == INTF_2) {
+				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+				upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+			} else {
+				lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+				upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+			}
+		}
+	}
+
+	DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+	DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+	DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+	DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
+		struct cdm_output_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 out_ctl = 0;
+
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+
+	if (cfg->intf_en)
+		out_ctl |= BIT(19);
+
+	DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+		enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_off, bit_off;
+	u32 reg_val, new_val;
+	bool clk_forced_on;
+
+	if (!mdp)
+		return false;
+
+	c = &mdp->hw;
+
+	if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+		return false;
+
+	reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+	bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+	reg_val = DPU_REG_READ(c, reg_off);
+
+	if (enable)
+		new_val = reg_val | BIT(bit_off);
+	else
+		new_val = reg_val & ~BIT(bit_off);
+
+	DPU_REG_WRITE(c, reg_off, new_val);
+
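+	/* true if the clock was not already forced on before this call */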
+	clk_forced_on = !(reg_val & BIT(bit_off));
+
+	return clk_forced_on;
+}
+
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+		struct dpu_danger_safe_status *status)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 value;
+
+	if (!mdp || !status)
+		return;
+
+	c = &mdp->hw;
+
+	value = DPU_REG_READ(c, DANGER_STATUS);
+	status->mdp = (value >> 0) & 0x3;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
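+/*
+ * Worked example (illustrative): each client has a 2-bit danger level, so
+ * a DANGER_STATUS read of 0x10 means SSPP_VIG0 (bits 5:4) is at level 1
+ * while MDP and all other pipes report 0.
+ */
+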
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+		struct dpu_vsync_source_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+	static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+	if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+		return;
+
+	c = &mdp->hw;
+	reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
+	for (i = 0; i < cfg->pp_count; i++) {
+		int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+		if (pp_idx >= ARRAY_SIZE(pp_offset))
+			continue;
+
+		reg &= ~(0xf << pp_offset[pp_idx]);
+		reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+	}
+	DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+	if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+			cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+		switch (cfg->vsync_source) {
+		case DPU_VSYNC_SOURCE_WD_TIMER_4:
+			wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_4_CTL;
+			wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_3:
+			wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_3_CTL;
+			wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_2:
+			wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_2_CTL;
+			wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_1:
+			wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_1_CTL;
+			wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_0:
+		default:
+			wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_0_CTL;
+			wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+			break;
+		}
+
+		DPU_REG_WRITE(c, wd_load_value,
+			CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+		DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+		reg = DPU_REG_READ(c, wd_ctl2);
+		reg |= BIT(8);		/* enable heartbeat timer */
+		reg |= BIT(0);		/* enable WD timer */
+		DPU_REG_WRITE(c, wd_ctl2, reg);
+
+		/* make sure that timers are enabled/disabled for vsync state */
+		wmb();
+	}
+}
+
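+/*
+ * Illustrative usage sketch, not part of this patch: route PINGPONG_0 to
+ * watchdog timer 0 as a 60 fps software vsync source.
+ */
+#if 0
+	struct dpu_vsync_source_cfg vsync_cfg = {
+		.pp_count = 1,
+		.frame_rate = 60,
+		.ppnumber = { PINGPONG_0 },
+		.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0,
+	};
+
+	mdp->ops.setup_vsync_source(mdp, &vsync_cfg);
+#endif
+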
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+		struct dpu_danger_safe_status *status)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 value;
+
+	if (!mdp || !status)
+		return;
+
+	c = &mdp->hw;
+
+	value = DPU_REG_READ(c, SAFE_STATUS);
+	status->mdp = (value >> 0) & 0x1;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+static void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_blk_reg_map c;
+
+	if (!mdp || !m)
+		return;
+
+	if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
+		return;
+
+	/* force blk offset to zero to access beginning of register region */
+	c = mdp->hw;
+	c.blk_off = 0x0;
+	DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+	struct dpu_hw_blk_reg_map *c;
+
+	if (!mdp)
+		return;
+
+	c = &mdp->hw;
+
+	DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+	ops->setup_cdm_output = dpu_hw_setup_cdm_output;
+	ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+	ops->get_danger_status = dpu_hw_get_danger_status;
+	ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+	ops->get_safe_status = dpu_hw_get_safe_status;
+	ops->reset_ubwc = dpu_hw_reset_ubwc;
+	ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
+static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
+		const struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	if (!m || !addr || !b)
+		return ERR_PTR(-EINVAL);
+
+	for (i = 0; i < m->mdp_count; i++) {
+		if (mdp == m->mdp[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mdp[i].base;
+			b->length = m->mdp[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_TOP;
+			return &m->mdp[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_mdp *mdp;
+	const struct dpu_mdp_cfg *cfg;
+	int rc;
+
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
+	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+	if (!mdp)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _top_offset(idx, m, addr, &mdp->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(mdp);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	mdp->idx = idx;
+	mdp->caps = cfg;
+	_setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+	rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+
+	return mdp;
+
+blk_init_error:
+	kzfree(mdp);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+	if (mdp)
+		dpu_hw_blk_destroy(&mdp->base);
+	kfree(mdp);
+}
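+
+/*
+ * Illustrative usage sketch, not part of this patch: bring up and tear down
+ * the TOP block given an already mapped mmio base and a parsed catalog.
+ */
+#if 0
+	struct dpu_hw_mdp *mdp;
+
+	mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, catalog);
+	if (IS_ERR(mdp))
+		return PTR_ERR(mdp);
+	/* ... program the block through mdp->ops ... */
+	dpu_hw_mdp_destroy(mdp);
+#endif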
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644
index 0000000..899925a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en        : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+	bool en;
+	bool rd_client;
+	u32 client_id;
+	u32 bpc_denom;
+	u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en        : Enable/disable dual pipe configuration
+ * @mode      : Panel interface mode
+ * @intf      : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ *              flushed
+ */
+struct split_pipe_cfg {
+	bool en;
+	enum dpu_intf_mode mode;
+	enum dpu_intf intf;
+	bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @intf_en   : enable/disable interface output
+ */
+struct cdm_output_cfg {
+	bool intf_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+	u8 mdp;
+	u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure the vsync source and, if
+ *                               required, the watchdog timers
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+	u32 pp_count;
+	u32 frame_rate;
+	u32 ppnumber[PINGPONG_MAX];
+	u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+	/** setup_split_pipe() : Registers are not double buffered; this
+	 * function should be called before timing control enable
+	 * @mdp  : mdp top context driver
+	 * @cfg  : upper and lower part of pipe configuration
+	 */
+	void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+			struct split_pipe_cfg *cfg);
+
+	/**
+	 * setup_cdm_output() : Setup selection control of the cdm data path
+	 * @mdp  : mdp top context driver
+	 * @cfg  : cdm output configuration
+	 */
+	void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
+			struct cdm_output_cfg *cfg);
+
+	/**
+	 * setup_traffic_shaper() : Setup traffic shaper control
+	 * @mdp  : mdp top context driver
+	 * @cfg  : traffic shaper configuration
+	 */
+	void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+			struct traffic_shaper_cfg *cfg);
+
+	/**
+	 * setup_clk_force_ctrl - set clock force control
+	 * @mdp: mdp top context driver
+	 * @clk_ctrl: clock to be controlled
+	 * @enable: force on enable
+	 * @return: if the clock is forced-on by this function
+	 */
+	bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+			enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+	/**
+	 * get_danger_status - get danger status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+			struct dpu_danger_safe_status *status);
+
+	/**
+	 * setup_vsync_source - setup vsync source configuration details
+	 * @mdp: mdp top context driver
+	 * @cfg: vsync source selection configuration
+	 */
+	void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+				struct dpu_vsync_source_cfg *cfg);
+
+	/**
+	 * get_safe_status - get safe status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+			struct dpu_danger_safe_status *status);
+
+	/**
+	 * reset_ubwc - reset top level UBWC configuration
+	 * @mdp: mdp top context driver
+	 * @m: pointer to mdss catalog data
+	 */
+	void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
+
+	/**
+	 * intf_audio_select - select the external interface for audio
+	 * @mdp: mdp top context driver
+	 */
+	void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* top */
+	enum dpu_mdp idx;
+	const struct dpu_mdp_cfg *caps;
+
+	/* ops */
+	struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * to_dpu_hw_mdp - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_mdp, base);
+}
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed idx
+ * @idx:  Interface index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 1ba571e..4cabae4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -92,59 +92,6 @@
 	return &dpu_hw_util_log_mask;
 }
 
-void dpu_set_scaler_v2(struct dpu_hw_scaler3_cfg *cfg,
-		const struct dpu_drm_scaler_v2 *scale_v2)
-{
-	int i;
-
-	cfg->enable = scale_v2->enable;
-	cfg->dir_en = scale_v2->dir_en;
-
-	for (i = 0; i < DPU_MAX_PLANES; i++) {
-		cfg->init_phase_x[i] = scale_v2->init_phase_x[i];
-		cfg->phase_step_x[i] = scale_v2->phase_step_x[i];
-		cfg->init_phase_y[i] = scale_v2->init_phase_y[i];
-		cfg->phase_step_y[i] = scale_v2->phase_step_y[i];
-
-		cfg->preload_x[i] = scale_v2->preload_x[i];
-		cfg->preload_y[i] = scale_v2->preload_y[i];
-		cfg->src_width[i] = scale_v2->src_width[i];
-		cfg->src_height[i] = scale_v2->src_height[i];
-	}
-
-	cfg->dst_width = scale_v2->dst_width;
-	cfg->dst_height = scale_v2->dst_height;
-
-	cfg->y_rgb_filter_cfg = scale_v2->y_rgb_filter_cfg;
-	cfg->uv_filter_cfg = scale_v2->uv_filter_cfg;
-	cfg->alpha_filter_cfg = scale_v2->alpha_filter_cfg;
-	cfg->blend_cfg = scale_v2->blend_cfg;
-
-	cfg->lut_flag = scale_v2->lut_flag;
-	cfg->dir_lut_idx = scale_v2->dir_lut_idx;
-	cfg->y_rgb_cir_lut_idx = scale_v2->y_rgb_cir_lut_idx;
-	cfg->uv_cir_lut_idx = scale_v2->uv_cir_lut_idx;
-	cfg->y_rgb_sep_lut_idx = scale_v2->y_rgb_sep_lut_idx;
-	cfg->uv_sep_lut_idx = scale_v2->uv_sep_lut_idx;
-
-	cfg->de.enable = scale_v2->de.enable;
-	cfg->de.sharpen_level1 = scale_v2->de.sharpen_level1;
-	cfg->de.sharpen_level2 = scale_v2->de.sharpen_level2;
-	cfg->de.clip = scale_v2->de.clip;
-	cfg->de.limit = scale_v2->de.limit;
-	cfg->de.thr_quiet = scale_v2->de.thr_quiet;
-	cfg->de.thr_dieout = scale_v2->de.thr_dieout;
-	cfg->de.thr_low = scale_v2->de.thr_low;
-	cfg->de.thr_high = scale_v2->de.thr_high;
-	cfg->de.prec_shift = scale_v2->de.prec_shift;
-
-	for (i = 0; i < DPU_MAX_DE_CURVES; i++) {
-		cfg->de.adjust_a[i] = scale_v2->de.adjust_a[i];
-		cfg->de.adjust_b[i] = scale_v2->de.adjust_b[i];
-		cfg->de.adjust_c[i] = scale_v2->de.adjust_c[i];
-	}
-}
-
 static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
 		struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
 {
@@ -419,34 +366,3 @@
 	DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
 	DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
 }
-
-/**
- * _dpu_copy_formats   - copy formats from src_list to dst_list
- * @dst_list:          pointer to destination list where to copy formats
- * @dst_list_size:     size of destination list
- * @dst_list_pos:      starting position on the list where to copy formats
- * @src_list:          pointer to source list where to copy formats from
- * @src_list_size:     size of source list
- * Return: number of elements populated
- */
-uint32_t dpu_copy_formats(
-		struct dpu_format_extended *dst_list,
-		uint32_t dst_list_size,
-		uint32_t dst_list_pos,
-		const struct dpu_format_extended *src_list,
-		uint32_t src_list_size)
-{
-	uint32_t cur_pos, i;
-
-	if (!dst_list || !src_list || (dst_list_pos >= (dst_list_size - 1)))
-		return 0;
-
-	for (i = 0, cur_pos = dst_list_pos;
-		(cur_pos < (dst_list_size - 1)) && (i < src_list_size)
-		&& src_list[i].fourcc_format; ++i, ++cur_pos)
-		dst_list[cur_pos] = src_list[i];
-
-	dst_list[cur_pos].fourcc_format = 0;
-
-	return i;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 42f1b22..1240f50 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -333,9 +333,6 @@
 
 void *dpu_hw_util_get_dir(void);
 
-void dpu_set_scaler_v2(struct dpu_hw_scaler3_cfg *cfg,
-		const struct dpu_drm_scaler_v2 *scale_v2);
-
 void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
 		struct dpu_hw_scaler3_cfg *scaler3_cfg,
 		u32 scaler_offset, u32 scaler_version,
@@ -348,11 +345,4 @@
 		u32 csc_reg_off,
 		struct dpu_csc_cfg *data, bool csc10);
 
-uint32_t dpu_copy_formats(
-		struct dpu_format_extended *dst_list,
-		uint32_t dst_list_size,
-		uint32_t dst_list_pos,
-		const struct dpu_format_extended *src_list,
-		uint32_t src_list_size);
-
 #endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644
index 0000000..d439055
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_dbg.h"
+
+#define VBIF_VERSION			0x0000
+#define VBIF_CLK_FORCE_CTRL0		0x0008
+#define VBIF_CLK_FORCE_CTRL1		0x000C
+#define VBIF_QOS_REMAP_00		0x0020
+#define VBIF_QOS_REMAP_01		0x0024
+#define VBIF_QOS_REMAP_10		0x0028
+#define VBIF_QOS_REMAP_11		0x002C
+#define VBIF_WRITE_GATHER_EN		0x00AC
+#define VBIF_IN_RD_LIM_CONF0		0x00B0
+#define VBIF_IN_RD_LIM_CONF1		0x00B4
+#define VBIF_IN_RD_LIM_CONF2		0x00B8
+#define VBIF_IN_WR_LIM_CONF0		0x00C0
+#define VBIF_IN_WR_LIM_CONF1		0x00C4
+#define VBIF_IN_WR_LIM_CONF2		0x00C8
+#define VBIF_OUT_RD_LIM_CONF0		0x00D0
+#define VBIF_OUT_WR_LIM_CONF0		0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0	0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1	0x0164
+#define VBIF_XIN_PND_ERR		0x0190
+#define VBIF_XIN_SRC_ERR		0x0194
+#define VBIF_XIN_CLR_ERR		0x019C
+#define VBIF_XIN_HALT_CTRL0		0x0200
+#define VBIF_XIN_HALT_CTRL1		0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000	0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000	0x0590
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+		u32 *pnd_errors, u32 *src_errors)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 pnd, src;
+
+	if (!vbif)
+		return;
+	c = &vbif->hw;
+	pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+	src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+	if (pnd_errors)
+		*pnd_errors = pnd;
+	if (src_errors)
+		*src_errors = src;
+
+	DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+		u32 xin_id, u32 value)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_off;
+	u32 bit_off;
+	u32 reg_val;
+
+	/*
+	 * Assume 4 bits per bit field, 8 fields per 32-bit register so
+	 * 16 bit fields maximum across two registers
+	 */
+	if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+		return;
+
+	c = &vbif->hw;
+
+	if (xin_id >= 8) {
+		xin_id -= 8;
+		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+	} else {
+		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+	}
+	bit_off = (xin_id & 0x7) * 4;
+	reg_val = DPU_REG_READ(c, reg_off);
+	reg_val &= ~(0x7 << bit_off);
+	reg_val |= (value & 0x7) << bit_off;
+	DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
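+/*
+ * Worked example (illustrative): xin_id 10 selects field 2 of
+ * VBIF_OUT_AXI_AMEMTYPE_CONF1, so the value is written at bit offset
+ * (10 - 8) * 4 = 8.
+ */
+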
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+		u32 xin_id, bool rd, u32 limit)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = DPU_REG_READ(c, reg_off);
+	reg_val &= ~(0xFF << bit_off);
+	reg_val |= (limit) << bit_off;
+	DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+		u32 xin_id, bool rd)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+	u32 limit;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = DPU_REG_READ(c, reg_off);
+	limit = (reg_val >> bit_off) & 0xFF;
+
+	return limit;
+}
+
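+/*
+ * Worked example (illustrative): with four 8-bit limit fields per register,
+ * xin_id 5 with rd == true lands in VBIF_IN_RD_LIM_CONF1
+ * (0x00B0 + (5 / 4) * 4) at bit offset (5 % 4) * 8 = 8.
+ */
+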
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+		u32 xin_id, bool enable)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+	if (enable)
+		reg_val |= BIT(xin_id);
+	else
+		reg_val &= ~BIT(xin_id);
+
+	DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+		u32 xin_id)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+	return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+		u32 xin_id, u32 level, u32 remap_level)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+	if (!vbif)
+		return;
+
+	c = &vbif->hw;
+
+	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+	reg_shift = (xin_id & 0x7) * 4;
+
+	reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+	reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+	mask = 0x7 << reg_shift;
+
+	reg_val &= ~mask;
+	reg_val |= (remap_level << reg_shift) & mask;
+
+	reg_val_lvl &= ~mask;
+	reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+	DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+	DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
+
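+/*
+ * Worked example (illustrative): xin_id 9 at priority level 1 uses offset
+ * ((9 & 0x8) >> 3) * 4 + 1 * 8 = 0xC past the remap base registers, with
+ * the 3-bit field placed at bit (9 & 0x7) * 4 = 4.
+ */
+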
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_val;
+
+	if (!vbif || xin_id >= MAX_XIN_COUNT)
+		return;
+
+	c = &vbif->hw;
+
+	reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+	reg_val |= BIT(xin_id);
+	DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+		unsigned long cap)
+{
+	ops->set_limit_conf = dpu_hw_set_limit_conf;
+	ops->get_limit_conf = dpu_hw_get_limit_conf;
+	ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+	ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+	if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+		ops->set_qos_remap = dpu_hw_set_qos_remap;
+	ops->set_mem_type = dpu_hw_set_mem_type;
+	ops->clear_errors = dpu_hw_clear_errors;
+	ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
+		const struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->vbif_count; i++) {
+		if (vbif == m->vbif[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->vbif[i].base;
+			b->length = m->vbif[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_VBIF;
+			return &m->vbif[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_vbif *c;
+	const struct dpu_vbif_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _top_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_vbif_ops(&c->ops, c->cap->features);
+
+	/* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+	return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+	kfree(vbif);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644
index 0000000..471ff67
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_vbif_ops {
+	/**
+	 * set_limit_conf - set transaction limit config
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @rd: true for read limit; false for write limit
+	 * @limit: outstanding transaction limit
+	 */
+	void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, bool rd, u32 limit);
+
+	/**
+	 * get_limit_conf - get transaction limit config
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @rd: true for read limit; false for write limit
+	 * @return: outstanding transaction limit
+	 */
+	u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, bool rd);
+
+	/**
+	 * set_halt_ctrl - set halt control
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @enable: halt control enable
+	 */
+	void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, bool enable);
+
+	/**
+	 * get_halt_ctrl - get halt control
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @return: halt control enable
+	 */
+	bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+			u32 xin_id);
+
+	/**
+	 * set_qos_remap - set QoS priority remap
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @level: priority level
+	 * @remap_level: remapped level
+	 */
+	void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, u32 level, u32 remap_level);
+
+	/**
+	 * set_mem_type - set memory type
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @value: memory type value
+	 */
+	void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, u32 value);
+
+	/**
+	 * clear_errors - clear any vbif errors
+	 *	This function clears any detected pending/source errors
+	 *	on the VBIF interface, and optionally returns the detected
+	 *	error mask(s).
+	 * @vbif: vbif context driver
+	 * @pnd_errors: pointer to pending error reporting variable
+	 * @src_errors: pointer to source error reporting variable
+	 */
+	void (*clear_errors)(struct dpu_hw_vbif *vbif,
+		u32 *pnd_errors, u32 *src_errors);
+
+	/**
+	 * set_write_gather_en - set write_gather enable
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 */
+	void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+	/* base */
+	struct dpu_hw_blk_reg_map hw;
+
+	/* vbif */
+	enum dpu_vbif idx;
+	const struct dpu_vbif_cfg *cap;
+
+	/* ops */
+	struct dpu_hw_vbif_ops ops;
+};
+
+/**
+ * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx:  Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m:    Pointer to mdss catalog data
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m);
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644
index 0000000..5b2bc9b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/**
+ * MDP TOP block register offsets and bit field definitions
+ */
+#define DISP_INTF_SEL                   0x004
+#define INTR_EN                         0x010
+#define INTR_STATUS                     0x014
+#define INTR_CLEAR                      0x018
+#define INTR2_EN                        0x008
+#define INTR2_STATUS                    0x00c
+#define INTR2_CLEAR                     0x02c
+#define HIST_INTR_EN                    0x01c
+#define HIST_INTR_STATUS                0x020
+#define HIST_INTR_CLEAR                 0x024
+#define INTF_INTR_EN                    0x1C0
+#define INTF_INTR_STATUS                0x1C4
+#define INTF_INTR_CLEAR                 0x1C8
+#define SPLIT_DISPLAY_EN                0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL   0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN        0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN        0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN        0x308
+#define HW_EVENTS_CTL                   0x37C
+#define CLK_CTRL3                       0x3A8
+#define CLK_STATUS3                     0x3AC
+#define CLK_CTRL4                       0x3B0
+#define CLK_STATUS4                     0x3B4
+#define CLK_CTRL5                       0x3B8
+#define CLK_STATUS5                     0x3BC
+#define CLK_CTRL7                       0x3D0
+#define CLK_STATUS7                     0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL   0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL  0x3F4
+#define INTF_SW_RESET_MASK              0x3FC
+#define HDMI_DP_CORE_SELECT             0x408
+#define MDP_OUT_CTL_0                   0x410
+#define MDP_VSYNC_SEL                   0x414
+#define DCE_SEL                         0x450
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
new file mode 100644
index 0000000..b557687
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -0,0 +1,203 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "dpu_io_util.h"
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+	int i;
+
+	for (i = num_clk - 1; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+}
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+		rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
+		if (rc) {
+			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	for (i--; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+
+	return rc;
+}
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		if (clk_arry[i].clk) {
+			if (clk_arry[i].type != DSS_CLK_AHB) {
+				DEV_DBG("%pS->%s: '%s' rate %ld\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name,
+					clk_arry[i].rate);
+				rc = clk_set_rate(clk_arry[i].clk,
+					clk_arry[i].rate);
+				if (rc) {
+					DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+					break;
+				}
+			}
+		} else {
+			DEV_ERR("%pS->%s: '%s' is not available\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			rc = -EPERM;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+	int i, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			DEV_DBG("%pS->%s: enable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			if (clk_arry[i].clk) {
+				rc = clk_prepare_enable(clk_arry[i].clk);
+				if (rc)
+					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+			} else {
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+				rc = -EPERM;
+			}
+
+			if (rc) {
+				msm_dss_enable_clk(&clk_arry[i],
+					i, false);
+				break;
+			}
+		}
+	} else {
+		for (i = num_clk - 1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: disable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+
+			if (clk_arry[i].clk)
+				clk_disable_unprepare(clk_arry[i].clk);
+			else
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+		}
+	}
+
+	return rc;
+}
+
+int msm_dss_parse_clock(struct platform_device *pdev,
+			struct dss_module_power *mp)
+{
+	int i, rc = 0;
+	const char *clock_name;
+	int num_clk = 0;
+
+	if (!pdev || !mp)
+		return -EINVAL;
+
+	mp->num_clk = 0;
+	num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
+	if (num_clk <= 0) {
+		pr_debug("clocks are not defined\n");
+		return 0;
+	}
+
+	mp->clk_config = devm_kcalloc(&pdev->dev,
+				      num_clk, sizeof(struct dss_clk),
+				      GFP_KERNEL);
+	if (!mp->clk_config)
+		return -ENOMEM;
+
+	for (i = 0; i < num_clk; i++) {
+		rc = of_property_read_string_index(pdev->dev.of_node,
+						   "clock-names", i,
+						   &clock_name);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+				i);
+			break;
+		}
+		strlcpy(mp->clk_config[i].clk_name, clock_name,
+			sizeof(mp->clk_config[i].clk_name));
+
+		mp->clk_config[i].type = DSS_CLK_AHB;
+	}
+
+	rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+		goto err;
+	}
+
+	rc = of_clk_set_defaults(pdev->dev.of_node, false);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+		goto err;
+	}
+
+	for (i = 0; i < num_clk; i++) {
+		unsigned long rate = clk_get_rate(mp->clk_config[i].clk);
+
+		if (!rate)
+			continue;
+		mp->clk_config[i].rate = rate;
+		mp->clk_config[i].type = DSS_CLK_PCLK;
+	}
+
+	mp->num_clk = num_clk;
+	return 0;
+
+err:
+	msm_dss_put_clk(mp->clk_config, num_clk);
+	return rc;
+}
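+
+/*
+ * Illustrative usage sketch, not part of this patch: a driver probe path
+ * (with a local struct dss_module_power mp) would parse, rate-set and
+ * enable the clocks in that order.
+ */
+#if 0
+	rc = msm_dss_parse_clock(pdev, &mp);
+	if (rc)
+		return rc;
+
+	rc = msm_dss_clk_set_rate(mp.clk_config, mp.num_clk);
+	if (!rc)
+		rc = msm_dss_enable_clk(mp.clk_config, mp.num_clk, true);
+#endif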
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
new file mode 100644
index 0000000..bc07381
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IO_UTIL_H__
+#define __DPU_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_gpio {
+	unsigned int gpio;
+	unsigned int value;
+	char gpio_name[32];
+};
+
+enum dss_clk_type {
+	DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+	DSS_CLK_PCLK,
+};
+
+struct dss_clk {
+	struct clk *clk; /* clk handle */
+	char clk_name[32];
+	enum dss_clk_type type;
+	unsigned long rate;
+	unsigned long max_rate;
+};
+
+struct dss_module_power {
+	unsigned int num_gpio;
+	struct dss_gpio *gpio_config;
+	unsigned int num_clk;
+	struct dss_clk *clk_config;
+};
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int msm_dss_parse_clock(struct platform_device *pdev,
+		struct dss_module_power *mp);
+#endif /* __DPU_IO_UTIL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
new file mode 100644
index 0000000..d5e6ce0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_irq.h"
+#include "dpu_core_irq.h"
+
+irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	return dpu_core_irq(dpu_kms);
+}
+
+void dpu_irq_preinstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	if (!dpu_kms->dev || !dpu_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return;
+	}
+
+	dpu_core_irq_preinstall(dpu_kms);
+}
+
+int dpu_irq_postinstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	int rc;
+
+	if (!kms) {
+		DPU_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	rc = dpu_core_irq_postinstall(dpu_kms);
+
+	return rc;
+}
+
+void dpu_irq_uninstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	if (!kms) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	dpu_core_irq_uninstall(dpu_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
new file mode 100644
index 0000000..3e147f7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IRQ_H__
+#define __DPU_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * dpu_irq_controller - define MDSS level interrupt controller context
+ * @enabled_mask:	enable status of MDSS level interrupt
+ * @domain:		interrupt domain of this controller
+ */
+struct dpu_irq_controller {
+	unsigned long enabled_mask;
+	struct irq_domain *domain;
+};
+
+/**
+ * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		none
+ */
+void dpu_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		0 if success; error code otherwise
+ */
+int dpu_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_uninstall - uninstall MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		none
+ */
+void dpu_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq - MDSS level IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		interrupt handling status
+ */
+irqreturn_t dpu_irq(struct msm_kms *kms);
+
+#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644
index 0000000..74cc204
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -0,0 +1,1345 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+
+#include "dpu_kms.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_vbif.h"
+#include "dpu_encoder.h"
+#include "dpu_plane.h"
+#include "dpu_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+static const char * const iommu_ports[] = {
+		"mdp_0",
+};
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+static unsigned long dpu_iomap_size(struct platform_device *pdev,
+				    const char *name)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		DRM_ERROR("failed to get memory resource: %s\n", name);
+		return 0;
+	}
+
+	return resource_size(res);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+		bool danger_status)
+{
+	struct dpu_kms *kms = (struct dpu_kms *)s->private;
+	struct msm_drm_private *priv;
+	struct dpu_danger_safe_status status;
+	int i;
+
+	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+		DPU_ERROR("invalid arg(s)\n");
+		return 0;
+	}
+
+	priv = kms->dev->dev_private;
+	memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+	pm_runtime_get_sync(&kms->pdev->dev);
+	if (danger_status) {
+		seq_puts(s, "\nDanger signal status:\n");
+		if (kms->hw_mdp->ops.get_danger_status)
+			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+					&status);
+	} else {
+		seq_puts(s, "\nSafe signal status:\n");
+		if (kms->hw_mdp->ops.get_safe_status)
+			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+					&status);
+	}
+	pm_runtime_put_sync(&kms->pdev->dev);
+
+	seq_printf(s, "MDP     :  0x%x\n", status.mdp);
+
+	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+		seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
+				status.sspp[i]);
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+	return _dpu_danger_signal_status(s, true);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+	return _dpu_danger_signal_status(s, false);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+
+static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
+{
+	debugfs_remove_recursive(dpu_kms->debugfs_danger);
+	dpu_kms->debugfs_danger = NULL;
+}
+
+static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent)
+{
+	dpu_kms->debugfs_danger = debugfs_create_dir("danger",
+			parent);
+	if (!dpu_kms->debugfs_danger) {
+		DPU_ERROR("failed to create danger debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+			dpu_kms, &dpu_debugfs_danger_stats_fops);
+	debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+			dpu_kms, &dpu_debugfs_safe_stats_fops);
+
+	return 0;
+}
+
+static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+	struct dpu_debugfs_regset32 *regset;
+	struct dpu_kms *dpu_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	void __iomem *base;
+	uint32_t i, addr;
+
+	if (!s || !s->private)
+		return 0;
+
+	regset = s->private;
+
+	dpu_kms = regset->dpu_kms;
+	if (!dpu_kms || !dpu_kms->mmio)
+		return 0;
+
+	dev = dpu_kms->dev;
+	if (!dev)
+		return 0;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return 0;
+
+	base = dpu_kms->mmio + regset->offset;
+
+	/* insert padding spaces, if needed */
+	if (regset->offset & 0xF) {
+		seq_printf(s, "[%x]", regset->offset & ~0xF);
+		for (i = 0; i < (regset->offset & 0xF); i += 4)
+			seq_puts(s, "         ");
+	}
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+	/* main register output */
+	for (i = 0; i < regset->blk_len; i += 4) {
+		addr = regset->offset + i;
+		if ((addr & 0xF) == 0x0)
+			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+		seq_printf(s, " %08x", readl_relaxed(base + i));
+	}
+	seq_puts(s, "\n");
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	return 0;
+}
+
+static int dpu_debugfs_open_regset32(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations dpu_fops_regset32 = {
+	.open =		dpu_debugfs_open_regset32,
+	.read =		seq_read,
+	.llseek =	seq_lseek,
+	.release =	single_release,
+};
+
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+	if (regset) {
+		regset->offset = offset;
+		regset->blk_len = length;
+		regset->dpu_kms = dpu_kms;
+	}
+}
+
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent, struct dpu_debugfs_regset32 *regset)
+{
+	if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+		return NULL;
+
+	/* make sure offset is a multiple of 4 */
+	regset->offset = round_down(regset->offset, 4);
+
+	return debugfs_create_file(name, mode, parent,
+			regset, &dpu_fops_regset32);
+}
+
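+/*
+ * Illustrative usage sketch, not part of this patch: expose 0x100 bytes of
+ * the TOP register range as a debugfs file ("parent" is the caller's
+ * debugfs directory).
+ */
+#if 0
+	static struct dpu_debugfs_regset32 top_regset;
+
+	dpu_debugfs_setup_regset32(&top_regset, dpu_kms->catalog->mdp[0].base,
+			0x100, dpu_kms);
+	dpu_debugfs_create_regset32("top", 0400, parent, &top_regset);
+#endif
+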
+static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
+{
+	void *p;
+	int rc;
+
+	p = dpu_hw_util_get_log_mask_ptr();
+
+	if (!dpu_kms || !p)
+		return -EINVAL;
+
+	dpu_kms->debugfs_root = debugfs_create_dir("debug",
+					   dpu_kms->dev->primary->debugfs_root);
+	if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
+		DRM_ERROR("debugfs create_dir failed %ld\n",
+			  PTR_ERR(dpu_kms->debugfs_root));
+		return PTR_ERR(dpu_kms->debugfs_root);
+	}
+
+	rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
+	if (rc) {
+		DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
+		return rc;
+	}
+
+	/* allow root to be NULL */
+	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+
+	(void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
+	(void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
+	(void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
+
+	rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
+	if (rc) {
+		DPU_ERROR("failed to init perf %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+	/* don't need to NULL check debugfs_root */
+	if (dpu_kms) {
+		dpu_debugfs_vbif_destroy(dpu_kms);
+		dpu_debugfs_danger_destroy(dpu_kms);
+		dpu_debugfs_core_irq_destroy(dpu_kms);
+		debugfs_remove_recursive(dpu_kms->debugfs_root);
+	}
+}
+#else
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_prepare_commit(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct dpu_kms *dpu_kms;
+	struct msm_drm_private *priv;
+	struct drm_device *dev;
+	struct drm_encoder *encoder;
+
+	if (!kms)
+		return;
+	dpu_kms = to_dpu_kms(kms);
+	dev = dpu_kms->dev;
+
+	if (!dev || !dev->dev_private)
+		return;
+	priv = dev->dev_private;
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc != NULL)
+			dpu_encoder_prepare_commit(encoder);
+}
+
+/*
+ * Override the encoder enable since we need to setup the inline rotator and do
+ * some crtc magic before enabling any bridge that might be present.
+ */
+void dpu_kms_encoder_enable(struct drm_encoder *encoder)
+{
+	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
+	struct drm_crtc *crtc = encoder->crtc;
+
+	/* Forward this enable call to the commit hook */
+	if (funcs && funcs->commit)
+		funcs->commit(encoder);
+
+	if (crtc && crtc->state->active) {
+		trace_dpu_kms_enc_enable(DRMID(crtc));
+		dpu_crtc_commit_kickoff(crtc);
+	}
+}
+
+static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		/* If modeset is required, kickoff is run in encoder_enable */
+		if (drm_atomic_crtc_needs_modeset(crtc_state))
+			continue;
+
+		if (crtc->state->active) {
+			trace_dpu_kms_commit(DRMID(crtc));
+			dpu_crtc_commit_kickoff(crtc);
+		}
+	}
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct dpu_kms *dpu_kms;
+	struct msm_drm_private *priv;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	if (!kms || !old_state)
+		return;
+	dpu_kms = to_dpu_kms(kms);
+
+	if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
+		return;
+	priv = dpu_kms->dev->dev_private;
+
+	DPU_ATRACE_BEGIN("kms_complete_commit");
+
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		dpu_crtc_complete_commit(crtc, old_crtc_state);
+
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	int ret;
+
+	if (!kms || !crtc || !crtc->state) {
+		DPU_ERROR("invalid params\n");
+		return;
+	}
+
+	dev = crtc->dev;
+
+	if (!crtc->state->enable) {
+		DPU_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
+		return;
+	}
+
+	if (!crtc->state->active) {
+		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+		return;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		/*
+		 * Wait for post-flush if necessary to delay before
+		 * plane_cleanup. For example, wait for vsync in case of video
+		 * mode panels. This may be a no-op for command mode panels.
+		 */
+		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+		if (ret && ret != -EWOULDBLOCK) {
+			DPU_ERROR("wait for commit done returned %d\n", ret);
+			break;
+		}
+	}
+}
+
+static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+				    struct msm_drm_private *priv,
+				    struct dpu_kms *dpu_kms)
+{
+	struct drm_encoder *encoder = NULL;
+	int i, rc;
+
+	/* TODO: Support two independent DSI connectors */
+	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
+	if (IS_ERR_OR_NULL(encoder)) {
+		DPU_ERROR("encoder init failed for dsi display\n");
+		return;
+	}
+
+	priv->encoders[priv->num_encoders++] = encoder;
+
+	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+		if (!priv->dsi[i]) {
+			DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
+			return;
+		}
+
+		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+		if (rc) {
+			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+				i, rc);
+			continue;
+		}
+	}
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ *                           for underlying displays
+ * @dev:        Pointer to drm device structure
+ * @priv:       Pointer to private drm device data
+ * @dpu_kms:    Pointer to dpu kms structure
+ */
+static void _dpu_kms_setup_displays(struct drm_device *dev,
+				    struct msm_drm_private *priv,
+				    struct dpu_kms *dpu_kms)
+{
+	_dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+
+	/*
+	 * Extend this function to initialize other
+	 * types of displays
+	 */
+}
+
+static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	} else if (!dpu_kms->dev) {
+		DPU_ERROR("invalid dev\n");
+		return;
+	} else if (!dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid dev_private\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+	priv->num_crtcs = 0;
+
+	for (i = 0; i < priv->num_planes; i++)
+		priv->planes[i]->funcs->destroy(priv->planes[i]);
+	priv->num_planes = 0;
+
+	for (i = 0; i < priv->num_connectors; i++)
+		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+	priv->num_connectors = 0;
+
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+	priv->num_encoders = 0;
+}
+
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+	struct drm_device *dev;
+	struct drm_plane *primary_planes[MAX_PLANES], *plane;
+	struct drm_crtc *crtc;
+
+	struct msm_drm_private *priv;
+	struct dpu_mdss_cfg *catalog;
+
+	int primary_planes_idx = 0, i, ret;
+	int max_crtc_count;
+
+	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return -EINVAL;
+	}
+
+	dev = dpu_kms->dev;
+	priv = dev->dev_private;
+	catalog = dpu_kms->catalog;
+
+	/*
+	 * Create encoder and query display drivers to create
+	 * bridges and connectors
+	 */
+	_dpu_kms_setup_displays(dev, priv, dpu_kms);
+
+	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+	/* Create the planes */
+	for (i = 0; i < catalog->sspp_count; i++) {
+		bool primary = true;
+
+		if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
+			|| primary_planes_idx >= max_crtc_count)
+			primary = false;
+
+		plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
+				(1UL << max_crtc_count) - 1, 0);
+		if (IS_ERR(plane)) {
+			DPU_ERROR("dpu_plane_init failed\n");
+			ret = PTR_ERR(plane);
+			goto fail;
+		}
+		priv->planes[priv->num_planes++] = plane;
+
+		if (primary)
+			primary_planes[primary_planes_idx++] = plane;
+	}
+
+	max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+	/* Create one CRTC per encoder */
+	for (i = 0; i < max_crtc_count; i++) {
+		crtc = dpu_crtc_init(dev, primary_planes[i]);
+		if (IS_ERR(crtc)) {
+			ret = PTR_ERR(crtc);
+			goto fail;
+		}
+		priv->crtcs[priv->num_crtcs++] = crtc;
+	}
+
+	/* All CRTCs are compatible with all encoders */
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+	return 0;
+fail:
+	_dpu_kms_drm_obj_destroy(dpu_kms);
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	struct drm_device *dev;
+	int rc;
+
+	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return -EINVAL;
+	}
+
+	dev = dpu_kms->dev;
+
+	rc = _dpu_debugfs_init(dpu_kms);
+	if (rc)
+		DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
+
+	return rc;
+}
+#endif
+
+static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+		struct drm_encoder *encoder)
+{
+	return rate;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+	struct drm_device *dev;
+	int i;
+
+	dev = dpu_kms->dev;
+	if (!dev)
+		return;
+
+	if (dpu_kms->hw_intr)
+		dpu_hw_intr_destroy(dpu_kms->hw_intr);
+	dpu_kms->hw_intr = NULL;
+
+	if (dpu_kms->power_event)
+		dpu_power_handle_unregister_event(
+				&dpu_kms->phandle, dpu_kms->power_event);
+
+	/* safe to call these more than once during shutdown */
+	_dpu_debugfs_destroy(dpu_kms);
+	_dpu_kms_mmu_destroy(dpu_kms);
+
+	if (dpu_kms->catalog) {
+		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+		}
+	}
+
+	if (dpu_kms->rm_init)
+		dpu_rm_destroy(&dpu_kms->rm);
+	dpu_kms->rm_init = false;
+
+	if (dpu_kms->catalog)
+		dpu_hw_catalog_deinit(dpu_kms->catalog);
+	dpu_kms->catalog = NULL;
+
+	if (dpu_kms->core_client)
+		dpu_power_client_destroy(&dpu_kms->phandle,
+			dpu_kms->core_client);
+	dpu_kms->core_client = NULL;
+
+	if (dpu_kms->vbif[VBIF_NRT])
+		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+	dpu_kms->vbif[VBIF_NRT] = NULL;
+
+	if (dpu_kms->vbif[VBIF_RT])
+		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+	dpu_kms->vbif[VBIF_RT] = NULL;
+
+	if (dpu_kms->mmio)
+		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+	dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms;
+
+	if (!kms) {
+		DPU_ERROR("invalid kms\n");
+		return;
+	}
+
+	dpu_kms = to_dpu_kms(kms);
+
+	dpu_dbg_destroy();
+	_dpu_kms_hw_destroy(dpu_kms);
+}
+
+static int dpu_kms_pm_suspend(struct device *dev)
+{
+	struct drm_device *ddev;
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_atomic_state *state;
+	struct dpu_kms *dpu_kms;
+	int ret = 0, num_crtcs = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev_to_msm_kms(ddev))
+		return -EINVAL;
+
+	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+	/* disable hot-plug polling */
+	drm_kms_helper_poll_disable(ddev);
+
+	/* acquire modeset lock(s) */
+	drm_modeset_acquire_init(&ctx, 0);
+
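+	/*
+	 * On -EDEADLK the locks are backed off in the unlock path below and
+	 * the whole sequence restarts from this label, per the drm modeset
+	 * locking protocol.
+	 */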
+retry:
+	DPU_ATRACE_BEGIN("kms_pm_suspend");
+
+	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+	if (ret)
+		goto unlock;
+
+	/* save current state for resume */
+	if (dpu_kms->suspend_state)
+		drm_atomic_state_put(dpu_kms->suspend_state);
+	dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+	if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
+		DRM_ERROR("failed to back up suspend state\n");
+		dpu_kms->suspend_state = NULL;
+		goto unlock;
+	}
+
+	/* create atomic state to disable all CRTCs */
+	state = drm_atomic_state_alloc(ddev);
+	if (IS_ERR_OR_NULL(state)) {
+		DRM_ERROR("failed to allocate crtc disable state\n");
+		goto unlock;
+	}
+
+	state->acquire_ctx = &ctx;
+
+	/* check for nothing to do */
+	if (num_crtcs == 0) {
+		DRM_DEBUG("all crtcs are already in the off state\n");
+		drm_atomic_state_put(state);
+		goto suspended;
+	}
+
+	/* commit the "disable all" state */
+	ret = drm_atomic_commit(state);
+	if (ret < 0) {
+		DRM_ERROR("failed to disable crtcs, %d\n", ret);
+		drm_atomic_state_put(state);
+		goto unlock;
+	}
+
+suspended:
+	dpu_kms->suspend_block = true;
+
+unlock:
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	DPU_ATRACE_END("kms_pm_suspend");
+	return 0;
+}
+
+static int dpu_kms_pm_resume(struct device *dev)
+{
+	struct drm_device *ddev;
+	struct dpu_kms *dpu_kms;
+	int ret;
+
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev_to_msm_kms(ddev))
+		return -EINVAL;
+
+	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+	DPU_ATRACE_BEGIN("kms_pm_resume");
+
+	drm_mode_config_reset(ddev);
+
+	drm_modeset_lock_all(ddev);
+
+	dpu_kms->suspend_block = false;
+
+	if (dpu_kms->suspend_state) {
+		dpu_kms->suspend_state->acquire_ctx =
+			ddev->mode_config.acquire_ctx;
+		ret = drm_atomic_commit(dpu_kms->suspend_state);
+		if (ret < 0) {
+			DRM_ERROR("failed to restore state, %d\n", ret);
+			drm_atomic_state_put(dpu_kms->suspend_state);
+		}
+		dpu_kms->suspend_state = NULL;
+	}
+	drm_modeset_unlock_all(ddev);
+
+	/* enable hot-plug polling */
+	drm_kms_helper_poll_enable(ddev);
+
+	DPU_ATRACE_END("kms_pm_resume");
+	return 0;
+}
+
+static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
+				 struct drm_encoder *encoder,
+				 bool cmd_mode)
+{
+	struct msm_display_info info;
+	struct msm_drm_private *priv = encoder->dev->dev_private;
+	int i, rc = 0;
+
+	memset(&info, 0, sizeof(info));
+
+	info.intf_type = encoder->encoder_type;
+	info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
+			MSM_DISPLAY_CAP_VID_MODE;
+
+	/* TODO: No support for DSI swap */
+	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+		if (priv->dsi[i]) {
+			info.h_tile_instance[info.num_of_h_tiles] = i;
+			info.num_of_h_tiles++;
+		}
+	}
+
+	rc = dpu_encoder_setup(encoder->dev, encoder, &info);
+	if (rc)
+		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+			encoder->base.id, rc);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+	.hw_init         = dpu_kms_hw_init,
+	.irq_preinstall  = dpu_irq_preinstall,
+	.irq_postinstall = dpu_irq_postinstall,
+	.irq_uninstall   = dpu_irq_uninstall,
+	.irq             = dpu_irq,
+	.prepare_commit  = dpu_kms_prepare_commit,
+	.commit          = dpu_kms_commit,
+	.complete_commit = dpu_kms_complete_commit,
+	.wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
+	.enable_vblank   = dpu_kms_enable_vblank,
+	.disable_vblank  = dpu_kms_disable_vblank,
+	.check_modified_format = dpu_format_check_modified_format,
+	.get_format      = dpu_get_msm_format,
+	.round_pixclk    = dpu_kms_round_pixclk,
+	.pm_suspend      = dpu_kms_pm_suspend,
+	.pm_resume       = dpu_kms_pm_resume,
+	.destroy         = dpu_kms_destroy,
+	.set_encoder_mode = _dpu_kms_set_encoder_mode,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init    = dpu_kms_debugfs_init,
+#endif
+};
+
+/* the caller needs to turn on the clocks before calling this function */
+static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
+{
+	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+}
+
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+	struct msm_mmu *mmu;
+
+	mmu = dpu_kms->base.aspace->mmu;
+
+	mmu->funcs->detach(mmu, (const char **)iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+	msm_gem_address_space_put(dpu_kms->base.aspace);
+
+	return 0;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+	struct iommu_domain *domain;
+	struct msm_gem_address_space *aspace;
+	int ret;
+
+	domain = iommu_domain_alloc(&platform_bus_type);
+	if (!domain)
+		return 0;
+
+	aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
+			domain, "dpu1");
+	if (IS_ERR(aspace)) {
+		ret = PTR_ERR(aspace);
+		goto fail;
+	}
+
+	dpu_kms->base.aspace = aspace;
+
+	ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+	if (ret) {
+		DPU_ERROR("failed to attach iommu %d\n", ret);
+		msm_gem_address_space_put(aspace);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	_dpu_kms_mmu_destroy(dpu_kms);
+
+	return ret;
+}
+
+static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
+		char *clock_name)
+{
+	struct dss_module_power *mp = &dpu_kms->mp;
+	int i;
+
+	for (i = 0; i < mp->num_clk; i++) {
+		if (!strcmp(mp->clk_config[i].clk_name, clock_name))
+			return &mp->clk_config[i];
+	}
+
+	return NULL;
+}
+
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+	struct dss_clk *clk;
+
+	clk = _dpu_kms_get_clk(dpu_kms, clock_name);
+	if (!clk)
+		return -EINVAL;
+
+	return clk_get_rate(clk->clk);
+}
+
+static void dpu_kms_handle_power_event(u32 event_type, void *usr)
+{
+	struct dpu_kms *dpu_kms = usr;
+
+	if (!dpu_kms)
+		return;
+
+	if (event_type == DPU_POWER_EVENT_POST_ENABLE)
+		dpu_vbif_init_memtypes(dpu_kms);
+}
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int i, rc = -EINVAL;
+
+	if (!kms) {
+		DPU_ERROR("invalid kms\n");
+		goto end;
+	}
+
+	dpu_kms = to_dpu_kms(kms);
+	dev = dpu_kms->dev;
+	if (!dev) {
+		DPU_ERROR("invalid device\n");
+		goto end;
+	}
+
+	rc = dpu_dbg_init(&dpu_kms->pdev->dev);
+	if (rc) {
+		DRM_ERROR("failed to init dpu dbg: %d\n", rc);
+		goto end;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		DPU_ERROR("invalid private data\n");
+		goto dbg_destroy;
+	}
+
+	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
+	if (IS_ERR(dpu_kms->mmio)) {
+		rc = PTR_ERR(dpu_kms->mmio);
+		DPU_ERROR("mdp register memory map failed: %d\n", rc);
+		dpu_kms->mmio = NULL;
+		goto error;
+	}
+	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+	dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
+
+	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
+	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+		DPU_ERROR("vbif register memory map failed: %d\n", rc);
+		dpu_kms->vbif[VBIF_RT] = NULL;
+		goto error;
+	}
+	dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
+	dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
+	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+		dpu_kms->vbif[VBIF_NRT] = NULL;
+		DPU_DEBUG("VBIF NRT is not defined\n");
+	} else {
+		dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
+							     "vbif_nrt");
+	}
+
+	dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
+	if (IS_ERR(dpu_kms->reg_dma)) {
+		dpu_kms->reg_dma = NULL;
+		DPU_DEBUG("REG_DMA is not defined\n");
+	} else {
+		dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
+	}
+
+	dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
+					"core");
+	if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
+		rc = PTR_ERR(dpu_kms->core_client);
+		if (!dpu_kms->core_client)
+			rc = -EINVAL;
+		DPU_ERROR("dpu power client create failed: %d\n", rc);
+		dpu_kms->core_client = NULL;
+		goto error;
+	}
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+	_dpu_kms_core_hw_rev_init(dpu_kms);
+
+	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
+
+	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
+	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
+		rc = PTR_ERR(dpu_kms->catalog);
+		if (!dpu_kms->catalog)
+			rc = -EINVAL;
+		DPU_ERROR("catalog init failed: %d\n", rc);
+		dpu_kms->catalog = NULL;
+		goto power_error;
+	}
+
+	dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
+
+	/*
+	 * Now we need to read the HW catalog and initialize resources such as
+	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+	 */
+	rc = _dpu_kms_mmu_init(dpu_kms);
+	if (rc) {
+		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+			dpu_kms->dev);
+	if (rc) {
+		DPU_ERROR("rm init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	dpu_kms->rm_init = true;
+
+	dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
+	if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+		rc = PTR_ERR(dpu_kms->hw_mdp);
+		if (!dpu_kms->hw_mdp)
+			rc = -EINVAL;
+		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+		dpu_kms->hw_mdp = NULL;
+		goto power_error;
+	}
+
+	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+		dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
+				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+		if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+			if (!dpu_kms->hw_vbif[vbif_idx])
+				rc = -EINVAL;
+			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+			dpu_kms->hw_vbif[vbif_idx] = NULL;
+			goto power_error;
+		}
+	}
+
+	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+			&dpu_kms->phandle,
+			_dpu_kms_get_clk(dpu_kms, "core"));
+	if (rc) {
+		DPU_ERROR("failed to init perf %d\n", rc);
+		goto perf_err;
+	}
+
+	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+		rc = PTR_ERR(dpu_kms->hw_intr);
+		DPU_ERROR("hw_intr init failed: %d\n", rc);
+		dpu_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
+	/*
+	 * _dpu_kms_drm_obj_init should create the DRM related objects
+	 * i.e. CRTCs, planes, encoders, connectors and so forth
+	 */
+	rc = _dpu_kms_drm_obj_init(dpu_kms);
+	if (rc) {
+		DPU_ERROR("modeset init failed: %d\n", rc);
+		goto drm_obj_init_err;
+	}
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * max crtc width is equal to the max mixer width * 2 and max height
+	 * is 4K
+	 */
+	dev->mode_config.max_width =
+			dpu_kms->catalog->caps->max_mixer_width * 2;
+	dev->mode_config.max_height = 4096;
+
+	/*
+	 * Support format modifiers for compression etc.
+	 */
+	dev->mode_config.allow_fb_modifiers = true;
+
+	/*
+	 * Handle (re)initializations during power enable
+	 */
+	dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+	dpu_kms->power_event = dpu_power_handle_register_event(
+			&dpu_kms->phandle,
+			DPU_POWER_EVENT_POST_ENABLE,
+			dpu_kms_handle_power_event, dpu_kms, "kms");
+
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	return 0;
+
+drm_obj_init_err:
+hw_intr_init_err:
+	dpu_core_perf_destroy(&dpu_kms->perf);
+perf_err:
+power_error:
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+	_dpu_kms_hw_destroy(dpu_kms);
+dbg_destroy:
+	dpu_dbg_destroy();
+end:
+	return rc;
+}
+
+struct msm_kms *dpu_kms_init(struct drm_device *dev)
+{
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	int irq;
+
+	if (!dev || !dev->dev_private) {
+		DPU_ERROR("drm device node invalid\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	priv = dev->dev_private;
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+	if (!irq) {
+		DPU_ERROR("failed to get irq\n");
+		return ERR_PTR(-EINVAL);
+	}
+	dpu_kms->base.irq = irq;
+
+	return &dpu_kms->base;
+}
+
+static int dpu_bind(struct device *dev, struct device *master, void *data)
+{
+	struct drm_device *ddev = dev_get_drvdata(master);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_drm_private *priv = ddev->dev_private;
+	struct dpu_kms *dpu_kms;
+	struct dss_module_power *mp;
+	int ret = 0;
+
+	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+	if (!dpu_kms)
+		return -ENOMEM;
+
+	mp = &dpu_kms->mp;
+	ret = msm_dss_parse_clock(pdev, mp);
+	if (ret) {
+		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+		return ret;
+	}
+
+	dpu_power_resource_init(pdev, &dpu_kms->phandle);
+
+	platform_set_drvdata(pdev, dpu_kms);
+
+	msm_kms_init(&dpu_kms->base, &kms_funcs);
+	dpu_kms->dev = ddev;
+	dpu_kms->pdev = pdev;
+
+	pm_runtime_enable(&pdev->dev);
+	dpu_kms->rpm_enabled = true;
+
+	priv->kms = &dpu_kms->base;
+	return ret;
+}
+
+static void dpu_unbind(struct device *dev, struct device *master, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+	struct dss_module_power *mp = &dpu_kms->mp;
+
+	dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+	devm_kfree(&pdev->dev, mp->clk_config);
+	mp->num_clk = 0;
+
+	if (dpu_kms->rpm_enabled)
+		pm_runtime_disable(&pdev->dev);
+}
+
+static const struct component_ops dpu_ops = {
+	.bind   = dpu_bind,
+	.unbind = dpu_unbind,
+};
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &dpu_ops);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &dpu_ops);
+	return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+	int rc = -1;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+	struct drm_device *ddev;
+	struct dss_module_power *mp = &dpu_kms->mp;
+
+	ddev = dpu_kms->dev;
+	if (!ddev) {
+		DPU_ERROR("invalid drm_device\n");
+		goto exit;
+	}
+
+	rc = dpu_power_resource_enable(&dpu_kms->phandle,
+			dpu_kms->core_client, false);
+	if (rc)
+		DPU_ERROR("resource disable failed: %d\n", rc);
+
+	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+	if (rc)
+		DPU_ERROR("clock disable failed rc:%d\n", rc);
+
+exit:
+	return rc;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+	int rc = -1;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+	struct drm_device *ddev;
+	struct dss_module_power *mp = &dpu_kms->mp;
+
+	ddev = dpu_kms->dev;
+	if (!ddev) {
+		DPU_ERROR("invalid drm_device\n");
+		goto exit;
+	}
+
+	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+	if (rc) {
+		DPU_ERROR("clock enable failed rc:%d\n", rc);
+		goto exit;
+	}
+
+	rc = dpu_power_resource_enable(&dpu_kms->phandle,
+			dpu_kms->core_client, true);
+	if (rc)
+		DPU_ERROR("resource enable failed: %d\n", rc);
+
+exit:
+	return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+	{ .compatible = "qcom,sdm845-dpu", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+	.probe = dpu_dev_probe,
+	.remove = dpu_dev_remove,
+	.driver = {
+		.name = "msm_dpu",
+		.of_match_table = dpu_dt_match,
+		.pm = &dpu_pm_ops,
+	},
+};
+
+void __init msm_dpu_register(void)
+{
+	platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+	platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 0000000..66d4666
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_dbg.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_power_handle.h"
+#include "dpu_irq.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...)                                                \
+	do {                                                               \
+		if (unlikely(drm_debug & DRM_UT_KMS))                      \
+			DRM_DEBUG(fmt, ##__VA_ARGS__); \
+		else                                                       \
+			pr_debug(fmt, ##__VA_ARGS__);                      \
+	} while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...)                                         \
+	do {                                                               \
+		if (unlikely(drm_debug & DRM_UT_DRIVER))                   \
+			DRM_ERROR(fmt, ##__VA_ARGS__); \
+		else                                                       \
+			pr_debug(fmt, ##__VA_ARGS__);                      \
+	} while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ *	This macro is similar to the standard ktime_compare() function, but
+ *	attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+	ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
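+
+/*
+ * Example: ktime_compare_safe(a, b) compares the signed difference
+ * ktime_sub(a, b) against zero rather than the raw values, so the result
+ * stays correct even if one of the timestamps has wrapped around.
+ */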
+
+#define DPU_NAME_SIZE  12
+
+/* timeout in frames waiting for frame done */
+#define DPU_FRAME_DONE_TIMEOUT	60
+
+/*
+ * struct dpu_irq_callback - IRQ callback handlers
+ * @list: list node used to chain the callback
+ * @func: intr handler
+ * @arg: argument for the handler
+ */
+struct dpu_irq_callback {
+	struct list_head list;
+	void (*func)(void *arg, int irq_idx);
+	void *arg;
+};
+
+/**
+ * struct dpu_irq: IRQ structure contains callback registration info
+ * @total_irqs:   total number of irq_idx obtained from HW interrupts mapping
+ * @irq_cb_tbl:   array of IRQ callbacks setting
+ * @enable_counts: array of IRQ enable counts
+ * @irq_counts:   array of IRQ counts for statistics
+ * @cb_lock:      callback lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct dpu_irq {
+	u32 total_irqs;
+	struct list_head *irq_cb_tbl;
+	atomic_t *enable_counts;
+	atomic_t *irq_counts;
+	spinlock_t cb_lock;
+	struct dentry *debugfs_file;
+};
+
+struct dpu_kms {
+	struct msm_kms base;
+	struct drm_device *dev;
+	int core_rev;
+	struct dpu_mdss_cfg *catalog;
+
+	struct dpu_power_handle phandle;
+	struct dpu_power_client *core_client;
+	struct dpu_power_event *power_event;
+
+	/* directory entry for debugfs */
+	struct dentry *debugfs_root;
+	struct dentry *debugfs_danger;
+	struct dentry *debugfs_vbif;
+
+	/* io/register spaces: */
+	void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+	unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
+
+	struct regulator *vdd;
+	struct regulator *mmagic;
+	struct regulator *venus;
+
+	struct dpu_hw_intr *hw_intr;
+	struct dpu_irq irq_obj;
+
+	struct dpu_core_perf perf;
+
+	/* saved atomic state during system suspend */
+	struct drm_atomic_state *suspend_state;
+	bool suspend_block;
+
+	struct dpu_rm rm;
+	bool rm_init;
+
+	struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+	struct dpu_hw_mdp *hw_mdp;
+
+	bool has_danger_ctrl;
+
+	struct platform_device *pdev;
+	bool rpm_enabled;
+	struct dss_module_power mp;
+};
+
+struct vsync_info {
+	u32 frame_count;
+	u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+/* get struct msm_kms * from drm_device * */
+#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
+		((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
+
+/**
+ * dpu_kms_is_suspend_state - whether or not the system is pm suspended
+ * @dev: Pointer to drm device
+ * Return: Suspend status
+ */
+static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
+{
+	if (!ddev_to_msm_kms(dev))
+		return false;
+
+	return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
+}
+
+/**
+ * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
+ *				suspend status
+ * @dev: Pointer to drm device
+ * Return: True if commits should be rejected due to pm suspend
+ */
+static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
+{
+	if (!dpu_kms_is_suspend_state(dev))
+		return false;
+
+	return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
+}
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at,
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
+ */
+
+/**
+ * Companion structure for dpu_debugfs_create_regset32. Do not initialize the
+ * members of this structure explicitly; use dpu_debugfs_setup_regset32 instead.
+ */
+struct dpu_debugfs_regset32 {
+	uint32_t offset;
+	uint32_t blk_len;
+	struct dpu_kms *dpu_kms;
+};
+
+/**
+ * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize dpu_debugfs_regset32 structures for use
+ * with dpu_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * dpu_debugfs_setup_regset32 to initialize it.
+ *
+ * @name:   File name within debugfs
+ * @mode:   File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ *         or debugfs_remove_recursive() (on a parent directory) to remove the
+ *         file
+ */
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent, struct dpu_debugfs_regset32 *regset);
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
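+
+/*
+ * Example usage (sketch only; the file name, offset and length below are
+ * placeholder values). The regset must outlive the created file, e.g. by
+ * being embedded in a long-lived driver structure:
+ *
+ *	static struct dpu_debugfs_regset32 regset;
+ *
+ *	dpu_debugfs_setup_regset32(&regset, 0x1000, 0x100, dpu_kms);
+ *	dpu_debugfs_create_regset32("regs", 0400,
+ *			dpu_debugfs_get_root(dpu_kms), &regset);
+ */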
+
+/**
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE	4096
+
+/**
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+void dpu_kms_encoder_enable(struct drm_encoder *encoder);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms:  pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
new file mode 100644
index 0000000..9e533b8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -0,0 +1,245 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include "dpu_kms.h"
+
+#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+
+#define HW_INTR_STATUS			0x0010
+
+struct dpu_mdss {
+	struct msm_mdss base;
+	void __iomem *mmio;
+	unsigned long mmio_len;
+	u32 hwversion;
+	struct dss_module_power mp;
+	struct dpu_irq_controller irq_controller;
+};
+
+static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+{
+	struct dpu_mdss *dpu_mdss = arg;
+	u32 interrupts;
+
+	interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
+
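+	/*
+	 * Dispatch pending interrupts from the highest set bit down:
+	 * fls() returns the 1-based index of the most significant set bit,
+	 * so fls(interrupts) - 1 is the hw irq number; each bit is cleared
+	 * from the local copy once its handler has run.
+	 */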
+	while (interrupts) {
+		irq_hw_number_t hwirq = fls(interrupts) - 1;
+		unsigned int mapping;
+		int rc;
+
+		mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
+					   hwirq);
+		if (mapping == 0) {
+			DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
+			return IRQ_NONE;
+		}
+
+		rc = generic_handle_irq(mapping);
+		if (rc < 0) {
+			DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
+				  hwirq, mapping, rc);
+			return IRQ_NONE;
+		}
+
+		interrupts &= ~(1 << hwirq);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void dpu_mdss_irq_mask(struct irq_data *irqd)
+{
+	struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+	/* order prior accesses before updating the enable mask */
+	smp_mb__before_atomic();
+	clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+	/* make the cleared bit visible before subsequent accesses */
+	smp_mb__after_atomic();
+}
+
+static void dpu_mdss_irq_unmask(struct irq_data *irqd)
+{
+	struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+	/* order prior accesses before updating the enable mask */
+	smp_mb__before_atomic();
+	set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+	/* make the set bit visible before subsequent accesses */
+	smp_mb__after_atomic();
+}
+
+static struct irq_chip dpu_mdss_irq_chip = {
+	.name = "dpu_mdss",
+	.irq_mask = dpu_mdss_irq_mask,
+	.irq_unmask = dpu_mdss_irq_unmask,
+};
+
+static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct dpu_mdss *dpu_mdss = domain->host_data;
+	int ret;
+
+	irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
+	ret = irq_set_chip_data(irq, dpu_mdss);
+
+	return ret;
+}
+
+static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
+	.map = dpu_mdss_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
+{
+	struct device *dev;
+	struct irq_domain *domain;
+
+	dev = dpu_mdss->base.dev->dev;
+
+	domain = irq_domain_add_linear(dev->of_node, 32,
+			&dpu_mdss_irqdomain_ops, dpu_mdss);
+	if (!domain) {
+		DPU_ERROR("failed to add irq_domain\n");
+		return -EINVAL;
+	}
+
+	dpu_mdss->irq_controller.enabled_mask = 0;
+	dpu_mdss->irq_controller.domain = domain;
+
+	return 0;
+}
+
+static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+{
+	if (dpu_mdss->irq_controller.domain) {
+		irq_domain_remove(dpu_mdss->irq_controller.domain);
+		dpu_mdss->irq_controller.domain = NULL;
+	}
+	return 0;
+}
+static int dpu_mdss_enable(struct msm_mdss *mdss)
+{
+	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+	struct dss_module_power *mp = &dpu_mdss->mp;
+	int ret;
+
+	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+	if (ret)
+		DPU_ERROR("clock enable failed, ret:%d\n", ret);
+
+	return ret;
+}
+
+static int dpu_mdss_disable(struct msm_mdss *mdss)
+{
+	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+	struct dss_module_power *mp = &dpu_mdss->mp;
+	int ret;
+
+	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+	if (ret)
+		DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+	return ret;
+}
+
+static void dpu_mdss_destroy(struct drm_device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev->dev);
+	struct msm_drm_private *priv = dev->dev_private;
+	struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+	struct dss_module_power *mp = &dpu_mdss->mp;
+
+	_dpu_mdss_irq_domain_fini(dpu_mdss);
+
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+	devm_kfree(&pdev->dev, mp->clk_config);
+
+	if (dpu_mdss->mmio)
+		devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+	dpu_mdss->mmio = NULL;
+
+	pm_runtime_disable(dev->dev);
+	priv->mdss = NULL;
+}
+
+static const struct msm_mdss_funcs mdss_funcs = {
+	.enable	= dpu_mdss_enable,
+	.disable = dpu_mdss_disable,
+	.destroy = dpu_mdss_destroy,
+};
+
+int dpu_mdss_init(struct drm_device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev->dev);
+	struct msm_drm_private *priv = dev->dev_private;
+	struct resource *res;
+	struct dpu_mdss *dpu_mdss;
+	struct dss_module_power *mp;
+	int ret = 0;
+
+	dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+	if (!dpu_mdss)
+		return -ENOMEM;
+
+	dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
+	if (IS_ERR(dpu_mdss->mmio))
+		return PTR_ERR(dpu_mdss->mmio);
+
+	DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
+	if (!res) {
+		DRM_ERROR("failed to get memory resource for mdss\n");
+		return -ENOMEM;
+	}
+	dpu_mdss->mmio_len = resource_size(res);
+
+	mp = &dpu_mdss->mp;
+	ret = msm_dss_parse_clock(pdev, mp);
+	if (ret) {
+		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+		goto clk_parse_err;
+	}
+
+	dpu_mdss->base.dev = dev;
+	dpu_mdss->base.funcs = &mdss_funcs;
+
+	ret = _dpu_mdss_irq_domain_add(dpu_mdss);
+	if (ret)
+		goto irq_domain_error;
+
+	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+			dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
+	if (ret) {
+		DPU_ERROR("failed to init irq: %d\n", ret);
+		goto irq_error;
+	}
+
+	pm_runtime_enable(dev->dev);
+
+	pm_runtime_get_sync(dev->dev);
+	dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
+	pm_runtime_put_sync(dev->dev);
+
+	priv->mdss = &dpu_mdss->base;
+
+	return ret;
+
+irq_error:
+	_dpu_mdss_irq_domain_fini(dpu_mdss);
+irq_domain_error:
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_parse_err:
+	devm_kfree(&pdev->dev, mp->clk_config);
+	if (dpu_mdss->mmio)
+		devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+	dpu_mdss->mmio = NULL;
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 0000000..4ac2b0c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1971 @@
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
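+/*
+ * DECIMATED_DIMENSION is a ceiling division by 2^deci, e.g.
+ * DECIMATED_DIMENSION(1081, 2) = (1081 + 3) >> 2 = 271.
+ */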
+#define PHASE_STEP_SHIFT	21
+#define PHASE_STEP_UNIT_SCALE   ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL		15
+
+#define SHARP_STRENGTH_DEFAULT	32
+#define SHARP_EDGE_THR_DEFAULT	112
+#define SHARP_SMOOTH_THR_DEFAULT	8
+#define SHARP_NOISE_THR_DEFAULT	2
+
+#define DPU_NAME_SIZE  12
+
+#define DPU_PLANE_COLOR_FILL_FLAG	BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+	R0,
+	R1,
+	R_MAX
+};
+
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+#define DEFAULT_REFRESH_RATE	60
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables Amortization within pipe.
+ *	this configuration is mutually exclusive from VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum dpu_plane_qos {
+	DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+	DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+	DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @csc_ptr: Points to dpu_csc_cfg structure to use for the current frame
+ * @mplane_list: List of multirect planes of the same pipe
+ * @catalog: Points to dpu catalog structure
+ */
+struct dpu_plane {
+	struct drm_plane base;
+
+	struct mutex lock;
+
+	enum dpu_sspp pipe;
+	uint32_t features;      /* capabilities from catalog */
+	uint32_t nformats;
+	uint32_t formats[64];
+
+	struct dpu_hw_pipe *pipe_hw;
+	struct dpu_hw_pipe_cfg pipe_cfg;
+	struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+	uint32_t color_fill;
+	bool is_error;
+	bool is_rt_pipe;
+	bool is_virtual;
+	struct list_head mplane_list;
+	struct dpu_mdss_cfg *catalog;
+
+	struct dpu_csc_cfg *csc_ptr;
+
+	const struct dpu_sspp_sub_blks *pipe_sblk;
+	char pipe_name[DPU_NAME_SIZE];
+
+	/* debugfs related stuff */
+	struct dentry *debugfs_root;
+	struct dpu_debugfs_regset32 debugfs_src;
+	struct dpu_debugfs_regset32 debugfs_scaler;
+	struct dpu_debugfs_regset32 debugfs_csc;
+	bool debugfs_default_scale;
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+	struct msm_drm_private *priv;
+
+	if (!plane || !plane->dev)
+		return NULL;
+	priv = plane->dev->dev_private;
+	if (!priv)
+		return NULL;
+	return to_dpu_kms(priv->kms);
+}
+
+static bool dpu_plane_enabled(struct drm_plane_state *state)
+{
+	return state && state->fb && state->crtc;
+}
+
+static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
+{
+	return state && state->crtc;
+}
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane:		Pointer to drm plane
+ * @fmt:		Pointer to source buffer format
+ * @src_width:		width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
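+ *
+ * Worked example (illustrative values): for NV12 with a 32 KiB pixel RAM,
+ * src_width 1920 and bpp 1, total_fl = (32768 / 2) / ((1920 + 32) * 1) = 8.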
+ */
+static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+		const struct dpu_format *fmt, u32 src_width)
+{
+	struct dpu_plane *pdpu, *tmp;
+	struct dpu_plane_state *pstate;
+	u32 fixed_buff_size;
+	u32 total_fl;
+
+	if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+		DPU_ERROR("invalid arguments\n");
+		return 0;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pstate = to_dpu_plane_state(plane->state);
+	fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+
+	list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+		if (!dpu_plane_enabled(tmp->base.state))
+			continue;
+		DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
+				pdpu->base.base.id, tmp->base.base.id,
+				src_width,
+				drm_rect_width(&tmp->pipe_cfg.src_rect));
+		src_width = max_t(u32, src_width,
+				  drm_rect_width(&tmp->pipe_cfg.src_rect));
+	}
+
+	if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+		if (fmt->chroma_sample == DPU_CHROMA_420) {
+			/* NV12 */
+			total_fl = (fixed_buff_size / 2) /
+				((src_width + 32) * fmt->bpp);
+		} else {
+			/* non NV12 */
+			total_fl = (fixed_buff_size / 2) * 2 /
+				((src_width + 32) * fmt->bpp);
+		}
+	} else {
+		if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+			total_fl = (fixed_buff_size / 2) * 2 /
+				((src_width + 32) * fmt->bpp);
+		} else {
+			total_fl = (fixed_buff_size) * 2 /
+				((src_width + 32) * fmt->bpp);
+		}
+	}
+
+	DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+			plane->base.id, pdpu->pipe - SSPP_VIG0,
+			(char *)&fmt->base.pixel_format,
+			src_width, total_fl);
+
+	return total_fl;
+}
+
+/**
+ * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl:		Pointer to LUT table
+ * @total_fl:		fill level
+ * Return: LUT setting corresponding to the fill level
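+ *
+ * Example (illustrative): with entries {.fl = 10, .lut = A}, {.fl = 0,
+ * .lut = B}, total_fl = 5 matches the first entry (A) and total_fl = 20
+ * falls through to the fl == 0 default entry (B).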
+ */
+static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+		u32 total_fl)
+{
+	int i;
+
+	if (!tbl || !tbl->nentry || !tbl->entries)
+		return 0;
+
+	for (i = 0; i < tbl->nentry; i++)
+		if (total_fl <= tbl->entries[i].fl)
+			return tbl->entries[i].lut;
+
+	/* if last fl is zero, use as default */
+	if (!tbl->entries[i-1].fl)
+		return tbl->entries[i-1].lut;
+
+	return 0;
+}
+
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane:		Pointer to drm plane
+ * @fb:			Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct dpu_plane *pdpu;
+	const struct dpu_format *fmt = NULL;
+	u64 qos_lut;
+	u32 total_fl = 0, lut_usage;
+
+	if (!plane || !fb) {
+		DPU_ERROR("invalid arguments plane %d fb %d\n",
+				plane != 0, fb != 0);
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	} else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
+		return;
+	}
+
+	if (!pdpu->is_rt_pipe) {
+		lut_usage = DPU_QOS_LUT_USAGE_NRT;
+	} else {
+		fmt = dpu_get_dpu_format_ext(
+				fb->format->format,
+				fb->modifier);
+		total_fl = _dpu_plane_calc_fill_level(plane, fmt,
+				drm_rect_width(&pdpu->pipe_cfg.src_rect));
+
+		if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+			lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+		else
+			lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+	}
+
+	qos_lut = _dpu_plane_get_qos_lut(
+			&pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
+	pdpu->pipe_qos_cfg.creq_lut = qos_lut;
+
+	trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
+
+	DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+			plane->base.id,
+			pdpu->pipe - SSPP_VIG0,
+			fmt ? (char *)&fmt->base.pixel_format : NULL,
+			pdpu->is_rt_pipe, total_fl, qos_lut);
+
+	pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
+ * @plane:		Pointer to drm plane
+ * @fb:			Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct dpu_plane *pdpu;
+	const struct dpu_format *fmt = NULL;
+	u32 danger_lut, safe_lut;
+
+	if (!plane || !fb) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	} else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
+		return;
+	}
+
+	if (!pdpu->is_rt_pipe) {
+		danger_lut = pdpu->catalog->perf.danger_lut_tbl
+				[DPU_QOS_LUT_USAGE_NRT];
+		safe_lut = pdpu->catalog->perf.safe_lut_tbl
+				[DPU_QOS_LUT_USAGE_NRT];
+	} else {
+		fmt = dpu_get_dpu_format_ext(
+				fb->format->format,
+				fb->modifier);
+
+		if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
+			danger_lut = pdpu->catalog->perf.danger_lut_tbl
+					[DPU_QOS_LUT_USAGE_LINEAR];
+			safe_lut = pdpu->catalog->perf.safe_lut_tbl
+					[DPU_QOS_LUT_USAGE_LINEAR];
+		} else {
+			danger_lut = pdpu->catalog->perf.danger_lut_tbl
+					[DPU_QOS_LUT_USAGE_MACROTILE];
+			safe_lut = pdpu->catalog->perf.safe_lut_tbl
+					[DPU_QOS_LUT_USAGE_MACROTILE];
+		}
+	}
+
+	pdpu->pipe_qos_cfg.danger_lut = danger_lut;
+	pdpu->pipe_qos_cfg.safe_lut = safe_lut;
+
+	trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			(fmt) ? fmt->fetch_mode : 0,
+			pdpu->pipe_qos_cfg.danger_lut,
+			pdpu->pipe_qos_cfg.safe_lut);
+
+	DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+		plane->base.id,
+		pdpu->pipe - SSPP_VIG0,
+		fmt ? (char *)&fmt->base.pixel_format : NULL,
+		fmt ? fmt->fetch_mode : -1,
+		pdpu->pipe_qos_cfg.danger_lut,
+		pdpu->pipe_qos_cfg.safe_lut);
+
+	pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
+			&pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane:		Pointer to drm plane
+ * @enable:		true to enable QoS control
+ * @flags:		QoS control mode (enum dpu_plane_qos)
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+	bool enable, u32 flags)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	} else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
+		return;
+	}
+
+	if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
+		pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
+		pdpu->pipe_qos_cfg.danger_vblank =
+				pdpu->pipe_sblk->danger_vblank;
+		pdpu->pipe_qos_cfg.vblank_en = enable;
+	}
+
+	if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
+		/* this feature overrules previous VBLANK_CTRL */
+		pdpu->pipe_qos_cfg.vblank_en = false;
+		pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+	}
+
+	if (flags & DPU_PLANE_QOS_PANIC_CTRL)
+		pdpu->pipe_qos_cfg.danger_safe_en = enable;
+
+	if (!pdpu->is_rt_pipe) {
+		pdpu->pipe_qos_cfg.vblank_en = false;
+		pdpu->pipe_qos_cfg.danger_safe_en = false;
+	}
+
+	DPU_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+		plane->base.id,
+		pdpu->pipe - SSPP_VIG0,
+		pdpu->pipe_qos_cfg.danger_safe_en,
+		pdpu->pipe_qos_cfg.vblank_en,
+		pdpu->pipe_qos_cfg.creq_vblank,
+		pdpu->pipe_qos_cfg.danger_vblank,
+		pdpu->is_rt_pipe);
+
+	pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
+			&pdpu->pipe_qos_cfg);
+}
+
+int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+	struct dpu_plane *pdpu;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!plane || !plane->dev) {
+		DPU_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->is_rt_pipe)
+		goto end;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+end:
+	return 0;
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane:		Pointer to drm plane
+ * @crtc:		Pointer to drm crtc
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+		struct drm_crtc *crtc)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_vbif_set_ot_params ot_params;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!plane || !plane->dev || !crtc) {
+		DPU_ERROR("invalid arguments plane %d crtc %d\n",
+				plane != 0, crtc != 0);
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	memset(&ot_params, 0, sizeof(ot_params));
+	ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+	ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
+	ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+	ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+	ot_params.is_wfd = !pdpu->is_rt_pipe;
+	ot_params.frame_rate = crtc->mode.vrefresh;
+	ot_params.vbif_idx = VBIF_RT;
+	ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+	ot_params.rd = true;
+
+	dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS for the given plane
+ * @plane:		Pointer to drm plane
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_vbif_set_qos_params qos_params;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!plane || !plane->dev) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = VBIF_RT;
+	qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+	qos_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+	qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
+	qos_params.is_rt = pdpu->is_rt_pipe;
+
+	DPU_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+			plane->base.id, qos_params.num,
+			qos_params.vbif_idx,
+			qos_params.xin_id, qos_params.is_rt,
+			qos_params.clk_ctrl);
+
+	dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+/**
+ * _dpu_plane_get_aspace: gets the address space
+ */
+static int _dpu_plane_get_aspace(
+		struct dpu_plane *pdpu,
+		struct dpu_plane_state *pstate,
+		struct msm_gem_address_space **aspace)
+{
+	struct dpu_kms *kms;
+
+	if (!pdpu || !pstate || !aspace) {
+		DPU_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	kms = _dpu_plane_get_kms(&pdpu->base);
+	if (!kms) {
+		DPU_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	*aspace = kms->base.aspace;
+
+	return 0;
+}
+
+static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+		struct dpu_plane_state *pstate,
+		struct dpu_hw_pipe_cfg *pipe_cfg,
+		struct drm_framebuffer *fb)
+{
+	struct dpu_plane *pdpu;
+	struct msm_gem_address_space *aspace = NULL;
+	int ret;
+
+	if (!plane || !pstate || !pipe_cfg || !fb) {
+		DPU_ERROR(
+			"invalid arg(s), plane %d state %d cfg %d fb %d\n",
+			plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
+		return;
+	}
+
+	ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
+		return;
+	}
+
+	ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+	if (ret == -EAGAIN)
+		DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
+	else if (ret)
+		DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+	else if (pdpu->pipe_hw->ops.setup_sourceaddress) {
+		trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx,
+					    &pipe_cfg->layout,
+					    pstate->multirect_index);
+		pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg,
+						pstate->multirect_index);
+	}
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
+		struct dpu_plane_state *pstate,
+		uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+		struct dpu_hw_scaler3_cfg *scale_cfg,
+		const struct dpu_format *fmt,
+		uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+	uint32_t i;
+
+	if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+			!chroma_subsmpl_v) {
+		DPU_ERROR(
+			"pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
+			!!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
+			chroma_subsmpl_v);
+		return;
+	}
+
+	memset(scale_cfg, 0, sizeof(*scale_cfg));
+	memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
+
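+	/*
+	 * Phase steps encode the src/dst ratio in unsigned 11.21 fixed
+	 * point, e.g. src_w = 1920, dst_w = 960 gives
+	 * mult_frac(1 << 21, 1920, 960) = 0x00400000, i.e. 2.0.
+	 */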
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+		mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+		mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
+
+
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+		scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+		scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+		scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+		scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+		scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+		scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+	for (i = 0; i < DPU_MAX_PLANES; i++) {
+		scale_cfg->src_width[i] = src_w;
+		scale_cfg->src_height[i] = src_h;
+		if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+			scale_cfg->src_width[i] /= chroma_subsmpl_h;
+			scale_cfg->src_height[i] /= chroma_subsmpl_v;
+		}
+		scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+		scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+		pstate->pixel_ext.num_ext_pxls_top[i] =
+			scale_cfg->src_height[i];
+		pstate->pixel_ext.num_ext_pxls_left[i] =
+			scale_cfg->src_width[i];
+	}
+	if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+		&& (src_w == dst_w))
+		return;
+
+	scale_cfg->dst_width = dst_w;
+	scale_cfg->dst_height = dst_h;
+	scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+	scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+	scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+	scale_cfg->lut_flag = 0;
+	scale_cfg->blend_cfg = 1;
+	scale_cfg->enable = 1;
+}
+
+static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+{
+	static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+		},
+		/* signed bias */
+		{ 0xfff0, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+		{ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+	};
+	static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+			},
+		/* signed bias */
+		{ 0xffc0, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+		{ 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+	};
+
+	if (!pdpu) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
+		pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+	else
+		pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+
+	DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+			pdpu->csc_ptr->csc_mv[0],
+			pdpu->csc_ptr->csc_mv[1],
+			pdpu->csc_ptr->csc_mv[2]);
+}
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+		struct dpu_plane_state *pstate,
+		const struct dpu_format *fmt, bool color_fill)
+{
+	struct dpu_hw_pixel_ext *pe;
+	uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+
+	if (!pdpu || !fmt || !pstate) {
+		DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+				pdpu != 0, fmt != 0, pstate != 0);
+		return;
+	}
+
+	pe = &pstate->pixel_ext;
+
+	/* don't chroma subsample if decimating */
+	chroma_subsmpl_h =
+		drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+	chroma_subsmpl_v =
+		drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+	/* update scaler. calculate default config for QSEED3 */
+	_dpu_plane_setup_scaler3(pdpu, pstate,
+			drm_rect_width(&pdpu->pipe_cfg.src_rect),
+			drm_rect_height(&pdpu->pipe_cfg.src_rect),
+			drm_rect_width(&pdpu->pipe_cfg.dst_rect),
+			drm_rect_height(&pdpu->pipe_cfg.dst_rect),
+			&pstate->scaler3_cfg, fmt,
+			chroma_subsmpl_h, chroma_subsmpl_v);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu:   Pointer to DPU plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+		uint32_t color, uint32_t alpha)
+{
+	const struct dpu_format *fmt;
+	const struct drm_plane *plane;
+	struct dpu_plane_state *pstate;
+
+	if (!pdpu || !pdpu->base.state) {
+		DPU_ERROR("invalid plane\n");
+		return -EINVAL;
+	}
+
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
+		return -EINVAL;
+	}
+
+	plane = &pdpu->base;
+	pstate = to_dpu_plane_state(plane->state);
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	/*
+	 * select fill format to match user property expectation,
+	 * h/w only supports RGB variants
+	 */
+	fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+	/* update sspp */
+	if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+		pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+				(color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+				pstate->multirect_index);
+
+		/* override scaler/decimation if solid fill */
+		pdpu->pipe_cfg.src_rect.x1 = 0;
+		pdpu->pipe_cfg.src_rect.y1 = 0;
+		pdpu->pipe_cfg.src_rect.x2 =
+			drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+		pdpu->pipe_cfg.src_rect.y2 =
+			drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+		_dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+
+		if (pdpu->pipe_hw->ops.setup_format)
+			pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+					fmt, DPU_SSPP_SOLID_FILL,
+					pstate->multirect_index);
+
+		if (pdpu->pipe_hw->ops.setup_rects)
+			pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+					&pdpu->pipe_cfg,
+					pstate->multirect_index);
+
+		if (pdpu->pipe_hw->ops.setup_pe)
+			pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+					&pstate->pixel_ext);
+
+		if (pdpu->pipe_hw->ops.setup_scaler &&
+				pstate->multirect_index != DPU_SSPP_RECT_1)
+			pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+					&pdpu->pipe_cfg, &pstate->pixel_ext,
+					&pstate->scaler3_cfg);
+	}
+
+	return 0;
+}
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+	struct dpu_plane_state *pstate;
+
+	if (!drm_state)
+		return;
+
+	pstate = to_dpu_plane_state(drm_state);
+
+	pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+	pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+	struct dpu_plane_state *pstate[R_MAX];
+	const struct drm_plane_state *drm_state[R_MAX];
+	struct drm_rect src[R_MAX], dst[R_MAX];
+	struct dpu_plane *dpu_plane[R_MAX];
+	const struct dpu_format *fmt[R_MAX];
+	int i, buffer_lines;
+	unsigned int max_tile_height = 1;
+	bool parallel_fetch_qualified = true;
+	bool has_tiled_rect = false;
+
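+	/* first pass: note any UBWC (tiled) rect and track the tallest tile */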
+	for (i = 0; i < R_MAX; i++) {
+		const struct msm_format *msm_fmt;
+
+		drm_state[i] = i ? plane->r1 : plane->r0;
+		msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+		fmt[i] = to_dpu_format(msm_fmt);
+
+		if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+			has_tiled_rect = true;
+			if (fmt[i]->tile_height > max_tile_height)
+				max_tile_height = fmt[i]->tile_height;
+		}
+	}
+
+	for (i = 0; i < R_MAX; i++) {
+		int width_threshold;
+
+		pstate[i] = to_dpu_plane_state(drm_state[i]);
+		dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+		if (pstate[i] == NULL) {
+			DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+				drm_state[i]->plane->base.id);
+			return -EINVAL;
+		}
+
+		src[i].x1 = drm_state[i]->src_x >> 16;
+		src[i].y1 = drm_state[i]->src_y >> 16;
+		src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+		src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+		dst[i] = drm_plane_state_dest(drm_state[i]);
+
+		if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+		    drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+			DPU_ERROR_PLANE(dpu_plane[i],
+				"scaling is not supported in multirect mode\n");
+			return -EINVAL;
+		}
+
+		if (DPU_FORMAT_IS_YUV(fmt[i])) {
+			DPU_ERROR_PLANE(dpu_plane[i],
+				"Unsupported format for multirect mode\n");
+			return -EINVAL;
+		}
+
+		/*
+		 * SSPP PD_MEM is split in half - one for each RECT.
+		 * Tiled formats need 5 lines of buffering while fetching,
+		 * whereas linear formats need only 2 lines.
+		 * So we cannot support more than half of the supported SSPP
+		 * width for tiled formats.
+		 */
+		width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+		if (has_tiled_rect)
+			width_threshold /= 2;
+
+		if (parallel_fetch_qualified &&
+		    drm_rect_width(&src[i]) > width_threshold)
+			parallel_fetch_qualified = false;
+
+	}
+
+	/* Validate RECT's and set the mode */
+
+	/* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+	if (parallel_fetch_qualified) {
+		pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+		pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+		goto done;
+	}
+
+	/* TIME_MX Mode */
+	buffer_lines = 2 * max_tile_height;
+
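+	/* rects must be vertically separated by at least the fetch buffer depth */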
+	if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+	    dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+		pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+		pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+	} else {
+		DPU_ERROR(
+			"No multirect mode possible for the planes (%d - %d)\n",
+			drm_state[R0]->plane->base.id,
+			drm_state[R1]->plane->base.id);
+		return -EINVAL;
+	}
+
+done:
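+	/* the virtual plane of the pair always drives RECT_1 of the shared SSPP */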
+	if (dpu_plane[R0]->is_virtual) {
+		pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
+		pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
+	} else {
+		pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+		pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+	}
+
+	DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+		pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+	DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+		pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+	return 0;
+}
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush for the given plane
+ * @plane: Pointer to drm plane structure
+ * @ctl: Pointer to hardware control driver
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+		u32 *flush_sspp)
+{
+	struct dpu_plane_state *pstate;
+
+	if (!plane || !flush_sspp) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	pstate = to_dpu_plane_state(plane->state);
+
+	*flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+		struct drm_plane_state *new_state)
+{
+	struct drm_framebuffer *fb = new_state->fb;
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+	struct dpu_hw_fmt_layout layout;
+	struct drm_gem_object *obj;
+	struct msm_gem_object *msm_obj;
+	struct dma_fence *fence;
+	struct msm_gem_address_space *aspace;
+	int ret;
+
+	if (!new_state->fb)
+		return 0;
+
+	DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+	ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
+		return ret;
+	}
+
+	/* cache aspace */
+	pstate->aspace = aspace;
+
+	/*
+	 * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+	 *       we can use msm_atomic_prepare_fb() instead of doing the
+	 *       implicit fence and fb prepare by hand here.
+	 */
+	obj = msm_framebuffer_bo(new_state->fb, 0);
+	msm_obj = to_msm_bo(obj);
+	fence = reservation_object_get_excl_rcu(msm_obj->resv);
+	if (fence)
+		drm_atomic_set_fence_for_plane(new_state, fence);
+
+	if (pstate->aspace) {
+		ret = msm_framebuffer_prepare(new_state->fb,
+				pstate->aspace);
+		if (ret) {
+			DPU_ERROR("failed to prepare framebuffer\n");
+			return ret;
+		}
+	}
+
+	/* validate framebuffer layout before commit */
+	ret = dpu_format_populate_layout(pstate->aspace,
+			new_state->fb, &layout);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+		struct drm_plane_state *old_state)
+{
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_plane_state *old_pstate;
+
+	if (!old_state || !old_state->fb)
+		return;
+
+	old_pstate = to_dpu_plane_state(old_state);
+
+	DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+	msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
+}
+
+static bool dpu_plane_validate_src(struct drm_rect *src,
+				   struct drm_rect *fb_rect,
+				   uint32_t min_src_size)
+{
+	/* Ensure fb size is supported */
+	if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH ||
+	    drm_rect_height(fb_rect) > MAX_IMG_HEIGHT)
+		return false;
+
+	/* Ensure src rect is above the minimum size */
+	if (drm_rect_width(src) < min_src_size ||
+	    drm_rect_height(src) < min_src_size)
+		return false;
+
+	/* Ensure src is fully encapsulated in fb */
+	return drm_rect_intersect(fb_rect, src) &&
+		drm_rect_equals(fb_rect, src);
+}
+
+static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	int ret = 0;
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+	const struct dpu_format *fmt;
+	struct drm_rect src, dst, fb_rect = { 0 };
+	uint32_t max_upscale = 1, max_downscale = 1;
+	uint32_t min_src_size, max_linewidth;
+	int hscale = 1, vscale = 1;
+
+	if (!plane || !state) {
+		DPU_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != 0, state != 0);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pstate = to_dpu_plane_state(state);
+
+	if (!pdpu->pipe_sblk) {
+		DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	src.x1 = state->src_x >> 16;
+	src.y1 = state->src_y >> 16;
+	src.x2 = src.x1 + (state->src_w >> 16);
+	src.y2 = src.y1 + (state->src_h >> 16);
+
+	dst = drm_plane_state_dest(state);
+
+	fb_rect.x2 = state->fb->width;
+	fb_rect.y2 = state->fb->height;
+
+	max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+
+	if (pdpu->features & DPU_SSPP_SCALER) {
+		max_downscale = pdpu->pipe_sblk->maxdwnscale;
+		max_upscale = pdpu->pipe_sblk->maxupscale;
+	}
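+	/* drm_rect_calc_*scale() returns a negative errno if out of range */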
+	if (drm_rect_width(&src) < drm_rect_width(&dst))
+		hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
+	else
+		hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
+	if (drm_rect_height(&src) < drm_rect_height(&dst))
+		vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
+	else
+		vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
+
+	DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
+		dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
+
+	if (!dpu_plane_enabled(state))
+		goto exit;
+
+	fmt = to_dpu_format(msm_framebuffer_format(state->fb));
+
+	min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+	if (DPU_FORMAT_IS_YUV(fmt) &&
+		(!(pdpu->features & DPU_SSPP_SCALER) ||
+		 !(pdpu->features & (BIT(DPU_SSPP_CSC)
+		 | BIT(DPU_SSPP_CSC_10BIT))))) {
+		DPU_ERROR_PLANE(pdpu,
+				"plane doesn't have scaler/csc for yuv\n");
+		ret = -EINVAL;
+
+	/* check src bounds */
+	} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
+		DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&src));
+		ret = -E2BIG;
+
+	/* valid yuv image */
+	} else if (DPU_FORMAT_IS_YUV(fmt) &&
+		   (src.x1 & 0x1 || src.y1 & 0x1 ||
+		    drm_rect_width(&src) & 0x1 ||
+		    drm_rect_height(&src) & 0x1)) {
+		DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&src));
+		ret = -EINVAL;
+
+	/* min dst support */
+	} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
+		DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&dst));
+		ret = -EINVAL;
+
+	/* check decimated source width */
+	} else if (drm_rect_width(&src) > max_linewidth) {
+		DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+				DRM_RECT_ARG(&src), max_linewidth);
+		ret = -E2BIG;
+
+	/* check scaler capability */
+	} else if (hscale < 0 || vscale < 0) {
+		DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
+				DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
+		ret = -E2BIG;
+	}
+
+exit:
+	return ret;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	if (!state->fb)
+		return 0;
+
+	DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
+
+	return dpu_plane_sspp_atomic_check(plane, state);
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+
+	if (!plane || !plane->state) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pstate = to_dpu_plane_state(plane->state);
+
+	/*
+	 * These updates have to be done immediately before the plane flush
+	 * timing, and may not be moved to the atomic_update/mode_set functions.
+	 */
+	if (pdpu->is_error)
+		/* force white frame with 100% alpha pipe output on error */
+		_dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+	else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+		/* force 100% alpha */
+		_dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+	else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
+		pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+
+	/* flag h/w flush complete */
+	if (plane->state)
+		pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error value to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane)
+		return;
+
+	pdpu = to_dpu_plane(plane);
+	pdpu->is_error = error;
+}
+
+static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	uint32_t nplanes, src_flags;
+	struct dpu_plane *pdpu;
+	struct drm_plane_state *state;
+	struct dpu_plane_state *pstate;
+	struct dpu_plane_state *old_pstate;
+	const struct dpu_format *fmt;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	int ret, min_scale;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return -EINVAL;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return -EINVAL;
+	} else if (!old_state) {
+		DPU_ERROR("invalid old state\n");
+		return -EINVAL;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	state = plane->state;
+
+	pstate = to_dpu_plane_state(state);
+
+	old_pstate = to_dpu_plane_state(old_state);
+
+	crtc = state->crtc;
+	fb = state->fb;
+	if (!crtc || !fb) {
+		DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
+				crtc != 0, fb != 0);
+		return -EINVAL;
+	}
+	fmt = to_dpu_format(msm_framebuffer_format(fb));
+	nplanes = fmt->num_planes;
+
+	memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+
+	_dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+
+	pstate->pending = true;
+
+	pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+	_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+	min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale);
+	ret = drm_atomic_helper_check_plane_state(state, crtc->state, min_scale,
+					  pdpu->pipe_sblk->maxupscale << 16,
+					  true, false);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+		return ret;
+	}
+
+	DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
+			", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
+			crtc->base.id, DRM_RECT_ARG(&state->dst),
+			(char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
+
+	pdpu->pipe_cfg.src_rect = state->src;
+
+	/* state->src is 16.16, src_rect is not */
+	pdpu->pipe_cfg.src_rect.x1 >>= 16;
+	pdpu->pipe_cfg.src_rect.x2 >>= 16;
+	pdpu->pipe_cfg.src_rect.y1 >>= 16;
+	pdpu->pipe_cfg.src_rect.y2 >>= 16;
+
+	pdpu->pipe_cfg.dst_rect = state->dst;
+
+	_dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+
+	/* override for color fill */
+	if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+		/* skip remaining processing on color fill */
+		return 0;
+	}
+
+	if (pdpu->pipe_hw->ops.setup_rects) {
+		pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+				&pdpu->pipe_cfg,
+				pstate->multirect_index);
+	}
+
+	if (pdpu->pipe_hw->ops.setup_pe &&
+			(pstate->multirect_index != DPU_SSPP_RECT_1))
+		pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+				&pstate->pixel_ext);
+
+	/*
+	 * When programmed in multirect mode, the scaler block is
+	 * bypassed. We still need to update alpha and bitwidth,
+	 * but ONLY for RECT0.
+	 */
+	if (pdpu->pipe_hw->ops.setup_scaler &&
+			pstate->multirect_index != DPU_SSPP_RECT_1)
+		pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+				&pdpu->pipe_cfg, &pstate->pixel_ext,
+				&pstate->scaler3_cfg);
+
+	if (pdpu->pipe_hw->ops.setup_multirect)
+		pdpu->pipe_hw->ops.setup_multirect(
+				pdpu->pipe_hw,
+				pstate->multirect_index,
+				pstate->multirect_mode);
+
+	if (pdpu->pipe_hw->ops.setup_format) {
+		src_flags = 0x0;
+
+		/* update format */
+		pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
+				pstate->multirect_index);
+
+		if (pdpu->pipe_hw->ops.setup_cdp) {
+			struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+			memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+
+			cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+					[DPU_PERF_CDP_USAGE_RT].rd_enable;
+			cdp_cfg->ubwc_meta_enable =
+					DPU_FORMAT_IS_UBWC(fmt);
+			cdp_cfg->tile_amortize_enable =
+					DPU_FORMAT_IS_UBWC(fmt) ||
+					DPU_FORMAT_IS_TILE(fmt);
+			cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+
+			pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+		}
+
+		/* update csc */
+		if (DPU_FORMAT_IS_YUV(fmt))
+			_dpu_plane_setup_csc(pdpu);
+		else
+			pdpu->csc_ptr = NULL;
+	}
+
+	_dpu_plane_set_qos_lut(plane, fb);
+	_dpu_plane_set_danger_lut(plane, fb);
+
+	if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+		_dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
+		_dpu_plane_set_ot_limit(plane, crtc);
+	}
+
+	_dpu_plane_set_qos_remap(plane);
+	return 0;
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	struct dpu_plane *pdpu;
+	struct drm_plane_state *state;
+	struct dpu_plane_state *pstate;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return;
+	} else if (!old_state) {
+		DPU_ERROR("invalid old state\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	state = plane->state;
+	pstate = to_dpu_plane_state(state);
+
+	trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+				pstate->multirect_mode);
+
+	pstate->pending = true;
+
+	if (is_dpu_plane_virtual(plane) &&
+			pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
+		pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
+				DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	struct dpu_plane *pdpu;
+	struct drm_plane_state *state;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pdpu->is_error = false;
+	state = plane->state;
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	if (!dpu_plane_sspp_enabled(state)) {
+		_dpu_plane_atomic_disable(plane, old_state);
+	} else {
+		int ret;
+
+		ret = dpu_plane_sspp_atomic_update(plane, old_state);
+		/* atomic_check should have ensured that this doesn't fail */
+		WARN_ON(ret < 0);
+	}
+}
+
+void dpu_plane_restore(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane || !plane->state) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	/* last plane state is same as current state */
+	dpu_plane_atomic_update(plane, plane->state);
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	if (pdpu) {
+		_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+		mutex_destroy(&pdpu->lock);
+
+		drm_plane_helper_disable(plane, NULL);
+
+		/* this will destroy the states as well */
+		drm_plane_cleanup(plane);
+
+		if (pdpu->pipe_hw)
+			dpu_hw_sspp_destroy(pdpu->pipe_hw);
+
+		kfree(pdpu);
+	}
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct dpu_plane_state *pstate;
+
+	if (!plane || !state) {
+		DPU_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != 0, state != 0);
+		return;
+	}
+
+	pstate = to_dpu_plane_state(state);
+
+	/* remove ref count for frame buffers */
+	if (state->fb)
+		drm_framebuffer_put(state->fb);
+
+	kfree(pstate);
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+	struct dpu_plane_state *old_state;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return NULL;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return NULL;
+	}
+
+	old_state = to_dpu_plane_state(plane->state);
+	pdpu = to_dpu_plane(plane);
+	pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+	if (!pstate) {
+		DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+		return NULL;
+	}
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	pstate->pending = false;
+
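+	/* the helper takes its own reference on the framebuffer for the copy */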
+	__drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+	return &pstate->base;
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	/* remove previous state, if present */
+	if (plane->state) {
+		dpu_plane_destroy_state(plane, plane->state);
+		plane->state = NULL;
+	}
+
+	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+	if (!pstate) {
+		DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+		return;
+	}
+
+	pstate->base.plane = plane;
+
+	plane->state = &pstate->base;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t _dpu_plane_danger_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct dpu_kms *kms = file->private_data;
+	struct dpu_mdss_cfg *cfg = kms->catalog;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!cfg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0; /* the end */
+
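+	/* reading "disable_danger" returns 1 when danger signaling is off */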
+	len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+	struct drm_plane *plane;
+
+	drm_for_each_plane(plane, kms->dev) {
+		if (plane->fb && plane->state) {
+			dpu_plane_danger_signal_ctrl(plane, enable);
+			DPU_DEBUG("plane:%d img:%dx%d ",
+				plane->base.id, plane->fb->width,
+				plane->fb->height);
+			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+				plane->state->src_x >> 16,
+				plane->state->src_y >> 16,
+				plane->state->src_w >> 16,
+				plane->state->src_h >> 16,
+				plane->state->crtc_x, plane->state->crtc_y,
+				plane->state->crtc_w, plane->state->crtc_h);
+		} else {
+			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+		}
+	}
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dpu_kms *kms = file->private_data;
+	struct dpu_mdss_cfg *cfg = kms->catalog;
+	int disable_panic;
+	char buf[10];
+
+	if (!cfg)
+		return -EFAULT;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtoint(buf, 0, &disable_panic))
+		return -EFAULT;
+
+	if (disable_panic) {
+		/* Disable panic signal for all active pipes */
+		DPU_DEBUG("Disabling danger:\n");
+		_dpu_plane_set_danger_state(kms, false);
+		kms->has_danger_ctrl = false;
+	} else {
+		/* Enable panic signal for all active pipes */
+		DPU_DEBUG("Enabling danger:\n");
+		kms->has_danger_ctrl = true;
+		_dpu_plane_set_danger_state(kms, true);
+	}
+
+	return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+	.open = simple_open,
+	.read = _dpu_plane_danger_read,
+	.write = _dpu_plane_danger_write,
+};
+
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_kms *kms;
+	struct msm_drm_private *priv;
+	const struct dpu_sspp_sub_blks *sblk = NULL;
+	const struct dpu_sspp_cfg *cfg = NULL;
+
+	if (!plane || !plane->dev) {
+		DPU_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+
+	if (pdpu && pdpu->pipe_hw)
+		cfg = pdpu->pipe_hw->cap;
+	if (cfg)
+		sblk = cfg->sblk;
+
+	if (!sblk)
+		return 0;
+
+	/* create overall sub-directory for the pipe */
+	pdpu->debugfs_root =
+		debugfs_create_dir(pdpu->pipe_name,
+				plane->dev->primary->debugfs_root);
+
+	if (!pdpu->debugfs_root)
+		return -ENOMEM;
+
+	/* don't error check these */
+	debugfs_create_x32("features", 0600,
+			pdpu->debugfs_root, &pdpu->features);
+
+	/* add register dump support */
+	dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
+			sblk->src_blk.base + cfg->base,
+			sblk->src_blk.len,
+			kms);
+	dpu_debugfs_create_regset32("src_blk", 0400,
+			pdpu->debugfs_root, &pdpu->debugfs_src);
+
+	if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+			cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+		dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
+				sblk->scaler_blk.base + cfg->base,
+				sblk->scaler_blk.len,
+				kms);
+		dpu_debugfs_create_regset32("scaler_blk", 0400,
+				pdpu->debugfs_root,
+				&pdpu->debugfs_scaler);
+		debugfs_create_bool("default_scaling",
+				0600,
+				pdpu->debugfs_root,
+				&pdpu->debugfs_default_scale);
+	}
+
+	if (cfg->features & BIT(DPU_SSPP_CSC) ||
+			cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
+		dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
+				sblk->csc_blk.base + cfg->base,
+				sblk->csc_blk.len,
+				kms);
+		dpu_debugfs_create_regset32("csc_blk", 0400,
+				pdpu->debugfs_root, &pdpu->debugfs_csc);
+	}
+
+	debugfs_create_u32("xin_id",
+			0400,
+			pdpu->debugfs_root,
+			(u32 *) &cfg->xin_id);
+	debugfs_create_u32("clk_ctrl",
+			0400,
+			pdpu->debugfs_root,
+			(u32 *) &cfg->clk_ctrl);
+	debugfs_create_x32("creq_vblank",
+			0600,
+			pdpu->debugfs_root,
+			(u32 *) &sblk->creq_vblank);
+	debugfs_create_x32("danger_vblank",
+			0600,
+			pdpu->debugfs_root,
+			(u32 *) &sblk->danger_vblank);
+
+	debugfs_create_file("disable_danger",
+			0600,
+			pdpu->debugfs_root,
+			kms, &dpu_plane_danger_enable);
+
+	return 0;
+}
+
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane)
+		return;
+	pdpu = to_dpu_plane(plane);
+
+	debugfs_remove_recursive(pdpu->debugfs_root);
+}
+#else
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+	return 0;
+}
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+}
+#endif
+
+static int dpu_plane_late_register(struct drm_plane *plane)
+{
+	return _dpu_plane_init_debugfs(plane);
+}
+
+static void dpu_plane_early_unregister(struct drm_plane *plane)
+{
+	_dpu_plane_destroy_debugfs(plane);
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+		.update_plane = drm_atomic_helper_update_plane,
+		.disable_plane = drm_atomic_helper_disable_plane,
+		.destroy = dpu_plane_destroy,
+		.reset = dpu_plane_reset,
+		.atomic_duplicate_state = dpu_plane_duplicate_state,
+		.atomic_destroy_state = dpu_plane_destroy_state,
+		.late_register = dpu_plane_late_register,
+		.early_unregister = dpu_plane_early_unregister,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+		.prepare_fb = dpu_plane_prepare_fb,
+		.cleanup_fb = dpu_plane_cleanup_fb,
+		.atomic_check = dpu_plane_atomic_check,
+		.atomic_update = dpu_plane_atomic_update,
+};
+
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
+{
+	return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
+}
+
+bool is_dpu_plane_virtual(struct drm_plane *plane)
+{
+	return plane ? to_dpu_plane(plane)->is_virtual : false;
+}
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs, u32 master_plane_id)
+{
+	struct drm_plane *plane = NULL, *master_plane = NULL;
+	const struct dpu_format_extended *format_list;
+	struct dpu_plane *pdpu;
+	struct msm_drm_private *priv;
+	struct dpu_kms *kms;
+	enum drm_plane_type type;
+	int zpos_max = DPU_ZPOS_MAX;
+	int ret = -EINVAL;
+
+	if (!dev) {
+		DPU_ERROR("[%u]device is NULL\n", pipe);
+		goto exit;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		DPU_ERROR("[%u]private data is NULL\n", pipe);
+		goto exit;
+	}
+
+	if (!priv->kms) {
+		DPU_ERROR("[%u]invalid KMS reference\n", pipe);
+		goto exit;
+	}
+	kms = to_dpu_kms(priv->kms);
+
+	if (!kms->catalog) {
+		DPU_ERROR("[%u]invalid catalog reference\n", pipe);
+		goto exit;
+	}
+
+	/* create and zero local structure */
+	pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+	if (!pdpu) {
+		DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* cache local stuff for later */
+	plane = &pdpu->base;
+	pdpu->pipe = pipe;
+	pdpu->is_virtual = (master_plane_id != 0);
+	INIT_LIST_HEAD(&pdpu->mplane_list);
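+	/* a virtual pipe is tracked on its master plane's multirect list */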
+	master_plane = drm_plane_find(dev, NULL, master_plane_id);
+	if (master_plane) {
+		struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
+
+		list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
+	}
+
+	/* initialize underlying h/w driver */
+	pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
+							master_plane_id != 0);
+	if (IS_ERR(pdpu->pipe_hw)) {
+		DPU_ERROR("[%u]SSPP init failed\n", pipe);
+		ret = PTR_ERR(pdpu->pipe_hw);
+		goto clean_plane;
+	} else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) {
+		DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+		goto clean_sspp;
+	}
+
+	/* cache features mask for later */
+	pdpu->features = pdpu->pipe_hw->cap->features;
+	pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
+	if (!pdpu->pipe_sblk) {
+		DPU_ERROR("[%u]invalid sblk\n", pipe);
+		goto clean_sspp;
+	}
+
+	if (!master_plane_id)
+		format_list = pdpu->pipe_sblk->format_list;
+	else
+		format_list = pdpu->pipe_sblk->virt_format_list;
+
+	pdpu->nformats = dpu_populate_formats(format_list,
+				pdpu->formats,
+				0,
+				ARRAY_SIZE(pdpu->formats));
+
+	if (!pdpu->nformats) {
+		DPU_ERROR("[%u]no valid formats for plane\n", pipe);
+		goto clean_sspp;
+	}
+
+	if (pdpu->features & BIT(DPU_SSPP_CURSOR))
+		type = DRM_PLANE_TYPE_CURSOR;
+	else if (primary_plane)
+		type = DRM_PLANE_TYPE_PRIMARY;
+	else
+		type = DRM_PLANE_TYPE_OVERLAY;
+	ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+				pdpu->formats, pdpu->nformats,
+				NULL, type, NULL);
+	if (ret)
+		goto clean_sspp;
+
+	pdpu->catalog = kms->catalog;
+
+	if (kms->catalog->mixer_count &&
+		kms->catalog->mixer[0].sblk->maxblendstages) {
+		zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
+		if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
+			zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
+	}
+
+	ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+	if (ret)
+		DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+	/* success! finalize initialization */
+	drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+	/* save user friendly pipe name for later */
+	snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
+
+	mutex_init(&pdpu->lock);
+
+	DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+					pipe, plane->base.id, master_plane_id);
+	return plane;
+
+clean_sspp:
+	if (pdpu && pdpu->pipe_hw)
+		dpu_hw_sspp_destroy(pdpu->pipe_hw);
+clean_plane:
+	kfree(pdpu);
+exit:
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644
index 0000000..f6fe6dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Define dpu extension of drm plane state object
+ * @base:	base drm plane state object
+ * @aspace:	pointer to address space for input/output buffers
+ * @input_fence:	dereferenced input fence pointer
+ * @stage:	assigned by crtc blender
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending:	whether the current update is still pending
+ * @scaler3_cfg: configuration data for scaler3
+ * @pixel_ext: configuration data for pixel extensions
+ * @cdp_cfg:	CDP configuration
+ */
+struct dpu_plane_state {
+	struct drm_plane_state base;
+	struct msm_gem_address_space *aspace;
+	void *input_fence;
+	enum dpu_stage stage;
+	uint32_t multirect_index;
+	uint32_t multirect_mode;
+	bool pending;
+
+	/* scaler configuration */
+	struct dpu_hw_scaler3_cfg scaler3_cfg;
+	struct dpu_hw_pixel_ext pixel_ext;
+
+	struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane configured on rect 0
+ * @r1: drm plane configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+	const struct drm_plane_state *r0;
+	const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+	container_of(x, struct dpu_plane_state, base)
+
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane:   Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * is_dpu_plane_virtual - check for virtual plane
+ * @plane: Pointer to DRM plane object
+ * returns: true - if the plane is virtual
+ *          false - if the plane is primary
+ */
+bool is_dpu_plane_virtual(struct drm_plane *plane);
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush mask
+ * @plane:   Pointer to DRM plane object
+ * @ctl: Pointer to control hardware
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+		u32 *flush_sspp);
+
+/**
+ * dpu_plane_restore - restore hw state if previously power collapsed
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_restore(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_kickoff - final plane operations before commit kickoff
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_kickoff(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev:   Pointer to DRM device
+ * @pipe:  dpu hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
+ *                   a regular plane initialization. A non-zero primary plane
+ *                   id will be passed for a virtual pipe initialization.
+ *
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs, u32 master_plane_id);
+
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ *				      against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_wait_input_fence - wait for input fence object
+ * @plane:   Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane:  Pointer to DRM plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+		uint32_t color, uint32_t alpha);
+
+/**
+ * dpu_plane_set_revalidate - sets revalidate flag which forces a full
+ *	validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
new file mode 100644
index 0000000..a75eebc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -0,0 +1,250 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include "dpu_power_handle.h"
+#include "dpu_trace.h"
+
+static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
+	[DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
+	[DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
+	[DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
+};
+
+const char *dpu_power_handle_get_dbus_name(u32 bus_id)
+{
+	if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
+		return data_bus_name[bus_id];
+
+	return NULL;
+}
+
+static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
+		u32 event_type)
+{
+	struct dpu_power_event *event;
+
+	list_for_each_entry(event, &phandle->event_list, list) {
+		if (event->event_type & event_type)
+			event->cb_fnc(event_type, event->usr);
+	}
+}
+
+struct dpu_power_client *dpu_power_client_create(
+	struct dpu_power_handle *phandle, char *client_name)
+{
+	struct dpu_power_client *client;
+	static u32 id;
+
+	if (!client_name || !phandle) {
+		pr_err("client name is null or invalid power data\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&phandle->phandle_lock);
+	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+	client->usecase_ndx = VOTE_INDEX_DISABLE;
+	client->id = id;
+	client->active = true;
+	pr_debug("client %s created:%pK id :%d\n", client_name,
+		client, id);
+	id++;
+	list_add(&client->list, &phandle->power_client_clist);
+	mutex_unlock(&phandle->phandle_lock);
+
+	return client;
+}
+
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+	struct dpu_power_client *client)
+{
+	if (!client || !phandle) {
+		pr_err("reg bus vote: invalid client handle\n");
+	} else if (!client->active) {
+		pr_err("dpu power deinit already done\n");
+		kfree(client);
+	} else {
+		pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+			client->name, client, client->id);
+		mutex_lock(&phandle->phandle_lock);
+		list_del_init(&client->list);
+		mutex_unlock(&phandle->phandle_lock);
+		kfree(client);
+	}
+}
+
+void dpu_power_resource_init(struct platform_device *pdev,
+	struct dpu_power_handle *phandle)
+{
+	phandle->dev = &pdev->dev;
+
+	INIT_LIST_HEAD(&phandle->power_client_clist);
+	INIT_LIST_HEAD(&phandle->event_list);
+
+	mutex_init(&phandle->phandle_lock);
+}
+
+void dpu_power_resource_deinit(struct platform_device *pdev,
+	struct dpu_power_handle *phandle)
+{
+	struct dpu_power_client *curr_client, *next_client;
+	struct dpu_power_event *curr_event, *next_event;
+
+	if (!phandle || !pdev) {
+		pr_err("invalid input param\n");
+		return;
+	}
+
+	mutex_lock(&phandle->phandle_lock);
+	list_for_each_entry_safe(curr_client, next_client,
+			&phandle->power_client_clist, list) {
+		pr_err("client:%s-%d still registered with refcount:%d\n",
+				curr_client->name, curr_client->id,
+				curr_client->refcount);
+		curr_client->active = false;
+		list_del(&curr_client->list);
+	}
+
+	list_for_each_entry_safe(curr_event, next_event,
+			&phandle->event_list, list) {
+		pr_err("event:%d, client:%s still registered\n",
+				curr_event->event_type,
+				curr_event->client_name);
+		curr_event->active = false;
+		list_del(&curr_event->list);
+	}
+	mutex_unlock(&phandle->phandle_lock);
+}
+
+int dpu_power_resource_enable(struct dpu_power_handle *phandle,
+	struct dpu_power_client *pclient, bool enable)
+{
+	bool changed = false;
+	u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+	struct dpu_power_client *client;
+
+	if (!phandle || !pclient) {
+		pr_err("invalid input argument\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&phandle->phandle_lock);
+	if (enable)
+		pclient->refcount++;
+	else if (pclient->refcount)
+		pclient->refcount--;
+
+	if (pclient->refcount)
+		pclient->usecase_ndx = VOTE_INDEX_LOW;
+	else
+		pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
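+	/* the handle votes at the highest usecase index across all clients */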
+	list_for_each_entry(client, &phandle->power_client_clist, list) {
+		if (client->usecase_ndx < VOTE_INDEX_MAX &&
+		    client->usecase_ndx > max_usecase_ndx)
+			max_usecase_ndx = client->usecase_ndx;
+	}
+
+	if (phandle->current_usecase_ndx != max_usecase_ndx) {
+		changed = true;
+		prev_usecase_ndx = phandle->current_usecase_ndx;
+		phandle->current_usecase_ndx = max_usecase_ndx;
+	}
+
+	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+		__builtin_return_address(0), changed, max_usecase_ndx,
+		pclient->name, pclient->id, enable, pclient->refcount);
+
+	if (!changed)
+		goto end;
+
+	if (enable) {
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_PRE_ENABLE);
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_POST_ENABLE);
+
+	} else {
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_PRE_DISABLE);
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_POST_DISABLE);
+	}
+
+end:
+	mutex_unlock(&phandle->phandle_lock);
+	return 0;
+}
+
+struct dpu_power_event *dpu_power_handle_register_event(
+		struct dpu_power_handle *phandle,
+		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+		void *usr, char *client_name)
+{
+	struct dpu_power_event *event;
+
+	if (!phandle) {
+		pr_err("invalid power handle\n");
+		return ERR_PTR(-EINVAL);
+	} else if (!cb_fnc || !event_type) {
+		pr_err("no callback fnc or event type\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
+	if (!event)
+		return ERR_PTR(-ENOMEM);
+
+	event->event_type = event_type;
+	event->cb_fnc = cb_fnc;
+	event->usr = usr;
+	strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+	event->active = true;
+
+	mutex_lock(&phandle->phandle_lock);
+	list_add(&event->list, &phandle->event_list);
+	mutex_unlock(&phandle->phandle_lock);
+
+	return event;
+}
+
+void dpu_power_handle_unregister_event(
+		struct dpu_power_handle *phandle,
+		struct dpu_power_event *event)
+{
+	if (!phandle || !event) {
+		pr_err("invalid phandle or event\n");
+	} else if (!event->active) {
+		pr_err("power handle deinit already done\n");
+		kfree(event);
+	} else {
+		mutex_lock(&phandle->phandle_lock);
+		list_del_init(&event->list);
+		mutex_unlock(&phandle->phandle_lock);
+		kfree(event);
+	}
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
new file mode 100644
index 0000000..344f744
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DPU_POWER_HANDLE_H_
+#define _DPU_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	0
+#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
+#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	1600000000
+#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
+
+#include "dpu_io_util.h"
+
+/* event will be triggered before power handler disable */
+#define DPU_POWER_EVENT_PRE_DISABLE	0x1
+
+/* event will be triggered after power handler disable */
+#define DPU_POWER_EVENT_POST_DISABLE	0x2
+
+/* event will be triggered before power handler enable */
+#define DPU_POWER_EVENT_PRE_ENABLE	0x4
+
+/* event will be triggered after power handler enable */
+#define DPU_POWER_EVENT_POST_ENABLE	0x8
+
+/**
+ * mdss_bus_vote_type: register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+	VOTE_INDEX_DISABLE,
+	VOTE_INDEX_LOW,
+	VOTE_INDEX_MAX,
+};
+
+/**
+ * enum dpu_power_handle_data_bus_client - type of axi bus clients
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client types
+ */
+enum dpu_power_handle_data_bus_client {
+	DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+	DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+	DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum DPU_POWER_HANDLE_DBUS_ID {
+	DPU_POWER_HANDLE_DBUS_ID_MNOC,
+	DPU_POWER_HANDLE_DBUS_ID_LLCC,
+	DPU_POWER_HANDLE_DBUS_ID_EBI,
+	DPU_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
+ * struct dpu_power_client: stores the power client for dpu driver
+ * @name:	name of the client
+ * @usecase_ndx: current regs bus vote type
+ * @refcount:	current refcount if multiple modules are using the
+ *              same client for enable/disable. Power module will
+ *              aggregate the refcount and vote accordingly for this
+ *              client.
+ * @id:		assigned during create; helps with debugging.
+ * @list:	list to attach power handle master list
+ * @ab:         arbitrated bandwidth for each bus client
+ * @ib:         instantaneous bandwidth for each bus client
+ * @active:	indicates the state of dpu power handle
+ */
+struct dpu_power_client {
+	char name[MAX_CLIENT_NAME_LEN];
+	short usecase_ndx;
+	short refcount;
+	u32 id;
+	struct list_head list;
+	u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+	u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+	bool active;
+};
+
+/*
+ * struct dpu_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback event trigger
+ * @event_type: refer to DPU_POWER_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_event {
+	char client_name[MAX_CLIENT_NAME_LEN];
+	void (*cb_fnc)(u32 event_type, void *usr);
+	void *usr;
+	u32 event_type;
+	struct list_head list;
+	bool active;
+};
+
+/**
+ * struct dpu_power_handle: power handle main struct
+ * @power_client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @current_usecase_ndx: current usecase index
+ * @event_list: current power handle event list
+ */
+struct dpu_power_handle {
+	struct list_head power_client_clist;
+	struct mutex phandle_lock;
+	struct device *dev;
+	u32 current_usecase_ndx;
+	struct list_head event_list;
+};
+
+/**
+ * dpu_power_resource_init() - initializes the dpu power handle
+ * @pdev:   platform device to search the power resources
+ * @pdata:  power handle to store the power resources
+ */
+void dpu_power_resource_init(struct platform_device *pdev,
+	struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_resource_deinit() - release the dpu power handle
+ * @pdev:   platform device for power resources
+ * @pdata:  power handle containing the resources
+ */
+void dpu_power_resource_deinit(struct platform_device *pdev,
+	struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_client_create() - create the client on power handle
+ * @pdata:  power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: pointer to the new client, or error pointer on failure.
+ */
+struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
+	char *client_name);
+
+/**
+ * dpu_power_client_destroy() - destroy the client on power handle
+ * @phandle: power handle containing the resources
+ * @client: client handle to be destroyed
+ *
+ * Return: none
+ */
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+	struct dpu_power_client *client);
+
+/**
+ * dpu_power_resource_enable() - enable/disable the power resources
+ * @pdata:  power handle containing the resources
+ * @pclient: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int dpu_power_resource_enable(struct dpu_power_handle *pdata,
+	struct dpu_power_client *pclient, bool enable);
+
+/**
+ * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle:  power handle containing the resources
+ * @pclient: client information for bandwidth control
+ * @enable: true to enable bandwidth for data bus
+ *
+ * Return: none
+ */
+void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
+		struct dpu_power_client *pclient, int enable);
+
+/**
+ * dpu_power_handle_register_event - register a callback function for an event.
+ *	Clients can register for multiple events with a single register.
+ *	Any block with access to phandle can register for the event
+ *	notification.
+ * @phandle:	power handle containing the resources
+ * @event_type:	event type to register; refer to DPU_POWER_EVENT_*
+ * @cb_fnc:	pointer to desired callback function
+ * @usr:	user pointer to pass to callback on event trigger
+ * @client_name: name of the client registering
+ *
+ * Return:	event pointer if success, or error code otherwise
+ */
+struct dpu_power_event *dpu_power_handle_register_event(
+		struct dpu_power_handle *phandle,
+		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+		void *usr, char *client_name);
+
+/**
+ * dpu_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle:	power handle containing the resources
+ * @event:	event pointer returned after power handle register
+ */
+void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
+		struct dpu_power_event *event);
+
+/**
+ * dpu_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id:	data bus identifier
+ * Return:	Pointer to name string if success; NULL otherwise
+ */
+const char *dpu_power_handle_get_dbus_name(u32 bus_id);
+
+#endif /* _DPU_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644
index 0000000..13c0a36
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_intf.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+	((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+				(t).num_comp_enc == (r).num_enc && \
+				(t).num_intf == (r).num_intf)
+
+struct dpu_rm_topology_def {
+	enum dpu_rm_topology_name top_name;
+	int num_lm;
+	int num_comp_enc;
+	int num_intf;
+	int num_ctl;
+	int needs_split_display;
+};
+
+static const struct dpu_rm_topology_def g_top_table[] = {
+	{   DPU_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
+	{   DPU_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
+	{   DPU_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 2, true  },
+	{   DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
+};
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @top_ctrl:  topology control preference from kernel client
+ * @topology:  selected topology for the display
+ * @hw_res:	   Hardware resources required as reported by the encoders
+ */
+struct dpu_rm_requirements {
+	uint64_t top_ctrl;
+	const struct dpu_rm_topology_def *topology;
+	struct dpu_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct dpu_rm_rsvp - Use Case Reservation tagging structure
+ *	Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ *	By using a tag rather than lists of pointers to the HW blocks used,
+ *	we can avoid some list management, since we don't know how many blocks
+ *	of each type a given use case may require.
+ * @list:	List head for list of all reservations
+ * @seq:	Global RSVP sequence number for debugging, especially for
+ *		differentiating different allocations for the same encoder.
+ * @enc_id:	Reservations are tracked by Encoder DRM object ID.
+ *		CRTCs may be connected to multiple Encoders.
+ *		An encoder or connector id identifies the display path.
+ * @topology:	DRM<->HW topology use case
+ */
+struct dpu_rm_rsvp {
+	struct list_head list;
+	uint32_t seq;
+	uint32_t enc_id;
+	enum dpu_rm_topology_name topology;
+};
+
+/**
+ * struct dpu_rm_hw_blk - hardware block tracking list member
+ * @list:	List head for list of all hardware blocks tracking items
+ * @rsvp:	Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt:	Temporary pointer used during reservation to the incoming
+ *		request. Will be swapped into rsvp if proposal is accepted
+ * @type:	Type of hardware block this structure tracks
+ * @id:		Hardware ID number, within its own space, i.e. LM_X
+ * @catalog:	Pointer to the hardware catalog entry for this block
+ * @hw:		Pointer to the hardware register access object for this block
+ */
+struct dpu_rm_hw_blk {
+	struct list_head list;
+	struct dpu_rm_rsvp *rsvp;
+	struct dpu_rm_rsvp *rsvp_nxt;
+	enum dpu_hw_blk_type type;
+	uint32_t id;
+	struct dpu_hw_blk *hw;
+};
+
+/**
+ * dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
+ */
+enum dpu_rm_dbg_rsvp_stage {
+	DPU_RM_STAGE_BEGIN,
+	DPU_RM_STAGE_AFTER_CLEAR,
+	DPU_RM_STAGE_AFTER_RSVPNEXT,
+	DPU_RM_STAGE_FINAL
+};
+
+static void _dpu_rm_print_rsvps(
+		struct dpu_rm *rm,
+		enum dpu_rm_dbg_rsvp_stage stage)
+{
+	struct dpu_rm_rsvp *rsvp;
+	struct dpu_rm_hw_blk *blk;
+	enum dpu_hw_blk_type type;
+
+	DPU_DEBUG("%d\n", stage);
+
+	list_for_each_entry(rsvp, &rm->rsvps, list) {
+		DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+			      rsvp->enc_id, rsvp->topology);
+	}
+
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (!blk->rsvp && !blk->rsvp_nxt)
+				continue;
+
+			DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
+				(blk->rsvp) ? blk->rsvp->seq : 0,
+				(blk->rsvp) ? blk->rsvp->enc_id : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+				blk->type, blk->id);
+		}
+	}
+}
+
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
+{
+	return rm->hw_mdp;
+}
+
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology)
+{
+	int i;
+
+	for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
+		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+			return g_top_table[i].top_name;
+
+	return DPU_RM_TOPOLOGY_NONE;
+}
+
+void dpu_rm_init_hw_iter(
+		struct dpu_rm_hw_iter *iter,
+		uint32_t enc_id,
+		enum dpu_hw_blk_type type)
+{
+	memset(iter, 0, sizeof(*iter));
+	iter->enc_id = enc_id;
+	iter->type = type;
+}
+
+static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+	struct list_head *blk_list;
+
+	if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
+		DPU_ERROR("invalid rm\n");
+		return false;
+	}
+
+	i->hw = NULL;
+	blk_list = &rm->hw_blks[i->type];
+
+	if (i->blk && (&i->blk->list == blk_list)) {
+		DPU_DEBUG("attempt resume iteration past last\n");
+		return false;
+	}
+
+	i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+	list_for_each_entry_continue(i->blk, blk_list, list) {
+		struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
+
+		if (i->blk->type != i->type) {
+			DPU_ERROR("found incorrect block type %d on %d list\n",
+					i->blk->type, i->type);
+			return false;
+		}
+
+		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+			i->hw = i->blk->hw;
+			DPU_DEBUG("found type %d id %d for enc %d\n",
+					i->type, i->blk->id, i->enc_id);
+			return true;
+		}
+	}
+
+	DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+	return false;
+}
+
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+	bool ret;
+
+	mutex_lock(&rm->rm_lock);
+	ret = _dpu_rm_get_hw_locked(rm, i);
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
+
+static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
+{
+	switch (type) {
+	case DPU_HW_BLK_LM:
+		dpu_hw_lm_destroy(hw);
+		break;
+	case DPU_HW_BLK_CTL:
+		dpu_hw_ctl_destroy(hw);
+		break;
+	case DPU_HW_BLK_CDM:
+		dpu_hw_cdm_destroy(hw);
+		break;
+	case DPU_HW_BLK_PINGPONG:
+		dpu_hw_pingpong_destroy(hw);
+		break;
+	case DPU_HW_BLK_INTF:
+		dpu_hw_intf_destroy(hw);
+		break;
+	case DPU_HW_BLK_SSPP:
+		/* SSPPs are not managed by the resource manager */
+	case DPU_HW_BLK_TOP:
+		/* Top is a singleton, not managed in hw_blks list */
+	case DPU_HW_BLK_MAX:
+	default:
+		DPU_ERROR("unsupported block type %d\n", type);
+		break;
+	}
+}
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+	struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+	struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
+	enum dpu_hw_blk_type type;
+
+	if (!rm) {
+		DPU_ERROR("invalid rm\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+		list_del(&rsvp_cur->list);
+		kfree(rsvp_cur);
+	}
+
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+				list) {
+			list_del(&hw_cur->list);
+			_dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+			kfree(hw_cur);
+		}
+	}
+
+	dpu_hw_mdp_destroy(rm->hw_mdp);
+	rm->hw_mdp = NULL;
+
+	mutex_destroy(&rm->rm_lock);
+
+	return 0;
+}
+
+static int _dpu_rm_hw_blk_create(
+		struct dpu_rm *rm,
+		struct dpu_mdss_cfg *cat,
+		void __iomem *mmio,
+		enum dpu_hw_blk_type type,
+		uint32_t id,
+		void *hw_catalog_info)
+{
+	struct dpu_rm_hw_blk *blk;
+	struct dpu_hw_mdp *hw_mdp;
+	void *hw;
+
+	hw_mdp = rm->hw_mdp;
+
+	switch (type) {
+	case DPU_HW_BLK_LM:
+		hw = dpu_hw_lm_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_CTL:
+		hw = dpu_hw_ctl_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_CDM:
+		hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
+		break;
+	case DPU_HW_BLK_PINGPONG:
+		hw = dpu_hw_pingpong_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_INTF:
+		hw = dpu_hw_intf_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_SSPP:
+		/* SSPPs are not managed by the resource manager */
+	case DPU_HW_BLK_TOP:
+		/* Top is a singleton, not managed in hw_blks list */
+	case DPU_HW_BLK_MAX:
+	default:
+		DPU_ERROR("unsupported block type %d\n", type);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_OR_NULL(hw)) {
+		DPU_ERROR("failed hw object creation: type %d, err %ld\n",
+				type, PTR_ERR(hw));
+		return -EFAULT;
+	}
+
+	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+	if (!blk) {
+		_dpu_rm_hw_destroy(type, hw);
+		return -ENOMEM;
+	}
+
+	blk->type = type;
+	blk->id = id;
+	blk->hw = hw;
+	list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+	return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+		struct dpu_mdss_cfg *cat,
+		void __iomem *mmio,
+		struct drm_device *dev)
+{
+	int rc, i;
+	enum dpu_hw_blk_type type;
+
+	if (!rm || !cat || !mmio || !dev) {
+		DPU_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	/* Clear, setup lists */
+	memset(rm, 0, sizeof(*rm));
+
+	mutex_init(&rm->rm_lock);
+
+	INIT_LIST_HEAD(&rm->rsvps);
+	for (type = 0; type < DPU_HW_BLK_MAX; type++)
+		INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+	rm->dev = dev;
+
+	/* Some of the sub-blocks require an mdptop to be created */
+	rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
+	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+		rc = rm->hw_mdp ? PTR_ERR(rm->hw_mdp) : -EFAULT;
+		rm->hw_mdp = NULL;
+		DPU_ERROR("failed: mdp hw not available\n");
+		goto fail;
+	}
+
+	/* Interrogate HW catalog and create tracking items for hw blocks */
+	for (i = 0; i < cat->mixer_count; i++) {
+		struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+		if (lm->pingpong == PINGPONG_MAX) {
+			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+			continue;
+		}
+
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
+				cat->mixer[i].id, &cat->mixer[i]);
+		if (rc) {
+			DPU_ERROR("failed: lm hw not available\n");
+			goto fail;
+		}
+
+		if (!rm->lm_max_width) {
+			rm->lm_max_width = lm->sblk->maxwidth;
+		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
+			/*
+			 * Don't expect to have hw where lm max widths differ.
+			 * If found, take the min.
+			 */
+			DPU_ERROR("unsupported: lm maxwidth differs\n");
+			if (rm->lm_max_width > lm->sblk->maxwidth)
+				rm->lm_max_width = lm->sblk->maxwidth;
+		}
+	}
+
+	for (i = 0; i < cat->pingpong_count; i++) {
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
+				cat->pingpong[i].id, &cat->pingpong[i]);
+		if (rc) {
+			DPU_ERROR("failed: pp hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->intf_count; i++) {
+		if (cat->intf[i].type == INTF_NONE) {
+			DPU_DEBUG("skip intf %d with type none\n", i);
+			continue;
+		}
+
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
+				cat->intf[i].id, &cat->intf[i]);
+		if (rc) {
+			DPU_ERROR("failed: intf hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->ctl_count; i++) {
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
+				cat->ctl[i].id, &cat->ctl[i]);
+		if (rc) {
+			DPU_ERROR("failed: ctl hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->cdm_count; i++) {
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
+				cat->cdm[i].id, &cat->cdm[i]);
+		if (rc) {
+			DPU_ERROR("failed: cdm hw not available\n");
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	dpu_rm_destroy(rm);
+
+	return rc;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ *	proposed use case requirements, incl. hardwired dependent blocks like
+ *	pingpong
+ * @rm: dpu resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer; the function checks if lm, and all other
+ *      hardwired blocks connected to the lm (pp), are available and appropriate
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ *      NULL if pp was not available, or not matching requirements.
+ * @primary_lm: if non-null, this function checks if lm is compatible with
+ *              primary_lm as well as satisfying all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_rm_requirements *reqs,
+		struct dpu_rm_hw_blk *lm,
+		struct dpu_rm_hw_blk **pp,
+		struct dpu_rm_hw_blk *primary_lm)
+{
+	const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
+	struct dpu_rm_hw_iter iter;
+
+	*pp = NULL;
+
+	DPU_DEBUG("check lm %d pp %d\n",
+			   lm_cfg->id, lm_cfg->pingpong);
+
+	/* Check if this layer mixer is a peer of the proposed primary LM */
+	if (primary_lm) {
+		const struct dpu_lm_cfg *prim_lm_cfg =
+				to_dpu_hw_mixer(primary_lm->hw)->cap;
+
+		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+			DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+					prim_lm_cfg->id);
+			return false;
+		}
+	}
+
+	/* Already reserved? */
+	if (RESERVED_BY_OTHER(lm, rsvp)) {
+		DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+		return false;
+	}
+
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		if (iter.blk->id == lm_cfg->pingpong) {
+			*pp = iter.blk;
+			break;
+		}
+	}
+
+	if (!*pp) {
+		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+		return false;
+	}
+
+	if (RESERVED_BY_OTHER(*pp, rsvp)) {
+		DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
+				(*pp)->id);
+		return false;
+	}
+
+	return true;
+}
+
+static int _dpu_rm_reserve_lms(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_rm_requirements *reqs)
+{
+	struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
+	struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
+	struct dpu_rm_hw_iter iter_i, iter_j;
+	int lm_count = 0;
+	int i, rc = 0;
+
+	if (!reqs->topology->num_lm) {
+		DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+		return -EINVAL;
+	}
+
+	/* Find a primary mixer */
+	dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
+	while (lm_count != reqs->topology->num_lm &&
+			_dpu_rm_get_hw_locked(rm, &iter_i)) {
+		memset(&lm, 0, sizeof(lm));
+		memset(&pp, 0, sizeof(pp));
+
+		lm_count = 0;
+		lm[lm_count] = iter_i.blk;
+
+		if (!_dpu_rm_check_lm_and_get_connected_blks(
+				rm, rsvp, reqs, lm[lm_count],
+				&pp[lm_count], NULL))
+			continue;
+
+		++lm_count;
+
+		/* Valid primary mixer found, find matching peers */
+		dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+
+		while (lm_count != reqs->topology->num_lm &&
+				_dpu_rm_get_hw_locked(rm, &iter_j)) {
+			if (iter_i.blk == iter_j.blk)
+				continue;
+
+			if (!_dpu_rm_check_lm_and_get_connected_blks(
+					rm, rsvp, reqs, iter_j.blk,
+					&pp[lm_count], iter_i.blk))
+				continue;
+
+			lm[lm_count] = iter_j.blk;
+			++lm_count;
+		}
+	}
+
+	if (lm_count != reqs->topology->num_lm) {
+		DPU_DEBUG("unable to find appropriate mixers\n");
+		return -ENAVAIL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(lm); i++) {
+		if (!lm[i])
+			break;
+
+		lm[i]->rsvp_nxt = rsvp;
+		pp[i]->rsvp_nxt = rsvp;
+
+		trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
+					 pp[i]->id);
+	}
+
+	return rc;
+}
+
+static int _dpu_rm_reserve_ctls(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		const struct dpu_rm_topology_def *top)
+{
+	struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
+	struct dpu_rm_hw_iter iter;
+	int i = 0;
+
+	memset(&ctls, 0, sizeof(ctls));
+
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
+		unsigned long features = ctl->caps->features;
+		bool has_split_display;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp))
+			continue;
+
+		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+
+		if (top->needs_split_display != has_split_display)
+			continue;
+
+		ctls[i] = iter.blk;
+		DPU_DEBUG("ctl %d match\n", iter.blk->id);
+
+		if (++i == top->num_ctl)
+			break;
+	}
+
+	if (i != top->num_ctl)
+		return -ENAVAIL;
+
+	for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+		ctls[i]->rsvp_nxt = rsvp;
+		trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
+					  rsvp->enc_id);
+	}
+
+	return 0;
+}
+
+static int _dpu_rm_reserve_cdm(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		uint32_t id,
+		enum dpu_hw_blk_type type)
+{
+	struct dpu_rm_hw_iter iter;
+
+	DRM_DEBUG_KMS("type %d id %d\n", type, id);
+
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
+		const struct dpu_cdm_cfg *caps = cdm->caps;
+		bool match = false;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp))
+			continue;
+
+		if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
+			match = test_bit(id, &caps->intf_connect);
+
+		DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
+			      iter.blk->type, iter.blk->id, rsvp->enc_id,
+			      caps->intf_connect, match);
+
+		if (!match)
+			continue;
+
+		trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
+					 rsvp->enc_id);
+		iter.blk->rsvp_nxt = rsvp;
+		break;
+	}
+
+	if (!iter.hw) {
+		DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+		return -ENAVAIL;
+	}
+
+	return 0;
+}
+
+static int _dpu_rm_reserve_intf(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		uint32_t id,
+		enum dpu_hw_blk_type type,
+		bool needs_cdm)
+{
+	struct dpu_rm_hw_iter iter;
+	int ret = 0;
+
+	/* Find the block entry in the rm, and note the reservation */
+	dpu_rm_init_hw_iter(&iter, 0, type);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		if (iter.blk->id != id)
+			continue;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+			DPU_ERROR("type %d id %d already reserved\n", type, id);
+			return -ENAVAIL;
+		}
+
+		iter.blk->rsvp_nxt = rsvp;
+		trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
+					  rsvp->enc_id);
+		break;
+	}
+
+	/* Shouldn't happen since intfs are fixed at probe */
+	if (!iter.hw) {
+		DPU_ERROR("couldn't find type %d id %d\n", type, id);
+		return -EINVAL;
+	}
+
+	if (needs_cdm)
+		ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
+
+	return ret;
+}
+
+static int _dpu_rm_reserve_intf_related_hw(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_encoder_hw_resources *hw_res)
+{
+	int i, ret = 0;
+	u32 id;
+
+	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+		if (hw_res->intfs[i] == INTF_MODE_NONE)
+			continue;
+		id = i + INTF_0;
+		ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+				DPU_HW_BLK_INTF, hw_res->needs_cdm);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int _dpu_rm_make_next_rsvp(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_rm_requirements *reqs)
+{
+	int ret;
+	struct dpu_rm_topology_def topology;
+
+	/* Create reservation info, tag reserved blocks with it as we go */
+	rsvp->seq = ++rm->rsvp_next_seq;
+	rsvp->enc_id = enc->base.id;
+	rsvp->topology = reqs->topology->top_name;
+	list_add_tail(&rsvp->list, &rm->rsvps);
+
+	ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+	if (ret) {
+		DPU_ERROR("unable to find appropriate mixers\n");
+		return ret;
+	}
+
+	/*
+	 * Do assignment preferring to give away low-resource CTLs first:
+	 * - Check mixers without Split Display
+	 * - Only then allow to grab from CTLs with split display capability
+	 */
+	ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
+	if (ret && !reqs->topology->needs_split_display) {
+		memcpy(&topology, reqs->topology, sizeof(topology));
+		topology.needs_split_display = true;
+		ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
+	}
+	}
+	if (ret) {
+		DPU_ERROR("unable to find appropriate CTL\n");
+		return ret;
+	}
+
+	/* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
+	ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+
+	return ret;
+}
+
+static int _dpu_rm_populate_requirements(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct dpu_rm_requirements *reqs,
+		struct msm_display_topology req_topology)
+{
+	int i;
+
+	memset(reqs, 0, sizeof(*reqs));
+
+	dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+	for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
+		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+					req_topology)) {
+			reqs->topology = &g_top_table[i];
+			break;
+		}
+	}
+
+	if (!reqs->topology) {
+		DPU_ERROR("invalid topology for the display\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Set the requirement based on caps if not set from user space.
+	 * This ensures that an LM tied to a DS block is selected.
+	 * Currently, DS blocks are tied to LM 0 and LM 1 (primary display).
+	 */
+	if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
+		conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
+		reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
+
+	DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
+		      reqs->hw_res.display_num_of_h_tiles);
+	DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+		      reqs->topology->num_lm, reqs->topology->num_ctl,
+		      reqs->topology->top_name,
+		      reqs->topology->needs_split_display);
+
+	return 0;
+}
+
+static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc)
+{
+	struct dpu_rm_rsvp *i;
+
+	if (!rm || !enc) {
+		DPU_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	if (list_empty(&rm->rsvps))
+		return NULL;
+
+	list_for_each_entry(i, &rm->rsvps, list)
+		if (i->enc_id == enc->base.id)
+			return i;
+
+	return NULL;
+}
+
+static struct drm_connector *_dpu_rm_get_connector(
+		struct drm_encoder *enc)
+{
+	struct drm_connector *conn = NULL;
+	struct list_head *connector_list =
+			&enc->dev->mode_config.connector_list;
+
+	list_for_each_entry(conn, connector_list, head)
+		if (conn->encoder == enc)
+			return conn;
+
+	return NULL;
+}
+
+/**
+ * _dpu_rm_release_rsvp - release the resources tagged by a reservation and
+ *	free the reservation itself
+ * @rm:	DPU Resource Manager handle
+ * @rsvp:	RSVP pointer to release resources for and free
+ * @conn:	DRM connector associated with the reservation
+ */
+static void _dpu_rm_release_rsvp(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct drm_connector *conn)
+{
+	struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
+	struct dpu_rm_hw_blk *blk;
+	enum dpu_hw_blk_type type;
+
+	if (!rsvp)
+		return;
+
+	DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+	list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+		if (rsvp == rsvp_c) {
+			list_del(&rsvp_c->list);
+			break;
+		}
+	}
+
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (blk->rsvp == rsvp) {
+				blk->rsvp = NULL;
+				DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
+						rsvp->seq, rsvp->enc_id,
+						blk->type, blk->id);
+			}
+			if (blk->rsvp_nxt == rsvp) {
+				blk->rsvp_nxt = NULL;
+				DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
+						rsvp->seq, rsvp->enc_id,
+						blk->type, blk->id);
+			}
+		}
+	}
+
+	kfree(rsvp);
+}
+
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+{
+	struct dpu_rm_rsvp *rsvp;
+	struct drm_connector *conn;
+
+	if (!rm || !enc) {
+		DPU_ERROR("invalid params\n");
+		return;
+	}
+
+	mutex_lock(&rm->rm_lock);
+
+	rsvp = _dpu_rm_get_rsvp(rm, enc);
+	if (!rsvp) {
+		DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+		goto end;
+	}
+
+	conn = _dpu_rm_get_connector(enc);
+	if (!conn) {
+		DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
+		goto end;
+	}
+
+	_dpu_rm_release_rsvp(rm, rsvp, conn);
+end:
+	mutex_unlock(&rm->rm_lock);
+}
+
+static int _dpu_rm_commit_rsvp(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_rm_hw_blk *blk;
+	enum dpu_hw_blk_type type;
+	int ret = 0;
+
+	/* Swap next rsvp to be the active */
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (blk->rsvp_nxt) {
+				blk->rsvp = blk->rsvp_nxt;
+				blk->rsvp_nxt = NULL;
+			}
+		}
+	}
+
+	if (!ret)
+		DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
+			      rsvp->topology);
+
+	return ret;
+}
+
+int dpu_rm_reserve(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct msm_display_topology topology,
+		bool test_only)
+{
+	struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+	struct dpu_rm_requirements reqs;
+	int ret;
+
+	if (!rm || !enc || !crtc_state || !conn_state) {
+		DPU_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	/* Check if this is just a page-flip */
+	if (!drm_atomic_crtc_needs_modeset(crtc_state))
+		return 0;
+
+	DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+		      conn_state->connector->base.id, enc->base.id,
+		      crtc_state->crtc->base.id, test_only);
+
+	mutex_lock(&rm->rm_lock);
+
+	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
+
+	ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
+			conn_state, &reqs, topology);
+	if (ret) {
+		DPU_ERROR("failed to populate hw requirements\n");
+		goto end;
+	}
+
+	/*
+	 * We only support one active reservation per hw block. But to implement
+	 * transactional semantics for test-only, and to allow failure while
+	 * modifying an existing reservation, over the course of this
+	 * function we can have two reservations:
+	 * Current: Existing reservation
+	 * Next: Proposed reservation. The proposed reservation may fail, or may
+	 *       be discarded if in test-only mode.
+	 * If reservation is successful, and we're not in test-only, then we
+	 * replace the current with the next.
+	 */
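+	/*
+	 * Editorial sketch of that transaction, not part of the original
+	 * patch: blocks are tagged through blk->rsvp_nxt while the proposal
+	 * is checked, and only promoted to blk->rsvp on commit:
+	 *
+	 *	propose:   blk->rsvp_nxt = rsvp_nxt;
+	 *	fail:      _dpu_rm_release_rsvp(rm, rsvp_nxt, conn);
+	 *	test-only: _dpu_rm_release_rsvp(rm, rsvp_nxt, conn); (no LOCK)
+	 *	commit:    blk->rsvp = blk->rsvp_nxt; blk->rsvp_nxt = NULL;
+	 */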
+	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+	if (!rsvp_nxt) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
+
+	/*
+	 * User can request that we clear out any reservation during the
+	 * atomic_check phase by using this CLEAR bit
+	 */
+	if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+		DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+				rsvp_cur->seq, rsvp_cur->enc_id);
+		_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+		rsvp_cur = NULL;
+		_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
+	}
+
+	/* Check the proposed reservation, store it in hw's "next" field */
+	ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+			rsvp_nxt, &reqs);
+
+	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
+
+	if (ret) {
+		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+		_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+	} else if (test_only && !RM_RQ_LOCK(&reqs)) {
+		/*
+		 * Normally, if test_only, test the reservation and then undo it.
+		 * However, if the user requests LOCK, then keep the reservation
+		 * made during the atomic_check phase.
+		 */
+		DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+				rsvp_nxt->seq, rsvp_nxt->enc_id);
+		_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+	} else {
+		if (test_only && RM_RQ_LOCK(&reqs))
+			DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+					rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+		_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+		ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+	}
+
+	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
+
+end:
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 0000000..ffd1841
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+/**
+ * enum dpu_rm_topology_name - HW resource use case in use by connector
+ * @DPU_RM_TOPOLOGY_NONE:                 No topology in use currently
+ * @DPU_RM_TOPOLOGY_SINGLEPIPE:           1 LM, 1 PP, 1 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE:             2 LM, 2 PP, 2 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE:     2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum dpu_rm_topology_name {
+	DPU_RM_TOPOLOGY_NONE = 0,
+	DPU_RM_TOPOLOGY_SINGLEPIPE,
+	DPU_RM_TOPOLOGY_DUALPIPE,
+	DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+	DPU_RM_TOPOLOGY_MAX,
+};
+
+/**
+ * enum dpu_rm_topology_control - topology control preferences from the client
+ * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ *                              test, reserve the resources for this display.
+ *                              Normal behavior would not impact the reservation
+ *                              list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ *                               release any reservation held by this display.
+ *                               Normal behavior would not impact the
+ *                               reservation list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_DS: Require layer mixers with DS capabilities
+ */
+enum dpu_rm_topology_control {
+	DPU_RM_TOPCTL_RESERVE_LOCK,
+	DPU_RM_TOPCTL_RESERVE_CLEAR,
+	DPU_RM_TOPCTL_DS,
+};
+
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ *	list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct dpu_rm {
+	struct drm_device *dev;
+	struct list_head rsvps;
+	struct list_head hw_blks[DPU_HW_BLK_MAX];
+	struct dpu_hw_mdp *hw_mdp;
+	uint32_t lm_max_width;
+	uint32_t rsvp_next_seq;
+	struct mutex rm_lock;
+};
+
+/**
+ *  struct dpu_rm_hw_blk - resource manager internal structure
+ *	forward declaration for single iterator definition without void pointer
+ */
+struct dpu_rm_hw_blk;
+
+/**
+ * struct dpu_rm_hw_iter - iterator for use with dpu_rm
+ * @hw: dpu_hw object requested, or NULL on failure
+ * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct dpu_rm_hw_iter {
+	void *hw;
+	struct dpu_rm_hw_blk *blk;
+	uint32_t enc_id;
+	enum dpu_hw_blk_type type;
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ *	for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+		struct dpu_mdss_cfg *cat,
+		void __iomem *mmio,
+		struct drm_device *dev);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
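+
+/*
+ * Editorial lifecycle sketch, not part of the original patch; the member
+ * names on "dpu_kms" are assumptions for illustration:
+ *
+ *	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+ *			 dpu_kms->dev);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	dpu_rm_destroy(&dpu_kms->rm);
+ */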
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ *	the use connections and user requirements, specified through related
+ *	topology control properties, and reserve hardware blocks to that
+ *	display chain.
+ *	HW blocks can then be accessed through dpu_rm_get_* functions.
+ *	HW Reservations should be released via dpu_rm_release_hw.
+ * @rm: DPU Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @topology: topology info for the display
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+		struct drm_encoder *drm_enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct msm_display_topology topology,
+		bool test_only);
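+
+/*
+ * Editorial usage sketch, not part of the original patch: an encoder's
+ * atomic_check path would typically probe availability with test_only set,
+ * and repeat the call with test_only cleared when actually enabling;
+ * "dpu_kms" is an assumed container for the struct dpu_rm:
+ *
+ *	ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, conn_state,
+ *			     topology, true);
+ */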
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ *	HW blocks previously reserved for that use case.
+ * @rm: DPU Resource Manager handle
+ * @enc: DRM Encoder handle
+ */
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+
+/**
+ * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
+ *	This is never reserved, and is usable by any display.
+ * @rm: DPU Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ *	using dpu_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void dpu_rm_init_hw_iter(
+		struct dpu_rm_hw_iter *iter,
+		uint32_t enc_id,
+		enum dpu_hw_blk_type type);
+
+/**
+ * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ *	Meant to do a single pass through the hardware list to iteratively
+ *	retrieve hardware blocks of a given type for a given encoder.
+ *	Initialize an iterator object.
+ *	Set hw block type of interest. Set encoder id of interest, 0 for any.
+ *	Function returns first hw of type for that encoder.
+ *	Subsequent calls will return the next reserved hw of that type in-order.
+ *	Iterator HW pointer will be null on failure to find hw.
+ * @rm: DPU Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
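+
+/*
+ * Editorial usage sketch, not part of the original patch, assuming CTL
+ * blocks were already reserved for encoder id "enc_id":
+ *
+ *	struct dpu_rm_hw_iter iter;
+ *
+ *	dpu_rm_init_hw_iter(&iter, enc_id, DPU_HW_BLK_CTL);
+ *	while (dpu_rm_get_hw(rm, &iter)) {
+ *		struct dpu_hw_ctl *ctl = (struct dpu_hw_ctl *)iter.hw;
+ *		...
+ *	}
+ */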
+
+/**
+ * dpu_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int dpu_rm_check_property_topctl(uint64_t val);
+
+/**
+ * dpu_rm_get_topology_name - returns the name of the given topology
+ *                            definition
+ * @topology: topology definition
+ * @Return: name of the topology
+ */
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology);
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 0000000..ae0ca50
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,1007 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+	TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+		u32 lut, u32 lut_usage),
+	TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(bool, rt)
+			__field(u32, fl)
+			__field(u64, lut)
+			__field(u32, lut_usage)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->rt = rt;
+			__entry->fl = fl;
+			__entry->lut = lut;
+			__entry->lut_usage = lut_usage;
+	),
+	TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+			__entry->pnum, __entry->fmt,
+			__entry->rt, __entry->fl,
+			__entry->lut, __entry->lut_usage)
+);
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+		u32 safe_lut),
+	TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(u32, mode)
+			__field(u32, danger_lut)
+			__field(u32, safe_lut)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->mode = mode;
+			__entry->danger_lut = danger_lut;
+			__entry->safe_lut = safe_lut;
+	),
+	TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+			__entry->pnum, __entry->fmt,
+			__entry->mode, __entry->danger_lut,
+			__entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+	TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, xin_id)
+			__field(u32, rd_lim)
+			__field(u32, vbif_idx)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->xin_id = xin_id;
+			__entry->rd_lim = rd_lim;
+			__entry->vbif_idx = vbif_idx;
+	),
+	TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+			__entry->pnum, __entry->xin_id, __entry->rd_lim,
+			__entry->vbif_idx)
+)
+
+TRACE_EVENT(dpu_perf_update_bus,
+	TP_PROTO(int client, unsigned long long ab_quota,
+	unsigned long long ib_quota),
+	TP_ARGS(client, ab_quota, ib_quota),
+	TP_STRUCT__entry(
+			__field(int, client)
+			__field(u64, ab_quota)
+			__field(u64, ib_quota)
+	),
+	TP_fast_assign(
+			__entry->client = client;
+			__entry->ab_quota = ab_quota;
+			__entry->ib_quota = ib_quota;
+	),
+	TP_printk("Request client:%d ab=%llu ib=%llu",
+			__entry->client,
+			__entry->ab_quota,
+			__entry->ib_quota)
+)
+
+
+TRACE_EVENT(dpu_cmd_release_bw,
+	TP_PROTO(u32 crtc_id),
+	TP_ARGS(crtc_id),
+	TP_STRUCT__entry(
+			__field(u32, crtc_id)
+	),
+	TP_fast_assign(
+			__entry->crtc_id = crtc_id;
+	),
+	TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+)
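+
+/*
+ * Editorial note, not part of the original patch: the "B|pid|name" /
+ * "E|pid|name" strings above follow the Android systrace convention, so a
+ * begin/end pair brackets a duration; "dpu_kickoff" is a made-up label:
+ *
+ *	trace_tracing_mark_write(current->tgid, "dpu_kickoff", true);
+ *	...measured work...
+ *	trace_tracing_mark_write(current->tgid, "dpu_kickoff", false);
+ */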
+
+TRACE_EVENT(dpu_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(dpu_perf_crtc_update,
+	TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+			u64 bw_ctl_ebi, u32 core_clk_rate,
+			bool stop_req, u32 update_bus, u32 update_clk),
+	TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
+		stop_req, update_bus, update_clk),
+	TP_STRUCT__entry(
+			__field(u32, crtc)
+			__field(u64, bw_ctl_mnoc)
+			__field(u64, bw_ctl_llcc)
+			__field(u64, bw_ctl_ebi)
+			__field(u32, core_clk_rate)
+			__field(bool, stop_req)
+			__field(u32, update_bus)
+			__field(u32, update_clk)
+	),
+	TP_fast_assign(
+			__entry->crtc = crtc;
+			__entry->bw_ctl_mnoc = bw_ctl_mnoc;
+			__entry->bw_ctl_llcc = bw_ctl_llcc;
+			__entry->bw_ctl_ebi = bw_ctl_ebi;
+			__entry->core_clk_rate = core_clk_rate;
+			__entry->stop_req = stop_req;
+			__entry->update_bus = update_bus;
+			__entry->update_clk = update_clk;
+	),
+	TP_printk(
+		"crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+			__entry->crtc,
+			__entry->bw_ctl_mnoc,
+			__entry->bw_ctl_llcc,
+			__entry->bw_ctl_ebi,
+			__entry->core_clk_rate,
+			__entry->stop_req,
+			__entry->update_bus,
+			__entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_irq_template,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_intr_idx,	intr_idx	)
+		__field(	int,			hw_idx		)
+		__field(	int,			irq_idx		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intr_idx = intr_idx;
+		__entry->hw_idx = hw_idx;
+		__entry->irq_idx = irq_idx;
+	),
+	TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
+		  __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+		  __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_intr_idx,	intr_idx	)
+		__field(	int,			hw_idx		)
+		__field(	int,			irq_idx		)
+		__field(	enum dpu_pingpong,	pp_idx		)
+		__field(	int,			atomic_cnt	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intr_idx = intr_idx;
+		__entry->hw_idx = hw_idx;
+		__entry->irq_idx = irq_idx;
+		__entry->pp_idx = pp_idx;
+		__entry->atomic_cnt = atomic_cnt;
+	),
+	TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+		  __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+		  __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+	),
+	TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+	TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+	TP_ARGS(drm_id, hdisplay, vdisplay),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	int,			hdisplay	)
+		__field(	int,			vdisplay	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->hdisplay = hdisplay;
+		__entry->vdisplay = vdisplay;
+	),
+	TP_printk("id=%u, mode=%dx%d",
+		  __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+	TP_PROTO(uint32_t drm_id, int val),
+	TP_ARGS(drm_id, val),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id	)
+		__field(	int,		val	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->val = val;
+	),
+	TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+	TP_PROTO(uint32_t drm_id, int count),
+	TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+	TP_PROTO(uint32_t drm_id, int ctl_idx),
+	TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+	TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
+	TP_ARGS(drm_id, flags, private_flags),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	unsigned int,		flags		)
+		__field(	int,			private_flags	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->flags = flags;
+		__entry->private_flags = private_flags;
+	),
+	TP_printk("id=%u, flags=%u, private_flags=%d",
+		  __entry->drm_id, __entry->flags, __entry->private_flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	bool,			enable		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->enable = enable;
+	),
+	TP_printk("id=%u, enable=%s",
+		  __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+	TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+		 int rc_state, const char *stage),
+	TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id			)
+		__field(	u32,		sw_event		)
+		__field(	bool,		idle_pc_supported	)
+		__field(	int,		rc_state		)
+		__string(	stage_str,	stage			)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->sw_event = sw_event;
+		__entry->idle_pc_supported = idle_pc_supported;
+		__entry->rc_state = rc_state;
+		__assign_str(stage_str, stage);
+	),
+	TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d",
+		  __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+		  __entry->idle_pc_supported ? "true" : "false",
+		  __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+	TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
+	TP_ARGS(drm_id, event, intf_idx),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id		)
+		__field(	u32,		event		)
+		__field(	enum dpu_intf,	intf_idx	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->event = event;
+		__entry->intf_idx = intf_idx;
+	),
+	TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
+		  __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+	TP_PROTO(uint32_t drm_id, unsigned int idx,
+		 unsigned long frame_busy_mask),
+	TP_ARGS(drm_id, idx, frame_busy_mask),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	unsigned int,		idx		)
+		__field(	unsigned long,		frame_busy_mask	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->idx = idx;
+		__entry->frame_busy_mask = frame_busy_mask;
+	),
+	TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+		  __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+	TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+		 int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+	TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+		pending_flush_ret),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id			)
+		__field(	enum dpu_intf,	intf_idx		)
+		__field(	int,		pending_kickoff_cnt	)
+		__field(	int,		ctl_idx			)
+		__field(	u32,		pending_flush_ret	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intf_idx = intf_idx;
+		__entry->pending_kickoff_cnt = pending_kickoff_cnt;
+		__entry->ctl_idx = ctl_idx;
+		__entry->pending_flush_ret = pending_flush_ret;
+	),
+	TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+		  "pending_flush_ret=%u", __entry->drm_id,
+		  __entry->intf_idx, __entry->pending_kickoff_cnt,
+		  __entry->ctl_idx, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+	TP_PROTO(uint32_t drm_id, ktime_t time),
+	TP_ARGS(drm_id, time),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id	)
+		__field(	ktime_t,	time	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->time = time;
+	),
+	TP_printk("id=%u, time=%lld", __entry->drm_id,
+		  ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+	TP_PROTO(uint32_t drm_id, ktime_t time),
+	TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+	TP_PROTO(uint32_t drm_id, ktime_t time),
+	TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id	)
+		__field(	u32,		event	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->event = event;
+	),
+	TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+	TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+		 s64 expected_time, int atomic_cnt),
+	TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id		)
+		__field(	int32_t,	hw_id		)
+		__field(	int,		rc		)
+		__field(	s64,		time		)
+		__field(	s64,		expected_time	)
+		__field(	int,		atomic_cnt	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->hw_id = hw_id;
+		__entry->rc = rc;
+		__entry->time = time;
+		__entry->expected_time = expected_time;
+		__entry->atomic_cnt = atomic_cnt;
+	),
+	TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+		  __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+		  __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+	TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+		 int refcnt),
+	TP_ARGS(drm_id, pp, enable, refcnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	enum dpu_pingpong,	pp	)
+		__field(	bool,			enable	)
+		__field(	int,			refcnt	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->pp = pp;
+		__entry->enable = enable;
+		__entry->refcnt = refcnt;
+	),
+	TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+		  __entry->pp, __entry->enable ? "true" : "false",
+		  __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+	TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+		 u32 event),
+	TP_ARGS(drm_id, pp, new_count, event),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_pingpong,	pp		)
+		__field(	int,			new_count	)
+		__field(	u32,			event		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->pp = pp;
+		__entry->new_count = new_count;
+		__entry->event = event;
+	),
+	TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+		  __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+	TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+		 int kickoff_count, u32 event),
+	TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_pingpong,	pp		)
+		__field(	int,			timeout_count	)
+		__field(	int,			kickoff_count	)
+		__field(	u32,			event		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->pp = pp;
+		__entry->timeout_count = timeout_count;
+		__entry->kickoff_count = kickoff_count;
+		__entry->event = event;
+	),
+	TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+		  __entry->drm_id, __entry->pp, __entry->timeout_count,
+		  __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+	TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+	TP_ARGS(drm_id, intf_idx),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id			)
+		__field(	enum dpu_intf,	intf_idx		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intf_idx = intf_idx;
+	),
+	TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+	TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+		 int refcnt),
+	TP_ARGS(drm_id, intf_idx, enable, refcnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id		)
+		__field(	enum dpu_intf,	intf_idx	)
+		__field(	bool,		enable		)
+		__field(	int,		refcnt		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intf_idx = intf_idx;
+		__entry->enable = enable;
+		__entry->refcnt = refcnt;
+	),
+	TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+		  __entry->intf_idx, __entry->enable ? "true" : "false",
+		  __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+	TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+		 struct drm_plane_state *state, struct dpu_plane_state *pstate,
+		 uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format,
+		 uint64_t modifier),
+	TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp,
+		pixel_format, modifier),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		crtc_id		)
+		__field(	uint32_t,		plane_id	)
+		__field(	struct drm_plane_state*,state		)
+		__field(	struct dpu_plane_state*,pstate		)
+		__field(	uint32_t,		stage_idx	)
+		__field(	enum dpu_sspp,		sspp		)
+		__field(	uint32_t,		pixel_format	)
+		__field(	uint64_t,		modifier	)
+	),
+	TP_fast_assign(
+		__entry->crtc_id = crtc_id;
+		__entry->plane_id = plane_id;
+		__entry->state = state;
+		__entry->pstate = pstate;
+		__entry->stage_idx = stage_idx;
+		__entry->sspp = sspp;
+		__entry->pixel_format = pixel_format;
+		__entry->modifier = modifier;
+	),
+	TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
+		  "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+		  "multirect_index:%d multirect_mode:%u pix_format:%u "
+		  "modifier:%llu",
+		  __entry->crtc_id, __entry->plane_id,
+		  __entry->state->fb ? __entry->state->fb->base.id : -1,
+		  __entry->state->src_w >> 16,  __entry->state->src_h >> 16,
+		  __entry->state->src_x >> 16,  __entry->state->src_y >> 16,
+		  __entry->state->crtc_w,  __entry->state->crtc_h,
+		  __entry->state->crtc_x,  __entry->state->crtc_y,
+		  __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
+		  __entry->pstate->multirect_index,
+		  __entry->pstate->multirect_mode, __entry->pixel_format,
+		  __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+	TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+	TP_ARGS(drm_id, mixer, bounds),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	int,			mixer	)
+		__field(	struct drm_rect *,	bounds	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->mixer = mixer;
+		__entry->bounds = bounds;
+	),
+	TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+		  __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+	TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+		 struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enc_id, enable, crtc),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	uint32_t,		enc_id	)
+		__field(	bool,			enable	)
+		__field(	struct dpu_crtc *,	crtc	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->enc_id = enc_id;
+		__entry->enable = enable;
+		__entry->crtc = crtc;
+	),
+	TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
+		  "vblank_req:%s}",
+		  __entry->drm_id, __entry->enc_id,
+		  __entry->enable ? "true" : "false",
+		  __entry->crtc->enabled ? "true" : "false",
+		  __entry->crtc->suspend ? "true" : "false",
+		  __entry->crtc->vblank_requested ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	bool,			enable	)
+		__field(	struct dpu_crtc *,	crtc	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->enable = enable;
+		__entry->crtc = crtc;
+	),
+	TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+		  __entry->drm_id, __entry->enable ? "true" : "false",
+		  __entry->crtc->enabled ? "true" : "false",
+		  __entry->crtc->suspend ? "true" : "false",
+		  __entry->crtc->vblank_requested ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+	TP_PROTO(uint32_t drm_id, int frame_pending),
+	TP_ARGS(drm_id, frame_pending),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	int,			frame_pending	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->frame_pending = frame_pending;
+	),
+	TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+		  __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+	TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout,
+		 enum dpu_sspp_multirect_index multirect_index),
+	TP_ARGS(index, layout, multirect_index),
+	TP_STRUCT__entry(
+		__field(	enum dpu_sspp,			index	)
+		__field(	struct dpu_hw_fmt_layout*,	layout	)
+		__field(	enum dpu_sspp_multirect_index,	multirect_index)
+	),
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->layout = layout;
+		__entry->multirect_index = multirect_index;
+	),
+	TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+		  "multirect_index:%d", __entry->index, __entry->layout->width,
+		  __entry->layout->height, __entry->layout->plane_addr[0],
+		  __entry->layout->plane_size[0],
+		  __entry->layout->plane_addr[1],
+		  __entry->layout->plane_size[1],
+		  __entry->layout->plane_addr[2],
+		  __entry->layout->plane_size[2],
+		  __entry->layout->plane_addr[3],
+		  __entry->layout->plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+	TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+	TP_ARGS(drm_id, is_virtual, multirect_mode),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	bool,			is_virtual	)
+		__field(	uint32_t,		multirect_mode	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->is_virtual = is_virtual;
+		__entry->multirect_mode = multirect_mode;
+	),
+	TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+		  __entry->is_virtual ? "true" : "false",
+		  __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		id	)
+		__field(	enum dpu_hw_blk_type,	type	)
+		__field(	uint32_t,		enc_id	)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->type = type;
+		__entry->enc_id = enc_id;
+	),
+	TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
+		  __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
+		 uint32_t pp_id),
+	TP_ARGS(id, type, enc_id, pp_id),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		id	)
+		__field(	enum dpu_hw_blk_type,	type	)
+		__field(	uint32_t,		enc_id	)
+		__field(	uint32_t,		pp_id	)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->type = type;
+		__entry->enc_id = enc_id;
+		__entry->pp_id = pp_id;
+	),
+	TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
+		  __entry->type, __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+	TP_PROTO(enum dpu_vbif index, u32 xin_id),
+	TP_ARGS(index, xin_id),
+	TP_STRUCT__entry(
+		__field(	enum dpu_vbif,	index	)
+		__field(	u32,		xin_id	)
+	),
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->xin_id = xin_id;
+	),
+	TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+	TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+	TP_ARGS(pp, cfg),
+	TP_STRUCT__entry(
+		__field(	enum dpu_pingpong,	pp	)
+		__field(	u32,			cfg	)
+	),
+	TP_fast_assign(
+		__entry->pp = pp;
+		__entry->cfg = cfg;
+	),
+	TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count),
+	TP_STRUCT__entry(
+		__field(	int,	irq_idx		)
+		__field(	int,	enable_count	)
+	),
+	TP_fast_assign(
+		__entry->irq_idx = irq_idx;
+		__entry->enable_count = enable_count;
+	),
+	TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
+		  __entry->enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback),
+	TP_STRUCT__entry(
+		__field(	int,				irq_idx	)
+		__field(	struct dpu_irq_callback *,	callback)
+	),
+	TP_fast_assign(
+		__entry->irq_idx = irq_idx;
+		__entry->callback = callback;
+	),
+	TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+		  __entry->callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+	TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+	TP_ARGS(dev, stop_req, clk_rate),
+	TP_STRUCT__entry(
+		__field(	struct drm_device *,	dev		)
+		__field(	bool,			stop_req	)
+		__field(	u64,			clk_rate	)
+	),
+	TP_fast_assign(
+		__entry->dev = dev;
+		__entry->stop_req = stop_req;
+		__entry->clk_rate = clk_rate;
+	),
+	TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+		  __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+	trace_dpu_trace_counter(current->tgid, name, value)
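+
+/*
+ * Illustrative usage (a sketch, not part of the original change): the
+ * ATRACE helpers bracket a code section for systrace-style profiling
+ * and log an integer counter, e.g.
+ *
+ *	DPU_ATRACE_BEGIN("crtc_commit");
+ *	...critical section...
+ *	DPU_ATRACE_END("crtc_commit");
+ *	DPU_ATRACE_INT("frame_pending", 1);
+ */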
+
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644
index 0000000..2955282
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif:	Pointer to hardware vbif driver
+ * @xin_id:	Client interface identifier
+ * @return:	0 on success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+	ktime_t timeout;
+	bool status;
+	int rc;
+
+	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		return -EINVAL;
+	}
+
+	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+	for (;;) {
+		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+		if (status)
+			break;
+		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+			break;
+		}
+		usleep_range(501, 1000);
+	}
+
+	if (!status) {
+		rc = -ETIMEDOUT;
+		DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+				vbif->idx - VBIF_0, xin_id);
+	} else {
+		rc = 0;
+		DPU_DEBUG("VBIF %d client %d is halted\n",
+				vbif->idx - VBIF_0, xin_id);
+	}
+
+	return rc;
+}
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @ot_lim:	Pointer to OT limit to be modified
+ * @params:	Pointer to usecase parameters
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+		u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+	u64 pps;
+	const struct dpu_vbif_dynamic_ot_tbl *tbl;
+	u32 i;
+
+	if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+		return;
+
+	/* Dynamic OT setting done only for WFD */
+	if (!params->is_wfd)
+		return;
+
+	pps = params->frame_rate;
+	pps *= params->width;
+	pps *= params->height;
+
+	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+			&vbif->cap->dynamic_ot_wr_tbl;
+
+	for (i = 0; i < tbl->count; i++) {
+		if (pps <= tbl->cfg[i].pps) {
+			*ot_lim = tbl->cfg[i].ot_limit;
+			break;
+		}
+	}
+
+	DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+			vbif->idx - VBIF_0, params->xin_id,
+			params->width, params->height, params->frame_rate,
+			pps, *ot_lim);
+}
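+
+/*
+ * Worked example (illustrative numbers, not from the original change):
+ * for a WFD read client at 1920x1080@60, pps = 60 * 1920 * 1080 =
+ * 124,416,000; the loop above then picks the OT limit of the first
+ * table entry whose cfg[i].pps threshold is >= that value.
+ */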
+
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @params:	Pointer to usecase parameters
+ * @return:	OT limit
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+	struct dpu_vbif_set_ot_params *params)
+{
+	u32 ot_lim = 0;
+	u32 val;
+
+	if (!vbif || !vbif->cap) {
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		/* a negative errno in a u32 return would be misread as a
+		 * huge OT limit by the caller; return 0 ("no limit") instead
+		 */
+		return 0;
+	}
+
+	if (vbif->cap->default_ot_wr_limit && !params->rd)
+		ot_lim = vbif->cap->default_ot_wr_limit;
+	else if (vbif->cap->default_ot_rd_limit && params->rd)
+		ot_lim = vbif->cap->default_ot_rd_limit;
+
+	/*
+	 * If default ot is not set from dt/catalog,
+	 * then do not configure it.
+	 */
+	if (ot_lim == 0)
+		goto exit;
+
+	/* Modify the limits if the target and the use case requires it */
+	_dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+	if (vbif && vbif->ops.get_limit_conf) {
+		val = vbif->ops.get_limit_conf(vbif,
+				params->xin_id, params->rd);
+		if (val == ot_lim)
+			ot_lim = 0;
+	}
+
+exit:
+	DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+			vbif->idx - VBIF_0, params->xin_id, ot_lim);
+	return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms:	Pointer to DPU KMS handle
+ * @params:	Pointer to usecase parameters
+ *
+ * Note: this function blocks while waiting for the bus to halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_ot_params *params)
+{
+	struct dpu_hw_vbif *vbif = NULL;
+	struct dpu_hw_mdp *mdp;
+	bool forced_on = false;
+	u32 ot_lim;
+	int ret, i;
+
+	if (!dpu_kms || !params) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = dpu_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		if (dpu_kms->hw_vbif[i] &&
+				dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
+			vbif = dpu_kms->hw_vbif[i];
+	}
+
+	if (!vbif || !mdp) {
+		DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+				vbif != 0, mdp != 0);
+		return;
+	}
+
+	if (!mdp->ops.setup_clk_force_ctrl ||
+			!vbif->ops.set_limit_conf ||
+			!vbif->ops.set_halt_ctrl)
+		return;
+
+	/* set write_gather_en for all write clients */
+	if (vbif->ops.set_write_gather_en && !params->rd)
+		vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+	ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+	if (ot_lim == 0)
+		goto exit;
+
+	trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+		params->vbif_idx);
+
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+	ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+	if (ret)
+		trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+	return;
+}
+
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_qos_params *params)
+{
+	struct dpu_hw_vbif *vbif = NULL;
+	struct dpu_hw_mdp *mdp;
+	bool forced_on = false;
+	const struct dpu_vbif_qos_tbl *qos_tbl;
+	int i;
+
+	if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = dpu_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		if (dpu_kms->hw_vbif[i] &&
+				dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
+			vbif = dpu_kms->hw_vbif[i];
+			break;
+		}
+	}
+
+	if (!vbif || !vbif->cap) {
+		DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+		return;
+	}
+
+	if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+		DPU_DEBUG("qos remap not supported\n");
+		return;
+	}
+
+	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+			&vbif->cap->qos_nrt_tbl;
+
+	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+		DPU_DEBUG("qos tbl not defined\n");
+		return;
+	}
+
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+		DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+				params->vbif_idx, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+	}
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+	struct dpu_hw_vbif *vbif;
+	u32 i, pnd, src;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid argument\n");
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		vbif = dpu_kms->hw_vbif[i];
+		if (vbif && vbif->ops.clear_errors) {
+			vbif->ops.clear_errors(vbif, &pnd, &src);
+			if (pnd || src) {
+				DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
+					      vbif->idx - VBIF_0, pnd, src);
+			}
+		}
+	}
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+	struct dpu_hw_vbif *vbif;
+	int i, j;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid argument\n");
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		vbif = dpu_kms->hw_vbif[i];
+		if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+			for (j = 0; j < vbif->cap->memtype_count; j++)
+				vbif->ops.set_mem_type(
+						vbif, j, vbif->cap->memtype[j]);
+		}
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+	debugfs_remove_recursive(dpu_kms->debugfs_vbif);
+	dpu_kms->debugfs_vbif = NULL;
+}
+
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+	char vbif_name[32];
+	struct dentry *debugfs_vbif;
+	int i, j;
+
+	dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
+	if (!dpu_kms->debugfs_vbif) {
+		DPU_ERROR("failed to create vbif debugfs\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+		struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+		debugfs_vbif = debugfs_create_dir(vbif_name,
+				dpu_kms->debugfs_vbif);
+
+		debugfs_create_u32("features", 0600, debugfs_vbif,
+			(u32 *)&vbif->features);
+
+		debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+			(u32 *)&vbif->xin_halt_timeout);
+
+		debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+			(u32 *)&vbif->default_ot_rd_limit);
+
+		debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+			(u32 *)&vbif->default_ot_wr_limit);
+
+		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+			struct dpu_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_rd_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_pps", j);
+			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+
+		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+			struct dpu_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_wr_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_pps", j);
+			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+	}
+
+	return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644
index 0000000..f17af52
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
+struct dpu_vbif_set_ot_params {
+	u32 xin_id;
+	u32 num;
+	u32 width;
+	u32 height;
+	u32 frame_rate;
+	bool rd;
+	bool is_wfd;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+};
+
+struct dpu_vbif_set_memtype_params {
+	u32 xin_id;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+	bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+	u32 vbif_idx;
+	u32 xin_id;
+	u32 clk_ctrl;
+	u32 num;
+	bool is_rt;
+};
+
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms:	DPU handler
+ * @params:	Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_ot_params *params);
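+
+/*
+ * Illustrative caller sketch (hypothetical values, not part of the
+ * original header): a client would typically fill the params from its
+ * pipe configuration before updating the OT limit, e.g.
+ *
+ *	struct dpu_vbif_set_ot_params ot_params = {
+ *		.xin_id = 0,
+ *		.num = 0,
+ *		.width = 1920,
+ *		.height = 1080,
+ *		.frame_rate = 60,
+ *		.rd = true,
+ *		.is_wfd = false,
+ *		.vbif_idx = VBIF_0,
+ *		.clk_ctrl = 0,
+ *	};
+ *
+ *	dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+ */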
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms:	DPU handler
+ * @params:	Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_qos_params *params);
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms:	DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms:	DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
+#else
+static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
+		struct dentry *debugfs_root)
+{
+	return 0;
+}
+static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
new file mode 100644
index 0000000..4f12e5c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -0,0 +1,1376 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+	((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+	(((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b))
+#endif
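+
+/*
+ * Worked examples (illustrative): MSM_MEDIA_ALIGN() takes the cheap
+ * mask-based path for power-of-two alignments and falls back to a
+ * divide/multiply otherwise:
+ *
+ *	MSM_MEDIA_ALIGN(1080, 128)  = (1080 + 127) & ~127      = 1152
+ *	MSM_MEDIA_ALIGN(1080, 192)  = ((1080 + 191)/192) * 192 = 1152
+ *	MSM_MEDIA_ROUNDUP(1080, 32) = (1080 + 31)/32           = 34
+ */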
+
+enum color_fmts {
+	/* Venus NV12:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * U V U V U V U V U V U V . . . .  ^
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_NV12,
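+	/*
+	 * Worked example (illustrative, assuming the 16K Extradata returned
+	 * by VENUS_EXTRADATA_SIZE() below) for 1920x1080 NV12:
+	 *	Y_Stride  = align(1920, 128) = 1920
+	 *	UV_Stride = align(1920, 128) = 1920
+	 *	Y_Scanlines  = align(1080, 32)   = 1088
+	 *	UV_Scanlines = align(1080/2, 16) = 544
+	 *	Total = align(1920*1088 + 1920*544
+	 *	              + max(16384, 1920*8), 4096) = 3149824 bytes
+	 */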
+
+	/* Venus NV21:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * V U V U V U V U V U V U . . . .  ^
+	 * V U V U V U V U V U V U . . . .  |
+	 * V U V U V U V U V U V U . . . .  |
+	 * V U V U V U V U V U V U . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Padding & Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_NV21,
+	/* Venus NV12_MVTB:
+	 * Two YUV 4:2:0 images/views one after the other
+	 * in a top-bottom layout, same as NV12
+	 * with a plane of 8 bit Y samples followed
+	 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^               ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |               |
+	 * . . . . . . . . . . . . . . . .              |             View_1
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              V               |
+	 * U V U V U V U V U V U V . . . .  ^                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines                |
+	 * . . . . . . . . . . . . . . . .  |                           |
+	 * . . . . . . . . . . . . . . . .  V                           V
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^               ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |               |
+	 * . . . . . . . . . . . . . . . .              |             View_2
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              V               |
+	 * U V U V U V U V U V U V . . . .  ^                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines                |
+	 * . . . . . . . . . . . . . . . .  |                           |
+	 * . . . . . . . . . . . . . . . .  V                           V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * View_1 begin at: 0 (zero)
+	 * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align((2*(Y_Stride * Y_Scanlines)
+	 *          + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+	 */
+	COLOR_FMT_NV12_MVTB,
+	/*
+	 * The buffer can be of 2 types:
+	 * (1) Venus NV12 UBWC Progressive
+	 * (2) Venus NV12 UBWC Interlaced
+	 *
+	 * (1) Venus NV12 UBWC Progressive Buffer Format:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 * Y_Stride = align(Width, 128)
+	 * UV_Stride = align(Width, 128)
+	 * Y_Scanlines = align(Height, 32)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 48), 4096)
+	 *
+	 *
+	 * (2) Venus NV12 UBWC Interlaced Buffer Format:
+	 * Compressed Macro-tile format for NV12 interlaced.
+	 * Contains 8 planes in the following order -
+	 * (A) Y_Meta_Top_Field_Plane
+	 * (B) Y_UBWC_Top_Field_Plane
+	 * (C) UV_Meta_Top_Field_Plane
+	 * (D) UV_UBWC_Top_Field_Plane
+	 * (E) Y_Meta_Bottom_Field_Plane
+	 * (F) Y_UBWC_Bottom_Field_Plane
+	 * (G) UV_Meta_Bottom_Field_Plane
+	 * (H) UV_UBWC_Bottom_Field_Plane
+	 * Y_Meta_Top_Field_Plane consists of meta information to decode
+	 * compressed tile data for Y_UBWC_Top_Field_Plane.
+	 * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+	 * format for top field of an interlaced frame.
+	 * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+	 * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+	 * 8 bit Y samples for top field of an interlaced frame.
+	 *
+	 * UV_Meta_Top_Field_Plane consists of meta information to decode
+	 * compressed tile data in UV_UBWC_Top_Field_Plane.
+	 * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+	 * format for top field of an interlaced frame.
+	 * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+	 * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+	 * 8 bit subsampled color difference samples for top field of an
+	 * interlaced frame.
+	 *
+	 * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+	 * independently decodable and randomly accessible. There is no
+	 * dependency between tiles.
+	 *
+	 * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+	 * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+	 * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+	 * format for bottom field of an interlaced frame.
+	 * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+	 * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+	 * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+	 *
+	 * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+	 * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+	 * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+	 * macro-tile format for bottom field of an interlaced frame.
+	 * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+	 * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+	 * uncompressed 8 bit subsampled color difference samples for bottom
+	 * field of an interlaced frame.
+	 *
+	 * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+	 * independently decodable and randomly accessible. There is no
+	 * dependency between tiles.
+	 *
+	 * <-----Y_TF_Meta_Stride---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . . Half_height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_TF_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-Compressed tile Y_TF Stride->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height  |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_TF_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----UV_TF_Meta_Stride---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_TF_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <-Compressed tile UV_TF Stride->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_TF_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * <-----Y_BF_Meta_Stride---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . . Half_height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_BF_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-Compressed tile Y_BF Stride->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height  |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_BF_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----UV_BF_Meta_Stride---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_BF_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <-Compressed tile UV_BF Stride->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_BF_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 * Half_height = (Height+1)>>1
+	 * Y_TF_Stride = align(Width, 128)
+	 * UV_TF_Stride = align(Width, 128)
+	 * Y_TF_Scanlines = align(Half_height, 32)
+	 * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+	 * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+	 * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+	 * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+	 * Y_TF_Meta_Plane_size =
+	 *     align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+	 * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+	 * UV_TF_Meta_Plane_size =
+	 *     align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+	 * Y_BF_Stride = align(Width, 128)
+	 * UV_BF_Stride = align(Width, 128)
+	 * Y_BF_Scanlines = align(Half_height, 32)
+	 * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+	 * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+	 * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+	 * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+	 * Y_BF_Meta_Plane_size =
+	 *     align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+	 * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+	 * UV_BF_Meta_Plane_size =
+	 *     align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+	 *           Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+	 *           Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+	 *           Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size +
+	 *           max(Extradata, Y_TF_Stride * 48), 4096)
+	 */
+	COLOR_FMT_NV12_UBWC,
+	/* Venus NV12 10-bit UBWC:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ----->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 *
+	 * Y_Stride = align(Width * 4/3, 128)
+	 * UV_Stride = align(Width * 4/3, 128)
+	 * Y_Scanlines = align(Height, 32)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 48), 4096)
+	 */
+	COLOR_FMT_NV12_BPP10_UBWC,
+	/* Venus RGBA8888 format:
+	 * Contains 1 plane in the following order -
+	 * (A) RGBA plane
+	 *
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 128)
+	 * RGB_Scanlines = align(Height, 32)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Plane_size + Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA8888,
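+	/*
+	 * Worked example (illustrative, assuming Extradata = 8K as stated
+	 * above) for 1920x1080 RGBA8888:
+	 *	RGB_Stride     = align(1920 * 4, 128) = 7680
+	 *	RGB_Scanlines  = align(1080, 32)      = 1088
+	 *	RGB_Plane_size = align(7680 * 1088, 4096) = 8355840
+	 *	Total = align(8355840 + 8192, 4096) = 8364032 bytes
+	 */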
+	/* Venus RGBA8888 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGBA plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 128)
+	 * RGB_Scanlines = align(Height, 32)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA8888_UBWC,
+	/* Venus RGBA1010102 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGBA plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 256)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA1010102_UBWC,
+	/* Venus RGB565 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGB plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 2, 128)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGB565_UBWC,
+	/* P010 UBWC:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ----->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 *
+	 * Y_Stride = align(Width * 2, 256)
+	 * UV_Stride = align(Width * 2, 256)
+	 * Y_Scanlines = align(Height, 16)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 48), 4096)
+	 */
+	COLOR_FMT_P010_UBWC,
+	/* Venus P010:
+	 * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+	 * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * U V U V U V U V U V U V . . . .  ^
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width * 2 aligned to 128
+	 * UV_Stride : Width * 2 aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_P010,
+};
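+
+/*
+ * Worked example (illustrative, assuming the 16K Extradata returned by
+ * VENUS_EXTRADATA_SIZE() below) for 1920x1080 P010:
+ *	Y_Stride  = align(1920 * 2, 128) = 3840
+ *	UV_Stride = align(1920 * 2, 128) = 3840
+ *	Y_Scanlines  = align(1080, 32)   = 1088
+ *	UV_Scanlines = align(1080/2, 16) = 544
+ *	Total = align(3840*1088 + 3840*544
+ *	              + max(16384, 3840*8), 4096) = 6299648 bytes
+ */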
+
+#define COLOR_FMT_RGBA1010102_UBWC	COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC		COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC		COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010		COLOR_FMT_P010
+
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+	(void)height;
+	(void)width;
+
+	/*
+	 * In the future this could be calculated from the width/height,
+	 * but hardcode it for now since 16K satisfies all current use cases.
+	 */
+	return 16 * 1024;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment, stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
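+		/*
+		 * The 10-bit UBWC layout packs three 10-bit samples into
+		 * four bytes, hence the 4/3 bytes-per-pixel expansion below;
+		 * aligning width to 192 first keeps stride * 4/3 integral.
+		 */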
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	case COLOR_FMT_P010:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment, stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	case COLOR_FMT_P010:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment, sclines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010:
+		alignment = 32;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		alignment = 16;
+		break;
+	default:
+		return 0;
+	}
+	sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+	return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment, sclines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+	case COLOR_FMT_P010:
+		alignment = 16;
+		break;
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 32;
+		break;
+	default:
+		goto invalid_input;
+	}
+
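+	/* 4:2:0 chroma spans half the luma height, rounded up */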
+	sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+	return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+	int y_tile_width = 0, y_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		y_tile_width = 32;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		y_tile_width = 48;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+	y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+	return y_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+	int y_tile_height = 0, y_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+		y_tile_height = 8;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		y_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+	y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+	return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+	int uv_tile_width = 0, uv_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		uv_tile_width = 16;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		uv_tile_width = 24;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+	uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+	return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+	int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+		uv_tile_height = 8;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		uv_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+	uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+	return uv_meta_scanlines;
+}
+
+static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment = 0, stride = 0, bpp = 4;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888:
+		alignment = 128;
+		break;
+	case COLOR_FMT_RGB565_UBWC:
+		alignment = 256;
+		bpp = 2;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+		alignment = 256;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+	return stride;
+}
+
+static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment = 0, scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888:
+		alignment = 32;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		alignment = 16;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+	return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+	int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_tile_width = 16;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+	rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+	return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+	int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+	rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+	return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
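+ *
+ * For illustration, a hypothetical NV12 1280x720 buffer works out to:
+ * VENUS_BUFFER_SIZE(COLOR_FMT_NV12, 1280, 720)
+ *   = align(1280*736 + (1280*368 + 4096) + max(16384, 8*1280), 4096)
+ *   = 1433600 bytes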
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(
+	int color_fmt, int width, int height)
+{
+	const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+	unsigned int uv_alignment = 0, size = 0;
+	unsigned int y_plane, uv_plane, y_stride,
+		uv_stride, y_sclines, uv_sclines;
+	unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+	unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+	unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+	unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+	unsigned int rgb_stride = 0, rgb_scanlines = 0;
+	unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+	unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+	if (!width || !height)
+		goto invalid_input;
+
+	y_stride = VENUS_Y_STRIDE(color_fmt, width);
+	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+	rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+	rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_P010:
+		uv_alignment = 4096;
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines + uv_alignment;
+		size = y_plane + uv_plane +
+				MSM_MEDIA_MAX(extra_size, 8 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_MVTB:
+		uv_alignment = 4096;
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines + uv_alignment;
+		size = y_plane + uv_plane;
+		size = 2 * size + extra_size;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_UBWC:
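+		/*
+		 * Plane sizes are computed at half height and then doubled,
+		 * so a single allocation can also cover two fields (e.g.
+		 * interlaced content).
+		 */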
+		y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines =
+			VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+			y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines =
+			VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+			uv_meta_scanlines, 4096);
+
+		size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane)*2 +
+			MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+					uv_meta_scanlines, 4096);
+
+		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane +
+			MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+					uv_meta_scanlines, 4096);
+
+		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_RGBA8888:
+		rgb_plane = MSM_MEDIA_ALIGN(rgb_stride  * rgb_scanlines, 4096);
+		size = rgb_plane;
+		size =  MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+							4096);
+		rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+					height);
+		rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+					rgb_meta_scanlines, 4096);
+		size = rgb_ubwc_plane + rgb_meta_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return size;
+}
+
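+/*
+ * Offset of the second view in a multi-view top-bottom (MVTB) buffer:
+ * view 2 starts immediately after view 1's Y and UV planes.
+ */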
+static inline unsigned int VENUS_VIEW2_OFFSET(
+	int color_fmt, int width, int height)
+{
+	unsigned int offset = 0;
+	unsigned int y_plane, uv_plane, y_stride,
+		uv_stride, y_sclines, uv_sclines;
+	if (!width || !height)
+		goto invalid_input;
+
+	y_stride = VENUS_Y_STRIDE(color_fmt, width);
+	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_MVTB:
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines;
+		offset = y_plane + uv_plane;
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return offset;
+}
+
+#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 89cb608..3d15cd9 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -882,6 +882,35 @@
 			0xe4);
 }
 
+static void dp_catalog_ctrl_lane_pnswap(struct dp_catalog_ctrl *ctrl,
+						u8 ln_pnswap)
+{
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+	u32 cfg0, cfg1;
+
+	catalog = dp_catalog_get_priv(ctrl);
+
+	cfg0 = 0x0a;
+	cfg1 = 0x0a;
+
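+	/*
+	 * One polarity-swap bit per lane: lanes 0/1 map to bits 0 and 2
+	 * of the TX0 block's polarity-inversion register, lanes 2/3 to
+	 * the same bits of the TX1 block.
+	 */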
+	cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
+	cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
+	cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
+	cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
+
+	io_data = catalog->io.dp_ln_tx0;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV, cfg0);
+
+	io_data = catalog->io.dp_ln_tx1;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV, cfg1);
+}
+
 static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
 						bool enable)
 {
@@ -952,12 +976,10 @@
 }
 
 static void dp_catalog_panel_config_msa(struct dp_catalog_panel *panel,
-					u32 rate, u32 stream_rate_khz,
-					bool fixed_nvid)
+					u32 rate, u32 stream_rate_khz)
 {
 	u32 pixel_m, pixel_n;
 	u32 mvid, nvid;
-	u64 mvid_calc;
 	u32 const nvid_fixed = 0x8000;
 	u32 const link_rate_hbr2 = 540000;
 	u32 const link_rate_hbr3 = 810000;
@@ -977,57 +999,44 @@
 	}
 
 	catalog = dp_catalog_get_priv(panel);
-	if (fixed_nvid) {
-		pr_debug("use fixed NVID=0x%x\n", nvid_fixed);
-		nvid = nvid_fixed;
+	io_data = catalog->io.dp_mmss_cc;
 
-		pr_debug("link rate=%dkbps, stream_rate_khz=%uKhz\n",
-			rate, stream_rate_khz);
+	if (panel->stream_id == DP_STREAM_1)
+		strm_reg_off = MMSS_DP_PIXEL1_M - MMSS_DP_PIXEL_M;
 
-		/*
-		 * For intermediate results, use 64 bit arithmetic to avoid
-		 * loss of precision.
-		 */
-		mvid_calc = (u64) stream_rate_khz * nvid;
-		mvid_calc = div_u64(mvid_calc, rate);
+	pixel_m = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_M + strm_reg_off);
+	pixel_n = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_N + strm_reg_off);
+	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
 
-		/*
-		 * truncate back to 32 bits as this final divided value will
-		 * always be within the range of a 32 bit unsigned int.
-		 */
-		mvid = (u32) mvid_calc;
+	mvid = (pixel_m & 0xFFFF) * 5;
+	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
 
-		if (panel->widebus_en) {
-			mvid <<= 1;
-			nvid <<= 1;
-		}
-	} else {
-		io_data = catalog->io.dp_mmss_cc;
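+	/*
+	 * Scale MVID and NVID by the same integer factor so that NVID gets
+	 * as close to the fixed 0x8000 value as possible without changing
+	 * the M/N ratio.
+	 */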
+	if (nvid < nvid_fixed) {
+		u32 temp;
 
-		if (panel->stream_id == DP_STREAM_1)
-			strm_reg_off = MMSS_DP_PIXEL1_M - MMSS_DP_PIXEL_M;
-
-		pixel_m = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_M + strm_reg_off);
-		pixel_n = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_N + strm_reg_off);
-		pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
-
-		mvid = (pixel_m & 0xFFFF) * 5;
-		nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
-		pr_debug("rate = %d\n", rate);
-
-		if (panel->widebus_en)
-			mvid <<= 1;
-
-		if (link_rate_hbr2 == rate)
-			nvid *= 2;
-
-		if (link_rate_hbr3 == rate)
-			nvid *= 3;
+		temp = (nvid_fixed / nvid) * nvid;
+		mvid = (nvid_fixed / nvid) * mvid;
+		nvid = temp;
 	}
 
+	pr_debug("rate = %d\n", rate);
+
+	if (panel->widebus_en)
+		mvid <<= 1;
+
+	if (link_rate_hbr2 == rate)
+		nvid *= 2;
+
+	if (link_rate_hbr3 == rate)
+		nvid *= 3;
+
 	io_data = catalog->io.dp_link;
 
 	if (panel->stream_id == DP_STREAM_1) {
@@ -2529,6 +2533,7 @@
 		.state_ctrl     = dp_catalog_ctrl_state_ctrl,
 		.config_ctrl    = dp_catalog_ctrl_config_ctrl,
 		.lane_mapping   = dp_catalog_ctrl_lane_mapping,
+		.lane_pnswap    = dp_catalog_ctrl_lane_pnswap,
 		.mainlink_ctrl  = dp_catalog_ctrl_mainlink_ctrl,
 		.set_pattern    = dp_catalog_ctrl_set_pattern,
 		.reset          = dp_catalog_ctrl_reset,
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 9d536b4..85ed209 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -1,6 +1,16 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
  */
 
 #ifndef _DP_CATALOG_H_
@@ -93,6 +106,7 @@
 	void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u8 ln_cnt);
 	void (*lane_mapping)(struct dp_catalog_ctrl *ctrl, bool flipped,
 				char *lane_map);
+	void (*lane_pnswap)(struct dp_catalog_ctrl *ctrl, u8 ln_pnswap);
 	void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable);
 	void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern);
 	void (*reset)(struct dp_catalog_ctrl *ctrl);
@@ -223,7 +237,7 @@
 	void (*config_spd)(struct dp_catalog_panel *panel);
 	void (*config_misc)(struct dp_catalog_panel *panel);
 	void (*config_msa)(struct dp_catalog_panel *panel,
-			u32 rate, u32 stream_rate_khz, bool fixed_nvid);
+			u32 rate, u32 stream_rate_khz);
 	void (*update_transfer_unit)(struct dp_catalog_panel *panel);
 	void (*config_ctrl)(struct dp_catalog_panel *panel, u32 cfg);
 	void (*config_dto)(struct dp_catalog_panel *panel, bool ack);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v200.c b/drivers/gpu/drm/msm/dp/dp_catalog_v200.c
index da02f7a..132e50e 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog_v200.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog_v200.c
@@ -118,12 +118,10 @@
 }
 
 static void dp_catalog_panel_config_msa_v200(struct dp_catalog_panel *panel,
-					u32 rate, u32 stream_rate_khz,
-					bool fixed_nvid)
+					u32 rate, u32 stream_rate_khz)
 {
 	u32 pixel_m, pixel_n;
 	u32 mvid, nvid;
-	u64 mvid_calc;
 	u32 const nvid_fixed = 0x8000;
 	u32 const link_rate_hbr2 = 540000;
 	u32 const link_rate_hbr3 = 810000;
@@ -143,58 +141,41 @@
 	}
 
 	catalog = dp_catalog_get_priv_v200(panel);
-	if (fixed_nvid) {
-		pr_debug("use fixed NVID=0x%x\n", nvid_fixed);
-		nvid = nvid_fixed;
+	io_data = catalog->io->dp_mmss_cc;
 
-		pr_debug("link rate=%dkbps, stream_rate_khz=%uKhz\n",
-			rate, stream_rate_khz);
+	if (panel->stream_id == DP_STREAM_1)
+		strm_reg_off = MMSS_DP_PIXEL1_M_V200 -
+					MMSS_DP_PIXEL_M_V200;
 
-		/*
-		 * For intermediate results, use 64 bit arithmetic to avoid
-		 * loss of precision.
-		 */
-		mvid_calc = (u64) stream_rate_khz * nvid;
-		mvid_calc = div_u64(mvid_calc, rate);
+	pixel_m = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_M_V200 + strm_reg_off);
+	pixel_n = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_N_V200 + strm_reg_off);
+	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
 
-		/*
-		 * truncate back to 32 bits as this final divided value will
-		 * always be within the range of a 32 bit unsigned int.
-		 */
-		mvid = (u32) mvid_calc;
+	mvid = (pixel_m & 0xFFFF) * 5;
+	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
 
-		if (panel->widebus_en) {
-			mvid <<= 1;
-			nvid <<= 1;
-		}
-	} else {
-		io_data = catalog->io->dp_mmss_cc;
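+	/* scale M and N together so N approaches 0x8000, ratio intact */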
+	if (nvid < nvid_fixed) {
+		u32 temp;
 
-		if (panel->stream_id == DP_STREAM_1)
-			strm_reg_off = MMSS_DP_PIXEL1_M_V200 -
-						MMSS_DP_PIXEL_M_V200;
-
-		pixel_m = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_M_V200 + strm_reg_off);
-		pixel_n = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_N_V200 + strm_reg_off);
-		pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
-
-		mvid = (pixel_m & 0xFFFF) * 5;
-		nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
-		pr_debug("rate = %d\n", rate);
-
-		if (panel->widebus_en)
-			mvid <<= 1;
-
-		if (link_rate_hbr2 == rate)
-			nvid *= 2;
-
-		if (link_rate_hbr3 == rate)
-			nvid *= 3;
+		temp = (nvid_fixed / nvid) * nvid;
+		mvid = (nvid_fixed / nvid) * mvid;
+		nvid = temp;
 	}
 
+	pr_debug("rate = %d\n", rate);
+
+	if (panel->widebus_en)
+		mvid <<= 1;
+
+	if (link_rate_hbr2 == rate)
+		nvid *= 2;
+
+	if (link_rate_hbr3 == rate)
+		nvid *= 3;
+
 	io_data = catalog->io->dp_link;
 
 	if (panel->stream_id == DP_STREAM_1) {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
index 50b8859..51fa987 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
@@ -122,12 +122,10 @@
 }
 
 static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel,
-					u32 rate, u32 stream_rate_khz,
-					bool fixed_nvid)
+					u32 rate, u32 stream_rate_khz)
 {
 	u32 pixel_m, pixel_n;
 	u32 mvid, nvid, reg_off = 0, mvid_off = 0, nvid_off = 0;
-	u64 mvid_calc;
 	u32 const nvid_fixed = 0x8000;
 	u32 const link_rate_hbr2 = 540000;
 	u32 const link_rate_hbr3 = 810000;
@@ -145,57 +143,40 @@
 	}
 
 	catalog = dp_catalog_get_priv_v420(panel);
-	if (fixed_nvid) {
-		pr_debug("use fixed NVID=0x%x\n", nvid_fixed);
-		nvid = nvid_fixed;
+	io_data = catalog->io->dp_mmss_cc;
 
-		pr_debug("link rate=%dkbps, stream_rate_khz=%uKhz\n",
-			rate, stream_rate_khz);
+	if (panel->stream_id == DP_STREAM_1)
+		reg_off = MMSS_DP_PIXEL1_M_V420 - MMSS_DP_PIXEL_M_V420;
 
-		/*
-		 * For intermediate results, use 64 bit arithmetic to avoid
-		 * loss of precision.
-		 */
-		mvid_calc = (u64) stream_rate_khz * nvid;
-		mvid_calc = div_u64(mvid_calc, rate);
+	pixel_m = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_M_V420 + reg_off);
+	pixel_n = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_N_V420 + reg_off);
+	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
 
-		/*
-		 * truncate back to 32 bits as this final divided value will
-		 * always be within the range of a 32 bit unsigned int.
-		 */
-		mvid = (u32) mvid_calc;
+	mvid = (pixel_m & 0xFFFF) * 5;
+	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
 
-		if (panel->widebus_en) {
-			mvid <<= 1;
-			nvid <<= 1;
-		}
-	} else {
-		io_data = catalog->io->dp_mmss_cc;
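+	/* scale M and N together so N approaches 0x8000, ratio intact */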
+	if (nvid < nvid_fixed) {
+		u32 temp;
 
-		if (panel->stream_id == DP_STREAM_1)
-			reg_off = MMSS_DP_PIXEL1_M_V420 - MMSS_DP_PIXEL_M_V420;
-
-		pixel_m = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_M_V420 + reg_off);
-		pixel_n = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_N_V420 + reg_off);
-		pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
-
-		mvid = (pixel_m & 0xFFFF) * 5;
-		nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
-		pr_debug("rate = %d\n", rate);
-
-		if (panel->widebus_en)
-			mvid <<= 1;
-
-		if (link_rate_hbr2 == rate)
-			nvid *= 2;
-
-		if (link_rate_hbr3 == rate)
-			nvid *= 3;
+		temp = (nvid_fixed / nvid) * nvid;
+		mvid = (nvid_fixed / nvid) * mvid;
+		nvid = temp;
 	}
 
+	pr_debug("rate = %d\n", rate);
+
+	if (panel->widebus_en)
+		mvid <<= 1;
+
+	if (link_rate_hbr2 == rate)
+		nvid *= 2;
+
+	if (link_rate_hbr3 == rate)
+		nvid *= 3;
+
 	io_data = catalog->io->dp_link;
 
 	if (panel->stream_id == DP_STREAM_1) {
@@ -286,6 +266,31 @@
 	}
 }
 
+static void dp_catalog_ctrl_lane_pnswap_v420(struct dp_catalog_ctrl *ctrl,
+						u8 ln_pnswap)
+{
+	struct dp_catalog_private_v420 *catalog;
+	struct dp_io_data *io_data;
+	u32 cfg0, cfg1;
+
+	catalog = dp_catalog_get_priv_v420(ctrl);
+
+	cfg0 = 0x0a;
+	cfg1 = 0x0a;
+
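+	/* one P/N swap bit per lane: lanes 0/1 -> TX0, lanes 2/3 -> TX1 */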
+	cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
+	cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
+	cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
+	cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
+
+	io_data = catalog->io->dp_ln_tx0;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg0);
+
+	io_data = catalog->io->dp_ln_tx1;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg1);
+}
+
 static void dp_catalog_put_v420(struct dp_catalog *catalog)
 {
 	struct dp_catalog_private_v420 *catalog_priv;
@@ -336,6 +340,7 @@
 	catalog->panel.config_msa  = dp_catalog_panel_config_msa_v420;
 	catalog->ctrl.phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg_v420;
 	catalog->ctrl.update_vx_px = dp_catalog_ctrl_update_vx_px_v420;
+	catalog->ctrl.lane_pnswap = dp_catalog_ctrl_lane_pnswap_v420;
 
 	/* Set the default execution mode to hardware mode */
 	dp_catalog_set_exe_mode_v420(catalog, "hw");
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 75a2f16..d84417e 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -161,6 +161,8 @@
 	if (enable) {
 		ctrl->catalog->lane_mapping(ctrl->catalog, ctrl->orientation,
 						ctrl->parser->l_map);
+		ctrl->catalog->lane_pnswap(ctrl->catalog,
+						ctrl->parser->l_pnswap);
 		ctrl->catalog->mst_config(ctrl->catalog, ctrl->mst_mode);
 		ctrl->catalog->config_ctrl(ctrl->catalog,
 				ctrl->link->link_params.lane_count);
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index e5f1b3e..e581303 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -11,7 +11,6 @@
 #include "dp_power.h"
 #include "dp_catalog.h"
 #include "dp_aux.h"
-#include "dp_ctrl.h"
 #include "dp_debug.h"
 #include "drm_connector.h"
 #include "sde_connector.h"
@@ -41,6 +40,8 @@
 	struct device *dev;
 	struct dp_debug dp_debug;
 	struct dp_parser *parser;
+	struct dp_ctrl *ctrl;
+	struct mutex lock;
 };
 
 static int dp_debug_get_edid_buf(struct dp_debug_private *debug)
@@ -90,6 +91,8 @@
 	if (!debug)
 		return -ENODEV;
 
+	mutex_lock(&debug->lock);
+
 	if (*ppos)
 		goto bail;
 
@@ -161,6 +164,7 @@
 	 */
 	pr_info("[%s]\n", edid ? "SET" : "CLEAR");
 
+	mutex_unlock(&debug->lock);
 	return rc;
 }
 
@@ -180,6 +184,8 @@
 	if (!debug)
 		return -ENODEV;
 
+	mutex_lock(&debug->lock);
+
 	if (*ppos)
 		goto bail;
 
@@ -260,6 +266,7 @@
 	} else
 		debug->aux->dpcd_updated(debug->aux);
 
+	mutex_unlock(&debug->lock);
 	return rc;
 }
 
@@ -747,7 +754,7 @@
 		const char __user *user_buff, size_t count, loff_t *ppos)
 {
 	struct dp_debug_private *debug = file->private_data;
-	char *buf;
+	char buf[SZ_32];
 	size_t len = 0;
 
 	if (!debug)
@@ -757,7 +764,9 @@
 		return 0;
 
 	len = min_t(size_t, count, SZ_32 - 1);
-	buf = memdup_user(user_buff, len);
+	if (copy_from_user(buf, user_buff, len))
+		goto end;
+
 	buf[len] = '\0';
 
 	if (sscanf(buf, "%3s", debug->exe_mode) != 1)
@@ -1437,6 +1446,7 @@
 
 		if (dp_debug_get_dpcd_buf(debug)) {
 			devm_kfree(debug->dev, debug->edid);
+			debug->edid = NULL;
 			return;
 		}
 
@@ -1444,6 +1454,9 @@
 		debug->aux->set_sim_mode(debug->aux, true,
 			debug->edid, debug->dpcd);
 	} else {
+		debug->aux->abort(debug->aux);
+		debug->ctrl->abort(debug->ctrl);
+
 		debug->aux->set_sim_mode(debug->aux, false, NULL, NULL);
 		debug->dp_debug.sim_mode = false;
 
@@ -1482,6 +1495,8 @@
 	if (*ppos)
 		return 0;
 
+	mutex_lock(&debug->lock);
+
 	/* Leave room for termination char */
 	len = min_t(size_t, count, SZ_8 - 1);
 	if (copy_from_user(buf, user_buff, len))
@@ -1494,6 +1509,7 @@
 
 	dp_debug_set_sim_mode(debug, sim);
 end:
+	mutex_unlock(&debug->lock);
 	return len;
 }
 
@@ -1941,6 +1957,14 @@
 		       DEBUG_NAME, rc);
 	}
 
+	file = debugfs_create_u32("max_lclk_khz", 0644, dir,
+			&debug->parser->max_lclk_khz);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		pr_err("[%s] debugfs max_lclk_khz failed, rc=%d\n",
+		       DEBUG_NAME, rc);
+	}
+
 	return 0;
 
 error_remove_dir:
@@ -1972,7 +1996,9 @@
 
 	debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
 
+	mutex_lock(&debug->lock);
 	dp_debug_set_sim_mode(debug, false);
+	mutex_unlock(&debug->lock);
 }
 
 struct dp_debug *dp_debug_get(struct dp_debug_in *in)
@@ -1981,7 +2007,8 @@
 	struct dp_debug_private *debug;
 	struct dp_debug *dp_debug;
 
-	if (!in->dev || !in->panel || !in->hpd || !in->link || !in->catalog) {
+	if (!in->dev || !in->panel || !in->hpd || !in->link ||
+	    !in->catalog || !in->ctrl) {
 		pr_err("invalid input\n");
 		rc = -EINVAL;
 		goto error;
@@ -2002,12 +2029,15 @@
 	debug->connector = in->connector;
 	debug->catalog = in->catalog;
 	debug->parser = in->parser;
+	debug->ctrl = in->ctrl;
 
 	dp_debug = &debug->dp_debug;
 	dp_debug->vdisplay = 0;
 	dp_debug->hdisplay = 0;
 	dp_debug->vrefresh = 0;
 
+	mutex_init(&debug->lock);
+
 	rc = dp_debug_init(dp_debug);
 	if (rc) {
 		devm_kfree(in->dev, debug);
@@ -2059,6 +2089,8 @@
 
 	dp_debug_deinit(dp_debug);
 
+	mutex_destroy(&debug->lock);
+
 	if (debug->edid)
 		devm_kfree(debug->dev, debug->edid);
 
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index dfbc652..11b890e 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -7,6 +7,7 @@
 #define _DP_DEBUG_H_
 
 #include "dp_panel.h"
+#include "dp_ctrl.h"
 #include "dp_link.h"
 #include "dp_usbpd.h"
 #include "dp_aux.h"
@@ -63,6 +64,7 @@
 	struct drm_connector **connector;
 	struct dp_catalog *catalog;
 	struct dp_parser *parser;
+	struct dp_ctrl *ctrl;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index ccfa611f..b326a50 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -103,6 +103,8 @@
 
 	u32 tot_dsc_blks_in_use;
 
+	bool process_hpd_connect;
+
 	struct notifier_block usb_nb;
 };
 
@@ -111,11 +113,6 @@
 	{}
 };
 
-static bool dp_display_framework_ready(struct dp_display_private *dp)
-{
-	return dp->dp_display.post_open ? false : true;
-}
-
 static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp)
 {
 	return dp->link->hdcp_status.hdcp_version && dp->hdcp.ops;
@@ -236,16 +233,64 @@
 		struct sde_hdcp_ops *ops = dev->ops;
 		void *fd = dev->fd;
 
-		if (!fd || !ops || (dp->hdcp.source_cap & dev->ver))
+		if (!fd || !ops)
 			continue;
 
-		if (ops->feature_supported(fd))
+		if (ops->set_mode && ops->set_mode(fd, dp->mst.mst_active))
+			continue;
+
+		if (!(dp->hdcp.source_cap & dev->ver) &&
+				ops->feature_supported &&
+				ops->feature_supported(fd))
 			dp->hdcp.source_cap |= dev->ver;
 	}
 
 	dp_display_update_hdcp_status(dp, false);
 }
 
+static void dp_display_hdcp_register_streams(struct dp_display_private *dp)
+{
+	int rc;
+	size_t i;
+	struct sde_hdcp_ops *ops = dp->hdcp.ops;
+	void *data = dp->hdcp.data;
+
+	if (dp_display_is_ready(dp) && dp->mst.mst_active && ops &&
+			ops->register_streams) {
+		struct stream_info streams[DP_STREAM_MAX];
+		int index = 0;
+
+		pr_debug("Registering all active panel streams with HDCP\n");
+		for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
+			if (!dp->active_panels[i])
+				continue;
+			streams[index].stream_id = i;
+			streams[index].virtual_channel =
+				dp->active_panels[i]->vcpi;
+			index++;
+		}
+
+		if (index > 0) {
+			rc = ops->register_streams(data, index, streams);
+			if (rc)
+				pr_err("failed to register streams. rc = %d\n",
+					rc);
+		}
+	}
+}
+
+static void dp_display_hdcp_deregister_stream(struct dp_display_private *dp,
+		enum dp_stream_id stream_id)
+{
+	if (dp->hdcp.ops->deregister_streams) {
+		struct stream_info stream = {stream_id,
+				dp->active_panels[stream_id]->vcpi};
+
+		pr_debug("Deregistering stream within HDCP library\n");
+		dp->hdcp.ops->deregister_streams(dp->hdcp.data, 1, &stream);
+	}
+}
+
 static void dp_display_hdcp_cb_work(struct work_struct *work)
 {
 	struct dp_display_private *dp;
@@ -255,12 +300,22 @@
 	void *data;
 	int rc = 0;
 	u32 hdcp_auth_state;
+	u8 sink_status = 0;
 
 	dp = container_of(dw, struct dp_display_private, hdcp_cb_work);
 
 	if (!dp->power_on || !dp->is_connected || atomic_read(&dp->aborted))
 		return;
 
+	drm_dp_dpcd_readb(dp->aux->drm_aux, DP_SINK_STATUS, &sink_status);
+	sink_status &= (DP_RECEIVE_PORT_0_STATUS | DP_RECEIVE_PORT_1_STATUS);
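+	/* proceed only once the sink reports a receive port in sync */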
+	if (sink_status < 1) {
+		pr_debug("Sink not synchronized. Queuing again then exiting\n");
+		queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
+		return;
+	}
+
 	status = &dp->link->hdcp_status;
 
 	if (status->hdcp_state == HDCP_STATE_INACTIVE) {
@@ -268,6 +322,11 @@
 		dp_display_update_hdcp_info(dp);
 
 		if (dp_display_is_hdcp_enabled(dp)) {
+			if (dp->hdcp.ops && dp->hdcp.ops->on &&
+					dp->hdcp.ops->on(dp->hdcp.data)) {
+				dp_display_update_hdcp_status(dp, true);
+				return;
+			}
 			status->hdcp_state = HDCP_STATE_AUTHENTICATING;
 		} else {
 			dp_display_update_hdcp_status(dp, true);
@@ -294,11 +353,18 @@
 
 	switch (status->hdcp_state) {
 	case HDCP_STATE_AUTHENTICATING:
+		dp_display_hdcp_register_streams(dp);
 		if (dp->hdcp.ops && dp->hdcp.ops->authenticate)
 			rc = dp->hdcp.ops->authenticate(data);
 		break;
 	case HDCP_STATE_AUTH_FAIL:
 		if (dp_display_is_ready(dp) && dp->power_on) {
+			if (ops && ops->on && ops->on(data)) {
+				dp_display_update_hdcp_status(dp, true);
+				return;
+			}
+			dp_display_hdcp_register_streams(dp);
+			status->hdcp_state = HDCP_STATE_AUTHENTICATING;
 			if (ops && ops->reauthenticate) {
 				rc = ops->reauthenticate(data);
 				if (rc)
@@ -309,6 +375,7 @@
 		}
 		break;
 	default:
+		dp_display_hdcp_register_streams(dp);
 		break;
 	}
 }
@@ -502,36 +569,6 @@
 			envp);
 }
 
-static void dp_display_post_open(struct dp_display *dp_display)
-{
-	struct drm_connector *connector;
-	struct dp_display_private *dp;
-
-	if (!dp_display) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-	if (IS_ERR_OR_NULL(dp)) {
-		pr_err("invalid params\n");
-		return;
-	}
-
-	connector = dp->dp_display.base_connector;
-
-	if (!connector) {
-		pr_err("base connector not set\n");
-		return;
-	}
-
-	/* if cable is already connected, send notification */
-	if (dp->hpd->hpd_high)
-		queue_work(dp->wq, &dp->connect_work);
-	else
-		dp_display->post_open = NULL;
-}
-
 static int dp_display_send_hpd_notification(struct dp_display_private *dp)
 {
 	int ret = 0;
@@ -541,6 +578,8 @@
 
 	if (!dp->mst.mst_active)
 		dp->dp_display.is_sst_connected = hpd;
+	else
+		dp->dp_display.is_sst_connected = false;
 
 	reinit_completion(&dp->notification_comp);
 	dp_display_send_hpd_event(dp);
@@ -551,9 +590,6 @@
 	if (!dp->mst.mst_active && (dp->power_on == hpd))
 		goto skip_wait;
 
-	if (!dp_display_framework_ready(dp))
-		goto skip_wait;
-
 	if (!wait_for_completion_timeout(&dp->notification_comp,
 						HZ * 5)) {
 		pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
@@ -571,30 +607,47 @@
 	dp->panel->mst_state = state;
 }
 
-static void dp_display_process_mst_hpd_high(struct dp_display_private *dp)
+static void dp_display_process_mst_hpd_high(struct dp_display_private *dp,
+						bool mst_probe)
 {
 	bool is_mst_receiver;
 	struct dp_mst_hpd_info info;
+	int ret;
 
-	if (dp->parser->has_mst && dp->mst.drm_registered) {
-		DP_MST_DEBUG("mst_hpd_high work\n");
+	if (!dp->parser->has_mst || !dp->mst.drm_registered) {
+		DP_MST_DEBUG("mst not enabled. has_mst:%d, registered:%d\n",
+				dp->parser->has_mst, dp->mst.drm_registered);
+		return;
+	}
 
+	DP_MST_DEBUG("mst_hpd_high work. mst_probe:%d\n", mst_probe);
+
+	if (!dp->mst.mst_active) {
 		is_mst_receiver = dp->panel->read_mst_cap(dp->panel);
 
-		if (is_mst_receiver && !dp->mst.mst_active) {
-
-			/* clear sink mst state */
-			drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
-
-			dp_display_update_mst_state(dp, true);
-
-			info.mst_protocol = dp->parser->has_mst_sideband;
-			info.mst_port_cnt = dp->debug->mst_port_cnt;
-			info.edid = dp->debug->get_edid(dp->debug);
-
-			if (dp->mst.cbs.hpd)
-				dp->mst.cbs.hpd(&dp->dp_display, true, &info);
+		if (!is_mst_receiver) {
+			DP_MST_DEBUG("sink doesn't support mst\n");
+			return;
 		}
+
+		/* clear sink mst state */
+		drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
+
+		ret = drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL,
+				 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+		if (ret < 0) {
+			pr_err("sink mst enablement failed\n");
+			return;
+		}
+
+		dp_display_update_mst_state(dp, true);
+	} else if (dp->mst.mst_active && mst_probe) {
+		info.mst_protocol = dp->parser->has_mst_sideband;
+		info.mst_port_cnt = dp->debug->mst_port_cnt;
+		info.edid = dp->debug->get_edid(dp->debug);
+
+		if (dp->mst.cbs.hpd)
+			dp->mst.cbs.hpd(&dp->dp_display, true, &info);
 	}
 
 	DP_MST_DEBUG("mst_hpd_high. mst_active:%d\n", dp->mst.mst_active);
@@ -648,7 +701,16 @@
 
 static int dp_display_process_hpd_high(struct dp_display_private *dp)
 {
-	int rc = 0;
+	int rc = -EINVAL;
+
+	mutex_lock(&dp->session_lock);
+
+	if (dp->is_connected) {
+		pr_debug("dp already connected, skipping hpd high\n");
+		mutex_unlock(&dp->session_lock);
+		rc = -EISCONN;
+		goto end;
+	}
 
 	dp->is_connected = true;
 
@@ -671,25 +733,32 @@
 	 * ETIMEDOUT --> cable may have been removed
 	 * ENOTCONN --> no downstream device connected
 	 */
-	if (rc == -ETIMEDOUT || rc == -ENOTCONN)
+	if (rc == -ETIMEDOUT || rc == -ENOTCONN) {
+		dp->is_connected = false;
 		goto end;
+	}
 
 	dp->link->process_request(dp->link);
 	dp->panel->handle_sink_request(dp->panel);
 
-	dp_display_process_mst_hpd_high(dp);
+	dp_display_process_mst_hpd_high(dp, false);
 
-	mutex_lock(&dp->session_lock);
 	rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
 				dp->panel->fec_en, false);
 	if (rc) {
-		mutex_unlock(&dp->session_lock);
+		dp->is_connected = false;
 		goto end;
 	}
+
+	dp->process_hpd_connect = false;
+
+	dp_display_process_mst_hpd_high(dp, true);
+end:
 	mutex_unlock(&dp->session_lock);
 
-	dp_display_send_hpd_notification(dp);
-end:
+	if (!rc)
+		dp_display_send_hpd_notification(dp);
+
 	return rc;
 }
 
@@ -715,6 +784,7 @@
 	int rc = 0;
 
 	dp->is_connected = false;
+	dp->process_hpd_connect = false;
 
 	dp_display_process_mst_hpd_low(dp);
 
@@ -755,11 +825,15 @@
 			goto end;
 	}
 
+	mutex_lock(&dp->session_lock);
 	dp_display_host_init(dp);
 
 	/* check for hpd high */
 	if (dp->hpd->hpd_high)
 		queue_work(dp->wq, &dp->connect_work);
+	else
+		dp->process_hpd_connect = true;
+	mutex_unlock(&dp->session_lock);
 end:
 	return rc;
 }
@@ -793,8 +867,10 @@
 {
 	int idx;
 	struct dp_panel *dp_panel;
+	struct dp_link_hdcp_status *status = &dp->link->hdcp_status;
 
-	if (dp_display_is_hdcp_enabled(dp)) {
+	if (dp_display_is_hdcp_enabled(dp) &&
+			status->hdcp_state != HDCP_STATE_INACTIVE) {
 		cancel_delayed_work_sync(&dp->hdcp_cb_work);
 		if (dp->hdcp.ops->off)
 			dp->hdcp.ops->off(dp->hdcp.data);
@@ -878,18 +954,12 @@
 		goto end;
 	}
 
-	/*
-	 * In case cable/dongle is disconnected during adb shell stop,
-	 * reset psm_enabled flag to false since it is no more needed
-	 */
-	if (dp->dp_display.post_open)
-		dp->debug->psm_enabled = false;
-
-	if (dp->debug->psm_enabled)
+	mutex_lock(&dp->session_lock);
+	if (dp->debug->psm_enabled && dp->core_initialized)
 		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
+	mutex_unlock(&dp->session_lock);
 
 	dp_display_disconnect_sync(dp);
-	dp->dp_display.post_open = NULL;
 
 	if (!dp->debug->sim_mode && !dp->parser->no_aux_switch
 	    && !dp->parser->gpio_aux_switch)
@@ -936,11 +1006,19 @@
 	struct dp_display_private *dp = container_of(work,
 			struct dp_display_private, attention_work);
 
-	if (dp->debug->mst_hpd_sim)
-		goto mst_attention;
+	mutex_lock(&dp->session_lock);
 
-	if (dp->link->process_request(dp->link))
+	if (dp->debug->mst_hpd_sim || !dp->core_initialized) {
+		mutex_unlock(&dp->session_lock);
+		goto mst_attention;
+	}
+
+	if (dp->link->process_request(dp->link)) {
+		mutex_unlock(&dp->session_lock);
 		goto cp_irq;
+	}
+
+	mutex_unlock(&dp->session_lock);
 
 	if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
 		if (dp_display_is_sink_count_zero(dp)) {
@@ -997,16 +1075,16 @@
 		return -ENODEV;
 	}
 
-	pr_debug("hpd_irq:%d, hpd_high:%d, power_on:%d\n",
+	pr_debug("hpd_irq:%d, hpd_high:%d, power_on:%d, is_connected:%d\n",
 			dp->hpd->hpd_irq, dp->hpd->hpd_high,
-			dp->power_on);
+			dp->power_on, dp->is_connected);
 
 	if (!dp->hpd->hpd_high)
 		dp_display_disconnect_sync(dp);
 	else if ((dp->hpd->hpd_irq && dp->core_initialized) ||
 			dp->debug->mst_hpd_sim)
 		queue_work(dp->wq, &dp->attention_work);
-	else if (!dp->power_on)
+	else if (dp->process_hpd_connect || !dp->is_connected)
 		queue_work(dp->wq, &dp->connect_work);
 	else
 		pr_debug("ignored\n");
@@ -1228,6 +1306,7 @@
 	debug_in.connector = &dp->dp_display.base_connector;
 	debug_in.catalog = dp->catalog;
 	debug_in.parser = dp->parser;
+	debug_in.ctrl = dp->ctrl;
 
 	dp->debug = dp_debug_get(&debug_in);
 	if (IS_ERR(dp->debug)) {
@@ -1399,7 +1478,7 @@
 
 static int dp_display_set_stream_info(struct dp_display *dp_display,
 			void *panel, u32 strm_id, u32 start_slot,
-			u32 num_slots, u32 pbn)
+			u32 num_slots, u32 pbn, int vcpi)
 {
 	int rc = 0;
 	struct dp_panel *dp_panel;
@@ -1432,7 +1511,7 @@
 	if (panel) {
 		dp_panel = panel;
 		dp_panel->set_stream_info(dp_panel, strm_id, start_slot,
-				num_slots, pbn);
+				num_slots, pbn, vcpi);
 	}
 
 	mutex_unlock(&dp->session_lock);
@@ -1539,8 +1618,6 @@
 	cancel_delayed_work_sync(&dp->hdcp_cb_work);
 	queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
 end:
-	/* clear framework event notifier */
-	dp_display->post_open = NULL;
 	dp->aux->state |= DP_STATE_CTRL_POWERED_ON;
 
 	complete_all(&dp->notification_comp);
@@ -1552,7 +1629,9 @@
 {
 	struct dp_display_private *dp;
 	struct dp_panel *dp_panel = panel;
+	struct dp_link_hdcp_status *status;
 	int rc = 0;
+	size_t i;
 
 	if (!dp_display || !panel) {
 		pr_err("invalid input\n");
@@ -1563,19 +1642,35 @@
 
 	mutex_lock(&dp->session_lock);
 
+	status = &dp->link->hdcp_status;
+
 	if (!dp->power_on) {
 		pr_debug("stream already powered off, return\n");
 		goto end;
 	}
 
-	if (dp_display_is_hdcp_enabled(dp)) {
-		cancel_delayed_work_sync(&dp->hdcp_cb_work);
+	if (dp_display_is_hdcp_enabled(dp) &&
+			status->hdcp_state != HDCP_STATE_INACTIVE) {
+		flush_delayed_work(&dp->hdcp_cb_work);
+		if (dp->mst.mst_active) {
+			dp_display_hdcp_deregister_stream(dp,
+				dp_panel->stream_id);
+			for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
+				if (i != dp_panel->stream_id &&
+						dp->active_panels[i]) {
+					pr_debug("Streams are still active. Skip disabling HDCP\n");
+					goto stream;
+				}
+			}
+		}
+
 		if (dp->hdcp.ops->off)
 			dp->hdcp.ops->off(dp->hdcp.data);
 
 		dp_display_update_hdcp_status(dp, true);
 	}
 
+stream:
 	if (dp_panel->audio_supported)
 		dp_panel->audio->off(dp_panel->audio);
 
@@ -1689,14 +1784,6 @@
 		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
 		dp->debug->psm_enabled = true;
 
-		/*
-		 * In case of framework reboot, the DP off sequence is executed
-		 * without any notification from driver. Initialize post_open
-		 * callback to notify DP connection once framework restarts.
-		 */
-		dp_display->post_open = dp_display_post_open;
-		dp->dp_display.is_sst_connected = false;
-
 		dp->ctrl->off(dp->ctrl);
 		dp_display_host_deinit(dp);
 	}
@@ -1894,7 +1981,8 @@
 	if (free_dsc_blks >= required_dsc_blks)
 		dp_mode->capabilities |= DP_PANEL_CAPS_DSC;
 
-	pr_debug("in_use:%d, max:%d, free:%d, req:%d, caps:0x%x, width:%d\n",
+	if (dp_mode->capabilities & DP_PANEL_CAPS_DSC)
+		pr_debug("in_use:%d, max:%d, free:%d, req:%d, caps:0x%x, width:%d\n",
 			dp->tot_dsc_blks_in_use, dp->parser->max_dp_dsc_blks,
 			free_dsc_blks, required_dsc_blks, dp_mode->capabilities,
 			dp->parser->max_dp_dsc_input_width_pixs);
@@ -2258,6 +2346,77 @@
 	return 0;
 }
 
+static int dp_display_mst_connector_update_link_info(
+			struct dp_display *dp_display,
+			struct drm_connector *connector)
+{
+	int rc = 0;
+	struct sde_connector *sde_conn;
+	struct dp_panel *dp_panel;
+	struct dp_display_private *dp;
+
+	if (!dp_display || !connector) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	if (!dp->mst.drm_registered) {
+		pr_debug("drm mst not registered\n");
+		return -EPERM;
+	}
+
+	sde_conn = to_sde_connector(connector);
+	if (!sde_conn->drv_panel) {
+		pr_err("invalid panel for connector:%d\n", connector->base.id);
+		return -EINVAL;
+	}
+
+	dp_panel = sde_conn->drv_panel;
+
+	memcpy(dp_panel->dpcd, dp->panel->dpcd,
+			DP_RECEIVER_CAP_SIZE + 1);
+	memcpy(dp_panel->dsc_dpcd, dp->panel->dsc_dpcd,
+			DP_RECEIVER_DSC_CAP_SIZE + 1);
+	memcpy(&dp_panel->link_info, &dp->panel->link_info,
+			sizeof(dp_panel->link_info));
+
+	DP_MST_DEBUG("dp mst connector:%d link info updated\n");
+
+	return rc;
+}
+
+static int dp_display_mst_get_fixed_topology_port(
+			struct dp_display *dp_display,
+			u32 strm_id, u32 *port_num)
+{
+	struct dp_display_private *dp;
+	u32 port;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (strm_id >= DP_STREAM_MAX) {
+		pr_err("invalid stream id:%d\n", strm_id);
+		return -EINVAL;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	port = dp->parser->mst_fixed_port[strm_id];
+
+	if (!port || port > 255)
+		return -ENOENT;
+
+	if (port_num)
+		*port_num = port;
+
+	return 0;
+}
+
 static int dp_display_get_mst_caps(struct dp_display *dp_display,
 			struct dp_mst_caps *mst_caps)
 {
@@ -2331,7 +2490,7 @@
 	g_dp_display->unprepare     = dp_display_unprepare;
 	g_dp_display->request_irq   = dp_request_irq;
 	g_dp_display->get_debug     = dp_get_debug;
-	g_dp_display->post_open     = dp_display_post_open;
+	g_dp_display->post_open     = NULL;
 	g_dp_display->post_init     = dp_display_post_init;
 	g_dp_display->config_hdr    = dp_display_config_hdr;
 	g_dp_display->mst_install   = dp_display_mst_install;
@@ -2341,12 +2500,16 @@
 					dp_display_mst_connector_uninstall;
 	g_dp_display->mst_connector_update_edid =
 					dp_display_mst_connector_update_edid;
+	g_dp_display->mst_connector_update_link_info =
+				dp_display_mst_connector_update_link_info;
 	g_dp_display->get_mst_caps = dp_display_get_mst_caps;
 	g_dp_display->set_stream_info = dp_display_set_stream_info;
 	g_dp_display->update_pps = dp_display_update_pps;
 	g_dp_display->convert_to_dp_mode = dp_display_convert_to_dp_mode;
 	g_dp_display->mst_get_connector_info =
 					dp_display_mst_get_connector_info;
+	g_dp_display->mst_get_fixed_topology_port =
+					dp_display_mst_get_fixed_topology_port;
 
 	rc = component_add(&pdev->dev, &dp_display_comp_ops);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 410cee7..fe332af 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -100,13 +100,18 @@
 	int (*mst_connector_update_edid)(struct dp_display *dp_display,
 			struct drm_connector *connector,
 			struct edid *edid);
+	int (*mst_connector_update_link_info)(struct dp_display *dp_display,
+			struct drm_connector *connector);
 	int (*mst_get_connector_info)(struct dp_display *dp_display,
 			struct drm_connector *connector,
 			struct dp_mst_connector *mst_conn);
+	int (*mst_get_fixed_topology_port)(struct dp_display *dp_display,
+			u32 strm_id, u32 *port_num);
 	int (*get_mst_caps)(struct dp_display *dp_display,
 			struct dp_mst_caps *mst_caps);
 	int (*set_stream_info)(struct dp_display *dp_display, void *panel,
-			u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn);
+			u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn,
+			int vcpi);
 	void (*convert_to_dp_mode)(struct dp_display *dp_display, void *panel,
 			const struct drm_display_mode *drm_mode,
 			struct dp_display_mode *dp_mode);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 9b3bb24..b3b116a 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -114,7 +114,7 @@
 	}
 
 	/* for SST force stream id, start slot and total slots to 0 */
-	dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0);
+	dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0, 0);
 
 	rc = dp->enable(dp, bridge->dp_panel);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
index 3dd0fa1..f71c25e 100644
--- a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
+++ b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
@@ -51,7 +51,6 @@
 	u8 rx_status;
 	char abort_mask;
 
-	bool cp_irq_done;
 	bool polling;
 };
 
@@ -66,6 +65,25 @@
 	struct dp_hdcp2p2_int_set *int_set;
 };
 
+static inline int dp_hdcp2p2_valid_handle(struct dp_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib_ctx) {
+		pr_err("HDCP library needs to be acquired\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib) {
+		pr_err("invalid lib ops data\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl)
 {
 	enum hdcp_transport_wakeup_cmd cmd;
@@ -174,6 +192,7 @@
 	if (dp_hdcp2p2_copy_buf(ctrl, data))
 		goto exit;
 
+	ctrl->polling = false;
 	switch (data->cmd) {
 	case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
 		atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
@@ -216,38 +235,78 @@
 	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
 }
 
+static int dp_hdcp2p2_register(void *input, bool mst_enabled)
+{
+	int rc;
+	enum sde_hdcp_2x_device_type device_type;
+	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
+
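+	/* pick the HDCP 2.x transmitter type that matches the DP topology */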
+	if (mst_enabled)
+		device_type = HDCP_TXMTR_DP_MST;
+	else
+		device_type = HDCP_TXMTR_DP;
+
+	return sde_hdcp_2x_enable(ctrl->lib_ctx, device_type);
+}
+
+static int dp_hdcp2p2_on(void *input)
+{
+	int rc = 0;
+	struct dp_hdcp2p2_ctrl *ctrl = input;
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
+
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
+
+	cdata.cmd = HDCP_2X_CMD_START;
+	cdata.context = ctrl->lib_ctx;
+	rc = ctrl->lib->wakeup(&cdata);
+	if (rc)
+		pr_err("Unable to start the HDCP 2.2 library (%d)\n", rc);
+
+	return rc;
+}
+
 static void dp_hdcp2p2_off(void *input)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
-	struct hdcp_transport_wakeup_data cdata = {
-					HDCP_TRANSPORT_CMD_AUTHENTICATE};
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
 		return;
-	}
 
-	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
-		pr_err("hdcp is off\n");
-		return;
+	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
+		cdata.cmd = HDCP_2X_CMD_STOP;
+		cdata.context = ctrl->lib_ctx;
+		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
 	}
 
 	dp_hdcp2p2_set_interrupts(ctrl, false);
 
 	dp_hdcp2p2_reset(ctrl);
 
-	cdata.context = input;
-	dp_hdcp2p2_wakeup(&cdata);
-
 	kthread_park(ctrl->thread);
+
+	sde_hdcp_2x_disable(ctrl->lib_ctx);
 }
 
 static int dp_hdcp2p2_authenticate(void *input)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = input;
 	struct hdcp_transport_wakeup_data cdata = {
 					HDCP_TRANSPORT_CMD_AUTHENTICATE};
-	int rc = 0;
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
 
 	dp_hdcp2p2_set_interrupts(ctrl, true);
 
@@ -370,44 +428,34 @@
 
 static bool dp_hdcp2p2_feature_supported(void *input)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = input;
 	struct sde_hdcp_2x_ops *lib = NULL;
 	bool supported = false;
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		goto end;
-	}
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return supported;
 
 	lib = ctrl->lib;
-	if (!lib) {
-		pr_err("invalid lib ops data\n");
-		goto end;
-	}
-
 	if (lib->feature_supported)
 		supported = lib->feature_supported(
 			ctrl->lib_ctx);
-end:
+
 	return supported;
 }
 
 static void dp_hdcp2p2_force_encryption(void *data, bool enable)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = data;
 	struct sde_hdcp_2x_ops *lib = NULL;
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
 		return;
-	}
 
 	lib = ctrl->lib;
-	if (!lib) {
-		pr_err("invalid lib ops data\n");
-		return;
-	}
-
 	if (lib->force_encryption)
 		lib->force_encryption(ctrl->lib_ctx, enable);
 }
@@ -493,26 +541,12 @@
 		return;
 	}
 
-	if (ctrl->rx_status) {
-		if (!ctrl->cp_irq_done) {
-			pr_debug("waiting for CP_IRQ\n");
-			ctrl->polling = true;
-			return;
-		}
-
-		if (ctrl->rx_status & ctrl->sink_rx_status) {
-			ctrl->cp_irq_done = false;
-			ctrl->sink_rx_status = 0;
-			ctrl->rx_status = 0;
-		}
-	}
-
 	dp_hdcp2p2_get_msg_from_sink(ctrl);
 }
 
 static void dp_hdcp2p2_link_check(struct dp_hdcp2p2_ctrl *ctrl)
 {
-	int rc = 0;
+	int rc = 0, retries = 10;
 	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
 
 	if (!ctrl) {
@@ -545,6 +579,11 @@
 		goto exit;
 	}
 
+	/* wait for polling to start till spec allowed timeout */
+	while (!ctrl->polling && retries--)
+		msleep(20);
+
+	/* check if sink has made a message available */
 	if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) {
 		ctrl->sink_rx_status = 0;
 		ctrl->rx_status = 0;
@@ -552,26 +591,19 @@
 		dp_hdcp2p2_get_msg_from_sink(ctrl);
 
 		ctrl->polling = false;
-	} else {
-		ctrl->cp_irq_done = true;
 	}
 exit:
 	if (rc)
 		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
 }
 
-static void dp_hdcp2p2_manage_session(struct dp_hdcp2p2_ctrl *ctrl)
+static void dp_hdcp2p2_start_auth(struct dp_hdcp2p2_ctrl *ctrl)
 {
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
-
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_START_AUTH};
 	cdata.context = ctrl->lib_ctx;
 
 	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING)
-		cdata.cmd = HDCP_2X_CMD_START;
-	else
-		cdata.cmd = HDCP_2X_CMD_STOP;
-
-	dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
 }
 
 static int dp_hdcp2p2_read_rx_status(struct dp_hdcp2p2_ctrl *ctrl,
@@ -617,34 +649,31 @@
 
 static int dp_hdcp2p2_cp_irq(void *input)
 {
-	int rc = 0;
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = input;
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
 
 	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
 		atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
 		pr_err("invalid hdcp state\n");
-		rc = -EINVAL;
-		goto error;
+		return -EINVAL;
 	}
 
 	ctrl->sink_rx_status = 0;
 	rc = dp_hdcp2p2_read_rx_status(ctrl, &ctrl->sink_rx_status);
 	if (rc) {
 		pr_err("failed to read rx status\n");
-		goto error;
+		return rc;
 	}
 
 	pr_debug("sink_rx_status=0x%x\n", ctrl->sink_rx_status);
 
 	if (!ctrl->sink_rx_status) {
 		pr_debug("not a hdcp 2.2 irq\n");
-		rc = -EINVAL;
-		goto error;
+		return -EINVAL;
 	}
 
 
@@ -652,8 +681,6 @@
 	wake_up(&ctrl->wait_q);
 
 	return 0;
-error:
-	return rc;
 }
 
 static int dp_hdcp2p2_isr(void *input)
@@ -721,6 +748,50 @@
 	return false;
 }
 
+static int dp_hdcp2p2_change_streams(struct dp_hdcp2p2_ctrl *ctrl,
+		struct sde_hdcp_2x_wakeup_data *cdata)
+{
+	if (!ctrl || cdata->num_streams == 0 || !cdata->streams) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib_ctx) {
+		pr_err("HDCP library needs to be acquired\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib) {
+		pr_err("invalid lib ops data\n");
+		return -EINVAL;
+	}
+
+	cdata->context = ctrl->lib_ctx;
+	return ctrl->lib->wakeup(cdata);
+}
+
+static int dp_hdcp2p2_register_streams(void *input, u8 num_streams,
+			struct stream_info *streams)
+{
+	struct dp_hdcp2p2_ctrl *ctrl = input;
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_OPEN_STREAMS};
+
+	cdata.streams = streams;
+	cdata.num_streams = num_streams;
+	return dp_hdcp2p2_change_streams(ctrl, &cdata);
+}
+
+static int dp_hdcp2p2_deregister_streams(void *input, u8 num_streams,
+			struct stream_info *streams)
+{
+	struct dp_hdcp2p2_ctrl *ctrl = input;
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_CLOSE_STREAMS};
+
+	cdata.streams = streams;
+	cdata.num_streams = num_streams;
+	return dp_hdcp2p2_change_streams(ctrl, &cdata);
+}
+
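Both helpers above funnel into dp_hdcp2p2_change_streams(), differing only in the wakeup command (HDCP_2X_CMD_OPEN_STREAMS vs HDCP_2X_CMD_CLOSE_STREAMS). A minimal caller sketch, assuming only the signatures shown in this patch and treating struct stream_info as opaque:

	/* illustrative only: open or close a set of MST streams */
	static int example_toggle_streams(void *input,
			struct stream_info *streams, u8 num_streams, bool open)
	{
		return open ?
			dp_hdcp2p2_register_streams(input, num_streams, streams) :
			dp_hdcp2p2_deregister_streams(input, num_streams, streams);
	}
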
 void sde_dp_hdcp2p2_deinit(void *input)
 {
 	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
@@ -731,9 +803,13 @@
 		return;
 	}
 
-	cdata.cmd = HDCP_2X_CMD_STOP;
-	cdata.context = ctrl->lib_ctx;
-	dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
+		cdata.cmd = HDCP_2X_CMD_STOP;
+		cdata.context = ctrl->lib_ctx;
+		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	}
+
+	sde_hdcp_2x_deregister(ctrl->lib_ctx);
 
 	kthread_stop(ctrl->thread);
 
@@ -769,7 +845,10 @@
 			dp_hdcp2p2_send_msg(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_RECV_MESSAGE:
-			dp_hdcp2p2_recv_msg(ctrl);
+			if (ctrl->rx_status)
+				ctrl->polling = true;
+			else
+				dp_hdcp2p2_recv_msg(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
 			dp_hdcp2p2_send_auth_status(ctrl);
@@ -779,16 +858,13 @@
 			dp_hdcp2p2_send_auth_status(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_LINK_POLL:
-			if (ctrl->cp_irq_done)
-				dp_hdcp2p2_recv_msg(ctrl);
-			else
-				ctrl->polling = true;
+			ctrl->polling = true;
 			break;
 		case HDCP_TRANSPORT_CMD_LINK_CHECK:
 			dp_hdcp2p2_link_check(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_AUTHENTICATE:
-			dp_hdcp2p2_manage_session(ctrl);
+			dp_hdcp2p2_start_auth(ctrl);
 			break;
 		default:
 			break;
@@ -809,8 +885,12 @@
 		.feature_supported = dp_hdcp2p2_feature_supported,
 		.force_encryption = dp_hdcp2p2_force_encryption,
 		.sink_support = dp_hdcp2p2_supported,
+		.set_mode = dp_hdcp2p2_register,
+		.on = dp_hdcp2p2_on,
 		.off = dp_hdcp2p2_off,
 		.cp_irq = dp_hdcp2p2_cp_irq,
+		.register_streams = dp_hdcp2p2_register_streams,
+		.deregister_streams = dp_hdcp2p2_deregister_streams,
 	};
 
 	static struct hdcp_transport_ops client_ops = {
@@ -865,7 +945,6 @@
 	register_data.hdcp_data = &ctrl->lib_ctx;
 	register_data.client_ops = &client_ops;
 	register_data.ops = &hdcp2x_ops;
-	register_data.device_type = HDCP_TXMTR_DP;
 	register_data.client_data = ctrl;
 
 	rc = sde_hdcp_2x_register(&register_data);
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
index 76a5f21..a48fe5f 100644
--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
@@ -46,23 +46,23 @@
 
 	if (parser->no_aux_switch && parser->lphw_hpd) {
 		dp_hpd = dp_lphw_hpd_get(dev, parser, catalog, cb);
-		if (!dp_hpd) {
+		if (IS_ERR(dp_hpd)) {
 			pr_err("failed to get lphw hpd\n");
 			return dp_hpd;
 		}
 		dp_hpd->type = DP_HPD_LPHW;
 	} else if (parser->no_aux_switch) {
 		dp_hpd = dp_gpio_hpd_get(dev, cb);
-		if (!dp_hpd) {
+		if (IS_ERR(dp_hpd)) {
 			pr_err("failed to get gpio hpd\n");
-			goto out;
+			return dp_hpd;
 		}
 		dp_hpd->type = DP_HPD_GPIO;
 	} else {
 		dp_hpd = dp_usbpd_get(dev, cb);
-		if (!dp_hpd) {
+		if (IS_ERR(dp_hpd)) {
 			pr_err("failed to get usbpd\n");
-			goto out;
+			return dp_hpd;
 		}
 		dp_hpd->type = DP_HPD_USBPD;
 	}
@@ -74,7 +74,6 @@
 	if (!dp_hpd->isr)
 		dp_hpd->isr		= dp_hpd_isr;
 
-out:
 	return dp_hpd;
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_mst_drm.c b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
index f528485..508c6dc 100644
--- a/drivers/gpu/drm/msm/dp/dp_mst_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
@@ -21,8 +21,8 @@
 #include "dp_drm.h"
 
 #define DP_MST_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
+#define DP_MST_INFO_LOG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
 
-#define MAX_DP_MST_STREAMS		2
 #define MAX_DP_MST_DRM_ENCODERS		2
 #define MAX_DP_MST_DRM_BRIDGES		2
 #define HPD_STRING_SIZE			30
@@ -93,12 +93,18 @@
 	struct drm_display_mode drm_mode;
 	struct dp_display_mode dp_mode;
 	struct drm_connector *connector;
+	struct drm_connector *old_connector;
 	void *dp_panel;
+	void *old_dp_panel;
 
 	int vcpi;
 	int pbn;
 	int num_slots;
 	int start_slot;
+
+	u32 fixed_port_num;
+	bool fixed_port_added;
+	struct drm_connector *fixed_connector;
 };
 
 struct dp_mst_private {
@@ -111,6 +117,7 @@
 	struct dp_mst_sim_mode simulator;
 	struct mutex mst_lock;
 	enum dp_drv_state state;
+	bool mst_session_state;
 };
 
 struct dp_mst_encoder_info_cache {
@@ -167,10 +174,13 @@
 			mutex_lock(&mstb->mgr->lock);
 			list_del(&port->next);
 			mutex_unlock(&mstb->mgr->lock);
-			return;
+			goto put_port;
 		}
 		(*mstb->mgr->cbs->register_connector)(port->connector);
 	}
+
+put_port:
+	kref_put(&port->kref, NULL);
 }
 
 static void dp_mst_sim_link_probe_work(struct work_struct *work)
@@ -525,7 +535,8 @@
 
 		mst->dp_display->set_stream_info(mst->dp_display,
 				dp_bridge->dp_panel,
-				dp_bridge->id, start_slot, num_slots, pbn);
+				dp_bridge->id, start_slot, num_slots, pbn,
+				dp_bridge->vcpi);
 
 		pr_info("bridge:%d vcpi:%d start_slot:%d num_slots:%d, pbn:%d\n",
 			dp_bridge->id, dp_bridge->vcpi,
@@ -550,7 +561,8 @@
 
 		mst->dp_display->set_stream_info(mst->dp_display,
 				mst_bridge->dp_panel,
-				mst_bridge->id, start_slot, num_slots, pbn);
+				mst_bridge->id, start_slot, num_slots, pbn,
+				mst_bridge->vcpi);
 	}
 }
 
@@ -672,8 +684,6 @@
 	struct dp_display *dp;
 	struct dp_mst_private *mst;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -682,6 +692,9 @@
 	bridge = to_dp_mst_bridge(drm_bridge);
 	dp = bridge->display;
 
+	bridge->old_connector = NULL;
+	bridge->old_dp_panel = NULL;
+
 	if (!bridge->connector) {
 		pr_err("Invalid connector\n");
 		return;
@@ -718,7 +731,14 @@
 		_dp_mst_bridge_pre_enable_part2(bridge);
 	}
 
-	DP_MST_DEBUG("mst bridge [%d] pre enable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mode: id(%d) mode(%s), refresh(%d)\n",
+			bridge->id, bridge->drm_mode.name,
+			bridge->drm_mode.vrefresh);
+	DP_MST_INFO_LOG("dsc: id(%d) dsc(%d)\n", bridge->id,
+			bridge->dp_mode.timing.comp_info.comp_ratio);
+	DP_MST_INFO_LOG("channel: id(%d) vcpi(%d) start(%d) tot(%d)\n",
+			bridge->id, bridge->vcpi, bridge->start_slot,
+			bridge->num_slots);
 end:
 	mutex_unlock(&mst->mst_lock);
 }
@@ -729,8 +749,6 @@
 	struct dp_mst_bridge *bridge;
 	struct dp_display *dp;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -751,7 +769,8 @@
 		return;
 	}
 
-	DP_MST_DEBUG("mst bridge [%d] post enable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mst bridge [%d] post enable complete\n",
+			bridge->id);
 }
 
 static void dp_mst_bridge_disable(struct drm_bridge *drm_bridge)
@@ -761,8 +780,6 @@
 	struct dp_display *dp;
 	struct dp_mst_private *mst;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -791,7 +808,7 @@
 
 	_dp_mst_bridge_pre_disable_part2(bridge);
 
-	DP_MST_DEBUG("mst bridge [%d] disable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mst bridge [%d] disable complete\n", bridge->id);
 
 	mutex_unlock(&mst->mst_lock);
 }
@@ -803,8 +820,6 @@
 	struct dp_display *dp;
 	struct dp_mst_private *mst;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -832,12 +847,17 @@
 	/* maintain the connector to encoder link during suspend/resume */
 	if (mst->state != PM_SUSPEND) {
 		/* Disconnect the connector and panel info from bridge */
+		mst->mst_bridge[bridge->id].old_connector =
+				mst->mst_bridge[bridge->id].connector;
+		mst->mst_bridge[bridge->id].old_dp_panel =
+				mst->mst_bridge[bridge->id].dp_panel;
 		mst->mst_bridge[bridge->id].connector = NULL;
 		mst->mst_bridge[bridge->id].dp_panel = NULL;
 		mst->mst_bridge[bridge->id].encoder_active_sts = false;
 	}
 
-	DP_MST_DEBUG("mst bridge [%d] post disable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mst bridge [%d] post disable complete\n",
+			bridge->id);
 }
 
 static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge,
@@ -856,13 +876,21 @@
 
 	bridge = to_dp_mst_bridge(drm_bridge);
 	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
+		if (!bridge->old_connector) {
+			pr_err("Invalid connector\n");
+			return;
+		}
+		bridge->connector = bridge->old_connector;
+		bridge->old_connector = NULL;
 	}
 
 	if (!bridge->dp_panel) {
-		pr_err("Invalid dp_panel\n");
-		return;
+		if (!bridge->old_dp_panel) {
+			pr_err("Invalid dp_panel\n");
+			return;
+		}
+		bridge->dp_panel = bridge->old_dp_panel;
+		bridge->old_dp_panel = NULL;
 	}
 
 	dp = bridge->display;
@@ -877,6 +905,10 @@
 
 /* DP MST Bridge APIs */
 
+static struct drm_connector *
+dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
+				struct drm_encoder *encoder);
+
 static const struct drm_bridge_funcs dp_mst_bridge_ops = {
 	.attach       = dp_mst_bridge_attach,
 	.mode_fixup   = dp_mst_bridge_mode_fixup,
@@ -944,6 +976,23 @@
 
 	DP_MST_DEBUG("mst drm bridge init. bridge id:%d\n", i);
 
+	/*
+	 * If a fixed topology port is defined, the connector is created
+	 * immediately.
+	 */
+	rc = display->mst_get_fixed_topology_port(display, bridge->id,
+			&bridge->fixed_port_num);
+	if (!rc) {
+		bridge->fixed_connector =
+			dp_mst_drm_fixed_connector_init(display,
+				bridge->encoder);
+		if (bridge->fixed_connector == NULL) {
+			pr_err("failed to create fixed connector\n");
+			rc = -ENOMEM;
+			goto end;
+		}
+	}
+
 	return 0;
 
 end:
@@ -1136,7 +1185,8 @@
 	}
 
 	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (!mst->mst_bridge[i].encoder_active_sts) {
+		if (!mst->mst_bridge[i].encoder_active_sts &&
+			!mst->mst_bridge[i].fixed_connector) {
 			mst->mst_bridge[i].encoder_active_sts = true;
 			mst->mst_bridge[i].connector = connector;
 			mst->mst_bridge[i].dp_panel = conn->drv_panel;
@@ -1343,6 +1393,7 @@
 
 	if (!connector) {
 		pr_err("mst sde_connector_init failed\n");
+		drm_modeset_unlock_all(dev);
 		return connector;
 	}
 
@@ -1350,6 +1401,7 @@
 	if (rc) {
 		pr_err("mst connector install failed\n");
 		sde_connector_destroy(connector);
+		drm_modeset_unlock_all(dev);
 		return NULL;
 	}
 
@@ -1372,7 +1424,7 @@
 	/* unlock connector and make it accessible */
 	drm_modeset_unlock_all(dev);
 
-	DP_MST_DEBUG("add mst connector:%d\n", connector->base.id);
+	DP_MST_INFO_LOG("add mst connector id:%d\n", connector->base.id);
 
 	return connector;
 }
@@ -1383,7 +1435,8 @@
 
 	connector->status = connector->funcs->detect(connector, false);
 
-	DP_MST_DEBUG("register mst connector:%d\n", connector->base.id);
+	DP_MST_INFO_LOG("register mst connector id:%d\n",
+			connector->base.id);
 	drm_connector_register(connector);
 }
 
@@ -1392,12 +1445,297 @@
 {
 	DP_MST_DEBUG("enter\n");
 
-	DP_MST_DEBUG("destroy mst connector:%d\n", connector->base.id);
+	DP_MST_INFO_LOG("destroy mst connector id:%d\n", connector->base.id);
 
 	drm_connector_unregister(connector);
 	drm_connector_put(connector);
 }
 
+static enum drm_connector_status
+dp_mst_fixed_connector_detect(struct drm_connector *connector, bool force,
+			void *display)
+{
+	struct dp_display *dp_display = display;
+	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
+	int i;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (mst->mst_bridge[i].fixed_connector != connector)
+			continue;
+
+		if (!mst->mst_bridge[i].fixed_port_added)
+			break;
+
+		return dp_mst_connector_detect(connector, force, display);
+	}
+
+	return connector_status_disconnected;
+}
+
+static struct drm_encoder *
+dp_mst_fixed_atomic_best_encoder(struct drm_connector *connector,
+			void *display, struct drm_connector_state *state)
+{
+	struct dp_display *dp_display = display;
+	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
+	struct sde_connector *conn = to_sde_connector(connector);
+	struct drm_encoder *enc = NULL;
+	u32 i;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (mst->mst_bridge[i].connector == connector) {
+			enc = mst->mst_bridge[i].encoder;
+			goto end;
+		}
+	}
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (mst->mst_bridge[i].fixed_connector == connector) {
+			mst->mst_bridge[i].encoder_active_sts = true;
+			mst->mst_bridge[i].connector = connector;
+			mst->mst_bridge[i].dp_panel = conn->drv_panel;
+			enc = mst->mst_bridge[i].encoder;
+			break;
+		}
+	}
+
+end:
+	if (enc)
+		DP_MST_DEBUG("mst connector:%d atomic best encoder:%d\n",
+			connector->base.id, i);
+	else
+		DP_MST_DEBUG("mst connector:%d atomic best encoder failed\n",
+				connector->base.id);
+
+	return enc;
+}
+
+static u32 dp_mst_find_fixed_port_num(struct drm_dp_mst_branch *mstb,
+		struct drm_dp_mst_port *target)
+{
+	struct drm_dp_mst_port *port;
+	u32 port_num = 0;
+
+	/*
+	 * search in reverse of the order in which ports were added, so the
+	 * port number stays unique once the topology is fixed
+	 */
+	list_for_each_entry_reverse(port, &mstb->ports, next) {
+		if (port->mstb)
+			port_num += dp_mst_find_fixed_port_num(port->mstb,
+						target);
+		else if (!port->input) {
+			++port_num;
+			if (port == target)
+				break;
+		}
+	}
+
+	return port_num;
+}
+
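The reverse walk is easiest to see on a concrete, hypothetical topology:

	/*
	 * Example (assumed topology): the primary branch holds, in reverse
	 * list order, leaf D, branch B (carrying two leaves), then leaf A.
	 * Depth-first counting numbers D=1, B's leaves 2 and 3, and A=4, so
	 * a lookup with target == A returns 4; a return value of 0 means
	 * the target was never reached.
	 */
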
+static struct drm_connector *
+dp_mst_find_fixed_connector(struct dp_mst_private *dp_mst,
+		struct drm_dp_mst_port *port)
+{
+	struct dp_display *dp_display = dp_mst->dp_display;
+	struct drm_connector *connector = NULL;
+	struct sde_connector *c_conn;
+	u32 port_num;
+	int i;
+
+	mutex_lock(&port->mgr->lock);
+	port_num = dp_mst_find_fixed_port_num(port->mgr->mst_primary, port);
+	mutex_unlock(&port->mgr->lock);
+
+	if (!port_num)
+		return NULL;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (dp_mst->mst_bridge[i].fixed_port_num == port_num) {
+			connector = dp_mst->mst_bridge[i].fixed_connector;
+			c_conn = to_sde_connector(connector);
+			c_conn->mst_port = port;
+			dp_display->mst_connector_update_link_info(dp_display,
+					connector);
+			dp_mst->mst_bridge[i].fixed_port_added = true;
+			DP_MST_DEBUG("found fixed connector %d\n",
+					DRMID(connector));
+			break;
+		}
+	}
+
+	return connector;
+}
+
+static int
+dp_mst_find_first_available_encoder_idx(struct dp_mst_private *dp_mst)
+{
+	int enc_idx = MAX_DP_MST_DRM_BRIDGES;
+	int i;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (!dp_mst->mst_bridge[i].fixed_connector) {
+			enc_idx = i;
+			break;
+		}
+	}
+
+	return enc_idx;
+}
+
+static struct drm_connector *
+dp_mst_add_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port, const char *pathprop)
+{
+	struct dp_mst_private *dp_mst;
+	struct drm_device *dev;
+	struct dp_display *dp_display;
+	struct drm_connector *connector;
+	int i, enc_idx;
+
+	DP_MST_DEBUG("enter\n");
+
+	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
+
+	dp_display = dp_mst->dp_display;
+	dev = dp_display->drm_dev;
+
+	if (port->input || port->mstb)
+		enc_idx = MAX_DP_MST_DRM_BRIDGES;
+	else {
+		/* if port is already reserved, return immediately */
+		connector = dp_mst_find_fixed_connector(dp_mst, port);
+		if (connector != NULL)
+			return connector;
+
+		/* first available bridge index for non-reserved port */
+		enc_idx = dp_mst_find_first_available_encoder_idx(dp_mst);
+	}
+
+	/* add normal connector */
+	connector = dp_mst_add_connector(mgr, port, pathprop);
+	if (!connector) {
+		DP_MST_DEBUG("failed to add connector\n");
+		return NULL;
+	}
+
+	drm_modeset_lock_all(dev);
+
+	/* clear encoder list */
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+		connector->encoder_ids[i] = 0;
+
+	/* re-attach encoders from first available encoders */
+	for (i = enc_idx; i < MAX_DP_MST_DRM_BRIDGES; i++)
+		drm_connector_attach_encoder(connector,
+				dp_mst->mst_bridge[i].encoder);
+
+	drm_modeset_unlock_all(dev);
+
+	DP_MST_DEBUG("add mst connector:%d\n", connector->base.id);
+
+	return connector;
+}
+
+static void dp_mst_register_fixed_connector(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct dp_display *dp_display = c_conn->display;
+	struct dp_mst_private *dp_mst = dp_display->dp_mst_prv_info;
+	int i;
+
+	DP_MST_DEBUG("enter\n");
+
+	/* skip connector registered for fixed topology ports */
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (dp_mst->mst_bridge[i].fixed_connector == connector) {
+			DP_MST_DEBUG("found fixed connector %d\n",
+					DRMID(connector));
+			return;
+		}
+	}
+
+	dp_mst_register_connector(connector);
+}
+
+static void dp_mst_destroy_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
+					   struct drm_connector *connector)
+{
+	struct dp_mst_private *dp_mst;
+	int i;
+
+	DP_MST_DEBUG("enter\n");
+
+	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
+
+	/* skip connector destroy for fixed topology ports */
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (dp_mst->mst_bridge[i].fixed_connector == connector) {
+			dp_mst->mst_bridge[i].fixed_port_added = false;
+			DP_MST_DEBUG("destroy fixed connector %d\n",
+					DRMID(connector));
+			return;
+		}
+	}
+
+	dp_mst_destroy_connector(mgr, connector);
+}
+
+static struct drm_connector *
+dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
+			struct drm_encoder *encoder)
+{
+	static const struct sde_connector_ops dp_mst_connector_ops = {
+		.post_init  = NULL,
+		.detect     = dp_mst_fixed_connector_detect,
+		.get_modes  = dp_mst_connector_get_modes,
+		.mode_valid = dp_mst_connector_mode_valid,
+		.get_info   = dp_mst_connector_get_info,
+		.get_mode_info  = dp_mst_connector_get_mode_info,
+		.atomic_best_encoder = dp_mst_fixed_atomic_best_encoder,
+		.atomic_check = dp_mst_connector_atomic_check,
+		.config_hdr = dp_mst_connector_config_hdr,
+		.pre_destroy = dp_mst_connector_pre_destroy,
+	};
+	struct drm_device *dev;
+	struct drm_connector *connector;
+	int rc;
+
+	DP_MST_DEBUG("enter\n");
+
+	dev = dp_display->drm_dev;
+
+	connector = sde_connector_init(dev,
+				encoder,
+				NULL,
+				dp_display,
+				&dp_mst_connector_ops,
+				DRM_CONNECTOR_POLL_HPD,
+				DRM_MODE_CONNECTOR_DisplayPort);
+
+	if (!connector) {
+		pr_err("mst sde_connector_init failed\n");
+		return NULL;
+	}
+
+	rc = dp_display->mst_connector_install(dp_display, connector);
+	if (rc) {
+		pr_err("mst connector install failed\n");
+		sde_connector_destroy(connector);
+		return NULL;
+	}
+
+	drm_object_attach_property(&connector->base,
+			dev->mode_config.path_property, 0);
+	drm_object_attach_property(&connector->base,
+			dev->mode_config.tile_property, 0);
+
+	DP_MST_DEBUG("add mst fixed connector:%d\n", connector->base.id);
+
+	return connector;
+}
+
 static void dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 {
 	struct dp_mst_private *mst = container_of(mgr, struct dp_mst_private,
@@ -1411,7 +1749,7 @@
 
 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 
-	DP_MST_DEBUG("mst hot plug event\n");
+	DP_MST_INFO_LOG("mst hot plug event\n");
 }
 
 static void dp_mst_hpd_event_notify(struct dp_mst_private *mst, bool hpd_status)
@@ -1432,7 +1770,7 @@
 
 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 
-	DP_MST_DEBUG("%s finished\n", __func__);
+	DP_MST_INFO_LOG("%s finished\n", __func__);
 }
 
 /* DP Driver Callback OPs */
@@ -1444,7 +1782,9 @@
 	struct dp_display *dp = dp_display;
 	struct dp_mst_private *mst = dp->dp_mst_prv_info;
 
-	DP_MST_DEBUG("enter:\n");
+	mutex_lock(&mst->mst_lock);
+	mst->mst_session_state = hpd_status;
+	mutex_unlock(&mst->mst_lock);
 
 	if (!hpd_status)
 		rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr,
@@ -1466,9 +1806,7 @@
 
 	dp_mst_hpd_event_notify(mst, hpd_status);
 
-	DP_MST_DEBUG("mst display hpd:%d, rc:%d\n", hpd_status, rc);
-
-	DP_MST_DEBUG("exit:\n");
+	DP_MST_INFO_LOG("mst display hpd:%d, rc:%d\n", hpd_status, rc);
 }
 
 static void dp_mst_display_hpd_irq(void *dp_display,
@@ -1477,26 +1815,29 @@
 	int rc;
 	struct dp_display *dp = dp_display;
 	struct dp_mst_private *mst = dp->dp_mst_prv_info;
-	u8 esi[14], idx;
+	u8 esi[14];
 	unsigned int esi_res = DP_SINK_COUNT_ESI + 1;
 	bool handled;
 
-	DP_MST_DEBUG("enter:\n");
-
 	if (info->mst_hpd_sim) {
 		dp_mst_hotplug(&mst->mst_mgr);
 		return;
 	}
 
+	if (!mst->mst_session_state) {
+		pr_err("mst_hpd_irq received before mst session start\n");
+		return;
+	}
+
 	rc = drm_dp_dpcd_read(mst->caps.drm_aux, DP_SINK_COUNT_ESI,
 		esi, 14);
 	if (rc != 14) {
-		pr_err("dpcd sync status read failed, rlen=%d\n", rc);
-		goto end;
+		pr_err("dpcd sink status read failed, rlen=%d\n", rc);
+		return;
 	}
 
-	for (idx = 0; idx < 14; idx++)
-		DP_MST_DEBUG("mst irq: esi[%d]: 0x%x\n", idx, esi[idx]);
+	DP_MST_DEBUG("mst irq: esi1[0x%x] esi2[0x%x] esi3[%x]\n",
+			esi[1], esi[2], esi[3]);
 
 	rc = drm_dp_mst_hpd_irq(&mst->mst_mgr, esi, &handled);
 
@@ -1509,9 +1850,6 @@
 	}
 
 	DP_MST_DEBUG("mst display hpd_irq handled:%d rc:%d\n", handled, rc);
-
-end:
-	DP_MST_DEBUG("exit:\n");
 }
 
 static void dp_mst_set_state(void *dp_display, enum dp_drv_state mst_state)
@@ -1525,6 +1863,7 @@
 	}
 
 	mst->state = mst_state;
+	DP_MST_INFO_LOG("mst power state:%d\n", mst_state);
 }
 
 /* DP MST APIs */
@@ -1542,6 +1881,13 @@
 	.hotplug = dp_mst_hotplug,
 };
 
+static const struct drm_dp_mst_topology_cbs dp_mst_fixed_drm_cbs = {
+	.add_connector = dp_mst_add_fixed_connector,
+	.register_connector = dp_mst_register_fixed_connector,
+	.destroy_connector = dp_mst_destroy_fixed_connector,
+	.hotplug = dp_mst_hotplug,
+};
+
 static void dp_mst_sim_init(struct dp_mst_private *mst)
 {
 	INIT_WORK(&mst->simulator.probe_work, dp_mst_sim_link_probe_work);
@@ -1606,7 +1952,11 @@
 	}
 	memset(&dp_mst_enc_cache, 0, sizeof(dp_mst_enc_cache));
 
-	DP_MST_DEBUG("dp drm mst topology manager init completed\n");
+	/* choose fixed callback function if fixed topology is found */
+	if (!dp_display->mst_get_fixed_topology_port(dp_display, 0, NULL))
+		dp_mst.mst_mgr.cbs = &dp_mst_fixed_drm_cbs;
+
+	DP_MST_INFO_LOG("dp drm mst topology manager init completed\n");
 
 	return ret;
 
@@ -1637,6 +1987,6 @@
 
 	mutex_destroy(&mst->mst_lock);
 
-	DP_MST_DEBUG("dp drm mst topology manager deinit completed\n");
+	DP_MST_INFO_LOG("dp drm mst topology manager deinit completed\n");
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 4e06924..d98ebcf 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -1530,12 +1530,14 @@
 	struct dp_dsc_slices_per_line *rec;
 	int slice_width;
 	u32 ppr = dp_mode->timing.pixel_clk_khz/1000;
+	int max_slice_width;
 
 	comp_info->dsc_info.slice_per_pkt = 0;
 	for (i = 0; i < ARRAY_SIZE(slice_per_line_tbl); i++) {
 		rec = &slice_per_line_tbl[i];
 		if ((ppr > rec->min_ppr) && (ppr <= rec->max_ppr)) {
 			comp_info->dsc_info.slice_per_pkt = rec->num_slices;
+			i++;
 			break;
 		}
 	}
@@ -1543,9 +1545,21 @@
 	if (comp_info->dsc_info.slice_per_pkt == 0)
 		return -EINVAL;
 
+	max_slice_width = dp_panel->dsc_dpcd[12] * 320;
 	slice_width = (dp_mode->timing.h_active /
 				comp_info->dsc_info.slice_per_pkt);
 
+	while (slice_width >= max_slice_width) {
+		if (i == ARRAY_SIZE(slice_per_line_tbl))
+			return -EINVAL;
+
+		rec = &slice_per_line_tbl[i];
+		comp_info->dsc_info.slice_per_pkt = rec->num_slices;
+		slice_width = (dp_mode->timing.h_active /
+				comp_info->dsc_info.slice_per_pkt);
+		i++;
+	}
+
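Per the DP DSC DPCD layout, dsc_dpcd[12] (offset 0x6Ch) reports the sink's maximum slice width in units of 320 pixels, so the loop above bumps slice_per_pkt until each slice fits. A worked example with assumed numbers:

	/*
	 * Example (assumed values): h_active = 3840 with slice_per_pkt = 2
	 * gives slice_width = 1920. A sink reporting dsc_dpcd[12] = 5 caps
	 * slices at 5 * 320 = 1600, so the table is walked further; a next
	 * entry with num_slices = 4 yields slice_width = 960 < 1600 and the
	 * loop exits.
	 */
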
 	comp_info->dsc_info.block_pred_enable =
 			dp_panel->sink_dsc_caps.block_pred_en;
 	comp_info->dsc_info.vbr_enable = 0;
@@ -1657,8 +1671,8 @@
 	panel->minor = link_info->revision & 0x0f;
 	pr_debug("version: %d.%d\n", panel->major, panel->minor);
 
-	link_info->rate =
-		drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]);
+	link_info->rate = min_t(unsigned long, panel->parser->max_lclk_khz,
+		drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]));
 	pr_debug("link_rate=%d\n", link_info->rate);
 
 	link_info->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
@@ -2305,13 +2319,14 @@
 
 static int dp_panel_set_stream_info(struct dp_panel *dp_panel,
 		enum dp_stream_id stream_id, u32 ch_start_slot,
-			u32 ch_tot_slots, u32 pbn)
+			u32 ch_tot_slots, u32 pbn, int vcpi)
 {
 	if (!dp_panel || stream_id > DP_STREAM_MAX) {
 		pr_err("invalid input. stream_id: %d\n", stream_id);
 		return -EINVAL;
 	}
 
+	dp_panel->vcpi = vcpi;
 	dp_panel->stream_id = stream_id;
 	dp_panel->channel_start_slot = ch_start_slot;
 	dp_panel->channel_total_slots = ch_tot_slots;
@@ -2376,7 +2391,7 @@
 	if (!panel->custom_edid && dp_panel->edid_ctrl->edid)
 		sde_free_edid((void **)&dp_panel->edid_ctrl);
 
-	dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0);
+	dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0, 0);
 	memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo));
 	memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
 	panel->panel_on = false;
@@ -2701,47 +2716,22 @@
 	catalog->config_misc(catalog);
 }
 
-static bool dp_panel_use_fixed_nvid(struct dp_panel *dp_panel)
-{
-	u8 *dpcd = dp_panel->dpcd;
-	struct sde_connector *c_conn = to_sde_connector(dp_panel->connector);
-
-	/* use fixe mvid and nvid for MST streams */
-	if (c_conn->mst_port)
-		return true;
-
-	/*
-	 * For better interop experience, used a fixed NVID=0x8000
-	 * whenever connected to a VGA dongle downstream.
-	 */
-	if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) {
-		u8 type = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-			DP_DWN_STRM_PORT_TYPE_MASK;
-		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG)
-			return true;
-	}
-
-	return false;
-}
-
 static void dp_panel_config_msa(struct dp_panel *dp_panel)
 {
 	struct dp_panel_private *panel;
 	struct dp_catalog_panel *catalog;
 	u32 rate;
 	u32 stream_rate_khz;
-	bool fixed_nvid;
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 	catalog = panel->catalog;
 
 	catalog->widebus_en = dp_panel->widebus_en;
 
-	fixed_nvid = dp_panel_use_fixed_nvid(dp_panel);
 	rate = drm_dp_bw_code_to_link_rate(panel->link->link_params.bw_code);
 	stream_rate_khz = dp_panel->pinfo.pixel_clk_khz;
 
-	catalog->config_msa(catalog, rate, stream_rate_khz, fixed_nvid);
+	catalog->config_msa(catalog, rate, stream_rate_khz);
 }
 
 static int dp_panel_hw_cfg(struct dp_panel *dp_panel, bool enable)
@@ -2956,6 +2946,8 @@
 	if (in->base_panel) {
 		memcpy(dp_panel->dpcd, in->base_panel->dpcd,
 				DP_RECEIVER_CAP_SIZE + 1);
+		memcpy(dp_panel->dsc_dpcd, in->base_panel->dsc_dpcd,
+				DP_RECEIVER_DSC_CAP_SIZE + 1);
 		memcpy(&dp_panel->link_info, &in->base_panel->link_info,
 				sizeof(dp_panel->link_info));
 		dp_panel->mst_state = in->base_panel->mst_state;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 90d5346..dc96090 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -110,6 +110,7 @@
 	 * Client sets the stream id value using set_stream_id interface.
 	 */
 	enum dp_stream_id stream_id;
+	int vcpi;
 
 	u32 channel_start_slot;
 	u32 channel_total_slots;
@@ -154,7 +155,7 @@
 
 	int (*set_stream_info)(struct dp_panel *dp_panel,
 			enum dp_stream_id stream_id, u32 ch_start_slot,
-			u32 ch_tot_slots, u32 pbn);
+			u32 ch_tot_slots, u32 pbn, int vcpi);
 
 	int (*read_sink_status)(struct dp_panel *dp_panel, u8 *sts, u32 size);
 	int (*update_edid)(struct dp_panel *dp_panel, struct edid *edid);
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index b0a6d24..bc4369d 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -151,11 +151,22 @@
 			parser->l_map[i] = data[i];
 	}
 
+	data = of_get_property(of_node, "qcom,pn-swap-lane-map", &len);
+	if (data && (len == DP_MAX_PHY_LN)) {
+		for (i = 0; i < len; i++)
+			parser->l_pnswap |= (data[i] & 0x01) << i;
+	}
+
 	rc = of_property_read_u32(of_node,
 		"qcom,max-pclk-frequency-khz", &parser->max_pclk_khz);
 	if (rc)
 		parser->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
 
+	rc = of_property_read_u32(of_node,
+		"qcom,max-lclk-frequency-khz", &parser->max_lclk_khz);
+	if (rc)
+		parser->max_lclk_khz = DP_MAX_LINK_CLK_KHZ;
+
 	return 0;
 }
 
@@ -692,6 +703,7 @@
 static int dp_parser_mst(struct dp_parser *parser)
 {
 	struct device *dev = &parser->pdev->dev;
+	int i;
 
 	parser->has_mst = of_property_read_bool(dev->of_node,
 			"qcom,mst-enable");
@@ -699,6 +711,12 @@
 
 	pr_debug("mst parsing successful. mst:%d\n", parser->has_mst);
 
+	for (i = 0; i < MAX_DP_MST_STREAMS; i++) {
+		of_property_read_u32_index(dev->of_node,
+				"qcom,mst-fixed-topology-ports", i,
+				&parser->mst_fixed_port[i]);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 7fb90c9..9caa1a7 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -11,6 +11,8 @@
 #define DP_LABEL "MDSS DP DISPLAY"
 #define AUX_CFG_LEN	10
 #define DP_MAX_PIXEL_CLK_KHZ	675000
+#define DP_MAX_LINK_CLK_KHZ	810000
+#define MAX_DP_MST_STREAMS	2
 
 enum dp_pm_type {
 	DP_CORE_PM,
@@ -181,6 +183,9 @@
  * @mp: gpio, regulator and clock related data
  * @pinctrl: pin-control related data
  * @disp_data: controller's display related data
+ * @l_pnswap: P/N swap status on each lane
+ * @max_pclk_khz: maximum pixel clock supported for the platform
+ * @max_lclk_khz: maximum link clock supported for the platform
  * @hw_cfg: DP HW specific settings
  * @has_mst: MST feature enable status
  * @has_mst_sideband: MST sideband feature enable status
@@ -191,6 +196,7 @@
  * @max_dp_dsc_blks: maximum DSC blks for DP interface
  * @max_dp_dsc_input_width_pixs: Maximum input width for DSC block
  * @has_widebus: widebus (2PPC) feature enable status
+ * @mst_fixed_port: mst port_num reserved for fixed topology
  * @parse: function to be called by client to parse device tree.
  * @get_io: function to be called by client to get io data.
  * @get_io_buf: function to be called by client to get io buffers.
@@ -205,8 +211,10 @@
 	struct dp_display_data disp_data;
 
 	u8 l_map[4];
+	u8 l_pnswap;
 	struct dp_aux_cfg aux_cfg[AUX_CFG_LEN];
 	u32 max_pclk_khz;
+	u32 max_lclk_khz;
 	struct dp_hw_cfg hw_cfg;
 	bool has_mst;
 	bool has_mst_sideband;
@@ -218,6 +226,7 @@
 	u32 max_dp_dsc_blks;
 	u32 max_dp_dsc_input_width_pixs;
 	bool lphw_hpd;
+	u32 mst_fixed_port[MAX_DP_MST_STREAMS];
 
 	int (*parse)(struct dp_parser *parser);
 	struct dp_io_data *(*get_io)(struct dp_parser *parser, char *name);
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 5089f0c..7f9391d 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -351,12 +351,14 @@
 
 #define TXn_TX_EMP_POST1_LVL			(0x000C)
 #define TXn_TX_DRV_LVL				(0x001C)
+#define TXn_TX_POL_INV				(0x0064)
 
 #define DP_PHY_AUX_INTERRUPT_MASK_V420		(0x0054)
 #define DP_PHY_AUX_INTERRUPT_CLEAR_V420		(0x0058)
 #define DP_PHY_AUX_INTERRUPT_STATUS_V420	(0x00D8)
 #define DP_PHY_SPARE0_V420			(0x00C8)
 #define TXn_TX_DRV_LVL_V420			(0x0014)
+#define TXn_TX_POL_INV_V420			(0x005C)
 
 #define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		(0x004)
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index ae2ce71..15ad347 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -172,7 +172,7 @@
 {
 	struct dsi_display *dsi_display = display;
 	struct dsi_panel *panel;
-	u32 bl_scale, bl_scale_ad;
+	u32 bl_scale, bl_scale_sv;
 	u64 bl_temp;
 	int rc = 0;
 
@@ -193,12 +193,11 @@
 	bl_scale = panel->bl_config.bl_scale;
 	bl_temp = bl_lvl * bl_scale / MAX_BL_SCALE_LEVEL;
 
-	bl_scale_ad = panel->bl_config.bl_scale_ad;
-	bl_temp = (u32)bl_temp * bl_scale_ad / MAX_AD_BL_SCALE_LEVEL;
+	bl_scale_sv = panel->bl_config.bl_scale_sv;
+	bl_temp = (u32)bl_temp * bl_scale_sv / MAX_SV_BL_SCALE_LEVEL;
 
-	pr_debug("bl_scale = %u, bl_scale_ad = %u, bl_lvl = %u\n",
-		bl_scale, bl_scale_ad, (u32)bl_temp);
-
+	pr_debug("bl_scale = %u, bl_scale_sv = %u, bl_lvl = %u\n",
+		bl_scale, bl_scale_sv, (u32)bl_temp);
 	rc = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
 			DSI_CORE_CLK, DSI_CLK_ON);
 	if (rc) {
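The two scale factors above compose as integer ratios, i.e. the effective level is bl_lvl * bl_scale / 1024 * bl_scale_sv / 65535. A quick check with assumed values:

	/*
	 * Example (assumed values): bl_lvl = 4095 with bl_scale = 512 (half
	 * of MAX_BL_SCALE_LEVEL = 1024) gives bl_temp = 2047; applying
	 * bl_scale_sv = 32768 (half of MAX_SV_BL_SCALE_LEVEL = 65535) then
	 * gives 2047 * 32768 / 65535 = 1023, about a quarter of the
	 * requested level, as expected for two half scales.
	 */
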
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 387c889..68fc901 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -623,13 +623,111 @@
 	dsi_display->modes = NULL;
 }
 
-int dsi_connector_get_modes(struct drm_connector *connector,
-		void *display)
+
+static int dsi_drm_update_edid_name(struct edid *edid, const char *name)
 {
-	u32 count = 0;
+	u8 *dtd = (u8 *)&edid->detailed_timings[3];
+	u8 standard_header[] = {0x00, 0x00, 0x00, 0xFE, 0x00};
+	u32 dtd_size = 18;
+	u32 header_size = sizeof(standard_header);
+
+	if (!name)
+		return -EINVAL;
+
+	/* Fill standard header */
+	memcpy(dtd, standard_header, header_size);
+
+	dtd_size -= header_size;
+	dtd_size = min_t(u32, dtd_size, strlen(name));
+
+	memcpy(dtd + header_size, name, dtd_size);
+
+	return 0;
+}
+
+static void dsi_drm_update_dtd(struct edid *edid,
+		struct dsi_display_mode *modes, u32 modes_count)
+{
+	u32 i;
+	u32 count = min_t(u32, modes_count, 3);
+
+	for (i = 0; i < count; i++) {
+		struct detailed_timing *dtd = &edid->detailed_timings[i];
+		struct dsi_display_mode *mode = &modes[i];
+		struct dsi_mode_info *timing = &mode->timing;
+		struct detailed_pixel_timing *pd = &dtd->data.pixel_data;
+		u32 h_blank = timing->h_front_porch + timing->h_sync_width +
+				timing->h_back_porch;
+		u32 v_blank = timing->v_front_porch + timing->v_sync_width +
+				timing->v_back_porch;
+		u32 h_img = 0, v_img = 0;
+
+		dtd->pixel_clock = mode->pixel_clk_khz / 10;
+
+		pd->hactive_lo = timing->h_active & 0xFF;
+		pd->hblank_lo = h_blank & 0xFF;
+		pd->hactive_hblank_hi = ((h_blank >> 8) & 0xF) |
+				((timing->h_active >> 8) & 0xF) << 4;
+
+		pd->vactive_lo = timing->v_active & 0xFF;
+		pd->vblank_lo = v_blank & 0xFF;
+		pd->vactive_vblank_hi = ((v_blank >> 8) & 0xF) |
+				((timing->v_active >> 8) & 0xF) << 4;
+
+		pd->hsync_offset_lo = timing->h_front_porch & 0xFF;
+		pd->hsync_pulse_width_lo = timing->h_sync_width & 0xFF;
+		pd->vsync_offset_pulse_width_lo =
+			((timing->v_front_porch & 0xF) << 4) |
+			(timing->v_sync_width & 0xF);
+
+		pd->hsync_vsync_offset_pulse_width_hi =
+			(((timing->h_front_porch >> 8) & 0x3) << 6) |
+			(((timing->h_sync_width >> 8) & 0x3) << 4) |
+			(((timing->v_front_porch >> 4) & 0x3) << 2) |
+			(((timing->v_sync_width >> 4) & 0x3) << 0);
+
+		pd->width_mm_lo = h_img & 0xFF;
+		pd->height_mm_lo = v_img & 0xFF;
+		pd->width_height_mm_hi = (((h_img >> 8) & 0xF) << 4) |
+			((v_img >> 8) & 0xF);
+
+		pd->hborder = 0;
+		pd->vborder = 0;
+		pd->misc = 0;
+	}
+}
+
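Each DTD packs 12-bit active and blanking values as a low byte plus a shared high-nibble byte. With an assumed timing:

	/*
	 * Example (assumed timing): h_active = 1080 (0x438) and
	 * h_blank = 160 (0xA0) pack as hactive_lo = 0x38, hblank_lo = 0xA0,
	 * hactive_hblank_hi = ((160 >> 8) & 0xF) |
	 *                     (((1080 >> 8) & 0xF) << 4) = 0x40.
	 */
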
+static void dsi_drm_update_checksum(struct edid *edid)
+{
+	u8 *data = (u8 *)edid;
+	u32 i, sum = 0;
+
+	for (i = 0; i < EDID_LENGTH - 1; i++)
+		sum += data[i];
+
+	edid->checksum = 0x100 - (sum & 0xFF);
+}
+
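Because the name descriptor and DTDs are patched in place, the base-block checksum is recomputed so all EDID_LENGTH bytes sum to 0 mod 256. A self-check sketch, assuming nothing beyond the struct above (drm core provides drm_edid_block_valid() for production use):

	static bool example_edid_block_valid(const struct edid *edid)
	{
		const u8 *data = (const u8 *)edid;
		u32 i, sum = 0;

		/* a consistent base block sums to 0 mod 256, checksum included */
		for (i = 0; i < EDID_LENGTH; i++)
			sum += data[i];

		return (sum & 0xFF) == 0;
	}
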
+int dsi_connector_get_modes(struct drm_connector *connector, void *data)
+{
+	int rc, i;
+	u32 count = 0, edid_size;
 	struct dsi_display_mode *modes = NULL;
 	struct drm_display_mode drm_mode;
-	int rc, i;
+	struct dsi_display *display = data;
+	struct edid edid;
+	const u8 edid_buf[EDID_LENGTH] = {
+		0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x44, 0x6D,
+		0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1B, 0x10, 0x01, 0x03,
+		0x80, 0x50, 0x2D, 0x78, 0x0A, 0x0D, 0xC9, 0xA0, 0x57, 0x47,
+		0x98, 0x27, 0x12, 0x48, 0x4C, 0x00, 0x00, 0x00, 0x01, 0x01,
+		0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+		0x01, 0x01, 0x01, 0x01,
+	};
+
+	edid_size = min_t(u32, sizeof(edid), EDID_LENGTH);
+
+	memcpy(&edid, edid_buf, edid_size);
 
 	if (sde_connector_get_panel(connector)) {
 		/*
@@ -669,6 +767,18 @@
 		m->height_mm = connector->display_info.height_mm;
 		drm_mode_probed_add(connector, m);
 	}
+
+	rc = dsi_drm_update_edid_name(&edid, display->panel->name);
+	if (rc) {
+		count = 0;
+		goto end;
+	}
+
+	dsi_drm_update_dtd(&edid, modes, count);
+	dsi_drm_update_checksum(&edid);
+	rc = drm_connector_update_edid_property(connector, &edid);
+	if (rc)
+		count = 0;
 end:
 	pr_debug("MODE COUNT =%d\n\n", count);
 	return count;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 5e9d3ac..730a2c2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -2032,7 +2032,7 @@
 	}
 
 	panel->bl_config.bl_scale = MAX_BL_SCALE_LEVEL;
-	panel->bl_config.bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+	panel->bl_config.bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
 
 	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-bl-min-level", &val);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 8d9cfea..a2dcebb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -23,7 +23,7 @@
 
 #define MAX_BL_LEVEL 4096
 #define MAX_BL_SCALE_LEVEL 1024
-#define MAX_AD_BL_SCALE_LEVEL 65535
+#define MAX_SV_BL_SCALE_LEVEL 65535
 #define DSI_CMD_PPS_SIZE 135
 
 #define DSI_MODE_MAX 5
@@ -90,7 +90,7 @@
 	u32 brightness_max_level;
 	u32 bl_level;
 	u32 bl_scale;
-	u32 bl_scale_ad;
+	u32 bl_scale_sv;
 
 	int en_gpio;
 	/* PWM params */
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 41bec57..3120562 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -17,7 +17,7 @@
  *                              |                |
  *                              |                |
  *                 +---------+  |  +----------+  |  +----+
- *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
  *                 +---------+  |  +----------+  |  +----+
  *                              |                |
  *                              |                |         dsi0_pll_by_2_bit_clk
@@ -25,7 +25,7 @@
  *                              |                |  +----+  |  |\  dsi0_pclk_mux
  *                              |                |--| /2 |--o--| \   |
  *                              |                |  +----+     |  \  |  +---------+
- *                              |                --------------|  |--o--| div_7_4 |-- dsi0pll
+ *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
  *                              |------------------------------|  /     +---------+
  *                              |          +-----+             | /
  *                              -----------| /4? |--o----------|/
@@ -690,7 +690,7 @@
 
 	hws[num++] = hw;
 
-	snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
 	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
 
 	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
@@ -739,7 +739,7 @@
 
 	hws[num++] = hw;
 
-	snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
 	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
 
 	/* PIX CLK DIV : DIV_CTRL_7_4*/
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e8da71d..9a36012 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b9c6edb..b36f62a 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -49,6 +49,7 @@
 #include "msm_kms.h"
 #include "msm_mmu.h"
 #include "sde_wb.h"
+#include "sde_dbg.h"
 
 /*
  * MSM driver version:
@@ -240,7 +241,8 @@
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	if (!res) {
-		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+		dev_dbg(&pdev->dev, "failed to get memory resource: %s\n",
+									name);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -269,7 +271,7 @@
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	if (!res) {
-		dev_err(&pdev->dev, "failed to get memory resource: %s\n",
+		dev_dbg(&pdev->dev, "failed to get memory resource: %s\n",
 									name);
 		return 0;
 	}
@@ -915,6 +917,7 @@
 		return -ENOMEM;
 
 	msm_submitqueue_init(dev, ctx);
+	mutex_init(&ctx->power_lock);
 
 	file->driver_priv = ctx;
 
@@ -969,6 +972,14 @@
 		priv->lastctx = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
+	mutex_lock(&ctx->power_lock);
+	if (ctx->enable_refcnt) {
+		SDE_EVT32(ctx->enable_refcnt);
+		sde_power_resource_enable(&priv->phandle,
+				priv->pclient, false);
+	}
+	mutex_unlock(&ctx->power_lock);
+
 	context_close(ctx);
 }
 
@@ -1710,6 +1721,62 @@
 	return msm_submitqueue_remove(file->driver_priv, id);
 }
 
+/**
+ * msm_ioctl_power_ctrl - enable/disable power vote on MDSS HW
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int msm_ioctl_power_ctrl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct msm_file_private *ctx = file_priv->driver_priv;
+	struct msm_drm_private *priv;
+	struct drm_msm_power_ctrl *power_ctrl = data;
+	bool vote_req = false;
+	int old_cnt;
+	int rc = 0;
+
+	if (unlikely(!power_ctrl)) {
+		DRM_ERROR("invalid ioctl data\n");
+		return -EINVAL;
+	}
+
+	priv = dev->dev_private;
+
+	mutex_lock(&ctx->power_lock);
+
+	old_cnt = ctx->enable_refcnt;
+	if (power_ctrl->enable) {
+		if (!ctx->enable_refcnt)
+			vote_req = true;
+		ctx->enable_refcnt++;
+	} else if (ctx->enable_refcnt) {
+		ctx->enable_refcnt--;
+		if (!ctx->enable_refcnt)
+			vote_req = true;
+	} else {
+		pr_err("ignoring, unbalanced disable\n");
+	}
+
+	if (vote_req) {
+		rc = sde_power_resource_enable(&priv->phandle,
+				priv->pclient, power_ctrl->enable);
+
+		if (rc)
+			ctx->enable_refcnt = old_cnt;
+	}
+
+	pr_debug("pid %d enable %d, refcnt %d, vote_req %d\n",
+			current->pid, power_ctrl->enable, ctx->enable_refcnt,
+			vote_req);
+	SDE_EVT32(current->pid, power_ctrl->enable, ctx->enable_refcnt,
+			vote_req);
+	mutex_unlock(&ctx->power_lock);
+	return rc;
+}
+
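A hedged userspace sketch of the balanced vote contract, assuming the downstream msm_drm.h UAPI provides the conventional DRM_IOCTL_MSM_POWER_CTRL wrapper for the struct drm_msm_power_ctrl used above:

	#include <stdbool.h>
	#include <xf86drm.h>
	#include <drm/msm_drm.h>	/* downstream UAPI header (assumed path) */

	static int example_mdss_power_vote(int drm_fd, bool enable)
	{
		struct drm_msm_power_ctrl req = { .enable = enable ? 1 : 0 };

		/* every successful enable vote must be paired with a disable */
		return drmIoctl(drm_fd, DRM_IOCTL_MSM_POWER_CTRL, &req);
	}
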
 static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
@@ -1727,6 +1794,8 @@
 	DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT,  msm_ioctl_deregister_event,
 			  DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(MSM_RMFB2, msm_ioctl_rmfb2, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(MSM_POWER_CTRL, msm_ioctl_power_ctrl,
+			DRM_RENDER_ALLOW),
 };
 
 static const struct vm_operations_struct vm_ops = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 75317fb..b8de212 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -78,6 +78,12 @@
 	struct list_head submitqueues;
 
 	int queueid;
+
+	/* refcount updated when the user driver calls the power_ctrl IOCTL */
+	unsigned short enable_refcnt;
+
+	/* protects enable_refcnt */
+	struct mutex power_lock;
 };
 
 enum msm_mdp_plane_property {
@@ -183,7 +189,7 @@
 	CONNECTOR_PROP_DST_H,
 	CONNECTOR_PROP_ROI_V1,
 	CONNECTOR_PROP_BL_SCALE,
-	CONNECTOR_PROP_AD_BL_SCALE,
+	CONNECTOR_PROP_SV_BL_SCALE,
 
 	/* enum/bitmask properties */
 	CONNECTOR_PROP_TOPOLOGY_NAME,
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 895a94d..021971e 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -128,11 +128,26 @@
 		goto fail_put;
 	}
 
-	domain = (flags & ION_FLAG_SECURE) ? MSM_SMMU_DOMAIN_SECURE :
-						MSM_SMMU_DOMAIN_UNSECURE;
-	if (kms && kms->funcs->get_address_space_device)
-		attach_dev = kms->funcs->get_address_space_device(
-							kms, domain);
+	if (!kms || !kms->funcs->get_address_space_device) {
+		DRM_ERROR("invalid kms ops\n");
+		goto fail_put;
+	}
+
+	if (flags & ION_FLAG_SECURE) {
+		if (flags & ION_FLAG_CP_PIXEL)
+			attach_dev = kms->funcs->get_address_space_device(kms,
+						MSM_SMMU_DOMAIN_SECURE);
+
+		else if ((flags & ION_FLAG_CP_SEC_DISPLAY)
+				|| (flags & ION_FLAG_CP_CAMERA_PREVIEW))
+			attach_dev = dev->dev;
+		else
+			DRM_ERROR("invalid ion secure flag: 0x%x\n", flags);
+	} else {
+		attach_dev = kms->funcs->get_address_space_device(kms,
+						MSM_SMMU_DOMAIN_UNSECURE);
+	}
+
 	if (!attach_dev) {
 		DRM_ERROR("aspace device not found for domain:%d\n", domain);
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 9122ee6..1fe9392 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -63,7 +63,7 @@
 	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
 	void (*recover)(struct msm_gpu *gpu);
 	void (*destroy)(struct msm_gpu *gpu);
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
 	/* show GPU status in debugfs: */
 	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
 			struct drm_printer *p);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index f7a0ede..d4cc5ce 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -115,7 +115,9 @@
 		char *fptr = &fifo->buf[fifo->head];
 		int n;
 
-		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+		if (!rd->open)
+			return;
 
 		/* Note that smp_load_acquire() is not strictly required
 		 * as CIRC_SPACE_TO_END() does not access the tail more
@@ -213,7 +215,10 @@
 static int rd_release(struct inode *inode, struct file *file)
 {
 	struct msm_rd_state *rd = inode->i_private;
+
 	rd->open = false;
+	wake_up_all(&rd->fifo_event);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index ca9050c..b380481 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -16,6 +16,7 @@
 #include "sde_hw_interrupts.h"
 #include "sde_core_irq.h"
 #include "dsi_panel.h"
+#include "sde_hw_color_proc_common_v4.h"
 
 struct sde_cp_node {
 	u32 property_id;
@@ -93,6 +94,8 @@
 		struct sde_hw_dspp *hw_dspp, struct sde_hw_cp_cfg *hw_cfg);
 static void sde_cp_notify_ltm_hist(struct drm_crtc *crtc_drm, void *arg);
 static void sde_cp_notify_ltm_wb_pb(struct drm_crtc *crtc_drm, void *arg);
+static void _sde_cp_crtc_update_ltm_roi(struct sde_crtc *sde_crtc,
+		struct sde_hw_cp_cfg *hw_cfg);
 
 #define setup_dspp_prop_install_funcs(func) \
 do { \
@@ -539,10 +542,12 @@
 {
 	int ret = 0;
 
-	if (!hw_dspp || !hw_dspp->ops.setup_ltm_roi)
+	if (!hw_dspp || !hw_dspp->ops.setup_ltm_roi) {
 		ret = -EINVAL;
-	else
+	} else {
 		hw_dspp->ops.setup_ltm_roi(hw_dspp, hw_cfg);
+		_sde_cp_crtc_update_ltm_roi(hw_crtc, hw_cfg);
+	}
 
 	return ret;
 }
@@ -1592,6 +1597,7 @@
 	struct sde_crtc *sde_crtc = NULL;
 	struct sde_cp_node *prop_node = NULL, *n = NULL;
 	bool ad_suspend = false;
+	unsigned long irq_flags;
 
 	if (!crtc) {
 		DRM_ERROR("crtc %pK\n", crtc);
@@ -1618,6 +1624,10 @@
 	}
 	mutex_unlock(&sde_crtc->crtc_cp_lock);
 
+	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
+	sde_crtc->ltm_hist_en = false;
+	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
+
 	if (ad_suspend)
 		sde_cp_ad_set_prop(sde_crtc, AD_SUSPEND);
 }
@@ -2072,6 +2082,12 @@
 			list_add_tail(&prop_node->active_list,
 					&crtc->ad_active);
 		break;
+	case SDE_CP_CRTC_DSPP_LTM_SET_BUF:
+	case SDE_CP_CRTC_DSPP_LTM_QUEUE_BUF:
+		if (dirty_list)
+			list_add_tail(&prop_node->dirty_list,
+					&crtc->dirty_list);
+		break;
 	default:
 		/* color processing properties handle here */
 		if (dirty_list)
@@ -2144,7 +2160,7 @@
 static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
 {
 	uint32_t input_bl = 0, output_bl = 0;
-	uint32_t scale = MAX_AD_BL_SCALE_LEVEL;
+	uint32_t scale = MAX_SV_BL_SCALE_LEVEL;
 	struct sde_hw_mixer *hw_lm = NULL;
 	struct sde_hw_dspp *hw_dspp = NULL;
 	u32 num_mixers;
@@ -2192,7 +2208,7 @@
 	if (!input_bl || input_bl < output_bl)
 		return;
 
-	scale = (output_bl * MAX_AD_BL_SCALE_LEVEL) / input_bl;
+	scale = (output_bl * MAX_SV_BL_SCALE_LEVEL) / input_bl;
 	event.length = sizeof(u32);
 	event.type = DRM_EVENT_AD_BACKLIGHT;
 	msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
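The notified value maps the AD input/output backlight pair into the 16-bit SV scale domain. With assumed values:

	/*
	 * Example (assumed values): input_bl = 4096 and output_bl = 3072
	 * give scale = 3072 * 65535 / 4096 = 49151, i.e. a 75% scale in the
	 * MAX_SV_BL_SCALE_LEVEL (65535) domain delivered to user space.
	 */
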
@@ -2572,7 +2588,7 @@
 /* needs to be called within ltm_buffer_lock mutex */
 static void _sde_cp_crtc_free_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg)
 {
-	u32 i = 0;
+	u32 i = 0, buffer_count = 0;
 	unsigned long irq_flags;
 
 	if (!sde_crtc) {
@@ -2597,13 +2613,13 @@
 		return;
 	}
 
+	buffer_count = sde_crtc->ltm_buffer_cnt;
 	sde_crtc->ltm_buffer_cnt = 0;
 	INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
 	INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
 	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
 
-	for (i = 0; i < sde_crtc->ltm_buffer_cnt && sde_crtc->ltm_buffers[i];
-			i++) {
+	for (i = 0; i < buffer_count && sde_crtc->ltm_buffers[i]; i++) {
 		msm_gem_put_vaddr(sde_crtc->ltm_buffers[i]->gem);
 		drm_framebuffer_put(sde_crtc->ltm_buffers[i]->fb);
 		msm_gem_put_iova(sde_crtc->ltm_buffers[i]->gem,
@@ -2852,6 +2868,7 @@
 {
 	unsigned long irq_flags;
 	struct sde_hw_mixer *hw_lm = hw_cfg->mixer_info;
+	u32 i = 0;
 
 	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
 	if (!hw_lm->cfg.right_mixer && !sde_crtc->ltm_hist_en) {
@@ -2860,6 +2877,11 @@
 		return;
 	}
 	sde_crtc->ltm_hist_en = false;
+	INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
+	INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
+	for (i = 0; i < sde_crtc->ltm_buffer_cnt; i++)
+		list_add(&sde_crtc->ltm_buffers[i]->node,
+			&sde_crtc->ltm_buf_free);
 	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
 }
 
@@ -2873,6 +2895,9 @@
 	u64 addr = 0;
 	int idx = -1;
 	unsigned long irq_flags;
+	struct sde_ltm_phase_info phase;
+	struct sde_hw_cp_cfg hw_cfg;
+	struct sde_hw_mixer *hw_lm;
 
 	if (!sde_crtc) {
 		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
@@ -2913,11 +2938,6 @@
 				0);
 		}
 
-		INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
-		INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
-		for (i = 0; i < sde_crtc->ltm_buffer_cnt; i++)
-			list_add(&sde_crtc->ltm_buffers[i]->node,
-				&sde_crtc->ltm_buf_free);
 		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
 		return;
 	}
@@ -2965,6 +2985,30 @@
 		((u8 *)sde_crtc->ltm_buffers[idx]->kva +
 		sde_crtc->ltm_buffers[idx]->offset);
 	ltm_data->status_flag = ltm_hist_status;
+
+	hw_lm = sde_crtc->mixers[0].hw_lm;
+	if (!hw_lm) {
+		DRM_ERROR("invalid layer mixer\n");
+		return;
+	}
+	hw_cfg.num_of_mixers = num_mixers;
+	hw_cfg.displayh = num_mixers * hw_lm->cfg.out_width;
+	hw_cfg.displayv = hw_lm->cfg.out_height;
+
+	sde_ltm_get_phase_info(&hw_cfg, &phase);
+	ltm_data->display_h = hw_cfg.displayh;
+	ltm_data->display_v = hw_cfg.displayv;
+	ltm_data->init_h[0] = phase.init_h[LTM_0];
+	ltm_data->init_h[1] = phase.init_h[LTM_1];
+	ltm_data->init_v = phase.init_v;
+	ltm_data->inc_v = phase.inc_v;
+	ltm_data->inc_h = phase.inc_h;
+	ltm_data->portrait_en = phase.portrait_en;
+	ltm_data->merge_en = phase.merge_en;
+	ltm_data->cfg_param_01 = sde_crtc->ltm_cfg.cfg_param_01;
+	ltm_data->cfg_param_02 = sde_crtc->ltm_cfg.cfg_param_02;
+	ltm_data->cfg_param_03 = sde_crtc->ltm_cfg.cfg_param_03;
+	ltm_data->cfg_param_04 = sde_crtc->ltm_cfg.cfg_param_04;
 	sde_crtc_event_queue(&sde_crtc->base, sde_cp_notify_ltm_hist,
 				sde_crtc->ltm_buffers[idx], true);
 	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
@@ -3188,3 +3232,45 @@
 	}
 	return ret;
 }
+
+static void _sde_cp_crtc_update_ltm_roi(struct sde_crtc *sde_crtc,
+		struct sde_hw_cp_cfg *hw_cfg)
+{
+	struct drm_msm_ltm_cfg_param *cfg_param = NULL;
+
+	/* disable case */
+	if (!hw_cfg->payload) {
+		memset(&sde_crtc->ltm_cfg, 0,
+			sizeof(struct drm_msm_ltm_cfg_param));
+		return;
+	}
+
+	if (hw_cfg->len != sizeof(struct drm_msm_ltm_cfg_param)) {
+		DRM_ERROR("invalid size of payload len %d exp %zd\n",
+			hw_cfg->len, sizeof(struct drm_msm_ltm_cfg_param));
+		return;
+	}
+
+	cfg_param = hw_cfg->payload;
+	/* input param exceeds the display width */
+	if (cfg_param->cfg_param_01 + cfg_param->cfg_param_03 >
+			hw_cfg->displayh) {
+		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayh = %u\n",
+			cfg_param->cfg_param_01, cfg_param->cfg_param_03,
+			hw_cfg->displayh);
+		/* set the roi width to max register value */
+		cfg_param->cfg_param_03 = 0xFFFF;
+	}
+
+	/* input param exceeds the display height */
+	if (cfg_param->cfg_param_02 + cfg_param->cfg_param_04 >
+			hw_cfg->displayv) {
+		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayv = %u\n",
+			cfg_param->cfg_param_02, cfg_param->cfg_param_04,
+			hw_cfg->displayv);
+		/* set the roi height to max register value */
+		cfg_param->cfg_param_04 = 0xFFFF;
+	}
+
+	sde_crtc->ltm_cfg = *cfg_param;
+}
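The clamp only fires when origin plus extent overruns the panel; an illustration with assumed numbers:

	/*
	 * Example (assumed values): displayh = 2560 with cfg_param_01 = 1000
	 * (x) and cfg_param_03 = 2000 (w) overruns the panel
	 * (1000 + 2000 > 2560), so cfg_param_03 is forced to 0xFFFF, the
	 * register maximum, presumably leaving the hardware to clip the ROI
	 * at the display edge.
	 */
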
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 05dcceb..d87a981 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -99,10 +99,13 @@
 	}
 
 	if (c_conn->ops.set_backlight) {
-		event.type = DRM_EVENT_SYS_BACKLIGHT;
-		event.length = sizeof(u32);
-		msm_mode_object_event_notify(&c_conn->base.base,
+		/* skip notifying user space if the backlight level is 0 */
+		if (brightness != 0) {
+			event.type = DRM_EVENT_SYS_BACKLIGHT;
+			event.length = sizeof(u32);
+			msm_mode_object_event_notify(&c_conn->base.base,
 				c_conn->base.dev, &event, (u8 *)&brightness);
+		}
 		rc = c_conn->ops.set_backlight(&c_conn->base,
 				c_conn->display, bl_lvl);
 		c_conn->unset_bl_level = 0;
@@ -536,13 +539,13 @@
 	else
 		bl_config->bl_scale = c_conn->bl_scale;
 
-	if (c_conn->bl_scale_ad > MAX_AD_BL_SCALE_LEVEL)
-		bl_config->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+	if (c_conn->bl_scale_sv > MAX_SV_BL_SCALE_LEVEL)
+		bl_config->bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
 	else
-		bl_config->bl_scale_ad = c_conn->bl_scale_ad;
+		bl_config->bl_scale_sv = c_conn->bl_scale_sv;
 
-	SDE_DEBUG("bl_scale = %u, bl_scale_ad = %u, bl_level = %u\n",
-		bl_config->bl_scale, bl_config->bl_scale_ad,
+	SDE_DEBUG("bl_scale = %u, bl_scale_sv = %u, bl_level = %u\n",
+		bl_config->bl_scale, bl_config->bl_scale_sv,
 		bl_config->bl_level);
 	rc = c_conn->ops.set_backlight(&c_conn->base,
 			dsi_display, bl_config->bl_level);
@@ -612,7 +615,7 @@
 			mutex_unlock(&c_conn->lock);
 			break;
 		case CONNECTOR_PROP_BL_SCALE:
-		case CONNECTOR_PROP_AD_BL_SCALE:
+		case CONNECTOR_PROP_SV_BL_SCALE:
 			_sde_connector_update_bl_scale(c_conn);
 			break;
 		case CONNECTOR_PROP_HDR_METADATA:
@@ -1254,7 +1257,7 @@
 		if (rc)
 			SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
 		break;
-	/* CONNECTOR_PROP_BL_SCALE and CONNECTOR_PROP_AD_BL_SCALE are
+	/* CONNECTOR_PROP_BL_SCALE and CONNECTOR_PROP_SV_BL_SCALE are
 	 * color-processing properties. These two properties require
 	 * special handling since they don't quite fit the current standard
 	 * atomic set property framework.
@@ -1263,8 +1266,8 @@
 		c_conn->bl_scale = val;
 		c_conn->bl_scale_dirty = true;
 		break;
-	case CONNECTOR_PROP_AD_BL_SCALE:
-		c_conn->bl_scale_ad = val;
+	case CONNECTOR_PROP_SV_BL_SCALE:
+		c_conn->bl_scale_sv = val;
 		c_conn->bl_scale_dirty = true;
 		break;
 	case CONNECTOR_PROP_HDR_METADATA:
@@ -1382,15 +1385,15 @@
 static void sde_connector_update_hdr_props(struct drm_connector *connector)
 {
 	struct sde_connector *c_conn = to_sde_connector(connector);
-	struct drm_msm_ext_hdr_properties hdr = {
-		connector->hdr_metadata_type_one,
-		connector->hdr_supported,
-		connector->hdr_eotf,
-		connector->hdr_max_luminance,
-		connector->hdr_avg_luminance,
-		connector->hdr_min_luminance,
-		connector->hdr_plus_app_ver,
-	};
+	struct drm_msm_ext_hdr_properties hdr = {0};
+
+	hdr.hdr_metadata_type_one = connector->hdr_metadata_type_one ? 1 : 0;
+	hdr.hdr_supported = connector->hdr_supported ? 1 : 0;
+	hdr.hdr_eotf = connector->hdr_eotf;
+	hdr.hdr_max_luminance = connector->hdr_max_luminance;
+	hdr.hdr_avg_luminance = connector->hdr_avg_luminance;
+	hdr.hdr_min_luminance = connector->hdr_min_luminance;
+	hdr.hdr_plus_supported = connector->hdr_plus_app_ver;
 
 	msm_property_set_blob(&c_conn->property_info, &c_conn->blob_ext_hdr,
 			&hdr, sizeof(hdr), CONNECTOR_PROP_EXT_HDR_INFO);
@@ -2246,13 +2249,13 @@
 		0x0, 0, MAX_BL_SCALE_LEVEL, MAX_BL_SCALE_LEVEL,
 		CONNECTOR_PROP_BL_SCALE);
 
-	msm_property_install_range(&c_conn->property_info, "ad_bl_scale",
-		0x0, 0, MAX_AD_BL_SCALE_LEVEL, MAX_AD_BL_SCALE_LEVEL,
-		CONNECTOR_PROP_AD_BL_SCALE);
+	msm_property_install_range(&c_conn->property_info, "sv_bl_scale",
+		0x0, 0, MAX_SV_BL_SCALE_LEVEL, MAX_SV_BL_SCALE_LEVEL,
+		CONNECTOR_PROP_SV_BL_SCALE);
 
 	c_conn->bl_scale_dirty = false;
 	c_conn->bl_scale = MAX_BL_SCALE_LEVEL;
-	c_conn->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+	c_conn->bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
 
 	/* enum/bitmask properties */
 	msm_property_install_enum(&c_conn->property_info, "topology_name",
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index a1bd65e..0db872f 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -372,7 +372,7 @@
  * @esd_status_check: Flag to indicate if ESD thread is scheduled or not
  * @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
  * @bl_scale: BL scale value for ABA feature
- * @bl_scale_ad: BL scale value for AD feature
+ * @bl_scale_sv: BL scale value for sunlight visibility feature
  * @unset_bl_level: BL level that needs to be set later
  * @allow_bl_update: Flag to indicate if BL update is allowed currently or not
  * @qsync_mode: Cached Qsync mode, 0=disabled, 1=continuous mode
@@ -423,7 +423,7 @@
 
 	bool bl_scale_dirty;
 	u32 bl_scale;
-	u32 bl_scale_ad;
+	u32 bl_scale_sv;
 	u32 unset_bl_level;
 	bool allow_bl_update;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 47771a3..93a1f0b 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -5051,28 +5051,18 @@
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
-	uint32_t offset, i;
-	struct drm_connector_state *old_conn_state, *new_conn_state;
-	struct drm_connector *conn;
-	struct sde_connector *sde_conn = NULL;
-	struct msm_display_info disp_info;
+	uint32_t offset;
 	bool is_vid = false;
+	struct drm_encoder *encoder;
 
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(state);
 
-	for_each_oldnew_connector_in_state(state->state, conn, old_conn_state,
-							new_conn_state, i) {
-		if (!new_conn_state || new_conn_state->crtc != crtc)
-			continue;
-
-		sde_conn = to_sde_connector(new_conn_state->connector);
-		if (sde_conn->display && sde_conn->ops.get_info) {
-			sde_conn->ops.get_info(conn, &disp_info,
-							sde_conn->display);
-			is_vid |= disp_info.capabilities &
-						MSM_DISPLAY_CAP_VID_MODE;
-		}
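+	/* one video-mode capable encoder is enough to flag the crtc as video */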
+	drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
+		is_vid |= sde_encoder_check_mode(encoder,
+						MSM_DISPLAY_CAP_VID_MODE);
+		if (is_vid)
+			break;
 	}
 
 	offset = sde_crtc_get_property(cstate, CRTC_PROP_OUTPUT_FENCE_OFFSET);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index fcd94d8..0171f4c 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -339,6 +339,7 @@
 	struct list_head ltm_buf_free;
 	struct list_head ltm_buf_busy;
 	bool ltm_hist_en;
+	struct drm_msm_ltm_cfg_param ltm_cfg;
 	struct mutex ltm_buffer_lock;
 	spinlock_t ltm_lock;
 };
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index db3933c6..f28a0a2 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -223,6 +223,7 @@
  * @elevated_ahb_vote:		increase AHB bus speed for the first frame
  *				after power collapse
  * @pm_qos_cpu_req:		pm_qos request for cpu frequency
+ * @mode_info:                  stores the current mode information
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -282,6 +283,7 @@
 	bool recovery_events_enabled;
 	bool elevated_ahb_vote;
 	struct pm_qos_request pm_qos_cpu_req;
+	struct msm_mode_info mode_info;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -340,67 +342,16 @@
 	pm_qos_remove_request(&sde_enc->pm_qos_cpu_req);
 }
 
-static struct drm_connector_state *_sde_encoder_get_conn_state(
-		struct drm_encoder *drm_enc)
-{
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct list_head *connector_list;
-	struct drm_connector *conn_iter;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid argument\n");
-		return NULL;
-	}
-
-	priv = drm_enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-	connector_list = &sde_kms->dev->mode_config.connector_list;
-
-	list_for_each_entry(conn_iter, connector_list, head)
-		if (conn_iter->encoder == drm_enc)
-			return conn_iter->state;
-
-	return NULL;
-}
-
-static int _sde_encoder_get_mode_info(struct drm_encoder *drm_enc,
-		struct msm_mode_info *mode_info)
-{
-	struct drm_connector_state *conn_state;
-
-	if (!drm_enc || !mode_info) {
-		SDE_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	conn_state = _sde_encoder_get_conn_state(drm_enc);
-	if (!conn_state) {
-		SDE_ERROR("invalid connector state for the encoder: %d\n",
-			drm_enc->base.id);
-		return -EINVAL;
-	}
-
-	return sde_connector_get_mode_info(conn_state, mode_info);
-}
-
 static bool _sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
 {
+	struct sde_encoder_virt *sde_enc;
 	struct msm_compression_info *comp_info;
-	struct msm_mode_info mode_info;
-	int rc = 0;
 
 	if (!drm_enc)
 		return false;
 
-	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
-	if (rc) {
-		SDE_ERROR("failed to get mode info, enc: %d\n",
-			drm_enc->base.id);
-		return false;
-	}
-
-	comp_info = &mode_info.comp_info;
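+	/* read the mode info cached on the virtual encoder */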
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	comp_info = &sde_enc->mode_info.comp_info;
 
 	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
 }
@@ -712,8 +663,7 @@
 		struct drm_connector_state *conn_state)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
-	struct msm_mode_info mode_info;
-	int rc, i = 0;
+	int i = 0;
 
 	if (!hw_res || !drm_enc || !conn_state) {
 		SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
@@ -735,18 +685,8 @@
 			phys->ops.get_hw_resources(phys, hw_res, conn_state);
 	}
 
-	/**
-	 * NOTE: Do not use sde_encoder_get_mode_info here as this function is
-	 * called from atomic_check phase. Use the below API to get mode
-	 * information of the temporary conn_state passed.
-	 */
-	rc = sde_connector_get_mode_info(conn_state, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return;
-	}
-
-	hw_res->topology = mode_info.topology;
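+	/* cache the connector mode info on the encoder for later queries */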
+	sde_connector_get_mode_info(conn_state, &sde_enc->mode_info);
+	hw_res->topology = sde_enc->mode_info.topology;
 	hw_res->is_primary = sde_enc->disp_info.is_primary;
 }
 
@@ -1334,27 +1274,20 @@
 	struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
 	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
-	struct msm_mode_info mode_info;
 	struct msm_display_dsc_info *dsc = NULL;
 	struct sde_hw_ctl *hw_ctl;
 	struct sde_ctl_dsc_cfg cfg;
-	int rc;
 
 	if (hw_dsc == NULL || hw_pp == NULL || !enc_master) {
 		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
 		return -EINVAL;
 	}
 
-	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return -EINVAL;
-	}
 
 	hw_ctl = enc_master->hw_ctl;
 
 	memset(&cfg, 0, sizeof(cfg));
-	dsc = &mode_info.comp_info.dsc_info;
+	dsc = &sde_enc->mode_info.comp_info.dsc_info;
 	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
 
 	this_frame_slices = roi->w / dsc->slice_width;
@@ -1407,11 +1340,10 @@
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
 	struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
-	struct msm_mode_info mode_info;
 	bool half_panel_partial_update;
 	struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
 	struct sde_ctl_dsc_cfg cfg;
-	int i, rc;
+	int i;
 
 	memset(&cfg, 0, sizeof(cfg));
 
@@ -1426,12 +1358,6 @@
 		}
 	}
 
-	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return -EINVAL;
-	}
-
 	half_panel_partial_update =
 			hweight_long(params->affected_displays) == 1;
 
@@ -1441,8 +1367,8 @@
 	if (enc_master->intf_mode == INTF_MODE_VIDEO)
 		dsc_common_mode |= DSC_MODE_VIDEO;
 
-	memcpy(&dsc[0], &mode_info.comp_info.dsc_info, sizeof(dsc[0]));
-	memcpy(&dsc[1], &mode_info.comp_info.dsc_info, sizeof(dsc[1]));
+	memcpy(&dsc[0], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[0]));
+	memcpy(&dsc[1], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[1]));
 
 	/*
 	 * Since both DSC use same pic dimension, set same pic dimension
@@ -1530,11 +1456,10 @@
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
 	struct msm_display_dsc_info *dsc = NULL;
-	struct msm_mode_info mode_info;
 	bool half_panel_partial_update;
 	struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
 	struct sde_ctl_dsc_cfg cfg;
-	int i, rc;
+	int i;
 
 	memset(&cfg, 0, sizeof(cfg));
 
@@ -1549,13 +1474,7 @@
 		}
 	}
 
-	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return -EINVAL;
-	}
-
-	dsc = &mode_info.comp_info.dsc_info;
+	dsc = &sde_enc->mode_info.comp_info.dsc_info;
 
 	half_panel_partial_update =
 			hweight_long(params->affected_displays) == 1;
@@ -1720,9 +1639,8 @@
 	struct sde_kms *sde_kms;
 	struct sde_hw_mdp *hw_mdptop;
 	struct drm_encoder *drm_enc;
-	struct msm_mode_info mode_info;
 	struct sde_encoder_virt *sde_enc;
-	int i, rc = 0;
+	int i;
 
 	sde_enc = to_sde_encoder_virt(phys_enc->parent);
 
@@ -1752,18 +1670,12 @@
 		return;
 	}
 
-	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return;
-	}
-
 	if (hw_mdptop->ops.setup_vsync_source) {
 		for (i = 0; i < sde_enc->num_phys_encs; i++)
 			vsync_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
 
 		vsync_cfg.pp_count = sde_enc->num_phys_encs;
-		vsync_cfg.frame_rate = mode_info.frame_rate;
+		vsync_cfg.frame_rate = sde_enc->mode_info.frame_rate;
 		vsync_cfg.vsync_source = vsync_source;
 		vsync_cfg.is_dummy = is_dummy;
 
@@ -1955,9 +1867,8 @@
 	struct sde_rsc_cmd_config *rsc_config;
 	int ret, prefill_lines;
 	struct msm_display_info *disp_info;
-	struct msm_mode_info mode_info;
+	struct msm_mode_info *mode_info;
 	int wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
-	int rc = 0;
 	u32 qsync_mode = 0;
 
 	if (!drm_enc || !drm_enc->dev) {
@@ -1966,6 +1877,8 @@
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	mode_info = &sde_enc->mode_info;
+
 	crtc = sde_enc->crtc;
 
 	if (!sde_enc->crtc) {
@@ -1980,12 +1893,6 @@
 		return 0;
 	}
 
-	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to mode info\n");
-		return 0;
-	}
-
 	/**
 	 * only primary command mode panel without Qsync can request CMD state.
 	 * all other panels/displays can request for VID state including
@@ -2006,19 +1913,19 @@
 
 	SDE_EVT32(rsc_state, qsync_mode);
 
-	prefill_lines = mode_info.prefill_lines;
+	prefill_lines = mode_info->prefill_lines;
 
 	/* compare specific items and reconfigure the rsc */
-	if ((rsc_config->fps != mode_info.frame_rate) ||
-	    (rsc_config->vtotal != mode_info.vtotal) ||
+	if ((rsc_config->fps != mode_info->frame_rate) ||
+	    (rsc_config->vtotal != mode_info->vtotal) ||
 	    (rsc_config->prefill_lines != prefill_lines) ||
-	    (rsc_config->jitter_numer != mode_info.jitter_numer) ||
-	    (rsc_config->jitter_denom != mode_info.jitter_denom)) {
-		rsc_config->fps = mode_info.frame_rate;
-		rsc_config->vtotal = mode_info.vtotal;
+	    (rsc_config->jitter_numer != mode_info->jitter_numer) ||
+	    (rsc_config->jitter_denom != mode_info->jitter_denom)) {
+		rsc_config->fps = mode_info->frame_rate;
+		rsc_config->vtotal = mode_info->vtotal;
 		rsc_config->prefill_lines = prefill_lines;
-		rsc_config->jitter_numer = mode_info.jitter_numer;
-		rsc_config->jitter_denom = mode_info.jitter_denom;
+		rsc_config->jitter_numer = mode_info->jitter_numer;
+		rsc_config->jitter_denom = mode_info->jitter_denom;
 		sde_enc->rsc_state_init = false;
 	}
 
@@ -3166,7 +3073,6 @@
 	int i, ret = 0;
 	struct msm_compression_info *comp_info = NULL;
 	struct drm_display_mode *cur_mode = NULL;
-	struct msm_mode_info mode_info;
 	struct msm_display_info *disp_info;
 
 	if (!drm_enc) {
@@ -3181,16 +3087,10 @@
 		return;
 	}
 
-	ret = _sde_encoder_get_mode_info(drm_enc, &mode_info);
-	if (ret) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return;
-	}
-
 	if (drm_enc->crtc && !sde_enc->crtc)
 		sde_enc->crtc = drm_enc->crtc;
 
-	comp_info = &mode_info.comp_info;
+	comp_info = &sde_enc->mode_info.comp_info;
 	cur_mode = &sde_enc->base.crtc->state->adjusted_mode;
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
@@ -3244,7 +3144,7 @@
 
 		phys->comp_type = comp_info->comp_type;
 		phys->comp_ratio = comp_info->comp_ratio;
-		phys->wide_bus_en = mode_info.wide_bus_en;
+		phys->wide_bus_en = sde_enc->mode_info.wide_bus_en;
 		phys->frame_trigger_mode = sde_enc->frame_trigger_mode;
 		if (phys->comp_type == MSM_DISPLAY_COMPRESSION_DSC) {
 			phys->dsc_extra_pclk_cycle_cnt =
@@ -3372,6 +3272,7 @@
 	 * outstanding events and timers have been completed
 	 */
 	sde_enc->crtc = NULL;
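+	/* clear the cached mode info; it is repopulated on the next modeset */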
+	memset(&sde_enc->mode_info, 0, sizeof(sde_enc->mode_info));
 
 	SDE_DEBUG_ENC(sde_enc, "encoder disabled\n");
 
@@ -4219,11 +4120,10 @@
 static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys)
 {
 	void *dither_cfg;
-	int ret = 0, rc, i = 0;
+	int ret = 0, i = 0;
 	size_t len = 0;
 	enum sde_rm_topology_name topology;
 	struct drm_encoder *drm_enc;
-	struct msm_mode_info mode_info;
 	struct msm_display_dsc_info *dsc = NULL;
 	struct sde_encoder_virt *sde_enc;
 	struct sde_hw_pingpong *hw_pp;
@@ -4239,13 +4139,7 @@
 
 	drm_enc = phys->parent;
 	sde_enc = to_sde_encoder_virt(drm_enc);
-	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return;
-	}
-
-	dsc = &mode_info.comp_info.dsc_info;
+	dsc = &sde_enc->mode_info.comp_info.dsc_info;
 	/* disable dither for 10 bpp or 10bpc dsc config */
 	if (dsc->bpp == 10 || dsc->bpc == 10) {
 		phys->hw_pp->ops.setup_dither(phys->hw_pp, NULL, 0);
@@ -5180,7 +5074,7 @@
 	debugfs_create_bool("idle_power_collapse", 0600, sde_enc->debugfs_root,
 			&sde_enc->idle_pc_enabled);
 
-	debugfs_create_u32("frame_trigger_mode", 0600, sde_enc->debugfs_root,
+	debugfs_create_u32("frame_trigger_mode", 0400, sde_enc->debugfs_root,
 			&sde_enc->frame_trigger_mode);
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++)
@@ -5596,22 +5490,16 @@
 
 u32 sde_encoder_get_fps(struct drm_encoder *drm_enc)
 {
-	struct msm_mode_info mode_info;
-	int rc;
+	struct sde_encoder_virt *sde_enc;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
 		return 0;
 	}
 
-	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc),
-			"failed to get mode info\n");
-		return 0;
-	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
 
-	return mode_info.frame_rate;
+	return sde_enc->mode_info.frame_rate;
 }
 
 enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 178eba8..5657480 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -1395,11 +1395,13 @@
 	struct sde_encoder_wait_info wait_info;
 	int ret;
 	bool frame_pending = true;
+	struct sde_hw_ctl *ctl;
 
 	if (!phys_enc || !phys_enc->hw_ctl) {
 		SDE_ERROR("invalid argument(s)\n");
 		return -EINVAL;
 	}
+	ctl = phys_enc->hw_ctl;
 
 	wait_info.wq = &phys_enc->pending_kickoff_wq;
 	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
@@ -1440,7 +1442,18 @@
 			atomic_add_unless(
 				&phys_enc->pending_ctlstart_cnt, -1, 0);
 		}
+	} else if ((ret == 0) &&
+	    (phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_POSTED_START) &&
+	    atomic_read(&phys_enc->pending_kickoff_cnt) &&
+	    ctl->ops.get_scheduler_status &&
+	    (ctl->ops.get_scheduler_status(ctl) & BIT(0)) &&
+	    phys_enc->parent_ops.handle_frame_done) {
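+		/*
+		 * In posted-start mode a ctl-start timeout is not fatal:
+		 * if a kickoff is still pending but the ctl scheduler
+		 * status bit is set, treat the frame as complete and
+		 * signal frame done with the release fence rather than
+		 * reporting a failure.
+		 */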
+		atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
 
+		phys_enc->parent_ops.handle_frame_done(
+			phys_enc->parent, phys_enc,
+			SDE_ENCODER_FRAME_EVENT_DONE |
+			SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index eb7876f..6754977 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -139,12 +139,13 @@
 	qos_params.xin_id = hw_wb->caps->xin_id;
 	qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
 	qos_params.num = hw_wb->idx - WB_0;
-	qos_params.is_rt = sde_crtc_get_client_type(crtc) != NRT_CLIENT;
+	qos_params.client_type = phys_enc->in_clone_mode ?
+					VBIF_CWB_CLIENT : VBIF_NRT_CLIENT;
 
-	SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d rt:%d\n",
+	SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d rt:%d clone:%d\n",
 			qos_params.num,
 			qos_params.vbif_idx,
-			qos_params.xin_id, qos_params.is_rt);
+			qos_params.xin_id, qos_params.client_type,
+			phys_enc->in_clone_mode);
 
 	sde_vbif_set_qos_remap(phys_enc->sde_kms, &qos_params);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index bc874f1..dc9cc77 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -295,22 +295,16 @@
 	unsigned long flags;
 	struct sde_fence *fc, *next;
 	bool is_signaled = false;
-	struct list_head local_list_head;
 
-	INIT_LIST_HEAD(&local_list_head);
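+	/* hold a reference so the fence context survives the list walk */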
+	kref_get(&ctx->kref);
 
 	spin_lock(&ctx->list_lock);
 	if (list_empty(&ctx->fence_list_head)) {
 		SDE_DEBUG("nothing to trigger!\n");
-		spin_unlock(&ctx->list_lock);
-		return;
+		goto end;
 	}
 
-	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list)
-		list_move(&fc->fence_list, &local_list_head);
-	spin_unlock(&ctx->list_lock);
-
-	list_for_each_entry_safe(fc, next, &local_list_head, fence_list) {
+	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
 		spin_lock_irqsave(&ctx->lock, flags);
 		fc->base.error = error ? -EBUSY : 0;
 		fc->base.timestamp = ts;
@@ -320,20 +314,20 @@
 		if (is_signaled) {
 			list_del_init(&fc->fence_list);
 			dma_fence_put(&fc->base);
-		} else {
-			spin_lock(&ctx->list_lock);
-			list_move(&fc->fence_list, &ctx->fence_list_head);
-			spin_unlock(&ctx->list_lock);
 		}
 	}
+end:
+	spin_unlock(&ctx->list_lock);
+	kref_put(&ctx->kref, sde_fence_destroy);
 }
 
 int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
 							uint32_t offset)
 {
 	uint32_t trigger_value;
-	int fd, rc = -EINVAL;
+	int fd = -1, rc = -EINVAL;
 	unsigned long flags;
+	struct sde_fence *fc;
 
 	if (!ctx || !val) {
 		SDE_ERROR("invalid argument(s), fence %d, pval %d\n",
@@ -351,22 +345,27 @@
 	 */
 	spin_lock_irqsave(&ctx->lock, flags);
 	trigger_value = ctx->commit_count + offset;
-
 	spin_unlock_irqrestore(&ctx->lock, flags);
 
-	fd = _sde_fence_create_fd(ctx, trigger_value);
-	*val = fd;
-	SDE_DEBUG("fence_create::fd:%d trigger:%d commit:%d offset:%d\n",
+	spin_lock(&ctx->list_lock);
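+	/*
+	 * If a fence was already created for this trigger value, hand
+	 * back its fd instead of allocating a duplicate fence.
+	 */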
+	list_for_each_entry(fc, &ctx->fence_list_head, fence_list) {
+		if (trigger_value == fc->base.seqno) {
+			fd = fc->fd;
+			*val = fd;
+			break;
+		}
+	}
+	spin_unlock(&ctx->list_lock);
+
+	if (fd < 0) {
+		fd = _sde_fence_create_fd(ctx, trigger_value);
+		*val = fd;
+		SDE_DEBUG("fd:%d trigger:%d commit:%d offset:%d\n",
 				fd, trigger_value, ctx->commit_count, offset);
+	}
 
 	SDE_EVT32(ctx->drm_id, trigger_value, fd);
-
-	if (fd >= 0) {
-		rc = 0;
-		_sde_fence_trigger(ctx, ktime_get(), false);
-	} else {
-		rc = fd;
-	}
+	rc = (fd >= 0) ? 0 : fd;
 
 	return rc;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 6b3eb6c..738ecbb 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -65,7 +65,8 @@
 #define MAX_DOWNSCALE_RATIO		4
 #define SSPP_UNITY_SCALE		1
 
-#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DEFAULT	2
+#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_NUMERATOR	11
+#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DENOMINATOR	5
 #define MAX_DOWNSCALE_RATIO_INLINE_ROT_NRT_DEFAULT	4
 #define MAX_PRE_ROT_HEIGHT_INLINE_ROT_DEFAULT	1088
 
@@ -379,10 +380,12 @@
 	VBIF_DEFAULT_OT_WR_LIMIT,
 	VBIF_DYNAMIC_OT_RD_LIMIT,
 	VBIF_DYNAMIC_OT_WR_LIMIT,
-	VBIF_QOS_RT_REMAP,
-	VBIF_QOS_NRT_REMAP,
 	VBIF_MEMTYPE_0,
 	VBIF_MEMTYPE_1,
+	VBIF_QOS_RT_REMAP,
+	VBIF_QOS_NRT_REMAP,
+	VBIF_QOS_CWB_REMAP,
+	VBIF_QOS_LUTDMA_REMAP,
 	VBIF_PROP_MAX,
 };
 
@@ -397,6 +400,8 @@
 	REG_DMA_VERSION,
 	REG_DMA_TRIGGER_OFF,
 	REG_DMA_BROADCAST_DISABLED,
+	REG_DMA_XIN_ID,
+	REG_DMA_CLK_CTRL,
 	REG_DMA_PROP_MAX
 };
 
@@ -698,12 +703,16 @@
 		PROP_TYPE_U32_ARRAY},
 	{VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
 		PROP_TYPE_U32_ARRAY},
+	{VBIF_MEMTYPE_0, "qcom,sde-vbif-memtype-0", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_MEMTYPE_1, "qcom,sde-vbif-memtype-1", false, PROP_TYPE_U32_ARRAY},
 	{VBIF_QOS_RT_REMAP, "qcom,sde-vbif-qos-rt-remap", false,
 		PROP_TYPE_U32_ARRAY},
 	{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false,
 		PROP_TYPE_U32_ARRAY},
-	{VBIF_MEMTYPE_0, "qcom,sde-vbif-memtype-0", false, PROP_TYPE_U32_ARRAY},
-	{VBIF_MEMTYPE_1, "qcom,sde-vbif-memtype-1", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_CWB_REMAP, "qcom,sde-vbif-qos-cwb-remap", false,
+		PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_LUTDMA_REMAP, "qcom,sde-vbif-qos-lutdma-remap", false,
+		PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type uidle_prop[] = {
@@ -721,6 +730,10 @@
 		PROP_TYPE_U32},
 	[REG_DMA_BROADCAST_DISABLED] = {REG_DMA_BROADCAST_DISABLED,
 		"qcom,sde-reg-dma-broadcast-disabled", false, PROP_TYPE_BOOL},
+	[REG_DMA_XIN_ID] = {REG_DMA_XIN_ID,
+		"qcom,sde-reg-dma-xin-id", false, PROP_TYPE_U32},
+	[REG_DMA_CLK_CTRL] = {REG_DMA_CLK_CTRL,
+		"qcom,sde-reg-dma-clk-ctrl", false, PROP_TYPE_BIT_OFFSET_ARRAY},
 };
 
 static struct sde_prop_type merge_3d_prop[] = {
@@ -1131,8 +1144,10 @@
 	if (IS_SDE_INLINE_ROT_REV_100(sde_cfg->true_inline_rot_rev)) {
 		set_bit(SDE_SSPP_TRUE_INLINE_ROT_V1, &sspp->features);
 		sblk->in_rot_format_list = sde_cfg->inline_rot_formats;
-		sblk->in_rot_maxdwnscale_rt =
-			sde_cfg->true_inline_dwnscale_rt;
+		sblk->in_rot_maxdwnscale_rt_num =
+			sde_cfg->true_inline_dwnscale_rt_num;
+		sblk->in_rot_maxdwnscale_rt_denom =
+			sde_cfg->true_inline_dwnscale_rt_denom;
 		sblk->in_rot_maxdwnscale_nrt =
 			sde_cfg->true_inline_dwnscale_nrt;
 		sblk->in_rot_maxheight =
@@ -2692,61 +2707,41 @@
 	struct sde_vbif_cfg *vbif, struct sde_prop_value *prop_value,
 	int *prop_count)
 {
-	int j;
+	int i, j;
+	int prop_index = VBIF_QOS_RT_REMAP;
 
-	vbif->qos_rt_tbl.npriority_lvl =
-			prop_count[VBIF_QOS_RT_REMAP];
-	SDE_DEBUG("qos_rt_tbl.npriority_lvl=%u\n",
-			vbif->qos_rt_tbl.npriority_lvl);
-	if (vbif->qos_rt_tbl.npriority_lvl == sde_cfg->vbif_qos_nlvl) {
-		vbif->qos_rt_tbl.priority_lvl = kcalloc(
-			vbif->qos_rt_tbl.npriority_lvl, sizeof(u32),
-			GFP_KERNEL);
-		if (!vbif->qos_rt_tbl.priority_lvl)
-			return -ENOMEM;
-	} else if (vbif->qos_rt_tbl.npriority_lvl) {
-		vbif->qos_rt_tbl.npriority_lvl = 0;
-		vbif->qos_rt_tbl.priority_lvl = NULL;
-		SDE_ERROR("invalid qos rt table\n");
+	for (i = VBIF_RT_CLIENT;
+			((i < VBIF_MAX_CLIENT) && (prop_index < VBIF_PROP_MAX));
+				i++, prop_index++) {
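+		/*
+		 * Client types and their QoS remap DT properties are
+		 * declared in the same order, so both indices advance
+		 * in lockstep starting from VBIF_QOS_RT_REMAP.
+		 */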
+		vbif->qos_tbl[i].npriority_lvl = prop_count[prop_index];
+		SDE_DEBUG("qos_tbl[%d].npriority_lvl=%u\n",
+				i, vbif->qos_tbl[i].npriority_lvl);
+
+		if (vbif->qos_tbl[i].npriority_lvl == sde_cfg->vbif_qos_nlvl) {
+			vbif->qos_tbl[i].priority_lvl = kcalloc(
+					vbif->qos_tbl[i].npriority_lvl,
+					sizeof(u32), GFP_KERNEL);
+			if (!vbif->qos_tbl[i].priority_lvl)
+				return -ENOMEM;
+		} else if (vbif->qos_tbl[i].npriority_lvl) {
+			vbif->qos_tbl[i].npriority_lvl = 0;
+			vbif->qos_tbl[i].priority_lvl = NULL;
+			SDE_ERROR("invalid qos table for client:%d, prop:%d\n",
+					i, prop_index);
+		}
+
+		for (j = 0; j < vbif->qos_tbl[i].npriority_lvl; j++) {
+			vbif->qos_tbl[i].priority_lvl[j] =
+				PROP_VALUE_ACCESS(prop_value, prop_index, j);
+			SDE_DEBUG("client:%d, prop:%d, lvl[%d]=%u\n",
+					i, prop_index, j,
+					vbif->qos_tbl[i].priority_lvl[j]);
+		}
+
+		if (vbif->qos_tbl[i].npriority_lvl)
+			set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
 	}
 
-	for (j = 0; j < vbif->qos_rt_tbl.npriority_lvl; j++) {
-		vbif->qos_rt_tbl.priority_lvl[j] =
-			PROP_VALUE_ACCESS(prop_value,
-					VBIF_QOS_RT_REMAP, j);
-		SDE_DEBUG("lvl[%d]=%u\n", j,
-				vbif->qos_rt_tbl.priority_lvl[j]);
-	}
-
-	vbif->qos_nrt_tbl.npriority_lvl =
-			prop_count[VBIF_QOS_NRT_REMAP];
-	SDE_DEBUG("qos_nrt_tbl.npriority_lvl=%u\n",
-			vbif->qos_nrt_tbl.npriority_lvl);
-
-	if (vbif->qos_nrt_tbl.npriority_lvl == sde_cfg->vbif_qos_nlvl) {
-		vbif->qos_nrt_tbl.priority_lvl = kcalloc(
-			vbif->qos_nrt_tbl.npriority_lvl, sizeof(u32),
-			GFP_KERNEL);
-		if (!vbif->qos_nrt_tbl.priority_lvl)
-			return -ENOMEM;
-	} else if (vbif->qos_nrt_tbl.npriority_lvl) {
-		vbif->qos_nrt_tbl.npriority_lvl = 0;
-		vbif->qos_nrt_tbl.priority_lvl = NULL;
-		SDE_ERROR("invalid qos nrt table\n");
-	}
-
-	for (j = 0; j < vbif->qos_nrt_tbl.npriority_lvl; j++) {
-		vbif->qos_nrt_tbl.priority_lvl[j] =
-			PROP_VALUE_ACCESS(prop_value,
-					VBIF_QOS_NRT_REMAP, j);
-		SDE_DEBUG("lvl[%d]=%u\n", j,
-				vbif->qos_nrt_tbl.priority_lvl[j]);
-	}
-
-	if (vbif->qos_rt_tbl.npriority_lvl ||
-			vbif->qos_nrt_tbl.npriority_lvl)
-		set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
-
 	return 0;
 }
 
@@ -2829,6 +2824,16 @@
 	if (rc)
 		goto end;
 
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_0], 1,
+			&prop_count[VBIF_MEMTYPE_0], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_1], 1,
+			&prop_count[VBIF_MEMTYPE_1], NULL);
+	if (rc)
+		goto end;
+
 	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_RT_REMAP], 1,
 			&prop_count[VBIF_QOS_RT_REMAP], NULL);
 	if (rc)
@@ -2839,13 +2844,13 @@
 	if (rc)
 		goto end;
 
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_0], 1,
-			&prop_count[VBIF_MEMTYPE_0], NULL);
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_CWB_REMAP], 1,
+			&prop_count[VBIF_QOS_CWB_REMAP], NULL);
 	if (rc)
 		goto end;
 
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_1], 1,
-			&prop_count[VBIF_MEMTYPE_1], NULL);
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_LUTDMA_REMAP], 1,
+			&prop_count[VBIF_QOS_LUTDMA_REMAP], NULL);
 	if (rc)
 		goto end;
 
@@ -3152,40 +3157,52 @@
 static int sde_parse_reg_dma_dt(struct device_node *np,
 		struct sde_mdss_cfg *sde_cfg)
 {
-	u32 val;
-	int rc = 0;
-	int i = 0;
+	int rc = 0, i, prop_count[REG_DMA_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	u32 off_count;
+	bool prop_exists[REG_DMA_PROP_MAX];
 
-	sde_cfg->reg_dma_count = 0;
-	for (i = 0; i < REG_DMA_PROP_MAX; i++) {
-		if (reg_dma_prop[i].type == PROP_TYPE_BOOL) {
-			val = of_property_read_bool(np,
-					reg_dma_prop[i].prop_name);
-		} else {
-			rc = of_property_read_u32(np, reg_dma_prop[i].prop_name,
-					&val);
-			if (rc)
-				break;
-		}
-		switch (i) {
-		case REG_DMA_OFF:
-			sde_cfg->dma_cfg.base = val;
-			break;
-		case REG_DMA_VERSION:
-			sde_cfg->dma_cfg.version = val;
-			break;
-		case REG_DMA_TRIGGER_OFF:
-			sde_cfg->dma_cfg.trigger_sel_off = val;
-			break;
-		case REG_DMA_BROADCAST_DISABLED:
-			sde_cfg->dma_cfg.broadcast_disabled = val;
-			break;
-		default:
-			break;
-		}
+	prop_value = kcalloc(REG_DMA_PROP_MAX,
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
 	}
-	if (!rc && i == REG_DMA_PROP_MAX)
-		sde_cfg->reg_dma_count = 1;
+
+	rc = _validate_dt_entry(np, reg_dma_prop, ARRAY_SIZE(reg_dma_prop),
+			prop_count, &off_count);
+	if (rc || !off_count)
+		goto end;
+
+	rc = _read_dt_entry(np, reg_dma_prop, ARRAY_SIZE(reg_dma_prop),
+			prop_count, prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	sde_cfg->reg_dma_count = off_count;
+	sde_cfg->dma_cfg.base = PROP_VALUE_ACCESS(prop_value, REG_DMA_OFF, 0);
+	sde_cfg->dma_cfg.version = PROP_VALUE_ACCESS(prop_value,
+						REG_DMA_VERSION, 0);
+	sde_cfg->dma_cfg.trigger_sel_off = PROP_VALUE_ACCESS(prop_value,
+						REG_DMA_TRIGGER_OFF, 0);
+	sde_cfg->dma_cfg.broadcast_disabled = PROP_VALUE_ACCESS(prop_value,
+						REG_DMA_BROADCAST_DISABLED, 0);
+	sde_cfg->dma_cfg.xin_id = PROP_VALUE_ACCESS(prop_value,
+						REG_DMA_XIN_ID, 0);
+	sde_cfg->dma_cfg.clk_ctrl = SDE_CLK_CTRL_LUTDMA;
+	sde_cfg->dma_cfg.vbif_idx = VBIF_RT;
+
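+	/* mirror the LUTDMA clk-ctrl reg/bit offsets into every MDP block */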
+	for (i = 0; i < sde_cfg->mdp_count; i++) {
+		sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].reg_off =
+			PROP_BITVALUE_ACCESS(prop_value,
+					REG_DMA_CLK_CTRL, 0, 0);
+		sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].bit_off =
+			PROP_BITVALUE_ACCESS(prop_value,
+					REG_DMA_CLK_CTRL, 0, 1);
+	}
+
+end:
+	kfree(prop_value);
 	/* reg dma is optional feature hence return 0 */
 	return 0;
 }
@@ -3764,6 +3781,8 @@
 		sde_cfg->sui_ns_allowed = true;
 		sde_cfg->sui_misr_supported = true;
 		sde_cfg->sui_block_xin_mask = 0x3F71;
+		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
 		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
 		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
@@ -3792,6 +3811,8 @@
 		sde_cfg->sui_misr_supported = true;
 		sde_cfg->has_decimation = true;
 		sde_cfg->sui_block_xin_mask = 0x2EE1;
+		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
 		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
 		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
@@ -3809,6 +3830,8 @@
 		sde_cfg->sui_ns_allowed = true;
 		sde_cfg->sui_misr_supported = true;
 		sde_cfg->sui_block_xin_mask = 0xE71;
+		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
 	} else if (IS_KONA_TARGET(hw_rev)) {
 		sde_cfg->has_cwb_support = true;
@@ -3822,6 +3845,8 @@
 		sde_cfg->sui_ns_allowed = true;
 		sde_cfg->sui_misr_supported = true;
 		sde_cfg->sui_block_xin_mask = 0x3F71;
+		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
 		clear_bit(MDSS_INTR_AD4_0_INTR, sde_cfg->mdss_irqs);
 		clear_bit(MDSS_INTR_AD4_1_INTR, sde_cfg->mdss_irqs);
@@ -3830,8 +3855,10 @@
 		set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
 		sde_cfg->has_vig_p010 = true;
 		sde_cfg->true_inline_rot_rev = SDE_INLINE_ROT_VERSION_1_0_0;
-		sde_cfg->true_inline_dwnscale_rt =
-			MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DEFAULT;
+		sde_cfg->true_inline_dwnscale_rt_num =
+			MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_NUMERATOR;
+		sde_cfg->true_inline_dwnscale_rt_denom =
+			MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DENOMINATOR;
 		sde_cfg->true_inline_dwnscale_nrt =
 			MAX_DOWNSCALE_RATIO_INLINE_ROT_NRT_DEFAULT;
 		sde_cfg->true_inline_prefill_fudge_lines = 2;
@@ -3861,16 +3888,10 @@
 	if (!sde_cfg)
 		return -EINVAL;
 
-	if (IS_SM8150_TARGET(hw_rev) || IS_SM6150_TARGET(hw_rev) ||
-			IS_SDMMAGPIE_TARGET(hw_rev)) {
+	if (sde_cfg->has_sui_blendstage)
 		sde_cfg->sui_supported_blendstage =
 			sde_cfg->max_mixer_blendstages - SDE_STAGE_0;
 
-		for (i = 0; i < sde_cfg->sspp_count; i++)
-			set_bit(SDE_PERF_SSPP_QOS_FL_NOCALC,
-					&sde_cfg->sspp[i].perf_features);
-	}
-
 	for (i = 0; i < sde_cfg->sspp_count; i++) {
 		if (sde_cfg->sspp[i].sblk) {
 			max_horz_deci = max(max_horz_deci,
@@ -3879,6 +3900,10 @@
 				sde_cfg->sspp[i].sblk->maxvdeciexp);
 		}
 
+		if (sde_cfg->has_qos_fl_nocalc)
+			set_bit(SDE_PERF_SSPP_QOS_FL_NOCALC,
+				&sde_cfg->sspp[i].perf_features);
+
 		/*
 		 * set sec-ui blocked SSPP feature flag based on blocked
 		 * xin-mask if sec-ui-misr feature is enabled;
@@ -3914,7 +3939,7 @@
 
 void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
 {
-	int i;
+	int i, j;
 
 	if (!sde_cfg)
 		return;
@@ -3940,12 +3965,15 @@
 	for (i = 0; i < sde_cfg->vbif_count; i++) {
 		kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
 		kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
-		kfree(sde_cfg->vbif[i].qos_rt_tbl.priority_lvl);
-		kfree(sde_cfg->vbif[i].qos_nrt_tbl.priority_lvl);
+
+		for (j = VBIF_RT_CLIENT; j < VBIF_MAX_CLIENT; j++)
+			kfree(sde_cfg->vbif[i].qos_tbl[j].priority_lvl);
 	}
 
-	for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++)
+	for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++) {
+		kfree(sde_cfg->perf.sfe_lut_tbl[i].entries);
 		kfree(sde_cfg->perf.qos_lut_tbl[i].entries);
+	}
 
 	kfree(sde_cfg->dma_formats);
 	kfree(sde_cfg->cursor_formats);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 18e3762..7d25092c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -593,7 +593,10 @@
  * @format_list: Pointer to list of supported formats
  * @virt_format_list: Pointer to list of supported formats for virtual planes
  * @in_rot_format_list: Pointer to list of supported formats for inline rotation
- * @in_rot_maxdwnscale_rt: max downscale ratio for inline rotation rt clients
+ * @in_rot_maxdwnscale_rt_num: max downscale ratio for inline rotation
+ *                                 rt clients - numerator
+ * @in_rot_maxdwnscale_rt_denom: max downscale ratio for inline rotation
+ *                                 rt clients - denominator
  * @in_rot_maxdwnscale_nrt: max downscale ratio for inline rotation nrt clients
  * @in_rot_maxheight: max pre rotated height for inline rotation
  * @in_rot_prefill_fudge_lines: prefill fudge lines for inline rotation
@@ -630,7 +633,8 @@
 	const struct sde_format_extended *format_list;
 	const struct sde_format_extended *virt_format_list;
 	const struct sde_format_extended *in_rot_format_list;
-	u32 in_rot_maxdwnscale_rt;
+	u32 in_rot_maxdwnscale_rt_num;
+	u32 in_rot_maxdwnscale_rt_denom;
 	u32 in_rot_maxdwnscale_nrt;
 	u32 in_rot_maxheight;
 	u32 in_rot_prefill_fudge_lines;
@@ -705,6 +709,7 @@
 	SDE_CLK_CTRL_WB0,
 	SDE_CLK_CTRL_WB1,
 	SDE_CLK_CTRL_WB2,
+	SDE_CLK_CTRL_LUTDMA,
 	SDE_CLK_CTRL_MAX,
 };
 
@@ -998,6 +1003,22 @@
 };
 
 /**
+ * enum sde_vbif_client_type
+ * @VBIF_RT_CLIENT: real time client
+ * @VBIF_NRT_CLIENT: non-realtime clients like writeback
+ * @VBIF_CWB_CLIENT: concurrent writeback client
+ * @VBIF_LUTDMA_CLIENT: LUTDMA client
+ * @VBIF_MAX_CLIENT: max number of clients
+ */
+enum sde_vbif_client_type {
+	VBIF_RT_CLIENT,
+	VBIF_NRT_CLIENT,
+	VBIF_CWB_CLIENT,
+	VBIF_LUTDMA_CLIENT,
+	VBIF_MAX_CLIENT
+};
+
+/**
  * struct sde_vbif_cfg - information of VBIF blocks
  * @id                 enum identifying this block
  * @base               register offset of this block
@@ -1007,8 +1028,7 @@
  * @xin_halt_timeout   maximum time (in usec) for xin to halt
  * @dynamic_ot_rd_tbl  dynamic OT read configuration table
  * @dynamic_ot_wr_tbl  dynamic OT write configuration table
- * @qos_rt_tbl         real-time QoS priority table
- * @qos_nrt_tbl        non-real-time QoS priority table
+ * @qos_tbl            Array of QoS priority tables, one per client type
  * @memtype_count      number of defined memtypes
  * @memtype            array of xin memtype definitions
  */
@@ -1019,8 +1039,7 @@
 	u32 xin_halt_timeout;
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
-	struct sde_vbif_qos_tbl qos_rt_tbl;
-	struct sde_vbif_qos_tbl qos_nrt_tbl;
+	struct sde_vbif_qos_tbl qos_tbl[VBIF_MAX_CLIENT];
 	u32 memtype_count;
 	u32 memtype[MAX_XIN_COUNT];
 };
@@ -1032,12 +1051,18 @@
  * @version            version of lutdma hw block
  * @trigger_sel_off    offset to trigger select registers of lutdma
  * @broadcast_disabled flag indicating if broadcast usage should be avoided
+ * @xin_id             VBIF xin client-id for LUTDMA
+ * @vbif_idx           VBIF id (RT/NRT)
+ * @clk_ctrl           VBIF xin client clk-ctrl
  */
 struct sde_reg_dma_cfg {
 	SDE_HW_BLK_INFO;
 	u32 version;
 	u32 trigger_sel_off;
 	u32 broadcast_disabled;
+	u32 xin_id;
+	u32 vbif_idx;
+	enum sde_clk_ctrl_type clk_ctrl;
 };
 
 /**
@@ -1165,7 +1190,10 @@
  * @vbif_qos_nlvl      number of vbif QoS priority level
  * @ts_prefill_rev     prefill traffic shaper feature revision
  * @true_inline_rot_rev	inline rotator feature revision
- * @true_inline_dwnscale_rt    true inline rotator downscale ratio for rt
+ * @true_inline_dwnscale_rt_num    true inline rotator downscale ratio for rt
+ *                                       - numerator
+ * @true_inline_dwnscale_rt_denom    true inline rot downscale ratio for rt
+ *                                       - denominator
  * @true_inline_dwnscale_nrt    true inline rotator downscale ratio for nrt
  * @true_inline_prefill_fudge_lines    true inline rotator prefill fudge lines
  * @true_inline_prefill_lines_nv12    true inline prefill lines for nv12 format
@@ -1176,6 +1204,7 @@
  * @has_qsync	       Supports qsync feature
  * @has_3d_merge_reset Supports 3D merge reset
  * @has_decimation     Supports decimation
+ * @has_qos_fl_nocalc  flag to indicate QoS fill level needs no calculation
  * @sc_cfg: system cache configuration
  * @uidle_cfg		Settings for uidle feature
  * @sui_misr_supported  indicate if secure-ui-misr is supported
@@ -1187,6 +1216,7 @@
  * @sui_ns_allowed      flag to indicate non-secure context banks are allowed
  *                         during secure-ui session
  * @sui_supported_blendstage  secure-ui supported blendstage
+ * @has_sui_blendstage  flag to indicate secure-ui has a blendstage restriction
  * @has_cursor    indicates if hardware cursor is supported
  * @has_vig_p010  indicates if vig pipe supports p010 format
  * @inline_rot_formats	formats supported by the inline rotator feature
@@ -1221,7 +1251,8 @@
 	u32 vbif_qos_nlvl;
 	u32 ts_prefill_rev;
 	u32 true_inline_rot_rev;
-	u32 true_inline_dwnscale_rt;
+	u32 true_inline_dwnscale_rt_num;
+	u32 true_inline_dwnscale_rt_denom;
 	u32 true_inline_dwnscale_nrt;
 	u32 true_inline_prefill_fudge_lines;
 	u32 true_inline_prefill_lines_nv12;
@@ -1232,6 +1263,7 @@
 	bool has_qsync;
 	bool has_3d_merge_reset;
 	bool has_decimation;
+	bool has_qos_fl_nocalc;
 
 	struct sde_sc_cfg sc_cfg;
 
@@ -1242,6 +1274,7 @@
 	u32 sec_sid_mask[MAX_BLOCKS];
 	u32 sui_ns_allowed;
 	u32 sui_supported_blendstage;
+	bool has_sui_blendstage;
 
 	bool has_hdr;
 	bool has_hdr_plus;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index e56e789..190335e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -24,6 +24,7 @@
 #define   CTL_PREPARE                   0x0d0
 #define   CTL_SW_RESET                  0x030
 #define   CTL_SW_RESET_OVERRIDE         0x060
+#define   CTL_STATUS                    0x064
 #define   CTL_LAYER_EXTN_OFFSET         0x40
 #define   CTL_ROT_TOP                   0x0C0
 #define   CTL_ROT_FLUSH                 0x0C4
@@ -687,6 +688,13 @@
 	return (u32)SDE_REG_READ(&ctx->hw, CTL_SW_RESET);
 }
 
+static u32 sde_hw_ctl_get_scheduler_status(struct sde_hw_ctl *ctx)
+{
+	if (!ctx)
+		return 0;
+	return (u32)SDE_REG_READ(&ctx->hw, CTL_STATUS);
+}
+
 static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
 {
 	struct sde_hw_blk_reg_map *c;
@@ -1226,6 +1234,7 @@
 			sde_hw_ctl_update_bitmask_periph_v1;
 		ops->get_ctl_intf = sde_hw_ctl_get_intf_v1;
 		ops->reset_post_disable = sde_hw_ctl_reset_post_disable;
+		ops->get_scheduler_status = sde_hw_ctl_get_scheduler_status;
 	} else {
 		ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
 		ops->trigger_flush = sde_hw_ctl_trigger_flush;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index b645f20..812d44a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _SDE_HW_CTL_H
@@ -288,6 +288,13 @@
 	u32 (*get_reset)(struct sde_hw_ctl *ctx);
 
 	/**
+	 * get_scheduler_status - check ctl scheduler status bit
+	 * @ctx    : ctl path ctx pointer
+	 * Returns: current value of ctl scheduler and idle status
+	 */
+	u32 (*get_scheduler_status)(struct sde_hw_ctl *ctx);
+
+	/**
 	 * hard_reset - force reset on ctl_path
 	 * @ctx    : ctl path ctx pointer
 	 * @enable : whether to enable/disable hard reset
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 0e59734..4f4fe59 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 #include <drm/msm_drm_pp.h>
 #include "sde_hw_mdss.h"
@@ -304,6 +304,13 @@
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
 
+	if ((cfg->sblk->ltm.id == SDE_DSPP_LTM) && cfg->sblk->ltm.base) {
+		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "LTM",
+				c->hw.blk_off + cfg->sblk->ltm.base,
+				c->hw.blk_off + cfg->sblk->ltm.base + 0xC4,
+				c->hw.xin_id);
+	}
+
 	return c;
 
 blk_init_error:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
index 0a794f8..0c2e025c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
@@ -3443,19 +3443,21 @@
 	/* input param exceeds the display width */
 	if (cfg_param->cfg_param_01 + cfg_param->cfg_param_03 >
 			hw_cfg->displayh) {
-		DRM_ERROR("invalid input param = [%u,%u], displayh = %u\n",
+		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayh = %u\n",
 			cfg_param->cfg_param_01, cfg_param->cfg_param_03,
 			hw_cfg->displayh);
-		return;
+		/* set the roi width to max register value */
+		cfg_param->cfg_param_03 = 0xFFFF;
 	}
 
 	/* input param exceeds the display height */
 	if (cfg_param->cfg_param_02 + cfg_param->cfg_param_04 >
 			hw_cfg->displayv) {
-		DRM_ERROR("invalid input param = [%u,%u], displayv = %u\n",
+		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayv = %u\n",
 			cfg_param->cfg_param_02, cfg_param->cfg_param_04,
 			hw_cfg->displayv);
-		return;
+		/* set the roi height to max register value */
+		cfg_param->cfg_param_04 = 0xFFFF;
 	}
 
 	roi_data[0] = ((cfg_param->cfg_param_02 & 0xFFFF) << 16) |
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 78e0785..5c1aef5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -994,7 +994,7 @@
 		offset = SSPP_UIDLE_CTRL_VALUE;
 
 	val = SDE_REG_READ(&ctx->hw, offset + idx);
-	val = (val & ~BIT(31)) | (cfg->enable ? BIT(31) : 0x0);
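+	/* BIT(31) is the disable bit: clear it to enable uidle, set to disable */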
+	val = (val & ~BIT(31)) | (cfg->enable ? 0x0 : BIT(31));
 	val = (val & ~0xFF00000) | (cfg->fal_allowed_threshold << 20);
 	val = (val & ~0xF0000) | (cfg->fal10_exit_threshold << 16);
 	val = (val & ~0xF00) | (cfg->fal10_threshold << 8);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 973e6d1..219ee07 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -106,174 +106,6 @@
 }
 
 #ifdef CONFIG_DEBUG_FS
-static int _sde_danger_signal_status(struct seq_file *s,
-		bool danger_status)
-{
-	struct sde_kms *kms = (struct sde_kms *)s->private;
-	struct msm_drm_private *priv;
-	struct sde_danger_safe_status status;
-	int i;
-	int rc;
-
-	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
-		SDE_ERROR("invalid arg(s)\n");
-		return 0;
-	}
-
-	priv = kms->dev->dev_private;
-	memset(&status, 0, sizeof(struct sde_danger_safe_status));
-
-	rc = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
-	if (rc) {
-		SDE_ERROR("failed to enable power resource %d\n", rc);
-		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
-		return rc;
-	}
-
-	if (danger_status) {
-		seq_puts(s, "\nDanger signal status:\n");
-		if (kms->hw_mdp->ops.get_danger_status)
-			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
-					&status);
-	} else {
-		seq_puts(s, "\nSafe signal status:\n");
-		if (kms->hw_mdp->ops.get_danger_status)
-			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
-					&status);
-	}
-	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
-
-	seq_printf(s, "MDP     :  0x%x\n", status.mdp);
-
-	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
-		seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
-				status.sspp[i]);
-	seq_puts(s, "\n");
-
-	for (i = WB_0; i < WB_MAX; i++)
-		seq_printf(s, "WB%d     :  0x%x  \t", i - WB_0,
-				status.wb[i]);
-	seq_puts(s, "\n");
-
-	return 0;
-}
-
-#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
-static int __prefix ## _open(struct inode *inode, struct file *file)	\
-{									\
-	return single_open(file, __prefix ## _show, inode->i_private);	\
-}									\
-static const struct file_operations __prefix ## _fops = {		\
-	.owner = THIS_MODULE,						\
-	.open = __prefix ## _open,					\
-	.release = single_release,					\
-	.read = seq_read,						\
-	.llseek = seq_lseek,						\
-}
-
-static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
-{
-	return _sde_danger_signal_status(s, true);
-}
-DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
-
-static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
-{
-	return _sde_danger_signal_status(s, false);
-}
-DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
-
-static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
-{
-	debugfs_remove_recursive(sde_kms->debugfs_danger);
-	sde_kms->debugfs_danger = NULL;
-}
-
-static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
-		struct dentry *parent)
-{
-	sde_kms->debugfs_danger = debugfs_create_dir("danger",
-			parent);
-	if (!sde_kms->debugfs_danger) {
-		SDE_ERROR("failed to create danger debugfs\n");
-		return -EINVAL;
-	}
-
-	debugfs_create_file("danger_status", 0400, sde_kms->debugfs_danger,
-			sde_kms, &sde_debugfs_danger_stats_fops);
-	debugfs_create_file("safe_status", 0400, sde_kms->debugfs_danger,
-			sde_kms, &sde_debugfs_safe_stats_fops);
-
-	return 0;
-}
-
-static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
-{
-	struct sde_debugfs_regset32 *regset;
-	struct sde_kms *sde_kms;
-	struct drm_device *dev;
-	struct msm_drm_private *priv;
-	void __iomem *base;
-	uint32_t i, addr;
-
-	if (!s || !s->private)
-		return 0;
-
-	regset = s->private;
-
-	sde_kms = regset->sde_kms;
-	if (!sde_kms || !sde_kms->mmio)
-		return 0;
-
-	dev = sde_kms->dev;
-	if (!dev)
-		return 0;
-
-	priv = dev->dev_private;
-	if (!priv)
-		return 0;
-
-	base = sde_kms->mmio + regset->offset;
-
-	/* insert padding spaces, if needed */
-	if (regset->offset & 0xF) {
-		seq_printf(s, "[%x]", regset->offset & ~0xF);
-		for (i = 0; i < (regset->offset & 0xF); i += 4)
-			seq_puts(s, "         ");
-	}
-
-	if (sde_power_resource_enable(&priv->phandle,
-				sde_kms->core_client, true)) {
-		seq_puts(s, "failed to enable sde clocks\n");
-		return 0;
-	}
-
-	/* main register output */
-	for (i = 0; i < regset->blk_len; i += 4) {
-		addr = regset->offset + i;
-		if ((addr & 0xF) == 0x0)
-			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
-		seq_printf(s, " %08x", readl_relaxed(base + i));
-	}
-	seq_puts(s, "\n");
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-
-	return 0;
-}
-
-static int sde_debugfs_open_regset32(struct inode *inode,
-		struct file *file)
-{
-	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
-}
-
-static const struct file_operations sde_fops_regset32 = {
-	.open =		sde_debugfs_open_regset32,
-	.read =		seq_read,
-	.llseek =	seq_lseek,
-	.release =	single_release,
-};
-
 void *sde_debugfs_get_root(struct sde_kms *sde_kms)
 {
 	struct msm_drm_private *priv;
@@ -303,7 +135,6 @@
 	/* allow debugfs_root to be NULL */
 	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);
 
-	(void) sde_debugfs_danger_init(sde_kms, debugfs_root);
 	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
 	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);
 
@@ -321,7 +152,6 @@
 	/* don't need to NULL check debugfs_root */
 	if (sde_kms) {
 		sde_debugfs_vbif_destroy(sde_kms);
-		sde_debugfs_danger_destroy(sde_kms);
 		sde_debugfs_core_irq_destroy(sde_kms);
 	}
 }
@@ -2669,9 +2499,14 @@
 	}
 
 	crtc_state->active = true;
-	drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
+	ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
+	if (ret)
+		SDE_ERROR("error %d setting the crtc\n", ret);
 
-	drm_atomic_commit(state);
+	ret = drm_atomic_commit(state);
+	if (ret)
+		SDE_ERROR("Error %d doing the atomic commit\n", ret);
+
 end:
 	if (state)
 		drm_atomic_state_put(state);
@@ -3014,6 +2849,25 @@
 	sde_hw_sid_rotator_set(sde_kms->hw_sid);
 }
 
+static void _sde_kms_set_lutdma_vbif_remap(struct sde_kms *sde_kms)
+{
+	struct sde_vbif_set_qos_params qos_params;
+	struct sde_mdss_cfg *catalog;
+
+	if (!sde_kms->catalog)
+		return;
+
+	catalog = sde_kms->catalog;
+
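+	/* remap VBIF QoS levels for the LUTDMA xin client */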
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = catalog->dma_cfg.vbif_idx;
+	qos_params.xin_id = catalog->dma_cfg.xin_id;
+	qos_params.clk_ctrl = catalog->dma_cfg.clk_ctrl;
+	qos_params.client_type = VBIF_LUTDMA_CLIENT;
+
+	sde_vbif_set_qos_remap(sde_kms, &qos_params);
+}
+
 static void sde_kms_handle_power_event(u32 event_type, void *usr)
 {
 	struct sde_kms *sde_kms = usr;
@@ -3030,6 +2884,7 @@
 		sde_irq_update(msm_kms, true);
 		sde_vbif_init_memtypes(sde_kms);
 		sde_kms_init_shared_hw(sde_kms);
+		_sde_kms_set_lutdma_vbif_remap(sde_kms);
 		sde_kms->first_kickoff = true;
 	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
 		sde_irq_update(msm_kms, false);
@@ -3112,7 +2967,7 @@
 
 	node = of_find_node_by_name(parent, node_name);
 	if (!node) {
-		SDE_ERROR("failed to find node %s\n", node_name);
+		SDE_DEBUG("failed to find node %s\n", node_name);
 		return -EINVAL;
 	}
 
@@ -3132,7 +2987,7 @@
 
 	data->num_splash_displays = num_displays;
 
-	pr_info("splash mem num_regions:%d\n", num_regions);
+	SDE_DEBUG("splash mem num_regions:%d\n", num_regions);
 	if (num_displays > num_regions) {
 		share_splash_mem = true;
 		pr_info(":%d displays share same splash buf\n", num_displays);
@@ -3165,7 +3020,7 @@
 			data->splash_display[i].splash = &data->splash_mem[0];
 		}
 
-		pr_info("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
+		SDE_DEBUG("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
 				splash_display->splash->splash_buf_base,
 				splash_display->splash->splash_buf_size);
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 0085410..62da4de 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -243,7 +243,6 @@
 	struct sde_power_event *power_event;
 
 	/* directory entry for debugfs */
-	struct dentry *debugfs_danger;
 	struct dentry *debugfs_vbif;
 
 	/* io/register spaces: */
@@ -423,22 +422,10 @@
  *
  * Documentation/filesystems/debugfs.txt
  *
- * @sde_debugfs_setup_regset32: Initialize data for sde_debugfs_create_regset32
- * @sde_debugfs_create_regset32: Create 32-bit register dump file
  * @sde_debugfs_get_root: Get root dentry for SDE_KMS's debugfs node
  */
 
 /**
- * Companion structure for sde_debugfs_create_regset32. Do not initialize the
- * members of this structure explicitly; use sde_debugfs_setup_regset32 instead.
- */
-struct sde_debugfs_regset32 {
-	uint32_t offset;
-	uint32_t blk_len;
-	struct sde_kms *sde_kms;
-};
-
-/**
  * sde_debugfs_get_root - Return root directory entry for KMS's debugfs
  *
  * The return value should be passed as the 'parent' argument to subsequent
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index d8aa5b5..eff1a2d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -136,9 +136,6 @@
 
 	/* debugfs related stuff */
 	struct dentry *debugfs_root;
-	struct sde_debugfs_regset32 debugfs_src;
-	struct sde_debugfs_regset32 debugfs_scaler;
-	struct sde_debugfs_regset32 debugfs_csc;
 	bool debugfs_default_scale;
 };
 
@@ -670,12 +667,13 @@
 	qos_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
 	qos_params.xin_id = psde->pipe_hw->cap->xin_id;
 	qos_params.num = psde->pipe_hw->idx - SSPP_VIG0;
-	qos_params.is_rt = psde->is_rt_pipe;
+	qos_params.client_type = psde->is_rt_pipe ?
+					VBIF_RT_CLIENT : VBIF_NRT_CLIENT;
 
 	SDE_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
 			plane->base.id, qos_params.num,
 			qos_params.vbif_idx,
-			qos_params.xin_id, qos_params.is_rt,
+			qos_params.xin_id, qos_params.client_type,
 			qos_params.clk_ctrl);
 
 	sde_vbif_set_qos_remap(sde_kms, &qos_params);
@@ -1604,9 +1602,7 @@
 	struct sde_plane *psde;
 	struct sde_plane_state *pstate, *old_pstate;
 	int ret = 0;
-	const struct msm_format *msm_fmt;
-	const struct sde_format *fmt;
-	u32 height;
+	u32 rotation;
 
 	if (!plane || !state) {
 		SDE_ERROR("invalid plane/state\n");
@@ -1618,36 +1614,47 @@
 	old_pstate = to_sde_plane_state(plane->state);
 
 	/* check inline rotation and simplify the transform */
-	pstate->rotation = drm_rotation_simplify(
+	rotation = drm_rotation_simplify(
 			state->rotation,
 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
 			DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
 
-	if ((pstate->rotation & DRM_MODE_ROTATE_180) ||
-		(pstate->rotation & DRM_MODE_ROTATE_270)) {
+	if ((rotation & DRM_MODE_ROTATE_180) ||
+		(rotation & DRM_MODE_ROTATE_270)) {
 		SDE_ERROR_PLANE(psde,
 			"invalid rotation transform must be simplified 0x%x\n",
-			pstate->rotation);
+			rotation);
 		ret = -EINVAL;
 		goto exit;
 	}
 
-	msm_fmt = msm_framebuffer_format(state->fb);
-	fmt = to_sde_format(msm_fmt);
-	height = state->fb ? state->fb->height : 0x0;
-
-	if ((pstate->rotation & DRM_MODE_ROTATE_90)) {
+	if (rotation & DRM_MODE_ROTATE_90) {
 		struct msm_drm_private *priv = plane->dev->dev_private;
 		struct sde_kms *sde_kms;
+		const struct msm_format *msm_fmt;
+		const struct sde_format *fmt;
+		struct sde_rect src;
+		bool q16_data = true;
 
-		if (!psde->pipe_sblk->in_rot_maxdwnscale_rt ||
+		POPULATE_RECT(&src, state->src_x, state->src_y,
+			state->src_w, state->src_h, q16_data);
+		/*
+		 * DRM framework expects rotation flag in counter-clockwise
+		 * direction and the HW expects in clockwise direction.
+		 * Flip the flags to match with HW.
+		 */
+		rotation ^= (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
+
+		if (!psde->pipe_sblk->in_rot_maxdwnscale_rt_num ||
+			!psde->pipe_sblk->in_rot_maxdwnscale_rt_denom ||
 			!psde->pipe_sblk->in_rot_maxdwnscale_nrt ||
 			!psde->pipe_sblk->in_rot_maxheight ||
 			!psde->pipe_sblk->in_rot_format_list ||
 			!(psde->features & BIT(SDE_SSPP_TRUE_INLINE_ROT_V1))) {
 			SDE_ERROR_PLANE(psde,
-				"wrong config rt:%d nrt:%d fmt:%d h:%d 0x%x\n",
-				!psde->pipe_sblk->in_rot_maxdwnscale_rt,
+			    "wrong config rt:%d/%d nrt:%d fmt:%d h:%d 0x%x\n",
+				!psde->pipe_sblk->in_rot_maxdwnscale_rt_num,
+				!psde->pipe_sblk->in_rot_maxdwnscale_rt_denom,
 				!psde->pipe_sblk->in_rot_maxdwnscale_nrt,
 				!psde->pipe_sblk->in_rot_format_list,
 				!psde->pipe_sblk->in_rot_maxheight,
@@ -1657,10 +1664,10 @@
 		}
 
 		/* check for valid height */
-		if (height > psde->pipe_sblk->in_rot_maxheight) {
+		if (src.h > psde->pipe_sblk->in_rot_maxheight) {
 			SDE_ERROR_PLANE(psde,
 				"invalid height for inline rot:%d max:%d\n",
-				height, psde->pipe_sblk->in_rot_maxheight);
+				src.h, psde->pipe_sblk->in_rot_maxheight);
 			ret = -EINVAL;
 			goto exit;
 		}
@@ -1670,12 +1677,14 @@
 
 		/* check for valid formats supported by inline rot */
 		sde_kms = to_sde_kms(priv->kms);
+		msm_fmt = msm_framebuffer_format(state->fb);
+		fmt = to_sde_format(msm_fmt);
 		ret = sde_format_validate_fmt(&sde_kms->base, fmt,
 			psde->pipe_sblk->in_rot_format_list);
-
 	}
 
 exit:
+	pstate->rotation = rotation;
 	return ret;
 }
 
@@ -2305,6 +2314,8 @@
 		uint32_t hor_req_pixels, hor_fetch_pixels;
 		uint32_t vert_req_pixels, vert_fetch_pixels;
 		uint32_t src_w_tmp, src_h_tmp;
+		uint32_t scaler_w, scaler_h;
+		bool rot;
 
 		/* re-use color plane 1's config for plane 2 */
 		if (i == 2)
@@ -2354,20 +2365,27 @@
 		}
 
 		/*
+		 * swap the scaler src width & height for inline-rotation 90
+		 * comparison with Pixel-Extension, as PE is based on
+		 * pre-rotation and QSEED is based on post-rotation
+		 */
+		rot = pstate->rotation & DRM_MODE_ROTATE_90;
+		scaler_w = rot ? pstate->scaler3_cfg.src_height[i]
+				    : pstate->scaler3_cfg.src_width[i];
+		scaler_h = rot ? pstate->scaler3_cfg.src_width[i]
+				    : pstate->scaler3_cfg.src_height[i];
+		/*
 		 * Alpha plane can only be scaled using bilinear or pixel
 		 * repeat/drop, src_width and src_height are only specified
 		 * for Y and UV plane
 		 */
-		if (i != 3 &&
-			(hor_req_pixels != pstate->scaler3_cfg.src_width[i] ||
-			vert_req_pixels != pstate->scaler3_cfg.src_height[i])) {
+		if (i != 3 && (hor_req_pixels != scaler_w ||
+					vert_req_pixels != scaler_h)) {
 			SDE_ERROR_PLANE(psde,
-				"roi[%d] %d/%d, scaler src %dx%d, src %dx%d\n",
+			    "roi[%d] roi:%dx%d scaler:%dx%d src:%dx%d rot:%d\n",
 				i, pstate->pixel_ext.roi_w[i],
 				pstate->pixel_ext.roi_h[i],
-				pstate->scaler3_cfg.src_width[i],
-				pstate->scaler3_cfg.src_height[i],
-				src_w, src_h);
+				scaler_w, scaler_h, src_w, src_h, rot);
 			return -EINVAL;
 		}
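
The swap above is needed because pixel extension is programmed in pre-rotation coordinates while the QSEED scaler source is post-rotation; with a 90-degree inline rotation the two are transposed. A hedged sketch of the comparison (standalone C, all names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative check: PE requested pixels are pre-rotation, QSEED src
 * is post-rotation, so swap QSEED w/h when rotated 90 degrees. */
static bool pe_matches_scaler(unsigned int req_w, unsigned int req_h,
			      unsigned int qseed_w, unsigned int qseed_h,
			      bool rot90)
{
	unsigned int w = rot90 ? qseed_h : qseed_w;
	unsigned int h = rot90 ? qseed_w : qseed_h;

	return req_w == w && req_h == h;
}

int main(void)
{
	/* 1080x1920 fetch; QSEED sees the rotated 1920x1080 surface */
	printf("%d\n", pe_matches_scaler(1080, 1920, 1920, 1080, true));
	return 0;
}
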
 
@@ -2403,7 +2421,8 @@
 	int ret = 0;
 	uint32_t deci_w, deci_h, src_deci_w, src_deci_h;
 	uint32_t scaler_src_w, scaler_src_h;
-	uint32_t max_upscale, max_downscale, max_linewidth;
+	uint32_t max_downscale_num, max_downscale_denom;
+	uint32_t max_upscale, max_linewidth;
 	bool inline_rotation, rt_client;
 	struct drm_crtc *crtc;
 
@@ -2432,14 +2451,20 @@
 	else
 		rt_client = true;
 
+	max_downscale_denom = 1;
 	/* inline rotation RT clients have a different max downscaling limit */
 	if (inline_rotation) {
-		if (rt_client)
-			max_downscale = psde->pipe_sblk->in_rot_maxdwnscale_rt;
-		else
-			max_downscale = psde->pipe_sblk->in_rot_maxdwnscale_nrt;
+		if (rt_client) {
+			max_downscale_num =
+				psde->pipe_sblk->in_rot_maxdwnscale_rt_num;
+			max_downscale_denom =
+				psde->pipe_sblk->in_rot_maxdwnscale_rt_denom;
+		} else {
+			max_downscale_num =
+				psde->pipe_sblk->in_rot_maxdwnscale_nrt;
+		}
 	} else {
-		max_downscale = psde->pipe_sblk->maxdwnscale;
+		max_downscale_num = psde->pipe_sblk->maxdwnscale;
 	}
 
 	/* decimation validation */
@@ -2472,8 +2497,10 @@
 	/* check max scaler capability */
 	else if (((scaler_src_w * max_upscale) < dst->w) ||
 		((scaler_src_h * max_upscale) < dst->h) ||
-		((dst->w * max_downscale) < scaler_src_w) ||
-		((dst->h * max_downscale) < scaler_src_h)) {
+		(((dst->w * max_downscale_num) / max_downscale_denom)
+			< scaler_src_w) ||
+		(((dst->h * max_downscale_num) / max_downscale_denom)
+			< scaler_src_h)) {
 		SDE_ERROR_PLANE(psde,
 			"too much scaling requested %ux%u->%ux%u rot:%d\n",
 			scaler_src_w, scaler_src_h, dst->w, dst->h,
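
Splitting the limit into numerator/denominator lets the RT inline-rotation downscale cap be a non-integer ratio without floating point: the old dst * max >= src test becomes dst * num / denom >= src. A small sketch with assumed limits:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative: does dst -> src downscale stay within num/denom? */
static bool downscale_ok(unsigned int src, unsigned int dst,
			 unsigned int num, unsigned int denom)
{
	return (dst * num) / denom >= src;
}

int main(void)
{
	/* a 2.5x limit expressed as 5/2: 1920 -> 800 is ~2.4x, allowed */
	printf("%d\n", downscale_ok(1920, 800, 5, 2));
	/* 1920 -> 700 is ~2.74x, rejected */
	printf("%d\n", downscale_ok(1920, 700, 5, 2));
	return 0;
}
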
@@ -3149,11 +3176,9 @@
 				&pstate->property_state)) >= 0) {
 		dirty_prop_flag = plane_prop_array[idx];
 		pstate->dirty |= dirty_prop_flag;
-		if (dirty_prop_flag == SDE_PLANE_DIRTY_ALL &&
-				idx != PLANE_PROP_MULTIRECT_MODE &&
-				idx != PLANE_PROP_COLOR_FILL)
-			SDE_ERROR("executing full mode set, prp_idx %d\n", idx);
-		break;
+
+		if (dirty_prop_flag == SDE_PLANE_DIRTY_ALL)
+			break;
 	}
 
 	/**
@@ -3540,8 +3565,16 @@
 		const struct sde_format_extended *inline_rot_fmt_list;
 
 		sde_kms_info_add_keyint(info, "true_inline_rot_rev", 1);
-		sde_kms_info_add_keyint(info, "true_inline_dwnscale_rt",
-			psde->pipe_sblk->in_rot_maxdwnscale_rt);
+		sde_kms_info_add_keyint(info,
+			"true_inline_dwnscale_rt",
+			(int) (psde->pipe_sblk->in_rot_maxdwnscale_rt_num /
+				psde->pipe_sblk->in_rot_maxdwnscale_rt_denom));
+		sde_kms_info_add_keyint(info,
+				"true_inline_dwnscale_rt_numerator",
+				psde->pipe_sblk->in_rot_maxdwnscale_rt_num);
+		sde_kms_info_add_keyint(info,
+				"true_inline_dwnscale_rt_denominator",
+				psde->pipe_sblk->in_rot_maxdwnscale_rt_denom);
 		sde_kms_info_add_keyint(info, "true_inline_dwnscale_nrt",
 			psde->pipe_sblk->in_rot_maxdwnscale_nrt);
 		sde_kms_info_add_keyint(info, "true_inline_max_height",
@@ -4271,10 +4304,14 @@
 				&psde->debugfs_default_scale);
 
 	if (cfg->features & BIT(SDE_SSPP_TRUE_INLINE_ROT_V1)) {
-		debugfs_create_u32("in_rot_max_downscale_rt",
+		debugfs_create_u32("in_rot_max_downscale_rt_num",
 			0600,
 			psde->debugfs_root,
-			(u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt);
+			(u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt_num);
+		debugfs_create_u32("in_rot_max_downscale_rt_denom",
+			0600,
+			psde->debugfs_root,
+			(u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt_denom);
 		debugfs_create_u32("in_rot_max_downscale_nrt",
 			0600,
 			psde->debugfs_root,
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index 6fb7b5a..44e3a84 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
@@ -389,9 +389,12 @@
 		return;
 	}
 
-	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
-			&vbif->cap->qos_nrt_tbl;
+	if (params->client_type >= VBIF_MAX_CLIENT) {
+		SDE_ERROR("invalid client type:%d\n", params->client_type);
+		return;
+	}
 
+	qos_tbl = &vbif->cap->qos_tbl[params->client_type];
 	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
 		SDE_DEBUG("qos tbl not defined\n");
 		return;
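
With the QoS table indexed by client type instead of a RT/NRT bool, the range check is what keeps the lookup in bounds; a value equal to the table size would already read past the end. A standalone sketch (the enum shape is an assumption mirroring sde_vbif_client_type):

#include <stdio.h>

/* Assumed shape of the client-type enum; the real definition lives
 * in the SDE catalog headers. */
enum vbif_client_type { VBIF_RT_CLIENT, VBIF_NRT_CLIENT, VBIF_MAX_CLIENT };

static const char *const qos_tbl[VBIF_MAX_CLIENT] = { "rt", "nrt" };

static const char *qos_lookup(int type)
{
	/* '>=', not '>': type == VBIF_MAX_CLIENT is already out of bounds */
	if (type < 0 || type >= VBIF_MAX_CLIENT)
		return NULL;
	return qos_tbl[type];
}

int main(void)
{
	const char *q = qos_lookup(VBIF_MAX_CLIENT);

	printf("%s\n", qos_lookup(VBIF_NRT_CLIENT));	/* "nrt" */
	printf("%s\n", q ? q : "rejected");		/* "rejected" */
	return 0;
}
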
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
index a7e7b4a..b16e0c7 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SDE_VBIF_H__
@@ -50,14 +50,14 @@
  * @xin_id: client interface identifier
  * @clk_ctrl: clock control identifier of the xin
  * @num: pipe identifier (debug only)
- * @is_rt: true if pipe is used in real-time use case
+ * @client_type: client type enumerated by sde_vbif_client_type
  */
 struct sde_vbif_set_qos_params {
 	u32 vbif_idx;
 	u32 xin_id;
 	u32 clk_ctrl;
 	u32 num;
-	bool is_rt;
+	enum sde_vbif_client_type client_type;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 9a64dd9..ec019fb 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -4413,7 +4413,7 @@
 
 		snprintf(debug_name, sizeof(debug_name), "%s_reg",
 				blk_base->name);
-		debugfs_create_file(debug_name, 0600, debugfs_root, blk_base,
+		debugfs_create_file(debug_name, 0400, debugfs_root, blk_base,
 				&sde_reg_fops);
 	}
 
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
index 03178ca..5c1dc4a 100644
--- a/drivers/gpu/drm/msm/sde_hdcp.h
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2012, 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SDE_HDCP_H__
@@ -13,12 +13,15 @@
 #include <linux/debugfs.h>
 #include <linux/of_device.h>
 #include <linux/i2c.h>
+#include <linux/list.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <linux/hdcp_qseecom.h>
 #include "sde_kms.h"
 
+#define MAX_STREAM_COUNT 2
+
 enum sde_hdcp_client_id {
 	HDCP_CLIENT_HDMI,
 	HDCP_CLIENT_DP,
@@ -38,6 +41,18 @@
 	HDCP_VERSION_MAX = BIT(2),
 };
 
+struct stream_info {
+	u8 stream_id;
+	u8 virtual_channel;
+};
+
+struct sde_hdcp_stream {
+	struct list_head list;
+	u8 stream_id;
+	u8 virtual_channel;
+	u32 stream_handle;
+};
+
 struct sde_hdcp_init_data {
 	struct device *msm_hdcp_dev;
 	struct dss_io_data *core_io;
@@ -67,7 +82,13 @@
 	bool (*feature_supported)(void *input);
 	void (*force_encryption)(void *input, bool enable);
 	bool (*sink_support)(void *input);
+	int (*set_mode)(void *input, bool mst_enabled);
+	int (*on)(void *input);
 	void (*off)(void *hdcp_ctrl);
+	int (*register_streams)(void *input, u8 num_streams,
+			struct stream_info *streams);
+	int (*deregister_streams)(void *input, u8 num_streams,
+			struct stream_info *streams);
 };
 
 static inline const char *sde_hdcp_state_name(enum sde_hdcp_state hdcp_state)
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.c b/drivers/gpu/drm/msm/sde_hdcp_2x.c
index 54dfc8f..f578e09 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.c
@@ -64,6 +64,10 @@
 	atomic_t hdcp_off;
 	enum sde_hdcp_2x_device_type device_type;
 	u8 min_enc_level;
+	struct list_head stream_handles;
+	u8 stream_count;
+	struct stream_info *streams;
+	u8 num_streams;
 
 	struct task_struct *thread;
 	struct completion response_completion;
@@ -315,6 +319,8 @@
 
 static void sde_hdcp_2x_clean(struct sde_hdcp_2x_ctrl *hdcp)
 {
+	struct list_head *element;
+	struct sde_hdcp_stream *stream_entry;
 	struct hdcp_transport_wakeup_data cdata = {HDCP_TRANSPORT_CMD_INVALID};
 
 	hdcp->authenticated = false;
@@ -322,10 +328,20 @@
 	cdata.context = hdcp->client_data;
 	cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_FAILED;
 
-	if (!atomic_read(&hdcp->hdcp_off))
-		sde_hdcp_2x_wakeup_client(hdcp, &cdata);
+	while (!list_empty(&hdcp->stream_handles)) {
+		element = hdcp->stream_handles.next;
+		list_del(element);
 
-	atomic_set(&hdcp->hdcp_off, 1);
+		stream_entry = list_entry(element, struct sde_hdcp_stream,
+			list);
+		hdcp2_close_stream(hdcp->hdcp2_ctx,
+			stream_entry->stream_handle);
+		kzfree(stream_entry);
+		hdcp->stream_count--;
+	}
+
+	if (!atomic_xchg(&hdcp->hdcp_off, 1))
+		sde_hdcp_2x_wakeup_client(hdcp, &cdata);
 
 	hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_STOP, &hdcp->app_data);
 }
@@ -333,20 +349,17 @@
 static u8 sde_hdcp_2x_stream_type(u8 min_enc_level)
 {
 	u8 stream_type = 0;
-	u8 const hdcp_min_enc_level_0 = 0, hdcp_min_enc_level_1 = 1,
-	   hdcp_min_enc_level_2 = 2;
-	u8 const stream_type_0 = 0, stream_type_1 = 1;
 
 	switch (min_enc_level) {
-	case hdcp_min_enc_level_0:
-	case hdcp_min_enc_level_1:
-		stream_type = stream_type_0;
+	case 0:
+	case 1:
+		stream_type = 0;
 		break;
-	case hdcp_min_enc_level_2:
-		stream_type = stream_type_1;
+	case 2:
+		stream_type = 1;
 		break;
 	default:
-		stream_type = stream_type_0;
+		stream_type = 0;
 		break;
 	}
 
@@ -480,19 +493,26 @@
 static void sde_hdcp_2x_init(struct sde_hdcp_2x_ctrl *hdcp)
 {
 	int rc;
-
 	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_START, &hdcp->app_data);
 	if (rc)
-		goto exit;
+		sde_hdcp_2x_clean(hdcp);
+}
 
-	pr_debug("[tz]: %s\n", sde_hdcp_2x_message_name(
-		hdcp->app_data.response.data[0]));
+static void sde_hdcp_2x_start_auth(struct sde_hdcp_2x_ctrl *hdcp)
+{
+	int rc;
+
+	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_START_AUTH,
+		&hdcp->app_data);
+	if (rc) {
+		sde_hdcp_2x_clean(hdcp);
+		return;
+	}
+
+	pr_debug("message received from TZ: %s\n",
+		 sde_hdcp_2x_message_name(hdcp->app_data.response.data[0]));
 
 	sde_hdcp_2x_send_message(hdcp);
-
-	return;
-exit:
-	sde_hdcp_2x_clean(hdcp);
 }
 
 static void sde_hdcp_2x_timeout(struct sde_hdcp_2x_ctrl *hdcp)
@@ -542,7 +562,8 @@
 		goto exit;
 	}
 
-	if (hdcp->device_type == HDCP_TXMTR_DP) {
+	if (hdcp->device_type == HDCP_TXMTR_DP ||
+			hdcp->device_type == HDCP_TXMTR_DP_MST) {
 		msg[0] = hdcp->last_msg;
 		message_id_bytes = 1;
 	}
@@ -628,6 +649,147 @@
 		sde_hdcp_2x_clean(hdcp);
 }
 
+static struct list_head *sde_hdcp_2x_stream_present(
+		struct sde_hdcp_2x_ctrl *hdcp, u8 stream_id, u8 virtual_channel)
+{
+	struct sde_hdcp_stream *stream_entry;
+	struct list_head *entry;
+	bool present = false;
+
+	list_for_each(entry, &hdcp->stream_handles) {
+		stream_entry = list_entry(entry,
+			struct sde_hdcp_stream, list);
+		if (stream_entry->virtual_channel == virtual_channel &&
+				stream_entry->stream_id == stream_id) {
+			present = true;
+			break;
+		}
+	}
+
+	if (!present)
+		entry = NULL;
+	return entry;
+}
+
+static void sde_hdcp_2x_open_stream(struct sde_hdcp_2x_ctrl *hdcp)
+{
+	int rc;
+	size_t iterations, i;
+	u8 stream_id;
+	u8 virtual_channel;
+	u32 stream_handle = 0;
+	bool query_streams = false;
+
+	if (!hdcp->streams) {
+		pr_err("Array of streams to register is NULL\n");
+		return;
+	}
+
+	iterations = min(hdcp->num_streams, (u8)(MAX_STREAM_COUNT));
+
+	for (i = 0; i < iterations; i++) {
+		if (hdcp->stream_count == MAX_STREAM_COUNT) {
+			pr_debug("Registered the maximum amount of streams\n");
+			break;
+		}
+
+		stream_id = hdcp->streams[i].stream_id;
+		virtual_channel = hdcp->streams[i].virtual_channel;
+
+		pr_debug("Opening stream %d, virtual channel %d\n",
+			stream_id, virtual_channel);
+
+		if (sde_hdcp_2x_stream_present(hdcp, stream_id,
+				virtual_channel)) {
+			pr_debug("Stream %d, virtual channel %d already open\n",
+				stream_id, virtual_channel);
+			continue;
+		}
+
+		rc = hdcp2_open_stream(hdcp->hdcp2_ctx, virtual_channel,
+				stream_id, &stream_handle);
+		if (rc) {
+			pr_err("Unable to open stream %d, virtual channel %d\n",
+				stream_id, virtual_channel);
+		} else {
+			struct sde_hdcp_stream *stream =
+				kzalloc(sizeof(struct sde_hdcp_stream),
+					GFP_KERNEL);
+			if (!stream)
+				break;
+
+			INIT_LIST_HEAD(&stream->list);
+			stream->stream_handle = stream_handle;
+			stream->stream_id = stream_id;
+			stream->virtual_channel = virtual_channel;
+
+			list_add(&stream->list, &hdcp->stream_handles);
+			hdcp->stream_count++;
+
+			query_streams = true;
+		}
+	}
+
+	if (query_streams && hdcp->authenticated)
+		sde_hdcp_2x_query_stream(hdcp);
+}
+
+static void sde_hdcp_2x_close_stream(struct sde_hdcp_2x_ctrl *hdcp)
+{
+	int rc;
+	size_t iterations, i;
+	u8 stream_id;
+	u8 virtual_channel;
+	struct list_head *entry;
+	struct sde_hdcp_stream *stream_entry;
+	bool query_streams = false;
+
+	if (!hdcp->streams) {
+		pr_err("Array of streams to register is NULL\n");
+		return;
+	}
+
+	iterations = min(hdcp->num_streams, (u8)(MAX_STREAM_COUNT));
+
+	for (i = 0; i < iterations; i++) {
+		if (hdcp->stream_count == 0) {
+			pr_debug("No streams are currently registered\n");
+			return;
+		}
+
+		stream_id = hdcp->streams[i].stream_id;
+		virtual_channel = hdcp->streams[i].virtual_channel;
+
+		pr_debug("Closing stream %d, virtual channel %d\n",
+			stream_id, virtual_channel);
+
+		entry = sde_hdcp_2x_stream_present(hdcp, stream_id,
+			virtual_channel);
+
+		if (!entry) {
+			pr_err("Unable to find stream %d, virtual channel %d\n"
+				, stream_id, virtual_channel);
+			continue;
+		}
+
+		stream_entry = list_entry(entry, struct sde_hdcp_stream,
+			list);
+
+		rc = hdcp2_close_stream(hdcp->hdcp2_ctx,
+			stream_entry->stream_handle);
+		if (rc)
+			pr_err("Unable to close stream %d, virtual channel %d\n"
+				, stream_id, virtual_channel);
+		hdcp->stream_count--;
+		list_del(entry);
+		kzfree(stream_entry);
+		query_streams = true;
+	}
+
+	if (query_streams && hdcp->authenticated)
+		sde_hdcp_2x_query_stream(hdcp);
+}
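
Both paths above dedupe on the (stream_id, virtual_channel) pair and cap registrations at MAX_STREAM_COUNT. A simplified user-space model of that bookkeeping, using an array in place of the kernel list (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

#define MAX_STREAM_COUNT 2

struct stream { unsigned char id, vc; bool open; };

static struct stream streams[MAX_STREAM_COUNT];
static int stream_count;

static bool stream_present(unsigned char id, unsigned char vc)
{
	for (int i = 0; i < MAX_STREAM_COUNT; i++)
		if (streams[i].open && streams[i].id == id &&
		    streams[i].vc == vc)
			return true;
	return false;
}

static bool open_stream(unsigned char id, unsigned char vc)
{
	/* full or already open: skip, as the driver paths above do */
	if (stream_count == MAX_STREAM_COUNT || stream_present(id, vc))
		return false;
	for (int i = 0; i < MAX_STREAM_COUNT; i++) {
		if (!streams[i].open) {
			streams[i] = (struct stream){ id, vc, true };
			stream_count++;
			return true;
		}
	}
	return false;
}

int main(void)
{
	int a = open_stream(0, 0);
	int b = open_stream(0, 0);	/* duplicate: skipped */
	int c = open_stream(1, 1);

	printf("%d %d %d\n", a, b, c);	/* 1 0 1 */
	return 0;
}
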
+
 /** sde_hdcp_2x_wakeup() - wakeup the module to execute a requested command
  * @data: data required for executing corresponding command.
  *
@@ -651,6 +813,8 @@
 	hdcp->timeout_left = data->timeout;
 	hdcp->total_message_length = data->total_message_length;
 	hdcp->min_enc_level = data->min_enc_level;
+	hdcp->streams = data->streams;
+	hdcp->num_streams = data->num_streams;
 
 	if (!completion_done(&hdcp->response_completion))
 		complete_all(&hdcp->response_completion);
@@ -712,6 +876,9 @@
 		case HDCP_2X_CMD_STOP:
 			sde_hdcp_2x_clean(hdcp);
 			break;
+		case HDCP_2X_CMD_START_AUTH:
+			sde_hdcp_2x_start_auth(hdcp);
+			break;
 		case HDCP_2X_CMD_MSG_SEND_SUCCESS:
 			sde_hdcp_2x_msg_sent(hdcp);
 			break;
@@ -736,6 +903,12 @@
 			}
 			sde_hdcp_2x_query_stream(hdcp);
 			break;
+		case HDCP_2X_CMD_OPEN_STREAMS:
+			sde_hdcp_2x_open_stream(hdcp);
+			break;
+		case HDCP_2X_CMD_CLOSE_STREAMS:
+			sde_hdcp_2x_close_stream(hdcp);
+			break;
 		default:
 			break;
 		}
@@ -780,16 +953,14 @@
 		goto unlock;
 	}
 
+	INIT_LIST_HEAD(&hdcp->stream_handles);
 	hdcp->client_data = data->client_data;
 	hdcp->client_ops = data->client_ops;
-	hdcp->device_type = data->device_type;
-
-	hdcp->hdcp2_ctx = hdcp2_init(hdcp->device_type);
 
 	INIT_KFIFO(hdcp->cmd_q);
 
 	init_waitqueue_head(&hdcp->wait_q);
-	atomic_set(&hdcp->hdcp_off, 0);
+	atomic_set(&hdcp->hdcp_off, 1);
 
 	init_completion(&hdcp->response_completion);
 
@@ -814,6 +985,40 @@
 	return rc;
 }
 
+int sde_hdcp_2x_enable(void *data, enum sde_hdcp_2x_device_type device_type)
+{
+	int rc = 0;
+	struct sde_hdcp_2x_ctrl *hdcp = data;
+
+	if (!hdcp)
+		return -EINVAL;
+
+	if (hdcp->hdcp2_ctx) {
+		pr_debug("HDCP library context already acquired\n");
+		return 0;
+	}
+
+	hdcp->device_type = device_type;
+	hdcp->hdcp2_ctx = hdcp2_init(hdcp->device_type);
+	if (!hdcp->hdcp2_ctx) {
+		pr_err("Unable to acquire HDCP library handle\n");
+		return -ENOMEM;
+	}
+
+	return rc;
+}
+
+void sde_hdcp_2x_disable(void *data)
+{
+	struct sde_hdcp_2x_ctrl *hdcp = data;
+
+	if (!hdcp->hdcp2_ctx)
+		return;
+
+	hdcp2_deinit(hdcp->hdcp2_ctx);
+	hdcp->hdcp2_ctx = NULL;
+}
+
 void sde_hdcp_2x_deregister(void *data)
 {
 	struct sde_hdcp_2x_ctrl *hdcp = data;
@@ -821,7 +1026,7 @@
 	if (!hdcp)
 		return;
 
+	sde_hdcp_2x_disable(data);
 	kthread_stop(hdcp->thread);
-	hdcp2_deinit(hdcp->hdcp2_ctx);
 	kzfree(hdcp);
 }
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.h b/drivers/gpu/drm/msm/sde_hdcp_2x.h
index 47247e4..cfcd7ce 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.h
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.h
@@ -15,8 +15,9 @@
 /**
  * enum sde_hdcp_2x_wakeup_cmd - commands for interacting with HDCP driver
  * @HDCP_2X_CMD_INVALID:           initialization value
- * @HDCP_2X_CMD_START:             start authentication
- * @HDCP_2X_CMD_STOP:              stop authentication
+ * @HDCP_2X_CMD_START:             start HDCP driver
+ * @HDCP_2X_CMD_START_AUTH:        start authentication
+ * @HDCP_2X_CMD_STOP:              stop HDCP driver
  * @HDCP_2X_CMD_MSG_SEND_SUCCESS:  sending message to sink succeeded
  * @HDCP_2X_CMD_MSG_SEND_FAILED:   sending message to sink failed
  * @HDCP_2X_CMD_MSG_SEND_TIMEOUT:  sending message to sink timed out
@@ -26,10 +27,13 @@
  * @HDCP_2X_CMD_QUERY_STREAM_TYPE: start content stream processing
  * @HDCP_2X_CMD_LINK_FAILED:       link failure notification
  * @HDCP_2X_CMD_MIN_ENC_LEVEL:     trigger minimum encryption level change
+ * @HDCP_2X_CMD_OPEN_STREAMS:      open streams on virtual channels
+ * @HDCP_2X_CMD_CLOSE_STREAMS:     close streams on virtual channels
  */
 enum sde_hdcp_2x_wakeup_cmd {
 	HDCP_2X_CMD_INVALID,
 	HDCP_2X_CMD_START,
+	HDCP_2X_CMD_START_AUTH,
 	HDCP_2X_CMD_STOP,
 	HDCP_2X_CMD_MSG_SEND_SUCCESS,
 	HDCP_2X_CMD_MSG_SEND_FAILED,
@@ -40,6 +44,8 @@
 	HDCP_2X_CMD_QUERY_STREAM_TYPE,
 	HDCP_2X_CMD_LINK_FAILED,
 	HDCP_2X_CMD_MIN_ENC_LEVEL,
+	HDCP_2X_CMD_OPEN_STREAMS,
+	HDCP_2X_CMD_CLOSE_STREAMS,
 };
 
 /**
@@ -66,16 +72,19 @@
 
 enum sde_hdcp_2x_device_type {
 	HDCP_TXMTR_HDMI = 0x8001,
-	HDCP_TXMTR_DP = 0x8002
+	HDCP_TXMTR_DP = 0x8002,
+	HDCP_TXMTR_DP_MST = 0x8003
 };
 
 /**
  * struct sde_hdcp_2x_lib_wakeup_data - command and data send to HDCP driver
- * @cmd:       command type
- * @context:   void pointer to the HDCP driver instance
- * @buf:       message received from the sink
- * @buf_len:   length of message received from the sink
- * @timeout:   time out value for timed transactions
+ * @cmd:                       command type
+ * @context:                   void pointer to the HDCP driver instance
+ * @buf:                       message received from the sink
+ * @buf_len:                   length of message received from the sink
+ * @timeout:                   time out value for timed transactions
+ * @streams:                   list indicating which streams need adjustment
+ * @num_streams:               number of entries in streams
  */
 struct sde_hdcp_2x_wakeup_data {
 	enum sde_hdcp_2x_wakeup_cmd cmd;
@@ -83,6 +92,8 @@
 	uint32_t total_message_length;
 	uint32_t timeout;
 	u8 min_enc_level;
+	struct stream_info *streams;
+	u8 num_streams;
 };
 
 /**
@@ -151,6 +162,10 @@
 		return TO_STR(HDCP_2X_CMD_MSG_RECV_TIMEOUT);
 	case HDCP_2X_CMD_QUERY_STREAM_TYPE:
 		return TO_STR(HDCP_2X_CMD_QUERY_STREAM_TYPE);
+	case HDCP_2X_CMD_OPEN_STREAMS:
+		return TO_STR(HDCP_2X_CMD_OPEN_STREAMS);
+	case HDCP_2X_CMD_CLOSE_STREAMS:
+		return TO_STR(HDCP_2X_CMD_CLOSE_STREAMS);
 	default:
 		return "UNKNOWN";
 	}
@@ -190,12 +205,13 @@
 struct sde_hdcp_2x_register_data {
 	struct hdcp_transport_ops *client_ops;
 	struct sde_hdcp_2x_ops *ops;
-	enum sde_hdcp_2x_device_type device_type;
 	void *client_data;
 	void **hdcp_data;
 };
 
 /* functions for the HDCP 2.2 state machine module */
 int sde_hdcp_2x_register(struct sde_hdcp_2x_register_data *data);
+int sde_hdcp_2x_enable(void *data, enum sde_hdcp_2x_device_type device_type);
+void sde_hdcp_2x_disable(void *data);
 void sde_hdcp_2x_deregister(void *data);
 #endif
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index cada0fb..d14441c 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _SDE_POWER_HANDLE_H_
@@ -14,8 +14,8 @@
 #define SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA	0
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
-#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA	1800000000
-#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA	1800000000
+#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA	3000000000
+#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA	3000000000
 
 #include <linux/sde_io_util.h>
 #include <soc/qcom/cx_ipeak.h>
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index f5b674c..cf218c6 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -30,7 +30,14 @@
 
 #define RSC_MODE_INSTRUCTION_TIME	100
 #define RSC_MODE_THRESHOLD_OVERHEAD	2700
-#define MAX_MODE_0_ENTRY_EXIT_TIME	100
+
+/**
+ * rsc_min_threshold is set to MIN_THRESHOLD_OVERHEAD_TIME, which
+ * accounts for the back-off time plus the overhead from RSC/RSC_WRAPPER.
+ * The overhead buffer time must be greater than 14; as a margin,
+ * this value assumes 18.
+ */
+#define MIN_THRESHOLD_OVERHEAD_TIME	18
 
 #define DEFAULT_PANEL_FPS		60
 #define DEFAULT_PANEL_JITTER_NUMERATOR	2
@@ -87,7 +94,7 @@
 		pr_err("invalid rsc index\n");
 		return ERR_PTR(-EINVAL);
 	} else if (!rsc_prv_list[rsc_index]) {
-		pr_err("rsc not probed yet or not available\n");
+		pr_debug("rsc not probed yet or not available\n");
 		return NULL;
 	}
 
@@ -243,7 +250,7 @@
 		pr_err("invalid rsc index:%d\n", rsc_index);
 		return false;
 	} else if (!rsc_prv_list[rsc_index]) {
-		pr_err("rsc idx:%d not probed yet or not available\n",
+		pr_debug("rsc idx:%d not probed yet or not available\n",
 								rsc_index);
 		return false;
 	}
@@ -329,7 +336,7 @@
 }
 
 static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
-	struct sde_rsc_cmd_config *cmd_config)
+	struct sde_rsc_cmd_config *cmd_config, enum sde_rsc_state state)
 {
 	const u32 cxo_period_ns = 52;
 	u64 rsc_backoff_time_ns = rsc->backoff_time_ns;
@@ -380,7 +387,12 @@
 	line_time_ns = div_u64(line_time_ns, rsc->cmd_config.vtotal);
 	prefill_time_ns = line_time_ns * rsc->cmd_config.prefill_lines;
 
-	total = frame_time_ns - frame_jitter - prefill_time_ns;
+	/* only take jitter into account for CMD mode */
+	if (state == SDE_RSC_CMD_STATE)
+		total = frame_time_ns - frame_jitter - prefill_time_ns;
+	else
+		total = frame_time_ns - prefill_time_ns;
+
 	if (total < 0) {
 		pr_err("invalid total time period time:%llu jiter_time:%llu blanking time:%llu\n",
 			frame_time_ns, frame_jitter, prefill_time_ns);
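
The new state argument matters because only command-mode panels budget for jitter: the idle time available per frame is frame time minus prefill, minus the jitter margin for CMD only. A hedged arithmetic sketch with assumed 60 fps numbers:

#include <stdio.h>

/* Illustrative: idle budget per frame in ns. Jitter is subtracted
 * only for command-mode panels, matching the state check above. */
static long long idle_budget_ns(long long frame_ns, long long jitter_ns,
				long long prefill_ns, int is_cmd_mode)
{
	long long total = frame_ns - prefill_ns;

	if (is_cmd_mode)
		total -= jitter_ns;
	return total;
}

int main(void)
{
	/* 60 fps frame = 16.666 ms; assumed 2 ms prefill, 0.5 ms jitter */
	printf("cmd: %lld\n", idle_budget_ns(16666667, 500000, 2000000, 1));
	printf("vid: %lld\n", idle_budget_ns(16666667, 500000, 2000000, 0));
	return 0;
}
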
@@ -421,9 +433,9 @@
 	/* mode 2 is infinite */
 	rsc->timer_config.rsc_time_slot_2_ns = 0xFFFFFFFF;
 
-	rsc->timer_config.min_threshold_time_ns = MAX_MODE_0_ENTRY_EXIT_TIME;
+	rsc->timer_config.min_threshold_time_ns = MIN_THRESHOLD_OVERHEAD_TIME;
 	rsc->timer_config.bwi_threshold_time_ns =
-		rsc->single_tcs_execution_time;
+		rsc->timer_config.rsc_time_slot_0_ns;
 
 	/* timer update should be called with client call */
 	if (cmd_config && rsc->hw_ops.timer_update) {
@@ -461,7 +473,7 @@
 
 	/* update timers - might not be available at next switch */
 	if (config)
-		sde_rsc_timer_calculate(rsc, config);
+		sde_rsc_timer_calculate(rsc, config, SDE_RSC_CMD_STATE);
 
 	/**
 	 * rsc clients can still send config at any time. If a config is
@@ -601,7 +613,7 @@
 
 	/* update timers - might not be available at next switch */
 	if (config)
-		sde_rsc_timer_calculate(rsc, config);
+		sde_rsc_timer_calculate(rsc, config, SDE_RSC_VID_STATE);
 
 	/**
 	 * rsc clients can still send config at any time. If a config is
@@ -1458,14 +1470,19 @@
 	else
 		rsc->single_tcs_execution_time = SINGLE_TCS_EXECUTION_TIME_V1;
 
-	rsc->backoff_time_ns = rsc->single_tcs_execution_time
+	if (rsc->version == SDE_RSC_REV_3) {
+		rsc->time_slot_0_ns = rsc->single_tcs_execution_time
 					+ RSC_MODE_INSTRUCTION_TIME;
-
-	rsc->mode_threshold_time_ns = rsc->backoff_time_ns
-					+ RSC_MODE_THRESHOLD_OVERHEAD;
-
-	rsc->time_slot_0_ns = (rsc->single_tcs_execution_time * 2)
+		rsc->backoff_time_ns = RSC_MODE_INSTRUCTION_TIME;
+		rsc->mode_threshold_time_ns = rsc->time_slot_0_ns;
+	} else {
+		rsc->time_slot_0_ns = (rsc->single_tcs_execution_time * 2)
 					+ RSC_MODE_INSTRUCTION_TIME;
+		rsc->backoff_time_ns = rsc->single_tcs_execution_time
+						+ RSC_MODE_INSTRUCTION_TIME;
+		rsc->mode_threshold_time_ns = rsc->backoff_time_ns
+						+ RSC_MODE_THRESHOLD_OVERHEAD;
+	}
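
The v3 branch above drops the TCS execution time from the back-off and sizes time slot 0 for a single TCS run, where older revisions budget two runs plus a fixed threshold overhead. A worked sketch of both derivations (constants from this file; the TCS time is an assumed sample value):

#include <stdio.h>

#define RSC_MODE_INSTRUCTION_TIME	100
#define RSC_MODE_THRESHOLD_OVERHEAD	2700

int main(void)
{
	long long tcs = 671;	/* assumed single-TCS execution time, ns */
	long long slot0, backoff, mode_thresh;

	/* SDE_RSC_REV_3: slot 0 covers one TCS run */
	slot0 = tcs + RSC_MODE_INSTRUCTION_TIME;
	backoff = RSC_MODE_INSTRUCTION_TIME;
	mode_thresh = slot0;
	printf("v3:  slot0=%lld backoff=%lld thresh=%lld\n",
	       slot0, backoff, mode_thresh);

	/* earlier revisions: slot 0 covers two TCS runs */
	slot0 = tcs * 2 + RSC_MODE_INSTRUCTION_TIME;
	backoff = tcs + RSC_MODE_INSTRUCTION_TIME;
	mode_thresh = backoff + RSC_MODE_THRESHOLD_OVERHEAD;
	printf("pre: slot0=%lld backoff=%lld thresh=%lld\n",
	       slot0, backoff, mode_thresh);
	return 0;
}
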
 
 	ret = sde_power_resource_init(pdev, &rsc->phandle);
 	if (ret) {
@@ -1536,7 +1553,7 @@
 		goto sde_rsc_fail;
 	}
 
-	if (sde_rsc_timer_calculate(rsc, NULL))
+	if (sde_rsc_timer_calculate(rsc, NULL, SDE_RSC_IDLE_STATE))
 		goto sde_rsc_fail;
 
 	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 7cea16f..e2c3e9e 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[sde_rsc_hw:%s:%d]: " fmt, __func__, __LINE__
@@ -293,7 +293,7 @@
 	return 0;
 }
 
-int rsc_hw_timer_update(struct sde_rsc_priv *rsc)
+static int rsc_hw_timer_update(struct sde_rsc_priv *rsc)
 {
 	if (!rsc) {
 		pr_debug("invalid input param\n");
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.h b/drivers/gpu/drm/msm/sde_rsc_hw.h
index 7a4fc2f..9540fc5 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.h
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _SDE_RSC_HW_H_
@@ -102,8 +102,6 @@
 
 bool rsc_hw_is_amc_mode(struct sde_rsc_priv *rsc);
 
-int rsc_hw_timer_update(struct sde_rsc_priv *rsc);
-
 void rsc_hw_debug_dump(struct sde_rsc_priv *rsc, u32 mux_sel);
 
 int sde_rsc_debug_show(struct seq_file *s, struct sde_rsc_priv *rsc);
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw_v3.c b/drivers/gpu/drm/msm/sde_rsc_hw_v3.c
index fb0c0e1..77b931e 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw_v3.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw_v3.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[sde_rsc_hw:%s:%d]: " fmt, __func__, __LINE__
@@ -205,7 +205,8 @@
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE1,
 					0x80000000, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1,
-			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
+			rsc->timer_config.rsc_backoff_time_ns * 2,
+			rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1,
 			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
 
@@ -327,7 +328,15 @@
 	if (rsc->power_collapse_block)
 		return -EINVAL;
 
-	dss_reg_w(&rsc->wrapper_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
+	if (rsc->sw_fs_enabled) {
+		rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
+		if (rc) {
+			pr_err("vdd reg fast mode set failed rc:%d\n", rc);
+			return rc;
+		}
+	}
+
+	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
 						0x7, rsc->debug_mode);
 
 	for (i = 0; i <= MAX_MODE2_ENTRY_TRY; i++) {
@@ -407,10 +416,15 @@
 	case SDE_RSC_VID_STATE:
 		pr_debug("video mode handling\n");
 
+		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+							0x0, rsc->debug_mode);
+		wmb(); /* disable double buffer config before vsync select */
+
 		ctrl2_config = (rsc->vsync_source & 0x7) << 4;
 		ctrl2_config |= (BIT(0) | BIT(1) | BIT(3));
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL2,
 				ctrl2_config, rsc->debug_mode);
+		wmb(); /* select vsync before double buffer config enabled */
 
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
 						0x1, rsc->debug_mode);
@@ -511,7 +525,7 @@
 						0x1, rsc->debug_mode);
 
 	bw_ack = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_CTRL2,
-			rsc->debug_mode) & BIT(13);
+			rsc->debug_mode) & BIT(14);
 
 	/* check for sequence running status before exiting */
 	for (count = MAX_CHECK_LOOPS; count > 0 && !bw_ack; count--) {
@@ -520,7 +534,7 @@
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_BW_INDICATION,
 						bw_indication, rsc->debug_mode);
 		bw_ack = dss_reg_r(&rsc->wrapper_io,
-		       SDE_RSCC_WRAPPER_DEBUG_CTRL2, rsc->debug_mode) & BIT(13);
+		       SDE_RSCC_WRAPPER_DEBUG_CTRL2, rsc->debug_mode) & BIT(14);
 	}
 
 	if (!bw_ack)
@@ -529,6 +543,49 @@
 	return rc;
 }
 
+static int rsc_hw_timer_update_v3(struct sde_rsc_priv *rsc)
+{
+	if (!rsc) {
+		pr_debug("invalid input param\n");
+		return -EINVAL;
+	}
+
+	pr_debug("rsc hw timer update\n");
+
+	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0,
+		rsc->timer_config.rsc_time_slot_0_ns, rsc->debug_mode);
+	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
+		rsc->timer_config.rsc_time_slot_1_ns, rsc->debug_mode);
+	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_3_DRV0,
+		rsc->timer_config.rsc_time_slot_2_ns, rsc->debug_mode);
+
+	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE0,
+			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
+	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE0,
+			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
+
+	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1,
+			rsc->timer_config.rsc_backoff_time_ns * 2,
+			rsc->debug_mode);
+
+	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1,
+			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
+
+	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE2,
+			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
+
+	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_STATIC_WAKEUP_0,
+		rsc->timer_config.static_wakeup_time_ns, rsc->debug_mode);
+
+	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_RSCC_MODE_THRESHOLD,
+		rsc->timer_config.rsc_mode_threshold_time_ns, rsc->debug_mode);
+
+	/* make sure that hw timers are updated */
+	wmb();
+
+	return 0;
+}
+
 int sde_rsc_hw_register_v3(struct sde_rsc_priv *rsc)
 {
 	pr_debug("rsc hardware register v3\n");
@@ -536,10 +593,10 @@
 	rsc->hw_ops.init = rsc_hw_init_v3;
 	rsc->hw_ops.state_update = sde_rsc_state_update_v3;
 	rsc->hw_ops.bwi_status = rsc_hw_bwi_status_v3;
+	rsc->hw_ops.timer_update = rsc_hw_timer_update_v3;
 
 	rsc->hw_ops.tcs_wait = rsc_hw_tcs_wait;
 	rsc->hw_ops.tcs_use_ok = rsc_hw_tcs_use_ok;
-	rsc->hw_ops.timer_update = rsc_hw_timer_update;
 	rsc->hw_ops.is_amc_mode = rsc_hw_is_amc_mode;
 	rsc->hw_ops.hw_vsync = rsc_hw_vsync;
 	rsc->hw_ops.debug_show = sde_rsc_debug_show;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 2abcd7b..f889d41 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1224,8 +1224,16 @@
 static void
 nv50_mstm_init(struct nv50_mstm *mstm)
 {
-	if (mstm && mstm->mgr.mst_state)
-		drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+	int ret;
+
+	if (!mstm || !mstm->mgr.mst_state)
+		return;
+
+	ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+	if (ret == -1) {
+		drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+		drm_kms_helper_hotplug_event(mstm->mgr.dev);
+	}
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 816ccae..8675613 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -22,6 +22,7 @@
 #include <engine/falcon.h>
 
 #include <core/gpuobj.h>
+#include <subdev/mc.h>
 #include <subdev/timer.h>
 #include <engine/fifo.h>
 
@@ -107,8 +108,10 @@
 		}
 	}
 
-	nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
-	nvkm_wr32(device, base + 0x014, 0xffffffff);
+	if (nvkm_mc_enabled(device, engine->subdev.index)) {
+		nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+		nvkm_wr32(device, base + 0x014, 0xffffffff);
+	}
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 3695cde..07914e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -132,11 +132,12 @@
 			duty = nvkm_therm_update_linear(therm);
 			break;
 		case NVBIOS_THERM_FAN_OTHER:
-			if (therm->cstate)
+			if (therm->cstate) {
 				duty = therm->cstate;
-			else
+				poll = false;
+			} else {
 				duty = nvkm_therm_update_linear_fallback(therm);
-			poll = false;
+			}
 			break;
 		}
 		immd = false;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d587779..a97294a 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@
 	u16 data_offset, size;
 	u8 frev, crev;
 	struct ci_power_info *pi;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -5685,7 +5685,8 @@
 		return -ENOMEM;
 	rdev->pm.dpm.priv = pi;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		pi->sys_pcie_mask = 0;
 	} else {
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dec1e08..6a8fb6f 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -172,6 +172,7 @@
 	}
 
 	if (radeon_is_px(dev)) {
+		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
 		pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8fb60b3..0a785ef 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@
 	struct ni_power_info *ni_pi;
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -6911,7 +6911,8 @@
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		si_pi->sys_pcie_mask = 0;
 	} else {
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 3105965..5a48548 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -147,7 +147,7 @@
 }
 
 static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
-				       u8 *buff, u8 buff_size)
+				       u8 *buff, u16 buff_size)
 {
 	u32 i;
 	int ret;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index 79d00d8..01ff3c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -189,12 +189,14 @@
 int rockchip_drm_psr_register(struct drm_encoder *encoder,
 			int (*psr_set)(struct drm_encoder *, bool enable))
 {
-	struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+	struct rockchip_drm_private *drm_drv;
 	struct psr_drv *psr;
 
 	if (!encoder || !psr_set)
 		return -EINVAL;
 
+	drm_drv = encoder->dev->dev_private;
+
 	psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
 	if (!psr)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index d7950b5..e30b1f5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -717,17 +717,18 @@
 		remote = of_graph_get_remote_port_parent(ep);
 		if (!remote)
 			continue;
+		of_node_put(remote);
 
 		/* does this node match any registered engines? */
 		list_for_each_entry(frontend, &drv->frontend_list, list) {
 			if (remote == frontend->node) {
-				of_node_put(remote);
 				of_node_put(port);
+				of_node_put(ep);
 				return frontend;
 			}
 		}
 	}
-
+	of_node_put(port);
 	return ERR_PTR(-EINVAL);
 }
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 061d2e0..416da53 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -92,6 +92,8 @@
 	val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
 	val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
 	writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
+
+	clk_disable_unprepare(hdmi->tmds_clk);
 }
 
 static void sun4i_hdmi_enable(struct drm_encoder *encoder)
@@ -102,6 +104,8 @@
 
 	DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
 
+	clk_prepare_enable(hdmi->tmds_clk);
+
 	sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
 	val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
 	val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 3fb084f..8c31c9a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -672,6 +672,7 @@
 			return PTR_ERR(tcon->sclk0);
 		}
 	}
+	clk_prepare_enable(tcon->sclk0);
 
 	if (tcon->quirks->has_channel_1) {
 		tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -686,6 +687,7 @@
 
 static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
 {
+	clk_disable_unprepare(tcon->sclk0);
 	clk_disable_unprepare(tcon->clk);
 }
 
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index d5240b7..adcdf94 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -169,6 +169,13 @@
 	}
 
 	/*
+	 * At least on H6, the port-select and gate-source registers come
+	 * out of reset with bits set that may cause issues. Clear them here.
+	 */
+	writel(0, regs + TCON_TOP_PORT_SEL_REG);
+	writel(0, regs + TCON_TOP_GATE_SRC_REG);
+
+	/*
 	 * TCON TOP has two muxes, which select parent clock for each TCON TV
 	 * channel clock. Parent could be either TCON TV or TVE clock. For now
 	 * we leave this fixed to TCON TV, since TVE driver for R40 is not yet
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index f455f09..1b014d9 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -350,15 +350,10 @@
 	if (ret)
 		goto err;
 
-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		goto err_fb;
-
 	drm_kms_helper_poll_init(dev);
 
 	return 0;
-err_fb:
-	udl_fbdev_cleanup(dev);
+
 err:
 	if (udl->urbs.count)
 		udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 54d9651..a08766d 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -293,6 +293,7 @@
 	bo->resv = attach->dmabuf->resv;
 
 	bo->sgt = sgt;
+	obj->import_attach = attach;
 	v3d_bo_get_pages(bo);
 
 	v3d_mmu_insert_ptes(bo);
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 4db62c5..26470c7 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -71,10 +71,13 @@
 			   V3D_READ(v3d_hub_reg_defs[i].reg));
 	}
 
-	for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
-		seq_printf(m, "%s (0x%04x): 0x%08x\n",
-			   v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
-			   V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+	if (v3d->ver < 41) {
+		for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+			seq_printf(m, "%s (0x%04x): 0x%08x\n",
+				   v3d_gca_reg_defs[i].name,
+				   v3d_gca_reg_defs[i].reg,
+				   V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+		}
 	}
 
 	for (core = 0; core < v3d->cores; core++) {
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index a3275fa..ab39315 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -315,13 +315,16 @@
 			vc4_get_scaling_mode(vc4_state->src_h[1],
 					     vc4_state->crtc_h);
 
-		/* YUV conversion requires that horizontal scaling be enabled,
-		 * even on a plane that's otherwise 1:1. Looks like only PPF
-		 * works in that case, so let's pick that one.
+		/* YUV conversion requires that horizontal scaling be enabled
+		 * on the UV plane even if vc4_get_scaling_mode() returned
+		 * VC4_SCALING_NONE (which can happen when the down-scaling
+		 * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
+		 * case.
 		 */
-		if (vc4_state->is_unity)
-			vc4_state->x_scaling[0] = VC4_SCALING_PPF;
+		if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
+			vc4_state->x_scaling[1] = VC4_SCALING_PPF;
 	} else {
+		vc4_state->is_yuv = false;
 		vc4_state->x_scaling[1] = VC4_SCALING_NONE;
 		vc4_state->y_scaling[1] = VC4_SCALING_NONE;
 	}
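
The failure mode the comment describes is easy to reproduce on paper: the UV plane is stored at half the luma width, so a 2:1 plane-to-CRTC downscale makes the chroma ratio exactly 1:1 and the scaling-mode helper reports no scaling, yet YUV conversion still needs the horizontal scaler. A sketch of that decision (enum values illustrative):

#include <stdio.h>

enum scaling { SCALING_NONE, SCALING_PPF, SCALING_TPZ };

/* Illustrative stand-in for vc4_get_scaling_mode() */
static enum scaling get_scaling(unsigned int src, unsigned int dst)
{
	if (src == dst)
		return SCALING_NONE;
	return src < dst ? SCALING_PPF : SCALING_TPZ;
}

int main(void)
{
	unsigned int src_w = 1920, crtc_w = 960;	/* 0.5x downscale */
	enum scaling uv = get_scaling(src_w / 2, crtc_w);

	/* UV plane is half-width, so 960 -> 960 looks like NONE... */
	if (uv == SCALING_NONE)
		uv = SCALING_PPF;	/* ...but YUV still needs a scaler */
	printf("uv scaling mode: %d\n", uv);
	return 0;
}
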
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 0e5620f..6887db8 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -471,31 +471,31 @@
 	if (!vgem_device)
 		return -ENOMEM;
 
-	ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
-	if (ret)
-		goto out_free;
-
 	vgem_device->platform =
 		platform_device_register_simple("vgem", -1, NULL, 0);
 	if (IS_ERR(vgem_device->platform)) {
 		ret = PTR_ERR(vgem_device->platform);
-		goto out_fini;
+		goto out_free;
 	}
 
 	dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
 				     DMA_BIT_MASK(64));
+	ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
+			   &vgem_device->platform->dev);
+	if (ret)
+		goto out_unregister;
 
 	/* Final step: expose the device/driver to userspace */
 	ret  = drm_dev_register(&vgem_device->drm, 0);
 	if (ret)
-		goto out_unregister;
+		goto out_fini;
 
 	return 0;
 
-out_unregister:
-	platform_device_unregister(vgem_device->platform);
 out_fini:
 	drm_dev_fini(&vgem_device->drm);
+out_unregister:
+	platform_device_unregister(vgem_device->platform);
 out_free:
 	kfree(vgem_device);
 	return ret;
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 875fca6..1ea2dd3 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
 
 #include "vkms_drv.h"
 #include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 6e728b8..b1201c1 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -1,9 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
 
 #include <linux/module.h>
 #include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 07be29f..e018752 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
 #ifndef _VKMS_DRV_H_
 #define _VKMS_DRV_H_
 
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index c7e3836..ca4a74e 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
 
 #include <linux/shmem_fs.h>
 
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 901012c..5697148 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
 
 #include "vkms_drv.h"
 #include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 9f75b1e..ce043b7 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
 
 #include "vkms_drv.h"
 #include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index bb6dbbe..c72b942 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -627,13 +627,16 @@
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	int ret = 0;
 
-	if (intel_iommu_enabled &&
+	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+	if (dev_priv->map_mode != vmw_dma_phys &&
 	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
 		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
-		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
 	}
-	return 0;
+
+	return ret;
 }
 #else
 static int vmw_dma_masks(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f0ab6b2..c3e2022 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3843,7 +3843,7 @@
 		*p_fence = NULL;
 	}
 
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index f408196..91653ad 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -1524,7 +1524,7 @@
 EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
 
 /* Abort any active or pending conversions for this context */
-void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
 {
 	struct ipu_image_convert_chan *chan = ctx->chan;
 	struct ipu_image_convert_priv *priv = chan->priv;
@@ -1551,7 +1551,7 @@
 
 	need_abort = (run_count || active_run);
 
-	ctx->aborting = need_abort;
+	ctx->aborting = true;
 
 	spin_unlock_irqrestore(&chan->irqlock, flags);
 
@@ -1572,7 +1572,11 @@
 		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
 		force_abort(ctx);
 	}
+}
 
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+{
+	__ipu_image_convert_abort(ctx);
 	ctx->aborting = false;
 }
 EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
@@ -1586,7 +1590,7 @@
 	bool put_res;
 
 	/* make sure no runs are hanging around */
-	ipu_image_convert_abort(ctx);
+	__ipu_image_convert_abort(ctx);
 
 	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
 		chan->ic_task, ctx);
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 5c75a6a..3ddb7f2 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -58,6 +58,7 @@
 #define A6XX_CP_SQE_INSTR_BASE_LO        0x830
 #define A6XX_CP_SQE_INSTR_BASE_HI        0x831
 #define A6XX_CP_MISC_CNTL                0x840
+#define A6XX_CP_APRIV_CNTL               0x844
 #define A6XX_CP_ROQ_THRESHOLDS_1         0x8C1
 #define A6XX_CP_ROQ_THRESHOLDS_2         0x8C2
 #define A6XX_CP_MEM_POOL_SIZE            0x8C3
@@ -825,6 +826,7 @@
 
 #define A6XX_GBIF_PERF_PWR_CNT_EN         0x3cc0
 #define A6XX_GBIF_PERF_CNT_SEL            0x3cc2
+#define A6XX_GBIF_PERF_PWR_CNT_SEL        0x3cc3
 #define A6XX_GBIF_PERF_CNT_LOW0           0x3cc4
 #define A6XX_GBIF_PERF_CNT_LOW1           0x3cc5
 #define A6XX_GBIF_PERF_CNT_LOW2           0x3cc6
@@ -965,6 +967,7 @@
 #define A6XX_GMU_RPMH_HYST_CTRL			0x1F8E9
 #define A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE    0x1F8EC
 #define A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG      0x1F900
+#define A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP     0x1F901
 #define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE		0x1F9F0
 #define A6XX_GMU_LLM_GLM_SLEEP_CTRL		0x1F957
 #define A6XX_GMU_LLM_GLM_SLEEP_STATUS		0x1F958
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 4f6dda6..9cc556a 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -452,8 +452,10 @@
 		.major = 5,
 		.minor = 0,
 		.patchid = 0,
-		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU,
+		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
+			ADRENO_IOCOHERENT,
 		.sqefw_name = "a650_sqe.fw",
+		.zap_name = "a650_zap",
 		.gpudev = &adreno_a6xx_gpudev,
 		.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
 		.num_protected_regs = 0x30,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 182aff7..9aafd04 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -1229,6 +1229,24 @@
 		dev_warn(device->dev, "rscc ioremap failed\n");
 }
 
+static void adreno_isense_probe(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct resource *res;
+
+	res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
+			"isense_cntl");
+	if (res == NULL)
+		return;
+
+	adreno_dev->isense_base = res->start - device->reg_phys;
+	adreno_dev->isense_len = resource_size(res);
+	adreno_dev->isense_virt = devm_ioremap(device->dev, res->start,
+					adreno_dev->isense_len);
+	if (adreno_dev->isense_virt == NULL)
+		dev_warn(device->dev, "isense ioremap failed\n");
+}
+
 static void adreno_efuse_read_soc_hw_rev(struct adreno_device *adreno_dev)
 {
 	unsigned int val;
@@ -1355,6 +1373,8 @@
 	adreno_cx_misc_probe(device);
 
 	adreno_rscc_probe(device);
+
+	adreno_isense_probe(device);
 	/*
 	 * qcom,iommu-secure-id is used to identify MMUs that can handle secure
 	 * content but that is only part of the story - the GPU also has to be
@@ -2377,7 +2397,10 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	u64 vaddr;
 
-	vaddr = (ADRENO_GPUREV(adreno_dev) >= 500) ? ADRENO_UCHE_GMEM_BASE : 0;
+	if (ADRENO_GPUREV(adreno_dev) >= 500 && !(adreno_is_a650(adreno_dev)))
+		vaddr = ADRENO_UCHE_GMEM_BASE;
+	else
+		vaddr = 0;
 
 	return copy_prop(value, count, &vaddr, sizeof(vaddr));
 }
@@ -3244,6 +3267,25 @@
 	rmb();
 }
 
+void adreno_isense_regread(struct adreno_device *adreno_dev,
+	unsigned int offsetwords, unsigned int *value)
+{
+	unsigned int isense_offset;
+
+	isense_offset = (offsetwords << 2);
+	if (!adreno_dev->isense_virt ||
+		(isense_offset >= adreno_dev->isense_len))
+		return;
+
+	*value = __raw_readl(adreno_dev->isense_virt + isense_offset);
+
+	/*
+	 * Ensure this read finishes before the next one,
+	 * i.e. act like a normal readl().
+	 */
+	rmb();
+}
+
 void adreno_cx_misc_regwrite(struct adreno_device *adreno_dev,
 	unsigned int offsetwords, unsigned int value)
 {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 45fba57..b276b3d 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -445,6 +445,9 @@
  * @rscc_base: Base physical address of the RSCC
  * @rscc_len: Length of the RSCC register block
  * @rscc_virt: Pointer where RSCC block is mapped
+ * @isense_base: Base physical address of isense block
+ * @isense_len: Length of the isense register block
+ * @isense_virt: Pointer where isense block is mapped
  * @gpucore: Pointer to the adreno_gpu_core structure
  * @pfp_fw: Buffer which holds the pfp ucode
  * @pfp_fw_size: Size of pfp ucode buffer
@@ -530,6 +533,9 @@
 	unsigned long rscc_base;
 	unsigned int rscc_len;
 	void __iomem *rscc_virt;
+	unsigned long isense_base;
+	unsigned int isense_len;
+	void __iomem *isense_virt;
 	const struct adreno_gpu_core *gpucore;
 	struct adreno_firmware fw[2];
 	size_t gpmu_cmds_size;
@@ -1191,6 +1197,8 @@
 		unsigned int mask, unsigned int bits);
 void adreno_rscc_regread(struct adreno_device *adreno_dev,
 		unsigned int offsetwords, unsigned int *value);
+void adreno_isense_regread(struct adreno_device *adreno_dev,
+		unsigned int offsetwords, unsigned int *value);
 
 
 #define ADRENO_TARGET(_name, _id) \
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index e76a875..a1b6e22 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -283,7 +283,7 @@
 	{A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
 	{A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
 	{A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+	{A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
 	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
 	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
 	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
@@ -875,14 +875,21 @@
 	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
 	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
 
-	/* Program the GMEM VA range for the UCHE path */
-	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
-				ADRENO_UCHE_GMEM_BASE);
-	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
-	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
-				ADRENO_UCHE_GMEM_BASE +
-				adreno_dev->gmem_size - 1);
-	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);
+	/*
+	 * Program the GMEM VA range for the UCHE path.
+	 * From Kona onwards the GMEM VA address is 0, and
+	 * UCHE_GMEM_RANGE registers are no longer used, so we don't
+	 * have to program them.
+	 */
+	if (!adreno_is_a650(adreno_dev)) {
+		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
+					ADRENO_UCHE_GMEM_BASE);
+		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
+		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
+					ADRENO_UCHE_GMEM_BASE +
+					adreno_dev->gmem_size - 1);
+		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);
+	}
 
 	kgsl_regwrite(device, A6XX_UCHE_FILTER_CNTL, 0x804);
 	kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4);
@@ -1287,6 +1294,15 @@
 	if (ret)
 		return ret;
 
+	/*
+	 * Set the RBPRIVLEVEL bit in this register to determine
+	 * the privilege level of ucode executing packets in the RB,
+	 * so that the CP does not drop packets when coming out of
+	 * secure mode.
+	 */
+	if (adreno_is_a650(adreno_dev))
+		kgsl_regwrite(device, A6XX_CP_APRIV_CNTL, (1 << 2));
+
 	/* Clear the SQE_HALT to start the CP engine */
 	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);
 
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index cc2b7aa..8e0d04e 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _ADRENO_A6XX_H_
@@ -14,6 +14,7 @@
 #define CP_CLUSTER_GRAS		0x3
 #define CP_CLUSTER_SP_PS	0x4
 #define CP_CLUSTER_PS		0x5
+#define CP_CLUSTER_VPC_PS	0x6
 
 /**
  * struct a6xx_cp_preemption_record - CP context record for
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index 5f308c3..2dc05a2 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -325,6 +325,10 @@
 
 	kgsl_regwrite(device, A6XX_GMU_CX_GMU_WFI_CONFIG, 0x0);
 
+	/* Set the log wptr index */
+	gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP,
+			gmu->log_wptr_retention);
+
 	/* Bring GMU out of reset */
 	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0);
 	if (timed_poll_check(device,
@@ -423,6 +427,9 @@
 	/* Make sure M3 is in reset before going on */
 	wmb();
 
+	gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP,
+			&gmu->log_wptr_retention);
+
 	/* RSC sleep sequence is different on v1 */
 	if (adreno_is_a630v1(adreno_dev))
 		gmu_core_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 +
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 0b53b51..bbe6577 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/io.h>
@@ -35,7 +35,7 @@
 	0x8C02, 0x8C07, 0x8C11, 0x8C16, 0x8C20, 0x8C25,
 };
 
-static const unsigned int a6xx_ps_cluster[] = {
+static const unsigned int a6xx_vpc_ps_cluster[] = {
 	0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
 };
 
@@ -66,6 +66,67 @@
 	0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
 };
 
+static const unsigned int a650_rscc_registers[] = {
+	0x38000, 0x38034, 0x38036, 0x38036, 0x38040, 0x38042, 0x38080, 0x38084,
+	0x38089, 0x3808C, 0x38091, 0x38094, 0x38099, 0x3809C, 0x380A1, 0x380A4,
+	0x380A9, 0x380AC, 0x38100, 0x38102, 0x38104, 0x38107, 0x38114, 0x38119,
+	0x38124, 0x3812E, 0x38180, 0x38197, 0x38340, 0x38341, 0x38344, 0x38347,
+	0x3834C, 0x3834F, 0x38351, 0x38354, 0x38356, 0x38359, 0x3835B, 0x3835E,
+	0x38360, 0x38363, 0x38365, 0x38368, 0x3836A, 0x3836D, 0x3836F, 0x38372,
+	0x383EC, 0x383EF, 0x383F4, 0x383F7, 0x383F9, 0x383FC, 0x383FE, 0x38401,
+	0x38403, 0x38406, 0x38408, 0x3840B, 0x3840D, 0x38410, 0x38412, 0x38415,
+	0x38417, 0x3841A, 0x38494, 0x38497, 0x3849C, 0x3849F, 0x384A1, 0x384A4,
+	0x384A6, 0x384A9, 0x384AB, 0x384AE, 0x384B0, 0x384B3, 0x384B5, 0x384B8,
+	0x384BA, 0x384BD, 0x384BF, 0x384C2, 0x3853C, 0x3853F, 0x38544, 0x38547,
+	0x38549, 0x3854C, 0x3854E, 0x38551, 0x38553, 0x38556, 0x38558, 0x3855B,
+	0x3855D, 0x38560, 0x38562, 0x38565, 0x38567, 0x3856A, 0x385E4, 0x385E7,
+	0x385EC, 0x385EF, 0x385F1, 0x385F4, 0x385F6, 0x385F9, 0x385FB, 0x385FE,
+	0x38600, 0x38603, 0x38605, 0x38608, 0x3860A, 0x3860D, 0x3860F, 0x38612,
+	0x3868C, 0x3868F, 0x38694, 0x38697, 0x38699, 0x3869C, 0x3869E, 0x386A1,
+	0x386A3, 0x386A6, 0x386A8, 0x386AB, 0x386AD, 0x386B0, 0x386B2, 0x386B5,
+	0x386B7, 0x386BA, 0x38734, 0x38737, 0x3873C, 0x3873F, 0x38741, 0x38744,
+	0x38746, 0x38749, 0x3874B, 0x3874E, 0x38750, 0x38753, 0x38755, 0x38758,
+	0x3875A, 0x3875D, 0x3875F, 0x38762, 0x387DC, 0x387DF, 0x387E4, 0x387E7,
+	0x387E9, 0x387EC, 0x387EE, 0x387F1, 0x387F3, 0x387F6, 0x387F8, 0x387FB,
+	0x387FD, 0x38800, 0x38802, 0x38805, 0x38807, 0x3880A, 0x38884, 0x38887,
+	0x3888C, 0x3888F, 0x38891, 0x38894, 0x38896, 0x38899, 0x3889B, 0x3889E,
+	0x388A0, 0x388A3, 0x388A5, 0x388A8, 0x388AA, 0x388AD, 0x388AF, 0x388B2,
+	0x3892C, 0x3892F, 0x38934, 0x38937, 0x38939, 0x3893C, 0x3893E, 0x38941,
+	0x38943, 0x38946, 0x38948, 0x3894B, 0x3894D, 0x38950, 0x38952, 0x38955,
+	0x38957, 0x3895A, 0x38B50, 0x38B51, 0x38B53, 0x38B55, 0x38B5A, 0x38B5A,
+	0x38B5F, 0x38B5F, 0x38B64, 0x38B64, 0x38B69, 0x38B69, 0x38B6E, 0x38B6E,
+	0x38B73, 0x38B73, 0x38BF8, 0x38BF8, 0x38BFD, 0x38BFD, 0x38C02, 0x38C02,
+	0x38C07, 0x38C07, 0x38C0C, 0x38C0C, 0x38C11, 0x38C11, 0x38C16, 0x38C16,
+	0x38C1B, 0x38C1B, 0x38CA0, 0x38CA0, 0x38CA5, 0x38CA5, 0x38CAA, 0x38CAA,
+	0x38CAF, 0x38CAF, 0x38CB4, 0x38CB4, 0x38CB9, 0x38CB9, 0x38CBE, 0x38CBE,
+	0x38CC3, 0x38CC3, 0x38D48, 0x38D48, 0x38D4D, 0x38D4D, 0x38D52, 0x38D52,
+	0x38D57, 0x38D57, 0x38D5C, 0x38D5C, 0x38D61, 0x38D61, 0x38D66, 0x38D66,
+	0x38D6B, 0x38D6B, 0x38DF0, 0x38DF0, 0x38DF5, 0x38DF5, 0x38DFA, 0x38DFA,
+	0x38DFF, 0x38DFF, 0x38E04, 0x38E04, 0x38E09, 0x38E09, 0x38E0E, 0x38E0E,
+	0x38E13, 0x38E13, 0x38E98, 0x38E98, 0x38E9D, 0x38E9D, 0x38EA2, 0x38EA2,
+	0x38EA7, 0x38EA7, 0x38EAC, 0x38EAC, 0x38EB1, 0x38EB1, 0x38EB6, 0x38EB6,
+	0x38EBB, 0x38EBB, 0x38F40, 0x38F40, 0x38F45, 0x38F45, 0x38F4A, 0x38F4A,
+	0x38F4F, 0x38F4F, 0x38F54, 0x38F54, 0x38F59, 0x38F59, 0x38F5E, 0x38F5E,
+	0x38F63, 0x38F63, 0x38FE8, 0x38FE8, 0x38FED, 0x38FED, 0x38FF2, 0x38FF2,
+	0x38FF7, 0x38FF7, 0x38FFC, 0x38FFC, 0x39001, 0x39001, 0x39006, 0x39006,
+	0x3900B, 0x3900B, 0x39090, 0x39090, 0x39095, 0x39095, 0x3909A, 0x3909A,
+	0x3909F, 0x3909F, 0x390A4, 0x390A4, 0x390A9, 0x390A9, 0x390AE, 0x390AE,
+	0x390B3, 0x390B3, 0x39138, 0x39138, 0x3913D, 0x3913D, 0x39142, 0x39142,
+	0x39147, 0x39147, 0x3914C, 0x3914C, 0x39151, 0x39151, 0x39156, 0x39156,
+	0x3915B, 0x3915B,
+};
+
+static const unsigned int a650_isense_registers[] = {
+	0x22C00, 0x22C19, 0x22C26, 0x22C2D, 0x22C2F, 0x22C36, 0x22C40, 0x22C44,
+	0x22C50, 0x22C57, 0x22C60, 0x22C67, 0x22C80, 0x22C87, 0x22D25, 0x22D2A,
+	0x22D2C, 0x22D32, 0x22D3E, 0x22D3F, 0x22D4E, 0x22D55, 0x22D58, 0x22D60,
+	0x22D64, 0x22D64, 0x22D66, 0x22D66, 0x22D68, 0x22D6B, 0x22D6E, 0x22D76,
+	0x22D78, 0x22D78, 0x22D80, 0x22D87, 0x22D90, 0x22D97, 0x22DA0, 0x22DA0,
+	0x22DB0, 0x22DB7, 0x22DC0, 0x22DC2, 0x22DC4, 0x22DE3, 0x2301A, 0x2301A,
+	0x2301D, 0x2302A, 0x23120, 0x23121, 0x23133, 0x23133, 0x23156, 0x23157,
+	0x23165, 0x23165, 0x2316D, 0x2316D, 0x23180, 0x23191,
+};
+
 static const struct sel_reg {
 	unsigned int host_reg;
 	unsigned int cd_reg;
@@ -95,7 +156,7 @@
 		&_a6xx_rb_rac_aperture },
 	{ CP_CLUSTER_PS, a6xx_ps_cluster_rbp, ARRAY_SIZE(a6xx_ps_cluster_rbp)/2,
 		&_a6xx_rb_rbp_aperture },
-	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2,
+	{ CP_CLUSTER_PS, a6xx_vpc_ps_cluster, ARRAY_SIZE(a6xx_vpc_ps_cluster)/2,
 		NULL },
 	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2,
 		NULL },
@@ -144,7 +205,7 @@
 
 static const unsigned int a6xx_sp_ps_sp_cluster[] = {
 	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
-	0xAA00, 0xAA00, 0xAA30, 0xAA31,
+	0xAA00, 0xAA00, 0xAA30, 0xAA31, 0xAAF2, 0xAAF2,
 };
 
 static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
@@ -354,19 +415,32 @@
 	A6XX_DBGBUS_HLSQ_SPTP    = 0x1f,
 	A6XX_DBGBUS_RB_0         = 0x20,
 	A6XX_DBGBUS_RB_1         = 0x21,
+	A6XX_DBGBUS_RB_2         = 0x22,
 	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
 	A6XX_DBGBUS_CCU_0        = 0x28,
 	A6XX_DBGBUS_CCU_1        = 0x29,
+	A6XX_DBGBUS_CCU_2        = 0x2a,
 	A6XX_DBGBUS_VFD_0        = 0x38,
 	A6XX_DBGBUS_VFD_1        = 0x39,
 	A6XX_DBGBUS_VFD_2        = 0x3a,
 	A6XX_DBGBUS_VFD_3        = 0x3b,
+	A6XX_DBGBUS_VFD_4        = 0x3c,
+	A6XX_DBGBUS_VFD_5        = 0x3d,
 	A6XX_DBGBUS_SP_0         = 0x40,
 	A6XX_DBGBUS_SP_1         = 0x41,
+	A6XX_DBGBUS_SP_2         = 0x42,
 	A6XX_DBGBUS_TPL1_0       = 0x48,
 	A6XX_DBGBUS_TPL1_1       = 0x49,
 	A6XX_DBGBUS_TPL1_2       = 0x4a,
 	A6XX_DBGBUS_TPL1_3       = 0x4b,
+	A6XX_DBGBUS_TPL1_4       = 0x4c,
+	A6XX_DBGBUS_TPL1_5       = 0x4d,
+	A6XX_DBGBUS_SPTP_0       = 0x58,
+	A6XX_DBGBUS_SPTP_1       = 0x59,
+	A6XX_DBGBUS_SPTP_2       = 0x5a,
+	A6XX_DBGBUS_SPTP_3       = 0x5b,
+	A6XX_DBGBUS_SPTP_4       = 0x5c,
+	A6XX_DBGBUS_SPTP_5       = 0x5d,
 };
 
 static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
@@ -420,6 +494,22 @@
 	{ A6XX_DBGBUS_CX, 0x100, },
 };
 
+static const struct adreno_debugbus_block a650_dbgc_debugbus_blocks[] = {
+	{ A6XX_DBGBUS_RB_2, 0x100, },
+	{ A6XX_DBGBUS_CCU_2, 0x100, },
+	{ A6XX_DBGBUS_VFD_4, 0x100, },
+	{ A6XX_DBGBUS_VFD_5, 0x100, },
+	{ A6XX_DBGBUS_SP_2, 0x100, },
+	{ A6XX_DBGBUS_TPL1_4, 0x100, },
+	{ A6XX_DBGBUS_TPL1_5, 0x100, },
+	{ A6XX_DBGBUS_SPTP_0, 0x100, },
+	{ A6XX_DBGBUS_SPTP_1, 0x100, },
+	{ A6XX_DBGBUS_SPTP_2, 0x100, },
+	{ A6XX_DBGBUS_SPTP_3, 0x100, },
+	{ A6XX_DBGBUS_SPTP_4, 0x100, },
+	{ A6XX_DBGBUS_SPTP_5, 0x100, },
+};
+
 #define A6XX_NUM_SHADER_BANKS 3
 #define A6XX_SHADER_STATETYPE_SHIFT 8
 
@@ -1381,6 +1471,16 @@
 			snapshot, a6xx_snapshot_dbgc_debugbus_block,
 			(void *) &a6xx_dbgc_debugbus_blocks[i]);
 	}
+
+	if (adreno_is_a650(adreno_dev)) {
+		for (i = 0; i < ARRAY_SIZE(a650_dbgc_debugbus_blocks); i++) {
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+				snapshot, a6xx_snapshot_dbgc_debugbus_block,
+				(void *) &a650_dbgc_debugbus_blocks[i]);
+		}
+	}
+
 	/*
 	 * GBIF has the same debugbus as other GPU blocks, hence fall back
 	 * to the default path if the GPU uses GBIF.
@@ -1489,6 +1589,91 @@
 	crash_dump_valid = true;
 }
 
+size_t a6xx_snapshot_rscc_registers(struct kgsl_device *device, u8 *buf,
+	size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+	struct kgsl_snapshot_registers *regs = priv;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	int count = 0, j, k;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	/* Figure out how many registers we are going to dump */
+	for (j = 0; j < regs->count; j++) {
+		int start = regs->regs[j * 2];
+		int end = regs->regs[j * 2 + 1];
+
+		count += (end - start + 1);
+	}
+
+	if (remain < (count * 8) + sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "RSCC REGISTERS");
+		return 0;
+	}
+
+	for (j = 0; j < regs->count; j++) {
+		unsigned int start = regs->regs[j * 2];
+		unsigned int end = regs->regs[j * 2 + 1];
+
+		for (k = start; k <= end; k++) {
+			unsigned int val;
+
+			adreno_rscc_regread(adreno_dev,
+				k - (adreno_dev->rscc_base >> 2), &val);
+			*data++ = k;
+			*data++ = val;
+		}
+	}
+
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+
+size_t a6xx_snapshot_isense_registers(struct kgsl_device *device, u8 *buf,
+	size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+	struct kgsl_snapshot_registers *regs = priv;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	int count = 0, j, k;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	/* Figure out how many registers we are going to dump */
+
+	for (j = 0; j < regs->count; j++) {
+		int start = regs->regs[j * 2];
+		int end = regs->regs[j * 2 + 1];
+
+		count += (end - start + 1);
+	}
+
+	if (remain < (count * 8) + sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "ISENSE REGISTERS");
+		return 0;
+	}
+
+	for (j = 0; j < regs->count; j++) {
+		unsigned int start = regs->regs[j * 2];
+		unsigned int end = regs->regs[j * 2 + 1];
+
+		for (k = start; k <= end; k++) {
+			unsigned int val;
+
+			adreno_isense_regread(adreno_dev,
+				k - (adreno_dev->isense_base >> 2), &val);
+			*data++ = k;
+			*data++ = val;
+		}
+	}
+
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+
 /*
  * a6xx_snapshot() - A6XX GPU snapshot function
  * @adreno_dev: Device being snapshotted
@@ -1503,7 +1688,7 @@
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	bool sptprac_on;
-	unsigned int i, roq_size;
+	unsigned int i, roq_size, ucode_dbg_size;
 
 	/* GMU TCM data dumped through AHB */
 	gmu_core_dev_snapshot(device, snapshot);
@@ -1517,6 +1702,32 @@
 	 */
 	a6xx_snapshot_debugbus(adreno_dev, snapshot);
 
+	/* RSCC registers are on cx */
+	if (adreno_is_a650(adreno_dev)) {
+		struct kgsl_snapshot_registers r;
+
+		r.regs = a650_rscc_registers;
+		r.count = ARRAY_SIZE(a650_rscc_registers) / 2;
+
+		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+			snapshot, a6xx_snapshot_rscc_registers, &r);
+
+		r.regs = a650_isense_registers;
+		r.count = ARRAY_SIZE(a650_isense_registers) / 2;
+
+		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+			snapshot, a6xx_snapshot_isense_registers, &r);
+	} else if (adreno_is_a615_family(adreno_dev) ||
+			adreno_is_a630(adreno_dev)) {
+		adreno_snapshot_registers(device, snapshot,
+			a630_rscc_snapshot_registers,
+			ARRAY_SIZE(a630_rscc_snapshot_registers) / 2);
+	} else if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev)) {
+		adreno_snapshot_registers(device, snapshot,
+			a6xx_rscc_snapshot_registers,
+			ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2);
+	}
+
 	sptprac_on = gpudev->sptprac_is_on(adreno_dev);
 
 	if (!gmu_core_dev_gx_is_on(device))
@@ -1545,15 +1756,6 @@
 			snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]);
 	}
 
-	if (adreno_is_a615_family(adreno_dev) || adreno_is_a630(adreno_dev))
-		adreno_snapshot_registers(device, snapshot,
-			a630_rscc_snapshot_registers,
-			ARRAY_SIZE(a630_rscc_snapshot_registers) / 2);
-	else if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev))
-		adreno_snapshot_registers(device, snapshot,
-			a6xx_rscc_snapshot_registers,
-			ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2);
-
 	/* CP_SQE indexed registers */
 	kgsl_snapshot_indexed_registers(device, snapshot,
 		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA, 0, 0x33);
@@ -1563,10 +1765,12 @@
 		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
 		0, 0x100);
 
+	ucode_dbg_size = adreno_is_a650(adreno_dev) ? 0x7000 : 0x6000;
+
 	 /* SQE_UCODE Cache */
 	kgsl_snapshot_indexed_registers(device, snapshot,
 		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
-		0, 0x6000);
+		0, ucode_dbg_size);
 
 	/*
 	 * The CP ROQ dump unit is 4 dwords. The number of units is stored
@@ -1598,7 +1802,8 @@
 
 }
 
-static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
+static int _a6xx_crashdump_init_mvc(struct adreno_device *adreno_dev,
+	uint64_t *ptr, uint64_t *offset)
 {
 	int qwords = 0;
 	unsigned int i, j, k;
@@ -1607,6 +1812,11 @@
 	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
 		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
 
+		/* The VPC registers are driven by the VPC_PS cluster on a650 */
+		if (adreno_is_a650(adreno_dev) &&
+			(cluster->regs == a6xx_vpc_ps_cluster))
+			cluster->id = CP_CLUSTER_VPC_PS;
+
 		if (cluster->sel) {
 			ptr[qwords++] = cluster->sel->val;
 			ptr[qwords++] = ((uint64_t)cluster->sel->cd_reg << 44) |
@@ -1902,7 +2112,7 @@
 	}
 
 	/* Program the capture script for the MVC registers */
-	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
+	ptr += _a6xx_crashdump_init_mvc(adreno_dev, ptr, &offset);
 
 	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
 
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 72916a8..9dab3b6 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -29,10 +29,15 @@
 /* offset of clear register from the power enable register for GBIF */
 #define GBIF_PWR_CLR_REG_EN_OFF    1
 
+/* offset of select register from the power enable register for GBIF */
+#define GBIF_PWR_SEL_REG_EN_OFF  3
+
 /* byte mask for a GBIF perf counter's countable select field */
-#define GBIF_PERF_RMW_MASK   0xFF
+#define GBIF_PERF_SEL_RMW_MASK   0xFF
 /* byte mask for a GBIF power counter's countable select field */
-#define GBIF_PWR_RMW_MASK    0x10000
+#define GBIF_PWR_SEL_RMW_MASK    0xFF
+/* single-bit mask for a GBIF power counter's enable/clear field */
+#define GBIF_PWR_EN_CLR_RMW_MASK 0x10000
 
 /* offset of clear register from the enable register */
 #define VBIF2_PERF_PWR_CLR_REG_EN_OFF 8
@@ -635,7 +640,7 @@
 			perfctr_mask, 0);
 		/* select the desired countable */
 		kgsl_regrmw(device, reg->select,
-			GBIF_PERF_RMW_MASK << shift, countable << shift);
+			GBIF_PERF_SEL_RMW_MASK << shift, countable << shift);
 		/* enable counter */
 		kgsl_regrmw(device, reg->select - GBIF_PERF_EN_REG_SEL_OFF,
 			perfctr_mask, perfctr_mask);
@@ -659,7 +664,8 @@
 }
 
 static void _perfcounter_enable_vbif_pwr(struct adreno_device *adreno_dev,
-		struct adreno_perfcounters *counters, unsigned int counter)
+		struct adreno_perfcounters *counters, unsigned int counter,
+		unsigned int countable)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_perfcount_register *reg;
@@ -667,7 +673,8 @@
 	reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs[counter];
 
 	if (adreno_has_gbif(adreno_dev)) {
-		unsigned int perfctr_mask = GBIF_PWR_RMW_MASK << counter;
+		unsigned int shift = counter << 3;
+		unsigned int perfctr_mask = GBIF_PWR_EN_CLR_RMW_MASK << counter;
 		/*
 		 * Write 1, followed by 0, to the CLR register to
 		 * clear the counter
@@ -676,6 +683,9 @@
 			perfctr_mask, perfctr_mask);
 		kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
 			perfctr_mask, 0);
+		/* select the desired countable */
+		kgsl_regrmw(device, reg->select + GBIF_PWR_SEL_REG_EN_OFF,
+			GBIF_PWR_SEL_RMW_MASK << shift, countable << shift);
 		/* Enable the counter */
 		kgsl_regrmw(device, reg->select, perfctr_mask, perfctr_mask);
 	} else {
@@ -886,7 +896,8 @@
 							countable);
 		break;
 	case KGSL_PERFCOUNTER_GROUP_VBIF_PWR:
-		_perfcounter_enable_vbif_pwr(adreno_dev, counters, counter);
+		_perfcounter_enable_vbif_pwr(adreno_dev, counters, counter,
+							countable);
 		break;
 	case KGSL_PERFCOUNTER_GROUP_SP_PWR:
 	case KGSL_PERFCOUNTER_GROUP_TP_PWR:
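
The GBIF power-counter fix above programs the countable through byte-wide
select fields, one byte per counter (shift = counter << 3), while enable and
clear still use one bit per counter. It relies on kgsl_regrmw() touching only
the masked bits; a minimal sketch of that read-modify-write semantic, assuming
kgsl_regread()/kgsl_regwrite() behave as their names suggest:

static void example_regrmw(struct kgsl_device *device,
		unsigned int offsetwords, unsigned int mask, unsigned int bits)
{
	unsigned int val;

	kgsl_regread(device, offsetwords, &val);
	val &= ~mask;		/* clear only the field being updated */
	val |= (bits & mask);	/* install the new field value */
	kgsl_regwrite(device, offsetwords, val);
}

For counter 1 and countable 0x2d this yields mask 0xFF00 and bits 0x2d00,
leaving the other counters' select bytes untouched.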
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 37e6f5f..ffb52fc 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -22,9 +22,6 @@
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "kgsl."
 
-#define GMU_CONTEXT_USER		0
-#define GMU_CONTEXT_KERNEL		1
-
 #define GMU_CM3_CFG_NONMASKINTR_SHIFT    9
 
 struct gmu_iommu_context {
@@ -363,11 +360,11 @@
 }
 
 /*
- * gmu_kmem_close() - free all kernel memory allocated for GMU and detach GMU
+ * gmu_memory_close() - free all memory allocated for GMU and detach GMU
  * from IOMMU context banks.
  * @gmu: Pointer to GMU device
  */
-static void gmu_kmem_close(struct gmu_device *gmu)
+static void gmu_memory_close(struct gmu_device *gmu)
 {
 	int i;
 	struct gmu_memdesc *md;
@@ -395,19 +392,14 @@
 		clear_bit(i, &gmu->kmem_bitmap);
 	}
 
-	/* Detach the device from SMMU context bank */
-	iommu_detach_device(ctx->domain, ctx->dev);
+	for (i = 0; i < ARRAY_SIZE(gmu_ctx); i++) {
+		ctx = &gmu_ctx[i];
 
-	/* free kernel mem context */
-	iommu_domain_free(ctx->domain);
-}
-
-static void gmu_memory_close(struct gmu_device *gmu)
-{
-	gmu_kmem_close(gmu);
-	/* Free user memory context */
-	iommu_domain_free(gmu_ctx[GMU_CONTEXT_USER].domain);
-
+		if (ctx->domain) {
+			iommu_detach_device(ctx->domain, ctx->dev);
+			iommu_domain_free(ctx->domain);
+		}
+	}
 }
 
 static enum gmu_mem_type gmu_get_blk_memtype(struct gmu_device *gmu,
@@ -459,38 +451,31 @@
 {
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	int ret;
 
 	/* Allocates & maps memory for HFI */
-	gmu->hfi_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
-			HFIMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
-	if (IS_ERR(gmu->hfi_mem)) {
-		ret = PTR_ERR(gmu->hfi_mem);
-		goto err_ret;
-	}
+	if (IS_ERR_OR_NULL(gmu->hfi_mem))
+		gmu->hfi_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
+				HFIMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
+	if (IS_ERR(gmu->hfi_mem))
+		return PTR_ERR(gmu->hfi_mem);
 
 	/* Allocates & maps GMU crash dump memory */
 	if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) {
-		gmu->dump_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
-				DUMPMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
-		if (IS_ERR(gmu->dump_mem)) {
-			ret = PTR_ERR(gmu->dump_mem);
-			goto err_ret;
-		}
+		if (IS_ERR_OR_NULL(gmu->dump_mem))
+			gmu->dump_mem = allocate_gmu_kmem(gmu,
+					GMU_NONCACHED_KERNEL, 0,
+					DUMPMEM_SIZE,
+					(IOMMU_READ | IOMMU_WRITE));
+		if (IS_ERR(gmu->dump_mem))
+			return PTR_ERR(gmu->dump_mem);
 	}
 
 	/* GMU master log */
-	gmu->gmu_log = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
-			LOGMEM_SIZE, (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
-	if (IS_ERR(gmu->gmu_log)) {
-		ret = PTR_ERR(gmu->gmu_log);
-		goto err_ret;
-	}
-
-	return 0;
-err_ret:
-	gmu_memory_close(gmu);
-	return ret;
+	if (IS_ERR_OR_NULL(gmu->gmu_log))
+		gmu->gmu_log = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
+				LOGMEM_SIZE,
+				(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
+	return PTR_ERR_OR_ZERO(gmu->gmu_log);
 }
 
 /*
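
gmu_memory_probe() above is now idempotent: each block is allocated only when
an earlier probe did not already leave a valid descriptor behind, and a
partial failure no longer tears everything down on the spot (the caller's
error path ends in gmu_memory_close(), which frees whatever did get mapped).
The shape of the idiom, reduced to a self-contained sketch where the
hypothetical alloc() stands in for allocate_gmu_kmem():

#include <linux/err.h>

static int probe_block_once(void **blk, void *(*alloc)(void))
{
	/* Allocate only if an earlier probe did not already succeed */
	if (IS_ERR_OR_NULL(*blk))
		*blk = alloc();

	/* 0 on success, or the errno encoded in the ERR_PTR */
	return PTR_ERR_OR_ZERO(*blk);
}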
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index e5845b7..e57a844 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -93,6 +93,11 @@
 	GMU_MEM_TYPE_MAX,
 };
 
+enum gmu_context_index {
+	GMU_CONTEXT_USER = 0,
+	GMU_CONTEXT_KERNEL,
+};
+
 /**
  * struct gmu_memdesc - Gmu shared memory object descriptor
  * @hostptr: Kernel virtual address
@@ -108,7 +113,7 @@
 	phys_addr_t physaddr;
 	uint64_t size;
 	enum gmu_mem_type mem_type;
-	uint32_t ctx_idx;
+	enum gmu_context_index ctx_idx;
 };
 
 struct gmu_bw_votes {
@@ -172,6 +177,7 @@
  * @idle_level: Minimal GPU idle power level
  * @fault_count: GMU fault count
  * @mailbox: Messages to AOP for ACD enable/disable go through this
+ * @log_wptr_retention: Log wptr offset saved across slumber
  */
 struct gmu_device {
 	struct {
@@ -214,6 +220,7 @@
 	struct gmu_memdesc kmem_entries[GMU_KERNEL_ENTRIES];
 	unsigned long kmem_bitmap;
 	const struct gmu_vma_entry *vma;
+	unsigned int log_wptr_retention;
 };
 
 struct gmu_memdesc *gmu_get_memdesc(struct gmu_device *gmu,
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 7ea2f29..90f49cf 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -833,7 +833,7 @@
 	adreno_read_gmureg(ADRENO_DEVICE(device),
 			ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
 	adreno_write_gmureg(ADRENO_DEVICE(device),
-			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);
+			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, HFI_IRQ_MASK);
 
 	if (status & HFI_IRQ_DBGQ_MASK)
 		tasklet_hi_schedule(&hfi->tasklet);
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 47a8dbf..799c103 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/export.h>
@@ -304,12 +304,8 @@
 
 	entry = kgsl_sharedmem_find(process, gpuaddr);
 
-	if (entry == NULL) {
-		dev_err(snapshot->device->dev,
-			"snapshot: unable to find GPU buffer 0x%016llx\n",
-			gpuaddr);
+	if (entry == NULL)
 		return -EINVAL;
-	}
 
 	/* We can't freeze external memory, because we don't own it */
 	if (entry->memdesc.flags & KGSL_MEMFLAGS_USERMEM_MASK)
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index b481002..ebc9ffd 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -30,6 +30,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/kfifo.h>
 #include <linux/sched/signal.h>
 #include <linux/export.h>
 #include <linux/slab.h>
@@ -661,17 +662,12 @@
 /* enqueue string to 'events' ring buffer */
 void hid_debug_event(struct hid_device *hdev, char *buf)
 {
-	unsigned i;
 	struct hid_debug_list *list;
 	unsigned long flags;
 
 	spin_lock_irqsave(&hdev->debug_list_lock, flags);
-	list_for_each_entry(list, &hdev->debug_list, node) {
-		for (i = 0; buf[i]; i++)
-			list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
-				buf[i];
-		list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
-        }
+	list_for_each_entry(list, &hdev->debug_list, node)
+		kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
 	spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
 
 	wake_up_interruptible(&hdev->debug_wait);
@@ -722,8 +718,7 @@
 	hid_debug_event(hdev, buf);
 
 	kfree(buf);
-        wake_up_interruptible(&hdev->debug_wait);
-
+	wake_up_interruptible(&hdev->debug_wait);
 }
 EXPORT_SYMBOL_GPL(hid_dump_input);
 
@@ -1088,8 +1083,8 @@
 		goto out;
 	}
 
-	if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
-		err = -ENOMEM;
+	err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
+	if (err) {
 		kfree(list);
 		goto out;
 	}
@@ -1109,77 +1104,57 @@
 		size_t count, loff_t *ppos)
 {
 	struct hid_debug_list *list = file->private_data;
-	int ret = 0, len;
+	int ret = 0, copied;
 	DECLARE_WAITQUEUE(wait, current);
 
 	mutex_lock(&list->read_mutex);
-	while (ret == 0) {
-		if (list->head == list->tail) {
-			add_wait_queue(&list->hdev->debug_wait, &wait);
-			set_current_state(TASK_INTERRUPTIBLE);
+	if (kfifo_is_empty(&list->hid_debug_fifo)) {
+		add_wait_queue(&list->hdev->debug_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
 
-			while (list->head == list->tail) {
-				if (file->f_flags & O_NONBLOCK) {
-					ret = -EAGAIN;
-					break;
-				}
-				if (signal_pending(current)) {
-					ret = -ERESTARTSYS;
-					break;
-				}
-
-				if (!list->hdev || !list->hdev->debug) {
-					ret = -EIO;
-					set_current_state(TASK_RUNNING);
-					goto out;
-				}
-
-				/* allow O_NONBLOCK from other threads */
-				mutex_unlock(&list->read_mutex);
-				schedule();
-				mutex_lock(&list->read_mutex);
-				set_current_state(TASK_INTERRUPTIBLE);
+		while (kfifo_is_empty(&list->hid_debug_fifo)) {
+			if (file->f_flags & O_NONBLOCK) {
+				ret = -EAGAIN;
+				break;
 			}
 
-			set_current_state(TASK_RUNNING);
-			remove_wait_queue(&list->hdev->debug_wait, &wait);
+			if (signal_pending(current)) {
+				ret = -ERESTARTSYS;
+				break;
+			}
+
+			/* if list->hdev is NULL we cannot remove_wait_queue().
+			 * if list->hdev->debug is 0 then hid_debug_unregister()
+			 * was already called and list->hdev is being destroyed.
+			 * if we add remove_wait_queue() here we can hit a race.
+			 */
+			if (!list->hdev || !list->hdev->debug) {
+				ret = -EIO;
+				set_current_state(TASK_RUNNING);
+				goto out;
+			}
+
+			/* allow O_NONBLOCK from other threads */
+			mutex_unlock(&list->read_mutex);
+			schedule();
+			mutex_lock(&list->read_mutex);
+			set_current_state(TASK_INTERRUPTIBLE);
 		}
 
+		__set_current_state(TASK_RUNNING);
+		remove_wait_queue(&list->hdev->debug_wait, &wait);
+
 		if (ret)
 			goto out;
-
-		/* pass the ringbuffer contents to userspace */
-copy_rest:
-		if (list->tail == list->head)
-			goto out;
-		if (list->tail > list->head) {
-			len = list->tail - list->head;
-			if (len > count)
-				len = count;
-
-			if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
-				ret = -EFAULT;
-				goto out;
-			}
-			ret += len;
-			list->head += len;
-		} else {
-			len = HID_DEBUG_BUFSIZE - list->head;
-			if (len > count)
-				len = count;
-
-			if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
-				ret = -EFAULT;
-				goto out;
-			}
-			list->head = 0;
-			ret += len;
-			count -= len;
-			if (count > 0)
-				goto copy_rest;
-		}
-
 	}
+
+	/* pass the fifo content to userspace; locking is not needed with only
+	 * one concurrent reader and one concurrent writer
+	 */
+	ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
+	if (ret)
+		goto out;
+	ret = copied;
 out:
 	mutex_unlock(&list->read_mutex);
 	return ret;
@@ -1190,7 +1165,7 @@
 	struct hid_debug_list *list = file->private_data;
 
 	poll_wait(file, &list->hdev->debug_wait, wait);
-	if (list->head != list->tail)
+	if (!kfifo_is_empty(&list->hid_debug_fifo))
 		return EPOLLIN | EPOLLRDNORM;
 	if (!list->hdev->debug)
 		return EPOLLERR | EPOLLHUP;
@@ -1205,7 +1180,7 @@
 	spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
 	list_del(&list->node);
 	spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
-	kfree(list->hid_debug_buf);
+	kfifo_free(&list->hid_debug_fifo);
 	kfree(list);
 
 	return 0;
@@ -1256,4 +1231,3 @@
 {
 	debugfs_remove_recursive(hid_debug_root);
 }
-
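
hid-debug.c above replaces a hand-rolled head/tail ring buffer with a kfifo.
The property being relied on is that a byte kfifo is safe with exactly one
concurrent producer and one concurrent consumer without a lock on the
consumer side; the spinlock survives only because several events can race to
produce into the same list. A minimal standalone sketch of the same pattern
(names and sizes are illustrative, not from the patch):

#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#define EXAMPLE_FIFO_SIZE 4096		/* must be a power of two */

static DEFINE_SPINLOCK(example_lock);
static struct kfifo example_fifo;

static int example_init(void)
{
	return kfifo_alloc(&example_fifo, EXAMPLE_FIFO_SIZE, GFP_KERNEL);
}

/* Producer side: serialized against other producers by the spinlock */
static void example_produce(const char *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	kfifo_in(&example_fifo, buf, strlen(buf));	/* drops bytes on overflow */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Consumer side: single reader, so no lock is required here */
static ssize_t example_consume(char __user *ubuf, size_t count)
{
	unsigned int copied;
	int ret;

	ret = kfifo_to_user(&example_fifo, ubuf, count, &copied);
	return ret ? ret : copied;
}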
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 46182d4..b7870e7 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -17,6 +17,9 @@
 #ifndef HID_IDS_H_FILE
 #define HID_IDS_H_FILE
 
+#define USB_VENDOR_ID_258A		0x258a
+#define USB_DEVICE_ID_258A_6A88		0x6a88
+
 #define USB_VENDOR_ID_3M		0x0596
 #define USB_DEVICE_ID_3M1968		0x0500
 #define USB_DEVICE_ID_3M2256		0x0502
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 1882a4a..98b059d 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -42,6 +42,7 @@
 
 static const struct hid_device_id ite_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 643b6eb..eacc76d 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -743,7 +743,9 @@
 	data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd;
 	data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd;
 	data_pointer->led_mute.dev = dev;
-	led_classdev_register(dev, &data_pointer->led_mute);
+	ret = led_classdev_register(dev, &data_pointer->led_mute);
+	if (ret < 0)
+		goto err;
 
 	data_pointer->led_micmute.name = name_micmute;
 	data_pointer->led_micmute.brightness_get =
@@ -751,7 +753,11 @@
 	data_pointer->led_micmute.brightness_set =
 		lenovo_led_brightness_set_tpkbd;
 	data_pointer->led_micmute.dev = dev;
-	led_classdev_register(dev, &data_pointer->led_micmute);
+	ret = led_classdev_register(dev, &data_pointer->led_micmute);
+	if (ret < 0) {
+		led_classdev_unregister(&data_pointer->led_mute);
+		goto err;
+	}
 
 	lenovo_features_set_tpkbd(hdev);
 
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 97954f5..1c1a251 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -4,7 +4,7 @@
 
 config HYPERV
 	tristate "Microsoft Hyper-V client drivers"
-	depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST
+	depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
 	select PARAVIRT
 	help
 	  Select this option to run Linux as a Hyper-V client operating
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b1b7880..d2a735ac9 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -888,12 +888,14 @@
 			pfn_cnt -= pgs_ol;
 			/*
 			 * Check if the corresponding memory block is already
-			 * online by checking its last previously backed page.
-			 * In case it is we need to bring rest (which was not
-			 * backed previously) online too.
+			 * online. It is possible to observe struct pages still
+			 * being uninitialized here, so check the section instead.
+			 * If the section is online, we need to bring the rest of
+			 * the pfns (which were not backed previously)
+			 * online too.
 			 */
 			if (start_pfn > has->start_pfn &&
-			    !PageReserved(pfn_to_page(start_pfn - 1)))
+			    online_section_nr(pfn_to_section_nr(start_pfn)))
 				hv_bring_pgs_online(has, start_pfn, pgs_ol);
 
 		}
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 3e90eb9..6cb45f2 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -164,26 +164,25 @@
 }
 
 /* Get various debug metrics for the specified ring buffer. */
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
-				 struct hv_ring_buffer_debug_info *debug_info)
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+				struct hv_ring_buffer_debug_info *debug_info)
 {
 	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
 
-	if (ring_info->ring_buffer) {
-		hv_get_ringbuffer_availbytes(ring_info,
-					&bytes_avail_toread,
-					&bytes_avail_towrite);
+	if (!ring_info->ring_buffer)
+		return -EINVAL;
 
-		debug_info->bytes_avail_toread = bytes_avail_toread;
-		debug_info->bytes_avail_towrite = bytes_avail_towrite;
-		debug_info->current_read_index =
-			ring_info->ring_buffer->read_index;
-		debug_info->current_write_index =
-			ring_info->ring_buffer->write_index;
-		debug_info->current_interrupt_mask =
-			ring_info->ring_buffer->interrupt_mask;
-	}
+	hv_get_ringbuffer_availbytes(ring_info,
+				     &bytes_avail_toread,
+				     &bytes_avail_towrite);
+	debug_info->bytes_avail_toread = bytes_avail_toread;
+	debug_info->bytes_avail_towrite = bytes_avail_towrite;
+	debug_info->current_read_index = ring_info->ring_buffer->read_index;
+	debug_info->current_write_index = ring_info->ring_buffer->write_index;
+	debug_info->current_interrupt_mask
+		= ring_info->ring_buffer->interrupt_mask;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
 
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index c71cc85..9aa18f3 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -313,10 +313,16 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(out_intr_mask);
@@ -326,10 +332,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.current_read_index);
 }
 static DEVICE_ATTR_RO(out_read_index);
@@ -340,10 +351,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.current_write_index);
 }
 static DEVICE_ATTR_RO(out_write_index);
@@ -354,10 +370,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -368,10 +389,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -381,10 +407,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(in_intr_mask);
@@ -394,10 +425,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.current_read_index);
 }
 static DEVICE_ATTR_RO(in_read_index);
@@ -407,10 +443,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.current_write_index);
 }
 static DEVICE_ATTR_RO(in_write_index);
@@ -421,10 +462,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -435,10 +481,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(in_write_bytes_avail);
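
Each vmbus_drv.c handler above now propagates the -EINVAL from
hv_ringbuffer_get_debuginfo() instead of formatting a debug_info struct that
was never filled in. Since all nine handlers differ only in the ring they
query and the field they print, they could in principle share a helper; a
hypothetical consolidation, not part of this patch:

static ssize_t ring_field_show(struct hv_device *hv_dev,
			const struct hv_ring_buffer_info *rbi,
			u32 (*field)(const struct hv_ring_buffer_debug_info *),
			char *buf)
{
	struct hv_ring_buffer_debug_info info;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(rbi, &info);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", field(&info));
}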
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 08e3945..f9b8e3e 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -360,9 +360,11 @@
 	struct i2c_client *client = data->client;
 	unsigned long min, val;
 	u8 reg;
-	int err = kstrtoul(buf, 10, &val);
-	if (err < 0)
-		return err;
+	int rv;
+
+	rv = kstrtoul(buf, 10, &val);
+	if (rv < 0)
+		return rv;
 
 	/* Save fan_min */
 	mutex_lock(&data->update_lock);
@@ -390,8 +392,13 @@
 		return -EINVAL;
 	}
 
-	reg = (lm80_read_value(client, LM80_REG_FANDIV) &
-	       ~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1)));
+	rv = lm80_read_value(client, LM80_REG_FANDIV);
+	if (rv < 0) {
+		mutex_unlock(&data->update_lock);
+		return rv;
+	}
+	reg = (rv & ~(3 << (2 * (nr + 1))))
+	    | (data->fan_div[nr] << (2 * (nr + 1)));
 	lm80_write_value(client, LM80_REG_FANDIV, reg);
 
 	/* Restore fan_min */
@@ -623,6 +630,7 @@
 	struct device *dev = &client->dev;
 	struct device *hwmon_dev;
 	struct lm80_data *data;
+	int rv;
 
 	data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
 	if (!data)
@@ -635,8 +643,14 @@
 	lm80_init_client(client);
 
 	/* A few vars need to be filled upon startup */
-	data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
-	data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+	rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+	if (rv < 0)
+		return rv;
+	data->fan[f_min][0] = rv;
+	rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+	if (rv < 0)
+		return rv;
+	data->fan[f_min][1] = rv;
 
 	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
 							   data, lm80_groups);
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index e363992..ceb3db6f3 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -88,7 +88,7 @@
 		.data = (void *)2
 	},
 	{
-		.compatible = "ti,tmp422",
+		.compatible = "ti,tmp442",
 		.data = (void *)3
 	},
 	{ },
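
coresight-tpdm.c below drops its private tpdm_setup_enable()/
tpdm_setup_disable() pair and brackets every register access with the
coresight core helpers coresight_enable_reg_clk()/coresight_disable_reg_clk()
instead. The deleted code's ordering is worth keeping in mind, since the core
helper presumably has to preserve it: regulators come up before clocks, and
failure unwinds in reverse. A sketch of that idiom using only the standard
regulator/clk APIs (whether the core helper is implemented exactly this way
is an assumption):

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

static int enable_regs_then_clks(struct regulator **regs, int nr_regs,
		struct clk **clks, int nr_clks)
{
	int i, j, ret;

	for (i = 0; i < nr_regs; i++) {
		ret = regulator_enable(regs[i]);
		if (ret)
			goto err_regs;
	}

	for (j = 0; j < nr_clks; j++) {
		ret = clk_prepare_enable(clks[j]);
		if (ret)
			goto err_clks;
	}

	return 0;

err_clks:
	while (--j >= 0)
		clk_disable_unprepare(clks[j]);
err_regs:
	while (--i >= 0)
		regulator_disable(regs[i]);
	return ret;
}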
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index be56a8a..4da336f 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -14,6 +14,7 @@
 #include <linux/of.h>
 #include <linux/coresight.h>
 #include <linux/regulator/consumer.h>
+#include <soc/qcom/scm.h>
 
 #include "coresight-priv.h"
 
@@ -131,6 +132,8 @@
 #define TPDM_REVISION_A		0
 #define TPDM_REVISION_B		1
 
+#define HW_ENABLE_CHECK_VALUE   0x10
+
 enum tpdm_dataset {
 	TPDM_DS_IMPLDEF,
 	TPDM_DS_DSB,
@@ -268,45 +271,6 @@
 
 static void tpdm_init_default_data(struct tpdm_drvdata *drvdata);
 
-static void tpdm_setup_disable(struct tpdm_drvdata *drvdata)
-{
-	int i;
-
-	for (i = 0; i < drvdata->nr_tclk; i++)
-		clk_disable_unprepare(drvdata->tclk[i]);
-	for (i = 0; i < drvdata->nr_treg; i++)
-		regulator_disable(drvdata->treg[i]);
-}
-
-int tpdm_setup_enable(struct tpdm_drvdata *drvdata)
-{
-	int ret;
-	int i, j;
-
-	for (i = 0; i < drvdata->nr_treg; i++) {
-		ret = regulator_enable(drvdata->treg[i]);
-		if (ret)
-			goto err_regs;
-	}
-
-	for (j = 0; j < drvdata->nr_tclk; j++) {
-		ret = clk_prepare_enable(drvdata->tclk[j]);
-		if (ret)
-			goto err_clks;
-	}
-
-	return 0;
-
-err_clks:
-	for (j--; j >= 0; j--)
-		clk_disable_unprepare(drvdata->tclk[j]);
-err_regs:
-	for (i--; i >= 0; i--)
-		regulator_disable(drvdata->treg[i]);
-
-	return ret;
-}
-
 static void __tpdm_enable_gpr(struct tpdm_drvdata *drvdata)
 {
 	int i;
@@ -592,6 +556,11 @@
 		val = val | BIT(1);
 	else
 		val = val & ~BIT(1);
+	if (drvdata->cmb->ts_all)
+		val = val | BIT(2);
+	else
+		val = val & ~BIT(2);
+
 	tpdm_writel(drvdata, val, TPDM_CMB_TIER);
 
 	__tpdm_config_cmb_msr(drvdata);
@@ -706,12 +675,6 @@
 		return ret;
 	}
 
-	ret = tpdm_setup_enable(drvdata);
-	if (ret) {
-		dev_err(drvdata->dev, "TPDM setup failed. Skipping enable\n");
-		return ret;
-	}
-
 	mutex_lock(&drvdata->lock);
 	__tpdm_enable(drvdata);
 	drvdata->enable = true;
@@ -795,8 +758,6 @@
 	drvdata->enable = false;
 	mutex_unlock(&drvdata->lock);
 
-	tpdm_setup_disable(drvdata);
-
 	dev_info(drvdata->dev, "TPDM tracing disabled\n");
 }
 
@@ -938,18 +899,11 @@
 	/* Init the default data */
 	tpdm_init_default_data(drvdata);
 
-	/* Disable tpdm if enabled */
-	if (drvdata->enable) {
-		__tpdm_disable(drvdata);
-		drvdata->enable = false;
-	}
-
 	mutex_unlock(&drvdata->lock);
 
-	if (drvdata->enable) {
-		tpdm_setup_disable(drvdata);
-		dev_info(drvdata->dev, "TPDM tracing disabled\n");
-	}
+	/* Disable tpdm if enabled */
+	if (drvdata->enable)
+		coresight_disable(drvdata->csdev);
 
 	return size;
 }
@@ -1031,14 +985,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1056,12 +1010,12 @@
 		drvdata->bc->capture_mode = TPDM_MODE_APB;
 	} else {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EINVAL;
 	}
 
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(bc_capture_mode);
@@ -1128,14 +1082,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1148,7 +1102,7 @@
 	}
 
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_WO(bc_reset_counters);
@@ -1489,14 +1443,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1504,7 +1458,7 @@
 	val = tpdm_readl(drvdata, TPDM_BC_OVSR);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -1522,14 +1476,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1539,7 +1493,7 @@
 		TPDM_LOCK(drvdata);
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(bc_ovsr);
@@ -1555,14 +1509,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1570,7 +1524,7 @@
 	val = tpdm_readl(drvdata, TPDM_BC_SELR);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -1588,14 +1542,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable || val >= drvdata->bc_counters_avail) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1603,7 +1557,7 @@
 	tpdm_writel(drvdata, val, TPDM_BC_SELR);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(bc_counter_sel);
@@ -1619,14 +1573,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1634,7 +1588,7 @@
 	val = tpdm_readl(drvdata, TPDM_BC_CNTR_LO);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -1652,14 +1606,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1670,7 +1624,7 @@
 		/* Check if selected counter is disabled */
 		if (BVAL(tpdm_readl(drvdata, TPDM_BC_CNTENSET), select)) {
 			mutex_unlock(&drvdata->lock);
-			tpdm_setup_disable(drvdata);
+			coresight_disable_reg_clk(drvdata->csdev);
 			return -EPERM;
 		}
 
@@ -1678,7 +1632,7 @@
 		TPDM_LOCK(drvdata);
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(bc_count_val_lo);
@@ -1694,14 +1648,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1709,7 +1663,7 @@
 	val = tpdm_readl(drvdata, TPDM_BC_CNTR_HI);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -1727,14 +1681,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1745,7 +1699,7 @@
 		/* Check if selected counter is disabled */
 		if (BVAL(tpdm_readl(drvdata, TPDM_BC_CNTENSET), select)) {
 			mutex_unlock(&drvdata->lock);
-			tpdm_setup_disable(drvdata);
+			coresight_disable_reg_clk(drvdata->csdev);
 			return -EPERM;
 		}
 
@@ -1753,7 +1707,7 @@
 		TPDM_LOCK(drvdata);
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(bc_count_val_hi);
@@ -1770,14 +1724,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1789,7 +1743,7 @@
 	}
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RO(bc_shadow_val_lo);
@@ -1806,14 +1760,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1824,7 +1778,7 @@
 				  tpdm_readl(drvdata, TPDM_BC_SHADOW_HI(i)));
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RO(bc_shadow_val_hi);
@@ -1840,14 +1794,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1855,7 +1809,7 @@
 	val = tpdm_readl(drvdata, TPDM_BC_SWINC);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -1873,14 +1827,14 @@
 	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -1890,7 +1844,7 @@
 		TPDM_LOCK(drvdata);
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(bc_sw_inc);
@@ -1976,14 +1930,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2001,11 +1955,11 @@
 		drvdata->tc->capture_mode = TPDM_MODE_APB;
 	} else {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EINVAL;
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(tc_capture_mode);
@@ -2040,14 +1994,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2057,11 +2011,11 @@
 		drvdata->tc->retrieval_mode = TPDM_MODE_APB;
 	} else {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EINVAL;
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(tc_retrieval_mode);
@@ -2080,14 +2034,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2099,7 +2053,7 @@
 		TPDM_LOCK(drvdata);
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_WO(tc_reset_counters);
@@ -2419,14 +2373,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2434,7 +2388,7 @@
 	val = tpdm_readl(drvdata, TPDM_TC_OVSR_GP);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -2452,14 +2406,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2469,7 +2423,7 @@
 		TPDM_LOCK(drvdata);
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(tc_ovsr_gp);
@@ -2485,14 +2439,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2500,7 +2454,7 @@
 	val = tpdm_readl(drvdata, TPDM_TC_OVSR_IMPL);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -2518,14 +2472,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2535,7 +2489,7 @@
 		TPDM_LOCK(drvdata);
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(tc_ovsr_impl);
@@ -2551,14 +2505,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2566,7 +2520,7 @@
 	val = tpdm_readl(drvdata, TPDM_TC_SELR);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -2584,14 +2538,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2599,7 +2553,7 @@
 	tpdm_writel(drvdata, val, TPDM_TC_SELR);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(tc_counter_sel);
@@ -2615,14 +2569,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2630,7 +2584,7 @@
 	val = tpdm_readl(drvdata, TPDM_TC_CNTR_LO);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -2648,14 +2602,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2667,7 +2621,7 @@
 		/* Check if selected counter is disabled */
 		if (BVAL(tpdm_readl(drvdata, TPDM_TC_CNTENSET), select)) {
 			mutex_unlock(&drvdata->lock);
-			tpdm_setup_disable(drvdata);
+			coresight_disable_reg_clk(drvdata->csdev);
 			return -EPERM;
 		}
 
@@ -2675,7 +2629,7 @@
 		TPDM_LOCK(drvdata);
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(tc_count_val_lo);
@@ -2691,14 +2645,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2706,7 +2660,7 @@
 	val = tpdm_readl(drvdata, TPDM_TC_CNTR_HI);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -2724,14 +2678,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2743,7 +2697,7 @@
 		/* Check if selected counter is disabled */
 		if (BVAL(tpdm_readl(drvdata, TPDM_TC_CNTENSET), select)) {
 			mutex_unlock(&drvdata->lock);
-			tpdm_setup_disable(drvdata);
+			coresight_disable_reg_clk(drvdata->csdev);
 			return -EPERM;
 		}
 
@@ -2751,7 +2705,7 @@
 		TPDM_LOCK(drvdata);
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(tc_count_val_hi);
@@ -2768,14 +2722,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2787,7 +2741,7 @@
 	}
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RO(tc_shadow_val_lo);
@@ -2804,14 +2758,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2823,7 +2777,7 @@
 	}
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RO(tc_shadow_val_hi);
@@ -2839,14 +2793,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2854,7 +2808,7 @@
 	val = tpdm_readl(drvdata, TPDM_TC_SWINC);
 	TPDM_LOCK(drvdata);
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
 }
 
@@ -2872,14 +2826,14 @@
 	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
 		return -EPERM;
 
-	ret = tpdm_setup_enable(drvdata);
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
 	mutex_lock(&drvdata->lock);
 	if (!drvdata->enable) {
 		mutex_unlock(&drvdata->lock);
-		tpdm_setup_disable(drvdata);
+		coresight_disable_reg_clk(drvdata->csdev);
 		return -EPERM;
 	}
 
@@ -2889,7 +2843,7 @@
 		TPDM_LOCK(drvdata);
 	}
 	mutex_unlock(&drvdata->lock);
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 	return size;
 }
 static DEVICE_ATTR_RW(tc_sw_inc);
@@ -4375,12 +4329,22 @@
 	struct coresight_desc *desc;
 	static int traceid = TPDM_TRACE_ID_START;
 	uint32_t version;
+	struct scm_desc des = {0};
+	u32 scm_ret = 0;
 
 	pdata = of_get_coresight_platform_data(dev, adev->dev.of_node);
 	if (IS_ERR(pdata))
 		return PTR_ERR(pdata);
 	adev->dev.platform_data = pdata;
 
+	if (of_property_read_bool(adev->dev.of_node, "qcom,hw-enable-check")) {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_UTIL,
+				HW_ENABLE_CHECK_VALUE), &des);
+		scm_ret = des.ret[0];
+		if (scm_ret == 0)
+			return -ENXIO;
+	}
+
 	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
 	if (!drvdata)
 		return -ENOMEM;
@@ -4399,7 +4363,20 @@
 		return -EINVAL;
 	}
 
-	ret = tpdm_setup_enable(drvdata);
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+	desc->ops = &tpdm_cs_ops;
+	desc->pdata = adev->dev.platform_data;
+	desc->dev = &adev->dev;
+	desc->groups = tpdm_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	ret = coresight_enable_reg_clk(drvdata->csdev);
 	if (ret)
 		return ret;
 
@@ -4418,8 +4395,10 @@
 	}
 
 	ret = tpdm_datasets_alloc(drvdata);
-	if (ret)
+	if (ret) {
+		coresight_unregister(drvdata->csdev);
 		return ret;
+	}
 
 	tpdm_init_default_data(drvdata);
 
@@ -4430,23 +4409,10 @@
 	drvdata->bc_counters_avail = BMVAL(devid, 6, 10) + 1;
 	drvdata->tc_counters_avail = BMVAL(devid, 4, 5) + 1;
 
-	tpdm_setup_disable(drvdata);
+	coresight_disable_reg_clk(drvdata->csdev);
 
 	drvdata->traceid = traceid++;
 
-	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
-	if (!desc)
-		return -ENOMEM;
-	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
-	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
-	desc->ops = &tpdm_cs_ops;
-	desc->pdata = adev->dev.platform_data;
-	desc->dev = &adev->dev;
-	desc->groups = tpdm_attr_grps;
-	drvdata->csdev = coresight_register(desc);
-	if (IS_ERR(drvdata->csdev))
-		return PTR_ERR(drvdata->csdev);
-
 	dev_dbg(drvdata->dev, "TPDM initialized\n");
 
 	if (boot_enable)
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 35f0bf7..29fdb7c 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -18,6 +18,7 @@
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
 
 #include "coresight-priv.h"
 
@@ -111,6 +112,54 @@
 	bus_for_each_dev(&coresight_bustype, NULL, NULL, coresight_reset_sink);
 }
 
+int coresight_enable_reg_clk(struct coresight_device *csdev)
+{
+	struct coresight_reg_clk *reg_clk = csdev->reg_clk;
+	int ret;
+	int i, j;
+
+	if (IS_ERR_OR_NULL(reg_clk))
+		return -EINVAL;
+
+	for (i = 0; i < reg_clk->nr_reg; i++) {
+		ret = regulator_enable(reg_clk->reg[i]);
+		if (ret)
+			goto err_regs;
+	}
+
+	for (j = 0; j < reg_clk->nr_clk; j++) {
+		ret = clk_prepare_enable(reg_clk->clk[j]);
+		if (ret)
+			goto err_clks;
+	}
+
+	return 0;
+err_clks:
+	for (j--; j >= 0; j--)
+		clk_disable_unprepare(reg_clk->clk[j]);
+err_regs:
+	for (i--; i >= 0; i--)
+		regulator_disable(reg_clk->reg[i]);
+
+	return ret;
+}
+EXPORT_SYMBOL(coresight_enable_reg_clk);
+
+void coresight_disable_reg_clk(struct coresight_device *csdev)
+{
+	struct coresight_reg_clk *reg_clk = csdev->reg_clk;
+	int i;
+
+	if (IS_ERR_OR_NULL(reg_clk))
+		return;
+
+	for (i = reg_clk->nr_clk - 1; i >= 0; i--)
+		clk_disable_unprepare(reg_clk->clk[i]);
+	for (i = reg_clk->nr_reg - 1; i >= 0; i--)
+		regulator_disable(reg_clk->reg[i]);
+}
+EXPORT_SYMBOL(coresight_disable_reg_clk);
+
 static int coresight_find_link_inport(struct coresight_device *csdev,
 				      struct coresight_device *parent,
 				      struct list_head *path)
@@ -159,9 +208,12 @@
 
 	if (!csdev->enable) {
 		if (sink_ops(csdev)->enable) {
+			coresight_enable_reg_clk(csdev);
 			ret = sink_ops(csdev)->enable(csdev, mode);
-			if (ret)
+			if (ret) {
+				coresight_disable_reg_clk(csdev);
 				return ret;
+			}
 		}
 		csdev->enable = true;
 	}
@@ -176,6 +228,7 @@
 	if (atomic_dec_return(csdev->refcnt) == 0) {
 		if (sink_ops(csdev)->disable) {
 			sink_ops(csdev)->disable(csdev);
+			coresight_disable_reg_clk(csdev);
 			csdev->enable = false;
 			csdev->activated = false;
 		}
@@ -210,8 +263,10 @@
 
 	if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
 		if (link_ops(csdev)->enable) {
+			coresight_enable_reg_clk(csdev);
 			ret = link_ops(csdev)->enable(csdev, inport, outport);
 			if (ret) {
+				coresight_disable_reg_clk(csdev);
 				atomic_dec(&csdev->refcnt[refport]);
 				return ret;
 			}
@@ -251,8 +306,10 @@
 	}
 
 	if (atomic_dec_return(&csdev->refcnt[refport]) == 0) {
-		if (link_ops(csdev)->disable)
+		if (link_ops(csdev)->disable) {
 			link_ops(csdev)->disable(csdev, inport, outport);
+			coresight_disable_reg_clk(csdev);
+		}
 	}
 
 	for (i = 0; i < nr_conns; i++)
@@ -274,9 +331,12 @@
 
 	if (!csdev->enable) {
 		if (source_ops(csdev)->enable) {
+			coresight_enable_reg_clk(csdev);
 			ret = source_ops(csdev)->enable(csdev, NULL, mode);
-			if (ret)
+			if (ret) {
+				coresight_disable_reg_clk(csdev);
 				return ret;
+			}
 		}
 		csdev->enable = true;
 	}
@@ -297,8 +357,10 @@
 static bool coresight_disable_source(struct coresight_device *csdev)
 {
 	if (atomic_dec_return(csdev->refcnt) == 0) {
-		if (source_ops(csdev)->disable)
+		if (source_ops(csdev)->disable) {
 			source_ops(csdev)->disable(csdev, NULL);
+			coresight_disable_reg_clk(csdev);
+		}
 		csdev->enable = false;
 	}
 	return !csdev->enable;
@@ -1195,6 +1257,7 @@
 	csdev->subtype = desc->subtype;
 	csdev->ops = desc->ops;
 	csdev->orphan = false;
+	csdev->reg_clk = desc->pdata->reg_clk;
 
 	csdev->dev.type = &coresight_dev_type[desc->type];
 	csdev->dev.groups = desc->groups;
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index dc6f111..29dbf52 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -122,6 +122,59 @@
 }
 EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
 
+static struct coresight_reg_clk *
+of_coresight_get_reg_clk(struct device *dev, const struct device_node *node)
+{
+	struct coresight_reg_clk *reg_clk;
+	const char *clk_name, *reg_name;
+	int nr_reg, nr_clk, i, ret;
+
+	nr_reg = of_property_count_strings(node, "qcom,proxy-regs");
+	nr_clk = of_property_count_strings(node, "qcom,proxy-clks");
+	/*
+	 * of_property_count_strings() returns a negative errno when the
+	 * property is absent, so treat any non-positive count as "none".
+	 */
+	if (nr_reg <= 0 && nr_clk <= 0)
+		return NULL;
+
+	reg_clk = devm_kzalloc(dev, sizeof(*reg_clk), GFP_KERNEL);
+	if (!reg_clk)
+		return ERR_PTR(-ENOMEM);
+
+	reg_clk->nr_reg = nr_reg > 0 ? nr_reg : 0;
+	reg_clk->nr_clk = nr_clk > 0 ? nr_clk : 0;
+	if (nr_reg > 0) {
+		reg_clk->reg = devm_kzalloc(dev, nr_reg *
+			sizeof(reg_clk->reg), GFP_KERNEL);
+		if (!reg_clk->reg)
+			return ERR_PTR(-ENOMEM);
+
+		for (i = 0; i < nr_reg; i++) {
+			ret = of_property_read_string_index(node,
+				"qcom,proxy-regs", i, &reg_name);
+			if (ret)
+				return ERR_PTR(ret);
+			reg_clk->reg[i] = devm_regulator_get(dev, reg_name);
+			if (IS_ERR(reg_clk->reg[i]))
+				return ERR_PTR(-EINVAL);
+		}
+	}
+	if (nr_clk > 0) {
+		reg_clk->clk = devm_kzalloc(dev, nr_clk *
+			sizeof(reg_clk->clk), GFP_KERNEL);
+		if (!reg_clk->clk)
+			return ERR_PTR(-ENOMEM);
+
+		for (i = 0; i < nr_clk; i++) {
+			ret = of_property_read_string_index(node,
+				"qcom,proxy-clks", i, &clk_name);
+			if (ret)
+				return ERR_PTR(ret);
+			reg_clk->clk[i] = devm_clk_get(dev, clk_name);
+			if (IS_ERR(reg_clk->clk[i]))
+				return ERR_PTR(-EINVAL);
+		}
+	}
+	return reg_clk;
+}
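Editorial note (not part of the patch): a consumer device-tree node would pair the two properties parsed above with entries such as qcom,proxy-regs = "vdd"; and qcom,proxy-clks = "core_clk"; (hypothetical names), each string being resolved in order through devm_regulator_get()/devm_clk_get().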
+
 struct coresight_platform_data *
 of_get_coresight_platform_data(struct device *dev,
 			       const struct device_node *node)
@@ -212,6 +265,10 @@
 
 	pdata->cpu = of_coresight_get_cpu(node);
 
+	pdata->reg_clk = of_coresight_get_reg_clk(dev, node);
+	if (IS_ERR(pdata->reg_clk))
+		return (void *)(pdata->reg_clk);
+
 	return pdata;
 }
 EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index d293e55..ba7aaf4 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1423,7 +1423,8 @@
 		if (!end)
 			break;
 
-		len -= end - p;
+		/* consume the number and the following comma, hence +1 */
+		len -= end - p + 1;
 		p = end + 1;
 	} while (len);
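Worked example of the off-by-one fix above (editorial): with p pointing at "32,32", the first iteration's end lands on the comma, so end - p == 2, yet three characters ("32,") are consumed once p = end + 1 executes; subtracting end - p + 1 keeps len in step with p, so the while (len) test sees the true number of unparsed bytes.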
 
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index e123112..293142d 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -167,8 +167,9 @@
 {
 	struct stp_master *master;
 	size_t size;
+	unsigned long align = sizeof(unsigned long);
 
-	size = ALIGN(stm->data->sw_nchannels, 8) / 8;
+	size = ALIGN(stm->data->sw_nchannels, align) / align;
 	size += sizeof(struct stp_master);
 	master = kzalloc(size, GFP_ATOMIC);
 	if (!master)
@@ -218,8 +219,8 @@
 	bitmap_release_region(&master->chan_map[0], output->channel,
 			      ilog2(output->nr_chans));
 
-	output->nr_chans = 0;
 	master->nr_free += output->nr_chans;
+	output->nr_chans = 0;
 }
 
 /*
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 51d3495..fb5bac0 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -296,22 +296,7 @@
 			i2c_int_disable(idev, MST_STATUS_TFL);
 	}
 
-	if (status & MST_STATUS_SCC) {
-		/* Stop completed */
-		i2c_int_disable(idev, ~MST_STATUS_TSS);
-		complete(&idev->msg_complete);
-	} else if (status & MST_STATUS_SNS) {
-		/* Transfer done */
-		i2c_int_disable(idev, ~MST_STATUS_TSS);
-		if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
-			axxia_i2c_empty_rx_fifo(idev);
-		complete(&idev->msg_complete);
-	} else if (status & MST_STATUS_TSS) {
-		/* Transfer timeout */
-		idev->msg_err = -ETIMEDOUT;
-		i2c_int_disable(idev, ~MST_STATUS_TSS);
-		complete(&idev->msg_complete);
-	} else if (unlikely(status & MST_STATUS_ERR)) {
+	if (unlikely(status & MST_STATUS_ERR)) {
 		/* Transfer error */
 		i2c_int_disable(idev, ~0);
 		if (status & MST_STATUS_AL)
@@ -328,6 +313,21 @@
 			readl(idev->base + MST_TX_BYTES_XFRD),
 			readl(idev->base + MST_TX_XFER));
 		complete(&idev->msg_complete);
+	} else if (status & MST_STATUS_SCC) {
+		/* Stop completed */
+		i2c_int_disable(idev, ~MST_STATUS_TSS);
+		complete(&idev->msg_complete);
+	} else if (status & MST_STATUS_SNS) {
+		/* Transfer done */
+		i2c_int_disable(idev, ~MST_STATUS_TSS);
+		if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
+			axxia_i2c_empty_rx_fifo(idev);
+		complete(&idev->msg_complete);
+	} else if (status & MST_STATUS_TSS) {
+		/* Transfer timeout */
+		idev->msg_err = -ETIMEDOUT;
+		i2c_int_disable(idev, ~MST_STATUS_TSS);
+		complete(&idev->msg_complete);
 	}
 
 out:
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 65d06a8..2ac8609 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1498,8 +1498,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int omap_i2c_runtime_suspend(struct device *dev)
+static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
 {
 	struct omap_i2c_dev *omap = dev_get_drvdata(dev);
 
@@ -1525,7 +1524,7 @@
 	return 0;
 }
 
-static int omap_i2c_runtime_resume(struct device *dev)
+static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
 {
 	struct omap_i2c_dev *omap = dev_get_drvdata(dev);
 
@@ -1540,20 +1539,18 @@
 }
 
 static const struct dev_pm_ops omap_i2c_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				      pm_runtime_force_resume)
 	SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
 			   omap_i2c_runtime_resume, NULL)
 };
-#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
-#else
-#define OMAP_I2C_PM_OPS NULL
-#endif /* CONFIG_PM */
 
 static struct platform_driver omap_i2c_driver = {
 	.probe		= omap_i2c_probe,
 	.remove		= omap_i2c_remove,
 	.driver		= {
 		.name	= "omap_i2c",
-		.pm	= OMAP_I2C_PM_OPS,
+		.pm	= &omap_i2c_pm_ops,
 		.of_match_table = of_match_ptr(omap_i2c_of_match),
 	},
 };
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index fb65502..703c634 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -321,11 +321,12 @@
 				       SE_DMA_RX_IRQ_CLR);
 		/* Ensure all writes are done before returning from ISR. */
 		wmb();
+		if ((dm_tx_st & TX_DMA_DONE) || (dm_rx_st & RX_DMA_DONE))
+			complete(&gi2c->xfer);
+
 	}
 	/* if this is err with done-bit not set, handle that thr' timeout. */
-	if (m_stat & M_CMD_DONE_EN)
-		complete(&gi2c->xfer);
-	else if ((dm_tx_st & TX_DMA_DONE) || (dm_rx_st & RX_DMA_DONE))
+	else if (m_stat & M_CMD_DONE_EN)
 		complete(&gi2c->xfer);
 
 	return IRQ_HANDLED;
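Editorial note on the reordering above: for transfers on the DMA path, completion is now signalled from the TX/RX DMA done bits, and M_CMD_DONE_EN is only consulted in the else branch for non-DMA interrupts; presumably this avoids completing the transfer on M_CMD_DONE_EN while the DMA engine is still draining data, a race the reinit_completion() calls added further down also guard against.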
@@ -665,6 +666,7 @@
 		dma_addr_t tx_dma = 0;
 		dma_addr_t rx_dma = 0;
 		enum se_xfer_mode mode = FIFO_MODE;
+		reinit_completion(&gi2c->xfer);
 
 		m_param |= (stretch ? STOP_STRETCH : 0);
 		m_param |= ((msgs[i].addr & 0x7F) << SLV_ADDR_SHFT);
@@ -724,6 +726,7 @@
 						gi2c->xfer_timeout);
 		if (!timeout) {
 			geni_i2c_err(gi2c, GENI_TIMEOUT);
+			reinit_completion(&gi2c->xfer);
 			gi2c->cur = NULL;
 			geni_abort_m_cmd(gi2c->base);
 			timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
@@ -732,6 +735,7 @@
 		gi2c->cur_rd = 0;
 		if (mode == SE_DMA) {
 			if (gi2c->err) {
+				reinit_completion(&gi2c->xfer);
 				if (msgs[i].flags != I2C_M_RD)
 					writel_relaxed(1, gi2c->base +
 							SE_DMA_TX_FSM_RST);
@@ -814,6 +818,7 @@
 		return ret;
 	}
 
+	gi2c->i2c_rsc.ctrl_dev = gi2c->dev;
 	gi2c->i2c_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
 	if (IS_ERR(gi2c->i2c_rsc.se_clk)) {
 		ret = PTR_ERR(gi2c->i2c_rsc.se_clk);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 818cab1..ddcfb6d 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -800,6 +800,7 @@
 static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
 	{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
+	{ .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
 	{ .compatible = "renesas,iic-r8a7791", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config },
@@ -808,6 +809,7 @@
 	{ .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config },
+	{ .compatible = "renesas,iic-r8a77990", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
 	{},
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 1aca742..ccd76c7 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -470,9 +470,15 @@
 					  data_arg.data);
 	}
 	case I2C_RETRIES:
+		if (arg > INT_MAX)
+			return -EINVAL;
+
 		client->adapter->retries = arg;
 		break;
 	case I2C_TIMEOUT:
+		if (arg > INT_MAX)
+			return -EINVAL;
+
 		/* For historical reasons, user-space sets the timeout
 		 * value in units of 10 ms.
 		 */
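Editorial note: both values ultimately land in plain int members of struct i2c_adapter (retries directly, timeout via a msecs_to_jiffies() conversion of arg * 10), so the INT_MAX checks reject user input that would truncate or overflow on assignment.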
diff --git a/drivers/i3c/Kconfig b/drivers/i3c/Kconfig
new file mode 100644
index 0000000..30a4415
--- /dev/null
+++ b/drivers/i3c/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig I3C
+	tristate "I3C support"
+	select I2C
+	help
+	  I3C is a serial protocol standardized by the MIPI alliance.
+
+	  It's supposed to be backward compatible with I2C while providing
+	  support for high speed transfers and native interrupt support
+	  without the need for extra pins.
+
+	  The I3C protocol also standardizes the slave device types and is
+	  mainly designed to communicate with sensors.
+
+	  If you want I3C support, you should say Y here and also to the
+	  specific driver for your bus adapter(s) below.
+
+	  This I3C support can also be built as a module.  If so, the module
+	  will be called i3c.
+
+if I3C
+source "drivers/i3c/master/Kconfig"
+endif # I3C
diff --git a/drivers/i3c/Makefile b/drivers/i3c/Makefile
new file mode 100644
index 0000000..11982ef
--- /dev/null
+++ b/drivers/i3c/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+i3c-y				:= device.o master.o
+obj-$(CONFIG_I3C)		+= i3c.o
+obj-$(CONFIG_I3C)		+= master/
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
new file mode 100644
index 0000000..69cc040
--- /dev/null
+++ b/drivers/i3c/device.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "internals.h"
+
+/**
+ * i3c_device_do_priv_xfers() - do I3C SDR private transfers directed to a
+ *				specific device
+ *
+ * @dev: device with which the transfers should be done
+ * @xfers: array of transfers
+ * @nxfers: number of transfers
+ *
+ * Initiate one or several private SDR transfers with @dev.
+ *
+ * This function can sleep and thus cannot be called in atomic context.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+			     struct i3c_priv_xfer *xfers,
+			     int nxfers)
+{
+	int ret, i;
+
+	if (nxfers < 1)
+		return 0;
+
+	for (i = 0; i < nxfers; i++) {
+		if (!xfers[i].len || !xfers[i].data.in)
+			return -EINVAL;
+	}
+
+	i3c_bus_normaluse_lock(dev->bus);
+	ret = i3c_dev_do_priv_xfers_locked(dev->desc, xfers, nxfers);
+	i3c_bus_normaluse_unlock(dev->bus);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers);
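As a usage sketch (editorial, not part of this patch; demo_read_reg and the one-byte register layout are hypothetical), a client driver typically issues a write-then-read pair through this API:

	static int demo_read_reg(struct i3c_device *dev, u8 reg, u8 *val)
	{
		struct i3c_priv_xfer xfers[2] = {
			{ .rnw = false, .len = 1, .data.out = &reg },	/* write register address */
			{ .rnw = true, .len = 1, .data.in = val },	/* read one byte back */
		};

		return i3c_device_do_priv_xfers(dev, xfers, 2);
	}

Both transfers carry a non-zero len and a data pointer, satisfying the validation loop above.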
+
+/**
+ * i3c_device_get_info() - get I3C device information
+ *
+ * @dev: device we want information on
+ * @info: the information object to fill in
+ *
+ * Retrieve I3C dev info.
+ */
+void i3c_device_get_info(struct i3c_device *dev,
+			 struct i3c_device_info *info)
+{
+	if (!info)
+		return;
+
+	i3c_bus_normaluse_lock(dev->bus);
+	if (dev->desc)
+		*info = dev->desc->info;
+	i3c_bus_normaluse_unlock(dev->bus);
+}
+EXPORT_SYMBOL_GPL(i3c_device_get_info);
+
+/**
+ * i3c_device_disable_ibi() - Disable IBIs coming from a specific device
+ * @dev: device on which IBIs should be disabled
+ *
+ * This function disables IBIs coming from a specific device and waits for
+ * all pending IBIs to be processed.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_disable_ibi(struct i3c_device *dev)
+{
+	int ret = -ENOENT;
+
+	i3c_bus_normaluse_lock(dev->bus);
+	if (dev->desc) {
+		mutex_lock(&dev->desc->ibi_lock);
+		ret = i3c_dev_disable_ibi_locked(dev->desc);
+		mutex_unlock(&dev->desc->ibi_lock);
+	}
+	i3c_bus_normaluse_unlock(dev->bus);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_disable_ibi);
+
+/**
+ * i3c_device_enable_ibi() - Enable IBIs coming from a specific device
+ * @dev: device on which IBIs should be enabled
+ *
+ * This function enables IBIs coming from a specific device. It should be
+ * called on a device where i3c_device_request_ibi() has succeeded.
+ *
+ * Note that IBIs from this device might be received before this function
+ * returns to its caller.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_enable_ibi(struct i3c_device *dev)
+{
+	int ret = -ENOENT;
+
+	i3c_bus_normaluse_lock(dev->bus);
+	if (dev->desc) {
+		mutex_lock(&dev->desc->ibi_lock);
+		ret = i3c_dev_enable_ibi_locked(dev->desc);
+		mutex_unlock(&dev->desc->ibi_lock);
+	}
+	i3c_bus_normaluse_unlock(dev->bus);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_enable_ibi);
+
+/**
+ * i3c_device_request_ibi() - Request an IBI
+ * @dev: device for which we should enable IBIs
+ * @req: setup requested for this IBI
+ *
+ * This function is responsible for pre-allocating all resources needed to
+ * process IBIs coming from @dev. When this function returns, IBIs are still
+ * disabled and are not delivered until i3c_device_enable_ibi() is called.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_request_ibi(struct i3c_device *dev,
+			   const struct i3c_ibi_setup *req)
+{
+	int ret = -ENOENT;
+
+	if (!req->handler || !req->num_slots)
+		return -EINVAL;
+
+	i3c_bus_normaluse_lock(dev->bus);
+	if (dev->desc) {
+		mutex_lock(&dev->desc->ibi_lock);
+		ret = i3c_dev_request_ibi_locked(dev->desc, req);
+		mutex_unlock(&dev->desc->ibi_lock);
+	}
+	i3c_bus_normaluse_unlock(dev->bus);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_request_ibi);
+
+/**
+ * i3c_device_free_ibi() - Free all resources needed for IBI handling
+ * @dev: device on which you want to release IBI resources
+ *
+ * This function is responsible for de-allocating resources previously
+ * allocated by i3c_device_request_ibi(). It should be called after disabling
+ * IBIs with i3c_device_disable_ibi().
+ */
+void i3c_device_free_ibi(struct i3c_device *dev)
+{
+	i3c_bus_normaluse_lock(dev->bus);
+	if (dev->desc) {
+		mutex_lock(&dev->desc->ibi_lock);
+		i3c_dev_free_ibi_locked(dev->desc);
+		mutex_unlock(&dev->desc->ibi_lock);
+	}
+	i3c_bus_normaluse_unlock(dev->bus);
+}
+EXPORT_SYMBOL_GPL(i3c_device_free_ibi);
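A minimal sketch of the IBI lifecycle documented above (editorial; all demo_* names and the payload/slot sizes are invented):

	static void demo_ibi_handler(struct i3c_device *dev,
				     const struct i3c_ibi_payload *payload)
	{
		/* called for each IBI; payload->data / payload->len carry the data */
	}

	static int demo_setup_ibi(struct i3c_device *dev)
	{
		struct i3c_ibi_setup req = {
			.max_payload_len = 2,
			.num_slots = 10,
			.handler = demo_ibi_handler,
		};
		int ret;

		ret = i3c_device_request_ibi(dev, &req);	/* pre-allocate slots */
		if (ret)
			return ret;

		ret = i3c_device_enable_ibi(dev);	/* IBIs may arrive from here on */
		if (ret)
			i3c_device_free_ibi(dev);

		return ret;
	}

Teardown runs in the reverse order: i3c_device_disable_ibi(), then i3c_device_free_ibi().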
+
+/**
+ * i3cdev_to_dev() - Returns the device embedded in @i3cdev
+ * @i3cdev: I3C device
+ *
+ * Return: a pointer to a device object.
+ */
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev)
+{
+	return &i3cdev->dev;
+}
+EXPORT_SYMBOL_GPL(i3cdev_to_dev);
+
+/**
+ * dev_to_i3cdev() - Returns the I3C device containing @dev
+ * @dev: device object
+ *
+ * Return: a pointer to an I3C device object.
+ */
+struct i3c_device *dev_to_i3cdev(struct device *dev)
+{
+	return container_of(dev, struct i3c_device, dev);
+}
+EXPORT_SYMBOL_GPL(dev_to_i3cdev);
+
+/**
+ * i3c_driver_register_with_owner() - register an I3C device driver
+ *
+ * @drv: driver to register
+ * @owner: module that owns this driver
+ *
+ * Register @drv to the core.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_driver_register_with_owner(struct i3c_driver *drv, struct module *owner)
+{
+	drv->driver.owner = owner;
+	drv->driver.bus = &i3c_bus_type;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(i3c_driver_register_with_owner);
+
+/**
+ * i3c_driver_unregister() - unregister an I3C device driver
+ *
+ * @drv: driver to unregister
+ *
+ * Unregister @drv.
+ */
+void i3c_driver_unregister(struct i3c_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(i3c_driver_unregister);
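For completeness, a hypothetical client driver registering against this bus type could look as follows (a sketch assuming the I3C_DEVICE()/I3C_CLASS() id-table macros and an i3c_driver_register() convenience wrapper in the public header; every demo_* name is invented):

	static int demo_probe(struct i3c_device *i3cdev)
	{
		return 0;
	}

	static int demo_remove(struct i3c_device *i3cdev)
	{
		return 0;
	}

	static const struct i3c_device_id demo_ids[] = {
		I3C_DEVICE(0x123, 0x456, NULL),	/* manufacturer/part ID match */
		I3C_CLASS(0x42, NULL),		/* DCR fallback match */
		{ /* sentinel */ },
	};

	static struct i3c_driver demo_driver = {
		.driver = {
			.name = "demo-i3c",
		},
		.probe = demo_probe,
		.remove = demo_remove,
		.id_table = demo_ids,
	};

Registration then goes through i3c_driver_register(&demo_driver), which forwards THIS_MODULE to i3c_driver_register_with_owner().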
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
new file mode 100644
index 0000000..86b7b44
--- /dev/null
+++ b/drivers/i3c/internals.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_INTERNALS_H
+#define I3C_INTERNALS_H
+
+#include <linux/i3c/master.h>
+
+extern struct bus_type i3c_bus_type;
+
+void i3c_bus_normaluse_lock(struct i3c_bus *bus);
+void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
+
+int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+				 struct i3c_priv_xfer *xfers,
+				 int nxfers);
+int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev);
+int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev);
+int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+			       const struct i3c_ibi_setup *req);
+void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev);
+#endif /* I3C_INTERNALS_H */
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
new file mode 100644
index 0000000..2dc628d
--- /dev/null
+++ b/drivers/i3c/master.c
@@ -0,0 +1,2659 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "internals.h"
+
+static DEFINE_IDR(i3c_bus_idr);
+static DEFINE_MUTEX(i3c_core_lock);
+
+/**
+ * i3c_bus_maintenance_lock - Lock the bus for a maintenance operation
+ * @bus: I3C bus to take the lock on
+ *
+ * This function takes the bus lock so that no other operations can occur on
+ * the bus. This is needed for all kinds of bus maintenance operations, such as:
+ * - enabling/disabling slave events
+ * - re-triggering DAA
+ * - changing the dynamic address of a device
+ * - relinquishing mastership
+ * - ...
+ *
+ * The reason for this kind of locking is that we don't want drivers and core
+ * logic to rely on I3C device information that could be changed behind their
+ * back.
+ */
+static void i3c_bus_maintenance_lock(struct i3c_bus *bus)
+{
+	down_write(&bus->lock);
+}
+
+/**
+ * i3c_bus_maintenance_unlock - Release the bus lock after a maintenance
+ *			      operation
+ * @bus: I3C bus to release the lock on
+ *
+ * Should be called when the bus maintenance operation is done. See
+ * i3c_bus_maintenance_lock() for more details on what these maintenance
+ * operations are.
+ */
+static void i3c_bus_maintenance_unlock(struct i3c_bus *bus)
+{
+	up_write(&bus->lock);
+}
+
+/**
+ * i3c_bus_normaluse_lock - Lock the bus for a normal operation
+ * @bus: I3C bus to take the lock on
+ *
+ * This function takes the bus lock for any operation that is not a maintenance
+ * operation (see i3c_bus_maintenance_lock() for a non-exhaustive list of
+ * maintenance operations). Basically all communications with I3C devices are
+ * normal operations (HDR, SDR transfers or CCC commands that do not change bus
+ * state or I3C dynamic address).
+ *
+ * Note that this lock does not guarantee serialization of normal operations.
+ * In other words, transfer requests passed to the I3C master can be submitted
+ * in parallel and I3C master drivers have to use their own locking to make
+ * sure two different communications are not inter-mixed, or access to the
+ * output/input queue is not done while the engine is busy.
+ */
+void i3c_bus_normaluse_lock(struct i3c_bus *bus)
+{
+	down_read(&bus->lock);
+}
+
+/**
+ * i3c_bus_normaluse_unlock - Release the bus lock after a normal operation
+ * @bus: I3C bus to release the lock on
+ *
+ * Should be called when a normal operation is done. See
+ * i3c_bus_normaluse_lock() for more details on what these normal operations
+ * are.
+ */
+void i3c_bus_normaluse_unlock(struct i3c_bus *bus)
+{
+	up_read(&bus->lock);
+}
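To illustrate the caveat above about normal operations running in parallel (everything below is a hypothetical sketch, not an API this patch defines):

	struct demo_xfer {
		struct list_head node;
		/* ... transfer descriptor ... */
	};

	struct demo_master {
		struct i3c_master_controller base;
		spinlock_t xferqueue_lock;
		struct list_head xferqueue;
	};

	static void demo_queue_xfer(struct demo_master *m, struct demo_xfer *xfer)
	{
		unsigned long flags;

		/*
		 * Several normal operations can hold the bus lock in read
		 * mode at once, so the controller's queue needs its own lock.
		 */
		spin_lock_irqsave(&m->xferqueue_lock, flags);
		list_add_tail(&xfer->node, &m->xferqueue);
		spin_unlock_irqrestore(&m->xferqueue_lock, flags);
	}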
+
+static struct i3c_master_controller *dev_to_i3cmaster(struct device *dev)
+{
+	return container_of(dev, struct i3c_master_controller, dev);
+}
+
+static const struct device_type i3c_device_type;
+
+static struct i3c_bus *dev_to_i3cbus(struct device *dev)
+{
+	struct i3c_master_controller *master;
+
+	if (dev->type == &i3c_device_type)
+		return dev_to_i3cdev(dev)->bus;
+
+	master = dev_to_i3cmaster(dev);
+
+	return &master->bus;
+}
+
+static struct i3c_dev_desc *dev_to_i3cdesc(struct device *dev)
+{
+	struct i3c_master_controller *master;
+
+	if (dev->type == &i3c_device_type)
+		return dev_to_i3cdev(dev)->desc;
+
+	master = container_of(dev, struct i3c_master_controller, dev);
+
+	return master->this;
+}
+
+static ssize_t bcr_show(struct device *dev,
+			struct device_attribute *da,
+			char *buf)
+{
+	struct i3c_bus *bus = dev_to_i3cbus(dev);
+	struct i3c_dev_desc *desc;
+	ssize_t ret;
+
+	i3c_bus_normaluse_lock(bus);
+	desc = dev_to_i3cdesc(dev);
+	ret = sprintf(buf, "%x\n", desc->info.bcr);
+	i3c_bus_normaluse_unlock(bus);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(bcr);
+
+static ssize_t dcr_show(struct device *dev,
+			struct device_attribute *da,
+			char *buf)
+{
+	struct i3c_bus *bus = dev_to_i3cbus(dev);
+	struct i3c_dev_desc *desc;
+	ssize_t ret;
+
+	i3c_bus_normaluse_lock(bus);
+	desc = dev_to_i3cdesc(dev);
+	ret = sprintf(buf, "%x\n", desc->info.dcr);
+	i3c_bus_normaluse_unlock(bus);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(dcr);
+
+static ssize_t pid_show(struct device *dev,
+			struct device_attribute *da,
+			char *buf)
+{
+	struct i3c_bus *bus = dev_to_i3cbus(dev);
+	struct i3c_dev_desc *desc;
+	ssize_t ret;
+
+	i3c_bus_normaluse_lock(bus);
+	desc = dev_to_i3cdesc(dev);
+	ret = sprintf(buf, "%llx\n", desc->info.pid);
+	i3c_bus_normaluse_unlock(bus);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(pid);
+
+static ssize_t dynamic_address_show(struct device *dev,
+				    struct device_attribute *da,
+				    char *buf)
+{
+	struct i3c_bus *bus = dev_to_i3cbus(dev);
+	struct i3c_dev_desc *desc;
+	ssize_t ret;
+
+	i3c_bus_normaluse_lock(bus);
+	desc = dev_to_i3cdesc(dev);
+	ret = sprintf(buf, "%02x\n", desc->info.dyn_addr);
+	i3c_bus_normaluse_unlock(bus);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(dynamic_address);
+
+static const char * const hdrcap_strings[] = {
+	"hdr-ddr", "hdr-tsp", "hdr-tsl",
+};
+
+static ssize_t hdrcap_show(struct device *dev,
+			   struct device_attribute *da,
+			   char *buf)
+{
+	struct i3c_bus *bus = dev_to_i3cbus(dev);
+	struct i3c_dev_desc *desc;
+	ssize_t offset = 0, ret;
+	unsigned long caps;
+	int mode;
+
+	i3c_bus_normaluse_lock(bus);
+	desc = dev_to_i3cdesc(dev);
+	caps = desc->info.hdr_cap;
+	for_each_set_bit(mode, &caps, 8) {
+		if (mode >= ARRAY_SIZE(hdrcap_strings))
+			break;
+
+		if (!hdrcap_strings[mode])
+			continue;
+
+		ret = sprintf(buf + offset, offset ? " %s" : "%s",
+			      hdrcap_strings[mode]);
+		if (ret < 0)
+			goto out;
+
+		offset += ret;
+	}
+
+	ret = sprintf(buf + offset, "\n");
+	if (ret < 0)
+		goto out;
+
+	ret = offset + ret;
+
+out:
+	i3c_bus_normaluse_unlock(bus);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(hdrcap);
+
+static struct attribute *i3c_device_attrs[] = {
+	&dev_attr_bcr.attr,
+	&dev_attr_dcr.attr,
+	&dev_attr_pid.attr,
+	&dev_attr_dynamic_address.attr,
+	&dev_attr_hdrcap.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(i3c_device);
+
+static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+	struct i3c_device_info devinfo;
+	u16 manuf, part, ext;
+
+	i3c_device_get_info(i3cdev, &devinfo);
+	manuf = I3C_PID_MANUF_ID(devinfo.pid);
+	part = I3C_PID_PART_ID(devinfo.pid);
+	ext = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+	if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
+		return add_uevent_var(env, "MODALIAS=i3c:dcr%02Xmanuf%04X",
+				      devinfo.dcr, manuf);
+
+	return add_uevent_var(env,
+			      "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04xext%04x",
+			      devinfo.dcr, manuf, part, ext);
+}
+
+static const struct device_type i3c_device_type = {
+	.groups	= i3c_device_groups,
+	.uevent = i3c_device_uevent,
+};
+
+static const struct i3c_device_id *
+i3c_device_match_id(struct i3c_device *i3cdev,
+		    const struct i3c_device_id *id_table)
+{
+	struct i3c_device_info devinfo;
+	const struct i3c_device_id *id;
+
+	i3c_device_get_info(i3cdev, &devinfo);
+
+	/*
+	 * If the lower 32 bits of the provisional ID are just filled with a
+	 * random value, we can only fall back to matching on DCR info.
+	 */
+	if (!I3C_PID_RND_LOWER_32BITS(devinfo.pid)) {
+		u16 manuf = I3C_PID_MANUF_ID(devinfo.pid);
+		u16 part = I3C_PID_PART_ID(devinfo.pid);
+		u16 ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+		/* First try to match by manufacturer/part ID. */
+		for (id = id_table; id->match_flags != 0; id++) {
+			if ((id->match_flags & I3C_MATCH_MANUF_AND_PART) !=
+			    I3C_MATCH_MANUF_AND_PART)
+				continue;
+
+			if (manuf != id->manuf_id || part != id->part_id)
+				continue;
+
+			if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
+			    ext_info != id->extra_info)
+				continue;
+
+			return id;
+		}
+	}
+
+	/* Fallback to DCR match. */
+	for (id = id_table; id->match_flags != 0; id++) {
+		if ((id->match_flags & I3C_MATCH_DCR) &&
+		    id->dcr == devinfo.dcr)
+			return id;
+	}
+
+	return NULL;
+}
+
+static int i3c_device_match(struct device *dev, struct device_driver *drv)
+{
+	struct i3c_device *i3cdev;
+	struct i3c_driver *i3cdrv;
+
+	if (dev->type != &i3c_device_type)
+		return 0;
+
+	i3cdev = dev_to_i3cdev(dev);
+	i3cdrv = drv_to_i3cdrv(drv);
+	if (i3c_device_match_id(i3cdev, i3cdrv->id_table))
+		return 1;
+
+	return 0;
+}
+
+static int i3c_device_probe(struct device *dev)
+{
+	struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+	struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
+
+	return driver->probe(i3cdev);
+}
+
+static int i3c_device_remove(struct device *dev)
+{
+	struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+	struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
+	int ret;
+
+	ret = driver->remove(i3cdev);
+	if (ret)
+		return ret;
+
+	i3c_device_free_ibi(i3cdev);
+
+	return ret;
+}
+
+struct bus_type i3c_bus_type = {
+	.name = "i3c",
+	.match = i3c_device_match,
+	.probe = i3c_device_probe,
+	.remove = i3c_device_remove,
+};
+
+static enum i3c_addr_slot_status
+i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+{
+	/* status must be unsigned long: the shift below can be >= 32 bits */
+	unsigned long status;
+	int bitpos = addr * 2;
+
+	if (addr > I2C_MAX_ADDR)
+		return I3C_ADDR_SLOT_RSVD;
+
+	status = bus->addrslots[bitpos / BITS_PER_LONG];
+	status >>= bitpos % BITS_PER_LONG;
+
+	return status & I3C_ADDR_SLOT_STATUS_MASK;
+}
+
+static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
+					 enum i3c_addr_slot_status status)
+{
+	int bitpos = addr * 2;
+	unsigned long *ptr;
+
+	if (addr > I2C_MAX_ADDR)
+		return;
+
+	ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
+	*ptr &= ~(I3C_ADDR_SLOT_STATUS_MASK << (bitpos % BITS_PER_LONG));
+	*ptr |= status << (bitpos % BITS_PER_LONG);
+}
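Worked example of the packing used by the two helpers above (editorial): each address owns a 2-bit status field, so with BITS_PER_LONG == 64 the address 0x3A gives bitpos = 0x3A * 2 = 116, i.e. word addrslots[116 / 64] = addrslots[1] and shift 116 % 64 = 52; the status sits in bits 53:52 of that word, extracted with I3C_ADDR_SLOT_STATUS_MASK.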
+
+static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
+{
+	enum i3c_addr_slot_status status;
+
+	status = i3c_bus_get_addr_slot_status(bus, addr);
+
+	return status == I3C_ADDR_SLOT_FREE;
+}
+
+static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr)
+{
+	enum i3c_addr_slot_status status;
+	u8 addr;
+
+	for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
+		status = i3c_bus_get_addr_slot_status(bus, addr);
+		if (status == I3C_ADDR_SLOT_FREE)
+			return addr;
+	}
+
+	return -ENOMEM;
+}
+
+static void i3c_bus_init_addrslots(struct i3c_bus *bus)
+{
+	int i;
+
+	/* Addresses 0 to 7 are reserved. */
+	for (i = 0; i < 8; i++)
+		i3c_bus_set_addr_slot_status(bus, i, I3C_ADDR_SLOT_RSVD);
+
+	/*
+	 * Reserve broadcast address and all addresses that might collide
+	 * with the broadcast address when facing a single bit error.
+	 */
+	i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR,
+				     I3C_ADDR_SLOT_RSVD);
+	for (i = 0; i < 7; i++)
+		i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR ^ BIT(i),
+					     I3C_ADDR_SLOT_RSVD);
+}
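Concretely, besides addresses 0x00-0x07 this reserves the broadcast address 0x7E plus its seven single-bit-error neighbours 0x7F, 0x7C, 0x7A, 0x76, 0x6E, 0x5E and 0x3E.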
+
+static void i3c_bus_cleanup(struct i3c_bus *i3cbus)
+{
+	mutex_lock(&i3c_core_lock);
+	idr_remove(&i3c_bus_idr, i3cbus->id);
+	mutex_unlock(&i3c_core_lock);
+}
+
+static int i3c_bus_init(struct i3c_bus *i3cbus)
+{
+	int ret;
+
+	init_rwsem(&i3cbus->lock);
+	INIT_LIST_HEAD(&i3cbus->devs.i2c);
+	INIT_LIST_HEAD(&i3cbus->devs.i3c);
+	i3c_bus_init_addrslots(i3cbus);
+	i3cbus->mode = I3C_BUS_MODE_PURE;
+
+	mutex_lock(&i3c_core_lock);
+	ret = idr_alloc(&i3c_bus_idr, i3cbus, 0, 0, GFP_KERNEL);
+	mutex_unlock(&i3c_core_lock);
+
+	if (ret < 0)
+		return ret;
+
+	i3cbus->id = ret;
+
+	return 0;
+}
+
+static const char * const i3c_bus_mode_strings[] = {
+	[I3C_BUS_MODE_PURE] = "pure",
+	[I3C_BUS_MODE_MIXED_FAST] = "mixed-fast",
+	[I3C_BUS_MODE_MIXED_SLOW] = "mixed-slow",
+};
+
+static ssize_t mode_show(struct device *dev,
+			 struct device_attribute *da,
+			 char *buf)
+{
+	struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+	ssize_t ret;
+
+	i3c_bus_normaluse_lock(i3cbus);
+	if (i3cbus->mode < 0 ||
+	    i3cbus->mode >= ARRAY_SIZE(i3c_bus_mode_strings) ||
+	    !i3c_bus_mode_strings[i3cbus->mode])
+		ret = sprintf(buf, "unknown\n");
+	else
+		ret = sprintf(buf, "%s\n", i3c_bus_mode_strings[i3cbus->mode]);
+	i3c_bus_normaluse_unlock(i3cbus);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(mode);
+
+static ssize_t current_master_show(struct device *dev,
+				   struct device_attribute *da,
+				   char *buf)
+{
+	struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+	ssize_t ret;
+
+	i3c_bus_normaluse_lock(i3cbus);
+	ret = sprintf(buf, "%d-%llx\n", i3cbus->id,
+		      i3cbus->cur_master->info.pid);
+	i3c_bus_normaluse_unlock(i3cbus);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(current_master);
+
+static ssize_t i3c_scl_frequency_show(struct device *dev,
+				      struct device_attribute *da,
+				      char *buf)
+{
+	struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+	ssize_t ret;
+
+	i3c_bus_normaluse_lock(i3cbus);
+	ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i3c);
+	i3c_bus_normaluse_unlock(i3cbus);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(i3c_scl_frequency);
+
+static ssize_t i2c_scl_frequency_show(struct device *dev,
+				      struct device_attribute *da,
+				      char *buf)
+{
+	struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+	ssize_t ret;
+
+	i3c_bus_normaluse_lock(i3cbus);
+	ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i2c);
+	i3c_bus_normaluse_unlock(i3cbus);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(i2c_scl_frequency);
+
+static struct attribute *i3c_masterdev_attrs[] = {
+	&dev_attr_mode.attr,
+	&dev_attr_current_master.attr,
+	&dev_attr_i3c_scl_frequency.attr,
+	&dev_attr_i2c_scl_frequency.attr,
+	&dev_attr_bcr.attr,
+	&dev_attr_dcr.attr,
+	&dev_attr_pid.attr,
+	&dev_attr_dynamic_address.attr,
+	&dev_attr_hdrcap.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(i3c_masterdev);
+
+static void i3c_masterdev_release(struct device *dev)
+{
+	struct i3c_master_controller *master = dev_to_i3cmaster(dev);
+	struct i3c_bus *bus = dev_to_i3cbus(dev);
+
+	if (master->wq)
+		destroy_workqueue(master->wq);
+
+	WARN_ON(!list_empty(&bus->devs.i2c) || !list_empty(&bus->devs.i3c));
+	i3c_bus_cleanup(bus);
+
+	of_node_put(dev->of_node);
+}
+
+static const struct device_type i3c_masterdev_type = {
+	.groups	= i3c_masterdev_groups,
+};
+
+int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode)
+{
+	i3cbus->mode = mode;
+
+	if (!i3cbus->scl_rate.i3c)
+		i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+
+	if (!i3cbus->scl_rate.i2c) {
+		if (i3cbus->mode == I3C_BUS_MODE_MIXED_SLOW)
+			i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
+		else
+			i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+	}
+
+	/*
+	 * I3C/I2C frequency may have been overridden, check that user-provided
+	 * values are not exceeding max possible frequency.
+	 */
+	if (i3cbus->scl_rate.i3c > I3C_BUS_MAX_I3C_SCL_RATE ||
+	    i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_RATE)
+		return -EINVAL;
+
+	return 0;
+}
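In numbers (per the I3C_BUS_* constants this code relies on): a mixed-slow bus defaults the I2C rate to the Fast Mode rate (400 kHz) while the other modes default to Fast Mode Plus (1 MHz); the I3C rate defaults to the typical 12.5 MHz, and user overrides are rejected above 12.9 MHz for I3C or above FM+ for I2C.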
+
+static struct i3c_master_controller *
+i2c_adapter_to_i3c_master(struct i2c_adapter *adap)
+{
+	return container_of(adap, struct i3c_master_controller, i2c);
+}
+
+static struct i2c_adapter *
+i3c_master_to_i2c_adapter(struct i3c_master_controller *master)
+{
+	return &master->i2c;
+}
+
+static void i3c_master_free_i2c_dev(struct i2c_dev_desc *dev)
+{
+	kfree(dev);
+}
+
+static struct i2c_dev_desc *
+i3c_master_alloc_i2c_dev(struct i3c_master_controller *master,
+			 const struct i2c_dev_boardinfo *boardinfo)
+{
+	struct i2c_dev_desc *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	dev->common.master = master;
+	dev->boardinfo = boardinfo;
+
+	return dev;
+}
+
+static void *i3c_ccc_cmd_dest_init(struct i3c_ccc_cmd_dest *dest, u8 addr,
+				   u16 payloadlen)
+{
+	dest->addr = addr;
+	dest->payload.len = payloadlen;
+	if (payloadlen)
+		dest->payload.data = kzalloc(payloadlen, GFP_KERNEL);
+	else
+		dest->payload.data = NULL;
+
+	return dest->payload.data;
+}
+
+static void i3c_ccc_cmd_dest_cleanup(struct i3c_ccc_cmd_dest *dest)
+{
+	kfree(dest->payload.data);
+}
+
+static void i3c_ccc_cmd_init(struct i3c_ccc_cmd *cmd, bool rnw, u8 id,
+			     struct i3c_ccc_cmd_dest *dests,
+			     unsigned int ndests)
+{
+	cmd->rnw = rnw ? 1 : 0;
+	cmd->id = id;
+	cmd->dests = dests;
+	cmd->ndests = ndests;
+	cmd->err = I3C_ERROR_UNKNOWN;
+}
+
+static int i3c_master_send_ccc_cmd_locked(struct i3c_master_controller *master,
+					  struct i3c_ccc_cmd *cmd)
+{
+	int ret;
+
+	if (!cmd || !master)
+		return -EINVAL;
+
+	if (WARN_ON(master->init_done &&
+		    !rwsem_is_locked(&master->bus.lock)))
+		return -EINVAL;
+
+	if (!master->ops->send_ccc_cmd)
+		return -ENOTSUPP;
+
+	if ((cmd->id & I3C_CCC_DIRECT) && (!cmd->dests || !cmd->ndests))
+		return -EINVAL;
+
+	if (master->ops->supports_ccc_cmd &&
+	    !master->ops->supports_ccc_cmd(master, cmd))
+		return -ENOTSUPP;
+
+	ret = master->ops->send_ccc_cmd(master, cmd);
+	if (ret) {
+		if (cmd->err != I3C_ERROR_UNKNOWN)
+			return cmd->err;
+
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct i2c_dev_desc *
+i3c_master_find_i2c_dev_by_addr(const struct i3c_master_controller *master,
+				u16 addr)
+{
+	struct i2c_dev_desc *dev;
+
+	i3c_bus_for_each_i2cdev(&master->bus, dev) {
+		if (dev->boardinfo->base.addr == addr)
+			return dev;
+	}
+
+	return NULL;
+}
+
+/**
+ * i3c_master_get_free_addr() - get a free address on the bus
+ * @master: I3C master object
+ * @start_addr: where to start searching
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: the first free address starting at @start_addr (included) or -ENOMEM
+ * if there's no more address available.
+ */
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+			     u8 start_addr)
+{
+	return i3c_bus_get_free_addr(&master->bus, start_addr);
+}
+EXPORT_SYMBOL_GPL(i3c_master_get_free_addr);
+
+static void i3c_device_release(struct device *dev)
+{
+	struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+
+	WARN_ON(i3cdev->desc);
+
+	of_node_put(i3cdev->dev.of_node);
+	kfree(i3cdev);
+}
+
+static void i3c_master_free_i3c_dev(struct i3c_dev_desc *dev)
+{
+	kfree(dev);
+}
+
+static struct i3c_dev_desc *
+i3c_master_alloc_i3c_dev(struct i3c_master_controller *master,
+			 const struct i3c_device_info *info)
+{
+	struct i3c_dev_desc *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	dev->common.master = master;
+	dev->info = *info;
+	mutex_init(&dev->ibi_lock);
+
+	return dev;
+}
+
+static int i3c_master_rstdaa_locked(struct i3c_master_controller *master,
+				    u8 addr)
+{
+	enum i3c_addr_slot_status addrstat;
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_ccc_cmd cmd;
+	int ret;
+
+	if (!master)
+		return -EINVAL;
+
+	addrstat = i3c_bus_get_addr_slot_status(&master->bus, addr);
+	if (addr != I3C_BROADCAST_ADDR && addrstat != I3C_ADDR_SLOT_I3C_DEV)
+		return -EINVAL;
+
+	i3c_ccc_cmd_dest_init(&dest, addr, 0);
+	i3c_ccc_cmd_init(&cmd, false,
+			 I3C_CCC_RSTDAA(addr == I3C_BROADCAST_ADDR),
+			 &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+
+/**
+ * i3c_master_entdaa_locked() - start a DAA (Dynamic Address Assignment)
+ *				procedure
+ * @master: master used to send frames on the bus
+ *
+ * Send a ENTDAA CCC command to start a DAA procedure.
+ *
+ * Note that this function only sends the ENTDAA CCC command; all the logic
+ * behind dynamic address assignment has to be handled in the I3C master
+ * driver.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_entdaa_locked(struct i3c_master_controller *master)
+{
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_ccc_cmd cmd;
+	int ret;
+
+	i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 0);
+	i3c_ccc_cmd_init(&cmd, false, I3C_CCC_ENTDAA, &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_entdaa_locked);
+
+static int i3c_master_enec_disec_locked(struct i3c_master_controller *master,
+					u8 addr, bool enable, u8 evts)
+{
+	struct i3c_ccc_events *events;
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_ccc_cmd cmd;
+	int ret;
+
+	events = i3c_ccc_cmd_dest_init(&dest, addr, sizeof(*events));
+	if (!events)
+		return -ENOMEM;
+
+	events->events = evts;
+	i3c_ccc_cmd_init(&cmd, false,
+			 enable ?
+			 I3C_CCC_ENEC(addr == I3C_BROADCAST_ADDR) :
+			 I3C_CCC_DISEC(addr == I3C_BROADCAST_ADDR),
+			 &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+
+/**
+ * i3c_master_disec_locked() - send a DISEC CCC command
+ * @master: master used to send frames on the bus
+ * @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
+ * @evts: events to disable
+ *
+ * Send a DISEC CCC command to disable some or all events coming from a
+ * specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+			    u8 evts)
+{
+	return i3c_master_enec_disec_locked(master, addr, false, evts);
+}
+EXPORT_SYMBOL_GPL(i3c_master_disec_locked);
+
+/**
+ * i3c_master_enec_locked() - send an ENEC CCC command
+ * @master: master used to send frames on the bus
+ * @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
+ * @evts: events to enable
+ *
+ * Sends an ENEC CCC command to enable some or all events coming from a
+ * specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+			   u8 evts)
+{
+	return i3c_master_enec_disec_locked(master, addr, true, evts);
+}
+EXPORT_SYMBOL_GPL(i3c_master_enec_locked);
+
+/**
+ * i3c_master_defslvs_locked() - send a DEFSLVS CCC command
+ * @master: master used to send frames on the bus
+ *
+ * Send a DEFSLVS CCC command containing all the devices known to the @master.
+ * This is useful when you have secondary masters on the bus to propagate
+ * device information.
+ *
+ * This should be called after all I3C devices have been discovered (in other
+ * words, after the DAA procedure has finished) and instantiated in
+ * &i3c_master_controller_ops->bus_init().
+ * It should also be called if a master ACKed a Hot-Join request and assigned
+ * a dynamic address to the device joining the bus.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_defslvs_locked(struct i3c_master_controller *master)
+{
+	struct i3c_ccc_defslvs *defslvs;
+	struct i3c_ccc_dev_desc *desc;
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_dev_desc *i3cdev;
+	struct i2c_dev_desc *i2cdev;
+	struct i3c_ccc_cmd cmd;
+	struct i3c_bus *bus;
+	bool send = false;
+	int ndevs = 0, ret;
+
+	if (!master)
+		return -EINVAL;
+
+	bus = i3c_master_get_bus(master);
+	i3c_bus_for_each_i3cdev(bus, i3cdev) {
+		ndevs++;
+
+		if (i3cdev == master->this)
+			continue;
+
+		if (I3C_BCR_DEVICE_ROLE(i3cdev->info.bcr) ==
+		    I3C_BCR_I3C_MASTER)
+			send = true;
+	}
+
+	/* No other master on the bus, skip DEFSLVS. */
+	if (!send)
+		return 0;
+
+	i3c_bus_for_each_i2cdev(bus, i2cdev)
+		ndevs++;
+
+	defslvs = i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR,
+					sizeof(*defslvs) +
+					((ndevs - 1) *
+					 sizeof(struct i3c_ccc_dev_desc)));
+	if (!defslvs)
+		return -ENOMEM;
+
+	defslvs->count = ndevs;
+	defslvs->master.bcr = master->this->info.bcr;
+	defslvs->master.dcr = master->this->info.dcr;
+	defslvs->master.dyn_addr = master->this->info.dyn_addr << 1;
+	defslvs->master.static_addr = I3C_BROADCAST_ADDR << 1;
+
+	desc = defslvs->slaves;
+	i3c_bus_for_each_i2cdev(bus, i2cdev) {
+		desc->lvr = i2cdev->boardinfo->lvr;
+		desc->static_addr = i2cdev->boardinfo->base.addr << 1;
+		desc++;
+	}
+
+	i3c_bus_for_each_i3cdev(bus, i3cdev) {
+		/* Skip the I3C dev representing this master. */
+		if (i3cdev == master->this)
+			continue;
+
+		desc->bcr = i3cdev->info.bcr;
+		desc->dcr = i3cdev->info.dcr;
+		desc->dyn_addr = i3cdev->info.dyn_addr << 1;
+		desc->static_addr = i3cdev->info.static_addr << 1;
+		desc++;
+	}
+
+	i3c_ccc_cmd_init(&cmd, false, I3C_CCC_DEFSLVS, &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_defslvs_locked);
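Sizing note on the payload computed above: sizeof(*defslvs) already embeds one struct i3c_ccc_dev_desc for the master itself (defslvs->master), which is why only ndevs - 1 extra descriptors are appended even though ndevs counts every I3C device, master included, plus all I2C devices.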
+
+static int i3c_master_setda_locked(struct i3c_master_controller *master,
+				   u8 oldaddr, u8 newaddr, bool setdasa)
+{
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_ccc_setda *setda;
+	struct i3c_ccc_cmd cmd;
+	int ret;
+
+	if (!oldaddr || !newaddr)
+		return -EINVAL;
+
+	setda = i3c_ccc_cmd_dest_init(&dest, oldaddr, sizeof(*setda));
+	if (!setda)
+		return -ENOMEM;
+
+	setda->addr = newaddr << 1;
+	i3c_ccc_cmd_init(&cmd, false,
+			 setdasa ? I3C_CCC_SETDASA : I3C_CCC_SETNEWDA,
+			 &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+
+static int i3c_master_setdasa_locked(struct i3c_master_controller *master,
+				     u8 static_addr, u8 dyn_addr)
+{
+	return i3c_master_setda_locked(master, static_addr, dyn_addr, true);
+}
+
+static int i3c_master_setnewda_locked(struct i3c_master_controller *master,
+				      u8 oldaddr, u8 newaddr)
+{
+	return i3c_master_setda_locked(master, oldaddr, newaddr, false);
+}
+
+static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
+				    struct i3c_device_info *info)
+{
+	struct i3c_ccc_cmd_dest dest;
+	unsigned int expected_len;
+	struct i3c_ccc_mrl *mrl;
+	struct i3c_ccc_cmd cmd;
+	int ret;
+
+	mrl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mrl));
+	if (!mrl)
+		return -ENOMEM;
+
+	/*
+	 * When the device does not have IBI payload GETMRL only returns 2
+	 * bytes of data.
+	 */
+	if (!(info->bcr & I3C_BCR_IBI_PAYLOAD))
+		dest.payload.len -= 1;
+
+	expected_len = dest.payload.len;
+	i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMRL, &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	if (ret)
+		goto out;
+
+	if (dest.payload.len != expected_len) {
+		ret = -EIO;
+		goto out;
+	}
+
+	info->max_read_len = be16_to_cpu(mrl->read_len);
+
+	if (info->bcr & I3C_BCR_IBI_PAYLOAD)
+		info->max_ibi_len = mrl->ibi_len;
+
+out:
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+
+static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
+				    struct i3c_device_info *info)
+{
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_ccc_mwl *mwl;
+	struct i3c_ccc_cmd cmd;
+	int ret;
+
+	mwl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mwl));
+	if (!mwl)
+		return -ENOMEM;
+
+	i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMWL, &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	if (ret)
+		goto out;
+
+	if (dest.payload.len != sizeof(*mwl)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	info->max_write_len = be16_to_cpu(mwl->len);
+
+out:
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+
+static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
+				     struct i3c_device_info *info)
+{
+	struct i3c_ccc_getmxds *getmaxds;
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_ccc_cmd cmd;
+	int ret;
+
+	getmaxds = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
+					 sizeof(*getmaxds));
+	if (!getmaxds)
+		return -ENOMEM;
+
+	i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	if (ret)
+		goto out;
+
+	if (dest.payload.len != 2 && dest.payload.len != 5) {
+		ret = -EIO;
+		goto out;
+	}
+
+	info->max_read_ds = getmaxds->maxrd;
+	info->max_write_ds = getmaxds->maxwr;
+	if (dest.payload.len == 5)
+		info->max_read_turnaround = getmaxds->maxrdturn[0] |
+					    ((u32)getmaxds->maxrdturn[1] << 8) |
+					    ((u32)getmaxds->maxrdturn[2] << 16);
+
+out:
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+
+static int i3c_master_gethdrcap_locked(struct i3c_master_controller *master,
+				       struct i3c_device_info *info)
+{
+	struct i3c_ccc_gethdrcap *gethdrcap;
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_ccc_cmd cmd;
+	int ret;
+
+	gethdrcap = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
+					  sizeof(*gethdrcap));
+	if (!gethdrcap)
+		return -ENOMEM;
+
+	i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETHDRCAP, &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	if (ret)
+		goto out;
+
+	if (dest.payload.len != 1) {
+		ret = -EIO;
+		goto out;
+	}
+
+	info->hdr_cap = gethdrcap->modes;
+
+out:
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+
+static int i3c_master_getpid_locked(struct i3c_master_controller *master,
+				    struct i3c_device_info *info)
+{
+	struct i3c_ccc_getpid *getpid;
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_ccc_cmd cmd;
+	int ret, i;
+
+	getpid = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getpid));
+	if (!getpid)
+		return -ENOMEM;
+
+	i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETPID, &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	if (ret)
+		goto out;
+
+	info->pid = 0;
+	for (i = 0; i < sizeof(getpid->pid); i++) {
+		int sft = (sizeof(getpid->pid) - i - 1) * 8;
+
+		info->pid |= (u64)getpid->pid[i] << sft;
+	}
+
+out:
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+
+static int i3c_master_getbcr_locked(struct i3c_master_controller *master,
+				    struct i3c_device_info *info)
+{
+	struct i3c_ccc_getbcr *getbcr;
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_ccc_cmd cmd;
+	int ret;
+
+	getbcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getbcr));
+	if (!getbcr)
+		return -ENOMEM;
+
+	i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETBCR, &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	if (ret)
+		goto out;
+
+	info->bcr = getbcr->bcr;
+
+out:
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+
+static int i3c_master_getdcr_locked(struct i3c_master_controller *master,
+				    struct i3c_device_info *info)
+{
+	struct i3c_ccc_getdcr *getdcr;
+	struct i3c_ccc_cmd_dest dest;
+	struct i3c_ccc_cmd cmd;
+	int ret;
+
+	getdcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getdcr));
+	if (!getdcr)
+		return -ENOMEM;
+
+	i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETDCR, &dest, 1);
+	ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+	if (ret)
+		goto out;
+
+	info->dcr = getdcr->dcr;
+
+out:
+	i3c_ccc_cmd_dest_cleanup(&dest);
+
+	return ret;
+}
+
+static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+	enum i3c_addr_slot_status slot_status;
+	int ret;
+
+	if (!dev->info.dyn_addr)
+		return -EINVAL;
+
+	slot_status = i3c_bus_get_addr_slot_status(&master->bus,
+						   dev->info.dyn_addr);
+	if (slot_status == I3C_ADDR_SLOT_RSVD ||
+	    slot_status == I3C_ADDR_SLOT_I2C_DEV)
+		return -EINVAL;
+
+	ret = i3c_master_getpid_locked(master, &dev->info);
+	if (ret)
+		return ret;
+
+	ret = i3c_master_getbcr_locked(master, &dev->info);
+	if (ret)
+		return ret;
+
+	ret = i3c_master_getdcr_locked(master, &dev->info);
+	if (ret)
+		return ret;
+
+	if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM) {
+		ret = i3c_master_getmxds_locked(master, &dev->info);
+		if (ret)
+			return ret;
+	}
+
+	if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+		dev->info.max_ibi_len = 1;
+
+	i3c_master_getmrl_locked(master, &dev->info);
+	i3c_master_getmwl_locked(master, &dev->info);
+
+	if (dev->info.bcr & I3C_BCR_HDR_CAP) {
+		ret = i3c_master_gethdrcap_locked(master, &dev->info);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void i3c_master_put_i3c_addrs(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+	if (dev->info.static_addr)
+		i3c_bus_set_addr_slot_status(&master->bus,
+					     dev->info.static_addr,
+					     I3C_ADDR_SLOT_FREE);
+
+	if (dev->info.dyn_addr)
+		i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+					     I3C_ADDR_SLOT_FREE);
+
+	if (dev->boardinfo && dev->boardinfo->init_dyn_addr)
+		i3c_bus_set_addr_slot_status(&master->bus,
+					     dev->boardinfo->init_dyn_addr,
+					     I3C_ADDR_SLOT_FREE);
+}
+
+static int i3c_master_get_i3c_addrs(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+	enum i3c_addr_slot_status status;
+
+	if (!dev->info.static_addr && !dev->info.dyn_addr)
+		return 0;
+
+	if (dev->info.static_addr) {
+		status = i3c_bus_get_addr_slot_status(&master->bus,
+						      dev->info.static_addr);
+		if (status != I3C_ADDR_SLOT_FREE)
+			return -EBUSY;
+
+		i3c_bus_set_addr_slot_status(&master->bus,
+					     dev->info.static_addr,
+					     I3C_ADDR_SLOT_I3C_DEV);
+	}
+
+	/*
+	 * ->init_dyn_addr should have been reserved before that, so, if we're
+	 * trying to apply a pre-reserved dynamic address, we should not try
+	 * to reserve the address slot a second time.
+	 */
+	if (dev->info.dyn_addr &&
+	    (!dev->boardinfo ||
+	     dev->boardinfo->init_dyn_addr != dev->info.dyn_addr)) {
+		status = i3c_bus_get_addr_slot_status(&master->bus,
+						      dev->info.dyn_addr);
+		if (status != I3C_ADDR_SLOT_FREE)
+			goto err_release_static_addr;
+
+		i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+					     I3C_ADDR_SLOT_I3C_DEV);
+	}
+
+	return 0;
+
+err_release_static_addr:
+	if (dev->info.static_addr)
+		i3c_bus_set_addr_slot_status(&master->bus,
+					     dev->info.static_addr,
+					     I3C_ADDR_SLOT_FREE);
+
+	return -EBUSY;
+}
+
+static int i3c_master_attach_i3c_dev(struct i3c_master_controller *master,
+				     struct i3c_dev_desc *dev)
+{
+	int ret;
+
+	/*
+	 * We don't attach devices to the controller until they are
+	 * addressable on the bus.
+	 */
+	if (!dev->info.static_addr && !dev->info.dyn_addr)
+		return 0;
+
+	ret = i3c_master_get_i3c_addrs(dev);
+	if (ret)
+		return ret;
+
+	/* Do not attach the master device itself. */
+	if (master->this != dev && master->ops->attach_i3c_dev) {
+		ret = master->ops->attach_i3c_dev(dev);
+		if (ret) {
+			i3c_master_put_i3c_addrs(dev);
+			return ret;
+		}
+	}
+
+	list_add_tail(&dev->common.node, &master->bus.devs.i3c);
+
+	return 0;
+}
+
+static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+				       u8 old_dyn_addr)
+{
+	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+	enum i3c_addr_slot_status status;
+	int ret;
+
+	if (dev->info.dyn_addr != old_dyn_addr) {
+		status = i3c_bus_get_addr_slot_status(&master->bus,
+						      dev->info.dyn_addr);
+		if (status != I3C_ADDR_SLOT_FREE)
+			return -EBUSY;
+		i3c_bus_set_addr_slot_status(&master->bus,
+					     dev->info.dyn_addr,
+					     I3C_ADDR_SLOT_I3C_DEV);
+	}
+
+	if (master->ops->reattach_i3c_dev) {
+		ret = master->ops->reattach_i3c_dev(dev, old_dyn_addr);
+		if (ret) {
+			i3c_master_put_i3c_addrs(dev);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+	/* Do not detach the master device itself. */
+	if (master->this != dev && master->ops->detach_i3c_dev)
+		master->ops->detach_i3c_dev(dev);
+
+	i3c_master_put_i3c_addrs(dev);
+	list_del(&dev->common.node);
+}
+
+static int i3c_master_attach_i2c_dev(struct i3c_master_controller *master,
+				     struct i2c_dev_desc *dev)
+{
+	int ret;
+
+	if (master->ops->attach_i2c_dev) {
+		ret = master->ops->attach_i2c_dev(dev);
+		if (ret)
+			return ret;
+	}
+
+	list_add_tail(&dev->common.node, &master->bus.devs.i2c);
+
+	return 0;
+}
+
+static void i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+	struct i3c_master_controller *master = i2c_dev_get_master(dev);
+
+	list_del(&dev->common.node);
+
+	if (master->ops->detach_i2c_dev)
+		master->ops->detach_i2c_dev(dev);
+}
+
+static void i3c_master_pre_assign_dyn_addr(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+	int ret;
+
+	if (!dev->boardinfo || !dev->boardinfo->init_dyn_addr ||
+	    !dev->boardinfo->static_addr)
+		return;
+
+	ret = i3c_master_setdasa_locked(master, dev->info.static_addr,
+					dev->boardinfo->init_dyn_addr);
+	if (ret)
+		return;
+
+	dev->info.dyn_addr = dev->boardinfo->init_dyn_addr;
+	ret = i3c_master_reattach_i3c_dev(dev, 0);
+	if (ret)
+		goto err_rstdaa;
+
+	ret = i3c_master_retrieve_dev_info(dev);
+	if (ret)
+		goto err_rstdaa;
+
+	return;
+
+err_rstdaa:
+	i3c_master_rstdaa_locked(master, dev->boardinfo->init_dyn_addr);
+}
+
+static void
+i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+{
+	struct i3c_dev_desc *desc;
+	int ret;
+
+	if (!master->init_done)
+		return;
+
+	i3c_bus_for_each_i3cdev(&master->bus, desc) {
+		if (desc->dev || !desc->info.dyn_addr || desc == master->this)
+			continue;
+
+		desc->dev = kzalloc(sizeof(*desc->dev), GFP_KERNEL);
+		if (!desc->dev)
+			continue;
+
+		desc->dev->bus = &master->bus;
+		desc->dev->desc = desc;
+		desc->dev->dev.parent = &master->dev;
+		desc->dev->dev.type = &i3c_device_type;
+		desc->dev->dev.bus = &i3c_bus_type;
+		desc->dev->dev.release = i3c_device_release;
+		dev_set_name(&desc->dev->dev, "%d-%llx", master->bus.id,
+			     desc->info.pid);
+
+		if (desc->boardinfo)
+			desc->dev->dev.of_node = desc->boardinfo->of_node;
+
+		ret = device_register(&desc->dev->dev);
+		if (ret)
+			dev_err(&master->dev,
+				"Failed to add I3C device (err = %d)\n", ret);
+	}
+}
+
+/**
+ * i3c_master_do_daa() - do a DAA (Dynamic Address Assignment)
+ * @master: master doing the DAA
+ *
+ * This function asks the controller driver to execute the DAA procedure
+ * through &i3c_master_controller_ops->do_daa(), and then registers all I3C
+ * devices that were assigned a dynamic address during that procedure.
+ *
+ * The bus lock is taken internally (in maintenance mode for the DAA itself,
+ * then in normal mode to register the new devices), so the caller must not
+ * hold it.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_do_daa(struct i3c_master_controller *master)
+{
+	int ret;
+
+	i3c_bus_maintenance_lock(&master->bus);
+	ret = master->ops->do_daa(master);
+	i3c_bus_maintenance_unlock(&master->bus);
+
+	if (ret)
+		return ret;
+
+	i3c_bus_normaluse_lock(&master->bus);
+	i3c_master_register_new_i3c_devs(master);
+	i3c_bus_normaluse_unlock(&master->bus);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_master_do_daa);
+
+/**
+ * i3c_master_set_info() - set master device information
+ * @master: master used to send frames on the bus
+ * @info: I3C device information
+ *
+ * Set master device info. This should be called from
+ * &i3c_master_controller_ops->bus_init().
+ *
+ * Not all &i3c_device_info fields are meaningful for a master device.
+ * Here is a list of fields that should be properly filled:
+ *
+ * - &i3c_device_info->dyn_addr
+ * - &i3c_device_info->bcr
+ * - &i3c_device_info->dcr
+ * - &i3c_device_info->pid
+ * - &i3c_device_info->hdr_cap if %I3C_BCR_HDR_CAP bit is set in
+ *   &i3c_device_info->bcr
+ *
+ * This function must be called with the bus lock held in maintenance mode.
+ *
+ * Return: 0 if @info contains valid information (not every piece of
+ * information can be checked, but we can at least make sure @info->dyn_addr
+ * and @info->bcr are correct), a negative error code otherwise.
+ */
+int i3c_master_set_info(struct i3c_master_controller *master,
+			const struct i3c_device_info *info)
+{
+	struct i3c_dev_desc *i3cdev;
+	int ret;
+
+	if (!i3c_bus_dev_addr_is_avail(&master->bus, info->dyn_addr))
+		return -EINVAL;
+
+	if (I3C_BCR_DEVICE_ROLE(info->bcr) == I3C_BCR_I3C_MASTER &&
+	    master->secondary)
+		return -EINVAL;
+
+	if (master->this)
+		return -EINVAL;
+
+	i3cdev = i3c_master_alloc_i3c_dev(master, info);
+	if (IS_ERR(i3cdev))
+		return PTR_ERR(i3cdev);
+
+	master->this = i3cdev;
+	master->bus.cur_master = master->this;
+
+	ret = i3c_master_attach_i3c_dev(master, i3cdev);
+	if (ret)
+		goto err_free_dev;
+
+	return 0;
+
+err_free_dev:
+	i3c_master_free_i3c_dev(i3cdev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_set_info);
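+
+/*
+ * A minimal ->bus_init() sketch illustrating the i3c_master_set_info()
+ * contract documented above, assuming the core's i3c_master_get_free_addr()
+ * helper. The foo_* names and FOO_MASTER_PID are hypothetical, and the
+ * block is intentionally compiled out.
+ */
+#if 0
+static int foo_master_bus_init(struct i3c_master_controller *m)
+{
+	struct i3c_device_info info = { };
+	u8 addr;
+
+	/* Pick a free dynamic address for the master itself. */
+	addr = i3c_master_get_free_addr(m, 0);
+	if (!addr)
+		return -ENOSPC;
+
+	/* Fill the fields listed in the kernel-doc above. */
+	info.dyn_addr = addr;
+	info.bcr = I3C_BCR_I3C_MASTER;	/* advertise the master role */
+	info.dcr = 0;			/* generic device */
+	info.pid = FOO_MASTER_PID;	/* hypothetical 48-bit provisional ID */
+
+	return i3c_master_set_info(m, &info);
+}
+#endif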
+
+static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
+{
+	struct i3c_dev_desc *i3cdev, *i3ctmp;
+	struct i2c_dev_desc *i2cdev, *i2ctmp;
+
+	list_for_each_entry_safe(i3cdev, i3ctmp, &master->bus.devs.i3c,
+				 common.node) {
+		i3c_master_detach_i3c_dev(i3cdev);
+
+		if (i3cdev->boardinfo && i3cdev->boardinfo->init_dyn_addr)
+			i3c_bus_set_addr_slot_status(&master->bus,
+					i3cdev->boardinfo->init_dyn_addr,
+					I3C_ADDR_SLOT_FREE);
+
+		i3c_master_free_i3c_dev(i3cdev);
+	}
+
+	list_for_each_entry_safe(i2cdev, i2ctmp, &master->bus.devs.i2c,
+				 common.node) {
+		i3c_master_detach_i2c_dev(i2cdev);
+		i3c_bus_set_addr_slot_status(&master->bus,
+					i2cdev->boardinfo->base.addr,
+					I3C_ADDR_SLOT_FREE);
+		i3c_master_free_i2c_dev(i2cdev);
+	}
+}
+
+/**
+ * i3c_master_bus_init() - initialize an I3C bus
+ * @master: main master initializing the bus
+ *
+ * This function follows the initialization steps described in the I3C
+ * specification:
+ *
+ * 1. Attach I2C and statically defined I3C devs to the master so that the
+ *    master can fill its internal device table appropriately
+ *
+ * 2. Call &i3c_master_controller_ops->bus_init() method to initialize
+ *    the master controller. That's usually where the bus mode is selected
+ *    (pure bus or mixed fast/slow bus)
+ *
+ * 3. Instruct all devices on the bus to drop their dynamic address. This is
+ *    particularly important when the bus was previously configured by someone
+ *    else (for example the bootloader)
+ *
+ * 4. Disable all slave events.
+ *
+ * 5. Pre-assign dynamic addresses requested by the FW with SETDASA for I3C
+ *    devices that have a static address
+ *
+ * 6. Do a DAA (Dynamic Address Assignment) to assign dynamic addresses to all
+ *    remaining I3C devices
+ *
+ * Once this is done, all I3C and I2C devices should be usable.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int i3c_master_bus_init(struct i3c_master_controller *master)
+{
+	enum i3c_addr_slot_status status;
+	struct i2c_dev_boardinfo *i2cboardinfo;
+	struct i3c_dev_boardinfo *i3cboardinfo;
+	struct i3c_dev_desc *i3cdev;
+	struct i2c_dev_desc *i2cdev;
+	int ret;
+
+	/*
+	 * First attach all devices with static definitions provided by the
+	 * FW.
+	 */
+	list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
+		status = i3c_bus_get_addr_slot_status(&master->bus,
+						      i2cboardinfo->base.addr);
+		if (status != I3C_ADDR_SLOT_FREE) {
+			ret = -EBUSY;
+			goto err_detach_devs;
+		}
+
+		i3c_bus_set_addr_slot_status(&master->bus,
+					     i2cboardinfo->base.addr,
+					     I3C_ADDR_SLOT_I2C_DEV);
+
+		i2cdev = i3c_master_alloc_i2c_dev(master, i2cboardinfo);
+		if (IS_ERR(i2cdev)) {
+			ret = PTR_ERR(i2cdev);
+			goto err_detach_devs;
+		}
+
+		ret = i3c_master_attach_i2c_dev(master, i2cdev);
+		if (ret) {
+			i3c_master_free_i2c_dev(i2cdev);
+			goto err_detach_devs;
+		}
+	}
+	list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
+		struct i3c_device_info info = {
+			.static_addr = i3cboardinfo->static_addr,
+		};
+
+		if (i3cboardinfo->init_dyn_addr) {
+			status = i3c_bus_get_addr_slot_status(&master->bus,
+						i3cboardinfo->init_dyn_addr);
+			if (status != I3C_ADDR_SLOT_FREE) {
+				ret = -EBUSY;
+				goto err_detach_devs;
+			}
+		}
+
+		i3cdev = i3c_master_alloc_i3c_dev(master, &info);
+		if (IS_ERR(i3cdev)) {
+			ret = PTR_ERR(i3cdev);
+			goto err_detach_devs;
+		}
+
+		i3cdev->boardinfo = i3cboardinfo;
+
+		ret = i3c_master_attach_i3c_dev(master, i3cdev);
+		if (ret) {
+			i3c_master_free_i3c_dev(i3cdev);
+			goto err_detach_devs;
+		}
+	}
+
+	/*
+	 * Now execute the controller specific ->bus_init() routine, which
+	 * might configure its internal logic to match the bus limitations.
+	 */
+	ret = master->ops->bus_init(master);
+	if (ret)
+		goto err_detach_devs;
+
+	/*
+	 * The master device should have been instantiated in ->bus_init(),
+	 * complain if this was not the case.
+	 */
+	if (!master->this) {
+		dev_err(&master->dev,
+			"master_set_info() was not called in ->bus_init()\n");
+		ret = -EINVAL;
+		goto err_bus_cleanup;
+	}
+
+	/*
+	 * Reset all dynamic address that may have been assigned before
+	 * (assigned by the bootloader for example).
+	 */
+	ret = i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
+	if (ret && ret != I3C_ERROR_M2)
+		goto err_bus_cleanup;
+
+	/* Disable all slave events before starting DAA. */
+	ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
+				      I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
+				      I3C_CCC_EVENT_HJ);
+	if (ret && ret != I3C_ERROR_M2)
+		goto err_bus_cleanup;
+
+	/*
+	 * Pre-assign dynamic address and retrieve device information if
+	 * needed.
+	 */
+	i3c_bus_for_each_i3cdev(&master->bus, i3cdev)
+		i3c_master_pre_assign_dyn_addr(i3cdev);
+
+	ret = i3c_master_do_daa(master);
+	if (ret)
+		goto err_rstdaa;
+
+	return 0;
+
+err_rstdaa:
+	i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
+
+err_bus_cleanup:
+	if (master->ops->bus_cleanup)
+		master->ops->bus_cleanup(master);
+
+err_detach_devs:
+	i3c_master_detach_free_devs(master);
+
+	return ret;
+}
+
+static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
+{
+	if (master->ops->bus_cleanup)
+		master->ops->bus_cleanup(master);
+
+	i3c_master_detach_free_devs(master);
+}
+
+static struct i3c_dev_desc *
+i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
+{
+	struct i3c_master_controller *master = refdev->common.master;
+	struct i3c_dev_desc *i3cdev;
+
+	i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+		if (i3cdev != refdev && i3cdev->info.pid == refdev->info.pid)
+			return i3cdev;
+	}
+
+	return NULL;
+}
+
+/**
+ * i3c_master_add_i3c_dev_locked() - add an I3C slave to the bus
+ * @master: master used to send frames on the bus
+ * @addr: I3C slave dynamic address assigned to the device
+ *
+ * This function instantiates an I3C device object and adds it to the I3C
+ * device list. All device information is automatically retrieved using
+ * standard CCC commands.
+ *
+ * The master can then attach private data to the instantiated device using
+ * i3c_dev_set_master_data().
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+				  u8 addr)
+{
+	struct i3c_device_info info = { .dyn_addr = addr };
+	struct i3c_dev_desc *newdev, *olddev;
+	u8 old_dyn_addr = addr, expected_dyn_addr;
+	struct i3c_ibi_setup ibireq = { };
+	bool enable_ibi = false;
+	int ret;
+
+	if (!master)
+		return -EINVAL;
+
+	newdev = i3c_master_alloc_i3c_dev(master, &info);
+	if (IS_ERR(newdev))
+		return PTR_ERR(newdev);
+
+	ret = i3c_master_attach_i3c_dev(master, newdev);
+	if (ret)
+		goto err_free_dev;
+
+	ret = i3c_master_retrieve_dev_info(newdev);
+	if (ret)
+		goto err_detach_dev;
+
+	olddev = i3c_master_search_i3c_dev_duplicate(newdev);
+	if (olddev) {
+		newdev->boardinfo = olddev->boardinfo;
+		newdev->info.static_addr = olddev->info.static_addr;
+		newdev->dev = olddev->dev;
+		if (newdev->dev)
+			newdev->dev->desc = newdev;
+
+		/*
+		 * We need to restore the IBI state too, so let's save the
+		 * IBI information and try to restore it after olddev has
+		 * been detached and released, its IBI has been stopped, and
+		 * the associated resources have been freed.
+		 */
+		mutex_lock(&olddev->ibi_lock);
+		if (olddev->ibi) {
+			ibireq.handler = olddev->ibi->handler;
+			ibireq.max_payload_len = olddev->ibi->max_payload_len;
+			ibireq.num_slots = olddev->ibi->num_slots;
+
+			if (olddev->ibi->enabled) {
+				enable_ibi = true;
+				i3c_dev_disable_ibi_locked(olddev);
+			}
+
+			i3c_dev_free_ibi_locked(olddev);
+		}
+		mutex_unlock(&olddev->ibi_lock);
+
+		old_dyn_addr = olddev->info.dyn_addr;
+
+		i3c_master_detach_i3c_dev(olddev);
+		i3c_master_free_i3c_dev(olddev);
+	}
+
+	ret = i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
+	if (ret)
+		goto err_detach_dev;
+
+	/*
+	 * Depending on our previous state, the expected dynamic address might
+	 * differ:
+	 * - if the device already had a dynamic address assigned, let's try to
+	 *   re-apply this one
+	 * - if the device did not have a dynamic address and the firmware
+	 *   requested a specific address, pick this one
+	 * - in any other case, keep the address automatically assigned by the
+	 *   master
+	 */
+	if (old_dyn_addr && old_dyn_addr != newdev->info.dyn_addr)
+		expected_dyn_addr = old_dyn_addr;
+	else if (newdev->boardinfo && newdev->boardinfo->init_dyn_addr)
+		expected_dyn_addr = newdev->boardinfo->init_dyn_addr;
+	else
+		expected_dyn_addr = newdev->info.dyn_addr;
+
+	if (newdev->info.dyn_addr != expected_dyn_addr) {
+		/*
+		 * Try to apply the expected dynamic address. If it fails, keep
+		 * the address assigned by the master.
+		 */
+		ret = i3c_master_setnewda_locked(master,
+						 newdev->info.dyn_addr,
+						 expected_dyn_addr);
+		if (!ret) {
+			old_dyn_addr = newdev->info.dyn_addr;
+			newdev->info.dyn_addr = expected_dyn_addr;
+			i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
+		} else {
+			dev_err(&master->dev,
+				"Failed to assign reserved/old address to device %d%llx",
+				master->bus.id, newdev->info.pid);
+		}
+	}
+
+	/*
+	 * Now is the time to try to restore the IBI setup. If we're lucky,
+	 * everything works as before, otherwise, all we can do is complain.
+	 * FIXME: maybe we should add a callback to inform the driver that it
+	 * should request the IBI again instead of trying to hide that from
+	 * it.
+	 */
+	if (ibireq.handler) {
+		mutex_lock(&newdev->ibi_lock);
+		ret = i3c_dev_request_ibi_locked(newdev, &ibireq);
+		if (ret) {
+			dev_err(&master->dev,
+				"Failed to request IBI on device %d-%llx",
+				master->bus.id, newdev->info.pid);
+		} else if (enable_ibi) {
+			ret = i3c_dev_enable_ibi_locked(newdev);
+			if (ret)
+				dev_err(&master->dev,
+					"Failed to re-enable IBI on device %d-%llx",
+					master->bus.id, newdev->info.pid);
+		}
+		mutex_unlock(&newdev->ibi_lock);
+	}
+
+	return 0;
+
+err_detach_dev:
+	if (newdev->dev && newdev->dev->desc)
+		newdev->dev->desc = NULL;
+
+	i3c_master_detach_i3c_dev(newdev);
+
+err_free_dev:
+	i3c_master_free_i3c_dev(newdev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_add_i3c_dev_locked);
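+
+/*
+ * A minimal ->do_daa() sketch (hypothetical foo_* driver, compiled out):
+ * the core invokes this hook with the bus lock held in write mode, so every
+ * dynamic address assigned by the hardware can be registered right away,
+ * and DEFSLVS can be broadcast once the assignment is done.
+ */
+#if 0
+static int foo_master_do_daa(struct i3c_master_controller *m)
+{
+	struct foo_master *master = to_foo_master(m);
+	u8 addr;
+	int ret;
+
+	/* foo_master_daa_next_addr() abstracts the hardware DAA engine. */
+	while (foo_master_daa_next_addr(master, &addr)) {
+		ret = i3c_master_add_i3c_dev_locked(m, addr);
+		if (ret)
+			return ret;
+	}
+
+	/* Keep any secondary masters in sync with the new device list. */
+	return i3c_master_defslvs_locked(m);
+}
+#endif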
+
+#define OF_I3C_REG1_IS_I2C_DEV			BIT(31)
+
+static int
+of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
+				struct device_node *node, u32 *reg)
+{
+	struct i2c_dev_boardinfo *boardinfo;
+	struct device *dev = &master->dev;
+	int ret;
+
+	boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+	if (!boardinfo)
+		return -ENOMEM;
+
+	ret = of_i2c_get_board_info(dev, node, &boardinfo->base);
+	if (ret)
+		return ret;
+
+	/* LVR is encoded in reg[2]. */
+	boardinfo->lvr = reg[2];
+
+	if (boardinfo->lvr & I3C_LVR_I2C_FM_MODE)
+		master->bus.scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
+
+	list_add_tail(&boardinfo->node, &master->boardinfo.i2c);
+	of_node_get(node);
+
+	return 0;
+}
+
+static int
+of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
+				struct device_node *node, u32 *reg)
+{
+	struct i3c_dev_boardinfo *boardinfo;
+	struct device *dev = &master->dev;
+	enum i3c_addr_slot_status addrstatus;
+	u32 init_dyn_addr = 0;
+
+	boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+	if (!boardinfo)
+		return -ENOMEM;
+
+	if (reg[0]) {
+		if (reg[0] > I3C_MAX_ADDR)
+			return -EINVAL;
+
+		addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
+							  reg[0]);
+		if (addrstatus != I3C_ADDR_SLOT_FREE)
+			return -EINVAL;
+	}
+
+	boardinfo->static_addr = reg[0];
+
+	if (!of_property_read_u32(node, "assigned-address", &init_dyn_addr)) {
+		if (init_dyn_addr > I3C_MAX_ADDR)
+			return -EINVAL;
+
+		addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
+							  init_dyn_addr);
+		if (addrstatus != I3C_ADDR_SLOT_FREE)
+			return -EINVAL;
+	}
+
+	boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
+
+	if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+	    I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
+		return -EINVAL;
+
+	boardinfo->init_dyn_addr = init_dyn_addr;
+	boardinfo->of_node = of_node_get(node);
+	list_add_tail(&boardinfo->node, &master->boardinfo.i3c);
+
+	return 0;
+}
+
+static int of_i3c_master_add_dev(struct i3c_master_controller *master,
+				 struct device_node *node)
+{
+	u32 reg[3];
+	int ret;
+
+	if (!master || !node)
+		return -EINVAL;
+
+	ret = of_property_read_u32_array(node, "reg", reg, ARRAY_SIZE(reg));
+	if (ret)
+		return ret;
+
+	/*
+	 * The manufacturer ID can't be 0. If reg[1] == 0 that means we're
+	 * dealing with an I2C device.
+	 */
+	if (!reg[1])
+		ret = of_i3c_master_add_i2c_boardinfo(master, node, reg);
+	else
+		ret = of_i3c_master_add_i3c_boardinfo(master, node, reg);
+
+	return ret;
+}
+
+static int of_populate_i3c_bus(struct i3c_master_controller *master)
+{
+	struct device *dev = &master->dev;
+	struct device_node *i3cbus_np = dev->of_node;
+	struct device_node *node;
+	int ret;
+	u32 val;
+
+	if (!i3cbus_np)
+		return 0;
+
+	for_each_available_child_of_node(i3cbus_np, node) {
+		ret = of_i3c_master_add_dev(master, node);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * The user might want to limit I2C and I3C speed in case some devices
+	 * on the bus do not support typical rates, or if the bus topology
+	 * prevents use of the maximum possible rate.
+	 */
+	if (!of_property_read_u32(i3cbus_np, "i2c-scl-hz", &val))
+		master->bus.scl_rate.i2c = val;
+
+	if (!of_property_read_u32(i3cbus_np, "i3c-scl-hz", &val))
+		master->bus.scl_rate.i3c = val;
+
+	return 0;
+}
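+
+/*
+ * A hypothetical devicetree fragment matching the parsing above: I3C
+ * devices encode <static-addr, PID-MSBs, PID-LSBs> in "reg" (a non-zero
+ * reg[1] means a valid manufacturer ID), while legacy I2C devices set
+ * reg[1] to 0 and carry their LVR in reg[2]:
+ *
+ *	i3c-master@d040000 {
+ *		#address-cells = <3>;
+ *		#size-cells = <0>;
+ *		i2c-scl-hz = <100000>;
+ *
+ *		eeprom@50 {
+ *			// I2C device: reg[1] == 0, reg[2] == LVR
+ *			reg = <0x50 0x0 0x10>;
+ *		};
+ *
+ *		sensor@68 {
+ *			// I3C device with a static address and a
+ *			// requested dynamic address
+ *			reg = <0x68 0x392 0x144004>;
+ *			assigned-address = <0xa>;
+ *		};
+ *	};
+ */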
+
+static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap,
+				       struct i2c_msg *xfers, int nxfers)
+{
+	struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+	struct i2c_dev_desc *dev;
+	int i, ret;
+	u16 addr;
+
+	if (!xfers || !master || nxfers <= 0)
+		return -EINVAL;
+
+	if (!master->ops->i2c_xfers)
+		return -ENOTSUPP;
+
+	/* Doing transfers to different devices is not supported. */
+	addr = xfers[0].addr;
+	for (i = 1; i < nxfers; i++) {
+		if (addr != xfers[i].addr)
+			return -ENOTSUPP;
+	}
+
+	i3c_bus_normaluse_lock(&master->bus);
+	dev = i3c_master_find_i2c_dev_by_addr(master, addr);
+	if (!dev)
+		ret = -ENOENT;
+	else
+		ret = master->ops->i2c_xfers(dev, xfers, nxfers);
+	i3c_bus_normaluse_unlock(&master->bus);
+
+	return ret ? ret : nxfers;
+}
+
+static u32 i3c_master_i2c_functionalities(struct i2c_adapter *adap)
+{
+	struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+
+	return master->ops->i2c_funcs(master);
+}
+
+static const struct i2c_algorithm i3c_master_i2c_algo = {
+	.master_xfer = i3c_master_i2c_adapter_xfer,
+	.functionality = i3c_master_i2c_functionalities,
+};
+
+static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
+{
+	struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
+	struct i2c_dev_desc *i2cdev;
+	int ret;
+
+	adap->dev.parent = master->dev.parent;
+	adap->owner = master->dev.parent->driver->owner;
+	adap->algo = &i3c_master_i2c_algo;
+	strncpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
+
+	/* FIXME: Should we allow i3c masters to override these values? */
+	adap->timeout = 1000;
+	adap->retries = 3;
+
+	ret = i2c_add_adapter(adap);
+	if (ret)
+		return ret;
+
+	/*
+	 * We silently ignore failures here. The bus should keep working
+	 * correctly even if one or more i2c devices are not registered.
+	 */
+	i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
+		i2cdev->dev = i2c_new_device(adap, &i2cdev->boardinfo->base);
+
+	return 0;
+}
+
+static void i3c_master_i2c_adapter_cleanup(struct i3c_master_controller *master)
+{
+	struct i2c_dev_desc *i2cdev;
+
+	i2c_del_adapter(&master->i2c);
+
+	i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
+		i2cdev->dev = NULL;
+}
+
+static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
+{
+	struct i3c_dev_desc *i3cdev;
+
+	i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+		if (!i3cdev->dev)
+			continue;
+
+		i3cdev->dev->desc = NULL;
+		if (device_is_registered(&i3cdev->dev->dev))
+			device_unregister(&i3cdev->dev->dev);
+		else
+			put_device(&i3cdev->dev->dev);
+		i3cdev->dev = NULL;
+	}
+}
+
+/**
+ * i3c_master_queue_ibi() - Queue an IBI
+ * @dev: the device this IBI is coming from
+ * @slot: the IBI slot used to store the payload
+ *
+ * Queue an IBI to the controller workqueue. The IBI handler attached to
+ * the dev will be called from a workqueue context.
+ */
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
+{
+	atomic_inc(&dev->ibi->pending_ibis);
+	queue_work(dev->common.master->wq, &slot->work);
+}
+EXPORT_SYMBOL_GPL(i3c_master_queue_ibi);
+
+static void i3c_master_handle_ibi(struct work_struct *work)
+{
+	struct i3c_ibi_slot *slot = container_of(work, struct i3c_ibi_slot,
+						 work);
+	struct i3c_dev_desc *dev = slot->dev;
+	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+	struct i3c_ibi_payload payload;
+
+	payload.data = slot->data;
+	payload.len = slot->len;
+
+	if (dev->dev)
+		dev->ibi->handler(dev->dev, &payload);
+
+	master->ops->recycle_ibi_slot(dev, slot);
+	if (atomic_dec_and_test(&dev->ibi->pending_ibis))
+		complete(&dev->ibi->all_ibis_handled);
+}
+
+static void i3c_master_init_ibi_slot(struct i3c_dev_desc *dev,
+				     struct i3c_ibi_slot *slot)
+{
+	slot->dev = dev;
+	INIT_WORK(&slot->work, i3c_master_handle_ibi);
+}
+
+struct i3c_generic_ibi_slot {
+	struct list_head node;
+	struct i3c_ibi_slot base;
+};
+
+struct i3c_generic_ibi_pool {
+	spinlock_t lock;
+	unsigned int num_slots;
+	struct i3c_generic_ibi_slot *slots;
+	void *payload_buf;
+	struct list_head free_slots;
+	struct list_head pending;
+};
+
+/**
+ * i3c_generic_ibi_free_pool() - Free a generic IBI pool
+ * @pool: the IBI pool to free
+ *
+ * Free all IBI slots allocated by a generic IBI pool.
+ */
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool)
+{
+	struct i3c_generic_ibi_slot *slot;
+	unsigned int nslots = 0;
+
+	while (!list_empty(&pool->free_slots)) {
+		slot = list_first_entry(&pool->free_slots,
+					struct i3c_generic_ibi_slot, node);
+		list_del(&slot->node);
+		nslots++;
+	}
+
+	/*
+	 * If the number of freed slots is not equal to the number of allocated
+	 * slots we have a leak somewhere.
+	 */
+	WARN_ON(nslots != pool->num_slots);
+
+	kfree(pool->payload_buf);
+	kfree(pool->slots);
+	kfree(pool);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_free_pool);
+
+/**
+ * i3c_generic_ibi_alloc_pool() - Create a generic IBI pool
+ * @dev: the device this pool will be used for
+ * @req: IBI setup request describing what the device driver expects
+ *
+ * Create a generic IBI pool based on the information provided in @req.
+ *
+ * Return: a valid IBI pool in case of success, an ERR_PTR() otherwise.
+ */
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+			   const struct i3c_ibi_setup *req)
+{
+	struct i3c_generic_ibi_pool *pool;
+	struct i3c_generic_ibi_slot *slot;
+	unsigned int i;
+	int ret;
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&pool->lock);
+	INIT_LIST_HEAD(&pool->free_slots);
+	INIT_LIST_HEAD(&pool->pending);
+
+	pool->slots = kcalloc(req->num_slots, sizeof(*slot), GFP_KERNEL);
+	if (!pool->slots) {
+		ret = -ENOMEM;
+		goto err_free_pool;
+	}
+
+	if (req->max_payload_len) {
+		pool->payload_buf = kcalloc(req->num_slots,
+					    req->max_payload_len, GFP_KERNEL);
+		if (!pool->payload_buf) {
+			ret = -ENOMEM;
+			goto err_free_pool;
+		}
+	}
+
+	for (i = 0; i < req->num_slots; i++) {
+		slot = &pool->slots[i];
+		i3c_master_init_ibi_slot(dev, &slot->base);
+
+		if (req->max_payload_len)
+			slot->base.data = pool->payload_buf +
+					  (i * req->max_payload_len);
+
+		list_add_tail(&slot->node, &pool->free_slots);
+		pool->num_slots++;
+	}
+
+	return pool;
+
+err_free_pool:
+	i3c_generic_ibi_free_pool(pool);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_alloc_pool);
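+
+/*
+ * A minimal sketch (hypothetical foo_* driver, compiled out) of the
+ * ->request_ibi() and ->free_ibi() hooks built on top of this generic pool;
+ * foo_i3c_dev_data is assumed to be the driver's per-device data set with
+ * i3c_dev_set_master_data().
+ */
+#if 0
+static int foo_master_request_ibi(struct i3c_dev_desc *dev,
+				  const struct i3c_ibi_setup *req)
+{
+	struct foo_i3c_dev_data *data = i3c_dev_get_master_data(dev);
+
+	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+	if (IS_ERR(data->ibi_pool))
+		return PTR_ERR(data->ibi_pool);
+
+	/* Hardware-specific IBI enablement would go here. */
+	return 0;
+}
+
+static void foo_master_free_ibi(struct i3c_dev_desc *dev)
+{
+	struct foo_i3c_dev_data *data = i3c_dev_get_master_data(dev);
+
+	i3c_generic_ibi_free_pool(data->ibi_pool);
+	data->ibi_pool = NULL;
+}
+#endif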
+
+/**
+ * i3c_generic_ibi_get_free_slot() - Get a free slot from a generic IBI pool
+ * @pool: the pool to query an IBI slot on
+ *
+ * Search for a free slot in a generic IBI pool.
+ * The slot should be returned to the pool using i3c_generic_ibi_recycle_slot()
+ * when it's no longer needed.
+ *
+ * Return: a pointer to a free slot, or NULL if there's no free slot available.
+ */
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool)
+{
+	struct i3c_generic_ibi_slot *slot;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	slot = list_first_entry_or_null(&pool->free_slots,
+					struct i3c_generic_ibi_slot, node);
+	if (slot)
+		list_del(&slot->node);
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	return slot ? &slot->base : NULL;
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_get_free_slot);
+
+/**
+ * i3c_generic_ibi_recycle_slot() - Return a slot to a generic IBI pool
+ * @pool: the pool to return the IBI slot to
+ * @s: IBI slot to recycle
+ *
+ * Add an IBI slot back to its generic IBI pool. Should be called from the
+ * master driver's &i3c_master_controller_ops->recycle_ibi_slot() method.
+ */
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+				  struct i3c_ibi_slot *s)
+{
+	struct i3c_generic_ibi_slot *slot;
+	unsigned long flags;
+
+	if (!s)
+		return;
+
+	slot = container_of(s, struct i3c_generic_ibi_slot, base);
+	spin_lock_irqsave(&pool->lock, flags);
+	list_add_tail(&slot->node, &pool->free_slots);
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_recycle_slot);
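+
+/*
+ * A minimal sketch tying the pool helpers to the IBI workqueue (hypothetical
+ * foo_* driver, compiled out): the IRQ path grabs a free slot, fills it and
+ * queues it with i3c_master_queue_ibi(); once the handler has run, the core
+ * hands the slot back through ->recycle_ibi_slot().
+ */
+#if 0
+static void foo_master_handle_ibi_irq(struct foo_master *master,
+				      struct i3c_dev_desc *dev)
+{
+	struct foo_i3c_dev_data *data = i3c_dev_get_master_data(dev);
+	struct i3c_ibi_slot *slot;
+
+	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+	if (!slot)
+		return;	/* no free slot: drop this IBI */
+
+	/* foo_master_read_ibi_payload() abstracts the RX FIFO drain. */
+	slot->len = foo_master_read_ibi_payload(master, slot->data,
+						dev->ibi->max_payload_len);
+	i3c_master_queue_ibi(dev, slot);
+}
+
+static void foo_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+					struct i3c_ibi_slot *slot)
+{
+	struct foo_i3c_dev_data *data = i3c_dev_get_master_data(dev);
+
+	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+#endif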
+
+static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
+{
+	if (!ops || !ops->bus_init || !ops->priv_xfers ||
+	    !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers ||
+	    !ops->i2c_funcs)
+		return -EINVAL;
+
+	if (ops->request_ibi &&
+	    (!ops->enable_ibi || !ops->disable_ibi || !ops->free_ibi ||
+	     !ops->recycle_ibi_slot))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * i3c_master_register() - register an I3C master
+ * @master: master used to send frames on the bus
+ * @parent: the parent device (the one that provides this I3C master
+ *	    controller)
+ * @ops: the master controller operations
+ * @secondary: true if you are registering a secondary master. Will return
+ *	       -ENOTSUPP if set to true since secondary masters are not yet
+ *	       supported
+ *
+ * This function takes care of everything for you:
+ *
+ * - creates and initializes the I3C bus
+ * - populates the bus with static I2C devs if @parent->of_node is not
+ *   NULL
+ * - registers all I3C devices added by the controller during bus
+ *   initialization
+ * - registers the I2C adapter and all I2C devices
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_register(struct i3c_master_controller *master,
+			struct device *parent,
+			const struct i3c_master_controller_ops *ops,
+			bool secondary)
+{
+	struct i3c_bus *i3cbus = i3c_master_get_bus(master);
+	enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
+	struct i2c_dev_boardinfo *i2cbi;
+	int ret;
+
+	/* We do not support secondary masters yet. */
+	if (secondary)
+		return -ENOTSUPP;
+
+	ret = i3c_master_check_ops(ops);
+	if (ret)
+		return ret;
+
+	master->dev.parent = parent;
+	master->dev.of_node = of_node_get(parent->of_node);
+	master->dev.bus = &i3c_bus_type;
+	master->dev.type = &i3c_masterdev_type;
+	master->dev.release = i3c_masterdev_release;
+	master->ops = ops;
+	master->secondary = secondary;
+	INIT_LIST_HEAD(&master->boardinfo.i2c);
+	INIT_LIST_HEAD(&master->boardinfo.i3c);
+
+	ret = i3c_bus_init(i3cbus);
+	if (ret)
+		return ret;
+
+	device_initialize(&master->dev);
+	dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
+
+	ret = of_populate_i3c_bus(master);
+	if (ret)
+		goto err_put_dev;
+
+	list_for_each_entry(i2cbi, &master->boardinfo.i2c, node) {
+		switch (i2cbi->lvr & I3C_LVR_I2C_INDEX_MASK) {
+		case I3C_LVR_I2C_INDEX(0):
+			if (mode < I3C_BUS_MODE_MIXED_FAST)
+				mode = I3C_BUS_MODE_MIXED_FAST;
+			break;
+		case I3C_LVR_I2C_INDEX(1):
+		case I3C_LVR_I2C_INDEX(2):
+			if (mode < I3C_BUS_MODE_MIXED_SLOW)
+				mode = I3C_BUS_MODE_MIXED_SLOW;
+			break;
+		default:
+			ret = -EINVAL;
+			goto err_put_dev;
+		}
+	}
+
+	ret = i3c_bus_set_mode(i3cbus, mode);
+	if (ret)
+		goto err_put_dev;
+
+	master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent));
+	if (!master->wq) {
+		ret = -ENOMEM;
+		goto err_put_dev;
+	}
+
+	ret = i3c_master_bus_init(master);
+	if (ret)
+		goto err_put_dev;
+
+	ret = device_add(&master->dev);
+	if (ret)
+		goto err_cleanup_bus;
+
+	/*
+	 * Expose our I3C bus as an I2C adapter so that I2C devices are exposed
+	 * through the I2C subsystem.
+	 */
+	ret = i3c_master_i2c_adapter_init(master);
+	if (ret)
+		goto err_del_dev;
+
+	/*
+	 * We're done initializing the bus and the controller, we can now
+	 * register I3C devices discovered during the initial DAA.
+	 */
+	master->init_done = true;
+	i3c_bus_normaluse_lock(&master->bus);
+	i3c_master_register_new_i3c_devs(master);
+	i3c_bus_normaluse_unlock(&master->bus);
+
+	return 0;
+
+err_del_dev:
+	device_del(&master->dev);
+
+err_cleanup_bus:
+	i3c_master_bus_cleanup(master);
+
+err_put_dev:
+	put_device(&master->dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_register);
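+
+/*
+ * A minimal probe() sketch (hypothetical foo_* platform driver, compiled
+ * out) showing the registration entry point documented above; the ops
+ * listed are the mandatory ones checked by i3c_master_check_ops().
+ */
+#if 0
+struct foo_master {
+	struct i3c_master_controller base;
+	/* hardware-specific state would live here */
+};
+
+static const struct i3c_master_controller_ops foo_master_ops = {
+	.bus_init = foo_master_bus_init,
+	.do_daa = foo_master_do_daa,
+	.priv_xfers = foo_master_priv_xfers,
+	.send_ccc_cmd = foo_master_send_ccc_cmd,
+	.i2c_xfers = foo_master_i2c_xfers,
+	.i2c_funcs = foo_master_i2c_funcs,
+};
+
+static int foo_master_probe(struct platform_device *pdev)
+{
+	struct foo_master *master;
+
+	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+	if (!master)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, master);
+
+	/* Secondary masters are rejected with -ENOTSUPP for now. */
+	return i3c_master_register(&master->base, &pdev->dev,
+				   &foo_master_ops, false);
+}
+#endif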
+
+/**
+ * i3c_master_unregister() - unregister an I3C master
+ * @master: master used to send frames on the bus
+ *
+ * Basically undo everything done in i3c_master_register().
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_unregister(struct i3c_master_controller *master)
+{
+	i3c_master_i2c_adapter_cleanup(master);
+	i3c_master_unregister_i3c_devs(master);
+	i3c_master_bus_cleanup(master);
+	device_unregister(&master->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_master_unregister);
+
+int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+				 struct i3c_priv_xfer *xfers,
+				 int nxfers)
+{
+	struct i3c_master_controller *master;
+
+	if (!dev)
+		return -ENOENT;
+
+	master = i3c_dev_get_master(dev);
+	if (!master || !xfers)
+		return -EINVAL;
+
+	if (!master->ops->priv_xfers)
+		return -ENOTSUPP;
+
+	return master->ops->priv_xfers(dev, xfers, nxfers);
+}
+
+int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *master;
+	int ret;
+
+	if (!dev->ibi)
+		return -EINVAL;
+
+	master = i3c_dev_get_master(dev);
+	ret = master->ops->disable_ibi(dev);
+	if (ret)
+		return ret;
+
+	reinit_completion(&dev->ibi->all_ibis_handled);
+	if (atomic_read(&dev->ibi->pending_ibis))
+		wait_for_completion(&dev->ibi->all_ibis_handled);
+
+	dev->ibi->enabled = false;
+
+	return 0;
+}
+
+int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+	int ret;
+
+	if (!dev->ibi)
+		return -EINVAL;
+
+	ret = master->ops->enable_ibi(dev);
+	if (!ret)
+		dev->ibi->enabled = true;
+
+	return ret;
+}
+
+int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+			       const struct i3c_ibi_setup *req)
+{
+	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+	struct i3c_device_ibi_info *ibi;
+	int ret;
+
+	if (!master->ops->request_ibi)
+		return -ENOTSUPP;
+
+	if (dev->ibi)
+		return -EBUSY;
+
+	ibi = kzalloc(sizeof(*ibi), GFP_KERNEL);
+	if (!ibi)
+		return -ENOMEM;
+
+	atomic_set(&ibi->pending_ibis, 0);
+	init_completion(&ibi->all_ibis_handled);
+	ibi->handler = req->handler;
+	ibi->max_payload_len = req->max_payload_len;
+	ibi->num_slots = req->num_slots;
+
+	dev->ibi = ibi;
+	ret = master->ops->request_ibi(dev, req);
+	if (ret) {
+		kfree(ibi);
+		dev->ibi = NULL;
+	}
+
+	return ret;
+}
+
+void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+	if (!dev->ibi)
+		return;
+
+	if (WARN_ON(dev->ibi->enabled))
+		WARN_ON(i3c_dev_disable_ibi_locked(dev));
+
+	master->ops->free_ibi(dev);
+	kfree(dev->ibi);
+	dev->ibi = NULL;
+}
+
+static int __init i3c_init(void)
+{
+	return bus_register(&i3c_bus_type);
+}
+subsys_initcall(i3c_init);
+
+static void __exit i3c_exit(void)
+{
+	idr_destroy(&i3c_bus_idr);
+	bus_unregister(&i3c_bus_type);
+}
+module_exit(i3c_exit);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_DESCRIPTION("I3C core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
new file mode 100644
index 0000000..83f8bc0
--- /dev/null
+++ b/drivers/i3c/master/Kconfig
@@ -0,0 +1,10 @@
+config I3C_MASTER_QCOM_GENI
+	tristate "Qualcomm Technologies Inc.'s GENI based I3C controller"
+	depends on ARCH_QCOM
+	depends on I3C
+	help
+	  If you say yes to this option, support will be included for the
+	  built-in I3C interface on Qualcomm Technologies Inc. SoCs.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i3c-master-qcom-geni.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
new file mode 100644
index 0000000..cb605d0
--- /dev/null
+++ b/drivers/i3c/master/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_I3C_MASTER_QCOM_GENI)	+= i3c-master-qcom-geni.o
diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c
new file mode 100644
index 0000000..ca3e046
--- /dev/null
+++ b/drivers/i3c/master/i3c-master-qcom-geni.c
@@ -0,0 +1,1348 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/ipc_logging.h>
+
+#define SE_I3C_SCL_HIGH			0x268
+#define SE_I3C_TX_TRANS_LEN		0x26C
+#define SE_I3C_RX_TRANS_LEN		0x270
+#define SE_I3C_DELAY_COUNTER		0x274
+#define SE_I2C_SCL_COUNTERS		0x278
+#define SE_I3C_SCL_CYCLE		0x27C
+#define SE_GENI_HW_IRQ_EN		0x920
+#define SE_GENI_HW_IRQ_IGNORE_ON_ACTIVE	0x924
+#define SE_GENI_HW_IRQ_CMD_PARAM_0	0x930
+
+/* SE_GENI_M_CLK_CFG field shifts */
+#define CLK_DEV_VALUE_SHFT	4
+#define SER_CLK_EN_SHFT		0
+
+/* SE_GENI_HW_IRQ_CMD_PARAM_0 field shifts */
+#define M_IBI_IRQ_PARAM_7E_SHFT		0
+#define M_IBI_IRQ_PARAM_STOP_STALL_SHFT	1
+
+/* SE_I2C_SCL_COUNTERS field shifts */
+#define I2C_SCL_HIGH_COUNTER_SHFT	20
+#define I2C_SCL_LOW_COUNTER_SHFT	10
+
+#define	SE_I3C_ERR  (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |\
+	M_CMD_ABORT_EN | M_GP_IRQ_0_EN | M_GP_IRQ_1_EN | M_GP_IRQ_2_EN | \
+	M_GP_IRQ_3_EN | M_GP_IRQ_4_EN)
+
+/* M_CMD OP codes for I2C/I3C */
+#define I3C_READ_IBI_HW			0
+#define I2C_WRITE			1
+#define I2C_READ			2
+#define I2C_WRITE_READ			3
+#define I2C_ADDR_ONLY			4
+#define I3C_INBAND_RESET		5
+#define I2C_BUS_CLEAR			6
+#define I2C_STOP_ON_BUS			7
+#define I3C_HDR_DDR_EXIT		8
+#define I3C_PRIVATE_WRITE		9
+#define I3C_PRIVATE_READ		10
+#define I3C_HDR_DDR_WRITE		11
+#define I3C_HDR_DDR_READ		12
+#define I3C_DIRECT_CCC_ADDR_ONLY	13
+#define I3C_BCAST_CCC_ADDR_ONLY		14
+#define I3C_READ_IBI			15
+#define I3C_BCAST_CCC_WRITE		16
+#define I3C_DIRECT_CCC_WRITE		17
+#define I3C_DIRECT_CCC_READ		18
+/* M_CMD params for I3C */
+#define PRE_CMD_DELAY		BIT(0)
+#define TIMESTAMP_BEFORE	BIT(1)
+#define STOP_STRETCH		BIT(2)
+#define TIMESTAMP_AFTER		BIT(3)
+#define POST_COMMAND_DELAY	BIT(4)
+#define IGNORE_ADD_NACK		BIT(6)
+#define READ_FINISHED_WITH_ACK	BIT(7)
+#define CONTINUOUS_MODE_DAA	BIT(8)
+#define SLV_ADDR_MSK		GENMASK(15, 9)
+#define SLV_ADDR_SHFT		9
+#define CCC_HDR_CMD_MSK		GENMASK(23, 16)
+#define CCC_HDR_CMD_SHFT	16
+#define IBI_NACK_TBL_CTRL	BIT(24)
+#define USE_7E			BIT(25)
+#define BYPASS_ADDR_PHASE	BIT(26)
+
+enum geni_i3c_err_code {
+	RD_TERM,
+	NACK,
+	CRC_ERR,
+	BUS_PROTO,
+	NACK_7E,
+	NACK_IBI,
+	GENI_OVERRUN,
+	GENI_ILLEGAL_CMD,
+	GENI_ABORT_DONE,
+	GENI_TIMEOUT,
+};
+
+#define DM_I3C_CB_ERR   ((BIT(NACK) | BIT(BUS_PROTO) | BIT(NACK_7E)) << 5)
+
+#define I3C_AUTO_SUSPEND_DELAY	250
+#define KHZ(freq)		(1000 * (freq))
+#define PACKING_BYTES_PW	4
+#define XFER_TIMEOUT		HZ
+#define DFS_INDEX_MAX		7
+#define I3C_CORE2X_VOTE		(960)
+#define DEFAULT_BUS_WIDTH	(4)
+#define DEFAULT_SE_CLK		(19200000)
+
+#define I3C_DDR_READ_CMD BIT(7)
+#define I3C_ADDR_MASK	0x7f
+
+enum i3c_trans_dir {
+	WRITE_TRANSACTION = 0,
+	READ_TRANSACTION = 1
+};
+
+struct geni_se {
+	void __iomem *base;
+	struct device *dev;
+	struct se_geni_rsc i3c_rsc;
+};
+
+struct geni_i3c_dev {
+	struct geni_se se;
+	unsigned int tx_wm;
+	int irq;
+	int err;
+	struct i3c_master_controller ctrlr;
+	void *ipcl;
+	struct completion done;
+	struct mutex lock;
+	spinlock_t spinlock;
+	u32 clk_src_freq;
+	u32 dfs_idx;
+	u8 *cur_buf;
+	enum i3c_trans_dir cur_rnw;
+	int cur_len;
+	int cur_idx;
+	unsigned long newaddrslots[(I3C_ADDR_MASK + 1) / BITS_PER_LONG];
+	const struct geni_i3c_clk_fld *clk_fld;
+};
+
+struct geni_i3c_i2c_dev_data {
+	u32 dummy;  /* placeholder for now, later will hold IBI information */
+};
+
+struct i3c_xfer_params {
+	enum se_xfer_mode mode;
+	u32 m_cmd;
+	u32 m_param;
+};
+
+struct geni_i3c_err_log {
+	int err;
+	const char *msg;
+};
+
+static struct geni_i3c_err_log gi3c_log[] = {
+	[RD_TERM] = { -EINVAL, "I3C slave early read termination" },
+	[NACK] = { -ENOTCONN, "NACK: slave unresponsive, check power/reset" },
+	[CRC_ERR] = { -EINVAL, "CRC or parity error" },
+	[BUS_PROTO] = { -EPROTO, "Bus proto err, noisy/unexpected start/stop" },
+	[NACK_7E] = { -EBUSY, "NACK on 7E, unexpected protocol error" },
+	[NACK_IBI] = { -EINVAL, "NACK on IBI" },
+	[GENI_OVERRUN] = { -EIO, "Cmd overrun, check GENI cmd-state machine" },
+	[GENI_ILLEGAL_CMD] = { -EILSEQ,
+				"Illegal cmd, check GENI cmd-state machine" },
+	[GENI_ABORT_DONE] = { -ETIMEDOUT, "Abort after timeout successful" },
+	[GENI_TIMEOUT] = { -ETIMEDOUT, "I3C transaction timed out" },
+};
+
+struct geni_i3c_clk_fld {
+	u32 clk_freq_out;
+	u32 clk_src_freq;
+	u8  clk_div;
+	u8  i2c_t_high_cnt;
+	u8  i2c_t_low_cnt;
+	u8  i2c_t_cycle_cnt;
+	u8  i3c_t_high_cnt;
+	u8  i3c_t_cycle_cnt;
+};
+
+static struct geni_i3c_dev*
+to_geni_i3c_master(struct i3c_master_controller *master)
+{
+	return container_of(master, struct geni_i3c_dev, ctrlr);
+}
+
+/*
+ * Hardware uses the following formula to calculate the time periods of the
+ * SCL clock cycle. Firmware adds some extra cycles that are excluded from
+ * the formula below; it has been confirmed that the resulting time periods
+ * stay within specification limits.
+ *
+ * time of high period of I2C SCL:
+ *         i2c_t_high = (i2c_t_high_cnt * clk_div) / source_clock
+ * time of low period of I2C SCL:
+ *         i2c_t_low = (i2c_t_low_cnt * clk_div) / source_clock
+ * time of full period of I2C SCL:
+ *         i2c_t_cycle = (i2c_t_cycle_cnt * clk_div) / source_clock
+ * time of high period of I3C SCL:
+ *         i3c_t_high = (i3c_t_high_cnt * clk_div) / source_clock
+ * time of full period of I3C SCL:
+ *         i3c_t_cycle = (i3c_t_cycle_cnt * clk_div) / source_clock
+ * clk_freq_out = 1 / t_cycle
+ */
+static const struct geni_i3c_clk_fld geni_i3c_clk_map[] = {
+	{ KHZ(100),    19200, 7, 10, 11, 26, 0, 0 },
+	{ KHZ(400),    19200, 2,  5, 12, 24, 0, 0 },
+	{ KHZ(1000),   19200, 1,  3,  9, 18, 0, 0 },
+	{ KHZ(12500), 100000, 1, 60, 140, 250, 8, 16 },
+};
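+
+/*
+ * Worked example for the 100 kHz I2C entry above: with clk_div = 7 and a
+ * 19.2 MHz source clock, one divided-clock cycle is 7 / 19.2 MHz ~= 364.6 ns,
+ * so:
+ *	i2c_t_high  = 10 * 364.6 ns ~= 3.65 us
+ *	i2c_t_low   = 11 * 364.6 ns ~= 4.01 us
+ *	i2c_t_cycle = 26 * 364.6 ns ~= 9.48 us  (~105 kHz, before the extra
+ *	firmware cycles mentioned above bring it down to the 100 kHz target)
+ */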
+
+static int geni_i3c_clk_map_idx(struct geni_i3c_dev *gi3c)
+{
+	int i;
+	struct i3c_master_controller *m = &gi3c->ctrlr;
+	const struct geni_i3c_clk_fld *itr = geni_i3c_clk_map;
+	struct i3c_bus *bus = i3c_master_get_bus(m);
+
+	for (i = 0; i < ARRAY_SIZE(geni_i3c_clk_map); i++, itr++) {
+		if ((!bus ||
+			 itr->clk_freq_out == bus->scl_rate.i3c) &&
+			 KHZ(itr->clk_src_freq) == gi3c->clk_src_freq) {
+			gi3c->clk_fld = itr;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void set_new_addr_slot(unsigned long *addrslot, u8 addr)
+{
+	unsigned long *ptr;
+
+	if (addr > I3C_ADDR_MASK)
+		return;
+
+	ptr = addrslot + (addr / BITS_PER_LONG);
+	*ptr |= 1UL << (addr % BITS_PER_LONG);
+}
+
+static void clear_new_addr_slot(unsigned long *addrslot, u8 addr)
+{
+	unsigned long *ptr;
+
+	if (addr > I3C_ADDR_MASK)
+		return;
+
+	ptr = addrslot + (addr / BITS_PER_LONG);
+	*ptr &= ~(1UL << (addr % BITS_PER_LONG));
+}
+
+static bool is_new_addr_slot_set(unsigned long *addrslot, u8 addr)
+{
+	unsigned long *ptr;
+
+	if (addr > I3C_ADDR_MASK)
+		return false;
+
+	ptr = addrslot + (addr / BITS_PER_LONG);
+	return ((*ptr & (1UL << (addr % BITS_PER_LONG))) != 0);
+}
+
+static void qcom_geni_i3c_conf(struct geni_i3c_dev *gi3c)
+{
+	const struct geni_i3c_clk_fld *itr = gi3c->clk_fld;
+	u32 val;
+	unsigned long freq;
+	int ret = 0;
+
+	if (gi3c->dfs_idx > DFS_INDEX_MAX)
+		ret = geni_se_clk_freq_match(&gi3c->se.i3c_rsc,
+				KHZ(itr->clk_src_freq),
+				&gi3c->dfs_idx, &freq, false);
+	if (ret)
+		gi3c->dfs_idx = 0;
+
+	writel_relaxed(gi3c->dfs_idx, gi3c->se.base + SE_GENI_CLK_SEL);
+
+	val = itr->clk_div << CLK_DEV_VALUE_SHFT;
+	val |= 1 << SER_CLK_EN_SHFT;
+	writel_relaxed(val, gi3c->se.base + GENI_SER_M_CLK_CFG);
+
+	val = itr->i2c_t_high_cnt << I2C_SCL_HIGH_COUNTER_SHFT;
+	val |= itr->i2c_t_low_cnt << I2C_SCL_LOW_COUNTER_SHFT;
+	val |= itr->i2c_t_cycle_cnt;
+	writel_relaxed(val, gi3c->se.base + SE_I2C_SCL_COUNTERS);
+
+	writel_relaxed(itr->i3c_t_cycle_cnt, gi3c->se.base + SE_I3C_SCL_CYCLE);
+	writel_relaxed(itr->i3c_t_high_cnt, gi3c->se.base + SE_I3C_SCL_HIGH);
+
+	writel_relaxed(1, gi3c->se.base + SE_GENI_HW_IRQ_IGNORE_ON_ACTIVE);
+
+	val = 1 << M_IBI_IRQ_PARAM_STOP_STALL_SHFT;
+	val |= 1 << M_IBI_IRQ_PARAM_7E_SHFT;
+	writel_relaxed(val, gi3c->se.base + SE_GENI_HW_IRQ_CMD_PARAM_0);
+
+	writel_relaxed(1, gi3c->se.base + SE_GENI_HW_IRQ_EN);
+}
+
+static void geni_i3c_err(struct geni_i3c_dev *gi3c, int err)
+{
+	if (gi3c->cur_rnw == WRITE_TRANSACTION)
+		dev_dbg(gi3c->se.dev, "len:%d, write\n", gi3c->cur_len);
+	else
+		dev_dbg(gi3c->se.dev, "len:%d, read\n", gi3c->cur_len);
+
+	dev_dbg(gi3c->se.dev, "%s\n", gi3c_log[err].msg);
+	gi3c->err = gi3c_log[err].err;
+}
+
+static irqreturn_t geni_i3c_irq(int irq, void *dev)
+{
+	struct geni_i3c_dev *gi3c = dev;
+	int j;
+	u32 m_stat, m_stat_mask, rx_st;
+	u32 dm_tx_st, dm_rx_st, dma;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gi3c->spinlock, flags);
+
+	m_stat = readl_relaxed(gi3c->se.base + SE_GENI_M_IRQ_STATUS);
+	m_stat_mask = readl_relaxed(gi3c->se.base + SE_GENI_M_IRQ_EN);
+	rx_st = readl_relaxed(gi3c->se.base + SE_GENI_RX_FIFO_STATUS);
+	dm_tx_st = readl_relaxed(gi3c->se.base + SE_DMA_TX_IRQ_STAT);
+	dm_rx_st = readl_relaxed(gi3c->se.base + SE_DMA_RX_IRQ_STAT);
+	dma = readl_relaxed(gi3c->se.base + SE_GENI_DMA_MODE_EN);
+
+	if ((m_stat   & SE_I3C_ERR) ||
+		(dm_rx_st & DM_I3C_CB_ERR)) {
+		if (m_stat & M_GP_IRQ_0_EN)
+			geni_i3c_err(gi3c, RD_TERM);
+		if (m_stat & M_GP_IRQ_1_EN)
+			geni_i3c_err(gi3c, NACK);
+		if (m_stat & M_GP_IRQ_2_EN)
+			geni_i3c_err(gi3c, CRC_ERR);
+		if (m_stat & M_GP_IRQ_3_EN)
+			geni_i3c_err(gi3c, BUS_PROTO);
+		if (m_stat & M_GP_IRQ_4_EN)
+			geni_i3c_err(gi3c, NACK_7E);
+		if (m_stat & M_CMD_OVERRUN_EN)
+			geni_i3c_err(gi3c, GENI_OVERRUN);
+		if (m_stat & M_ILLEGAL_CMD_EN)
+			geni_i3c_err(gi3c, GENI_ILLEGAL_CMD);
+		if (m_stat & M_CMD_ABORT_EN)
+			geni_i3c_err(gi3c, GENI_ABORT_DONE);
+
+		/* Disable the TX Watermark interrupt to stop TX */
+		if (!dma)
+			writel_relaxed(0, gi3c->se.base +
+				SE_GENI_TX_WATERMARK_REG);
+		goto irqret;
+	}
+
+	if (dma) {
+		dev_dbg(gi3c->se.dev, "i3c dma tx:0x%x, dma rx:0x%x\n",
+			dm_tx_st, dm_rx_st);
+		goto irqret;
+	}
+
+	if ((m_stat & (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN)) &&
+		(gi3c->cur_rnw == READ_TRANSACTION) &&
+		gi3c->cur_buf) {
+		u32 rxcnt = rx_st & RX_FIFO_WC_MSK;
+
+		for (j = 0; j < rxcnt; j++) {
+			u32 val;
+			int p = 0;
+
+			val = readl_relaxed(gi3c->se.base + SE_GENI_RX_FIFOn);
+			while (gi3c->cur_idx < gi3c->cur_len &&
+				 p < sizeof(val)) {
+				gi3c->cur_buf[gi3c->cur_idx++] = val & 0xff;
+				val >>= 8;
+				p++;
+			}
+			if (gi3c->cur_idx == gi3c->cur_len)
+				break;
+		}
+	} else if ((m_stat & M_TX_FIFO_WATERMARK_EN) &&
+		(gi3c->cur_rnw == WRITE_TRANSACTION) &&
+		(gi3c->cur_buf)) {
+		for (j = 0; j < gi3c->tx_wm; j++) {
+			u32 temp;
+			u32 val = 0;
+			int p = 0;
+
+			while (gi3c->cur_idx < gi3c->cur_len &&
+					p < sizeof(val)) {
+				temp = gi3c->cur_buf[gi3c->cur_idx++];
+				val |= temp << (p * 8);
+				p++;
+			}
+			writel_relaxed(val, gi3c->se.base + SE_GENI_TX_FIFOn);
+			if (gi3c->cur_idx == gi3c->cur_len) {
+				writel_relaxed(0, gi3c->se.base +
+					SE_GENI_TX_WATERMARK_REG);
+				break;
+			}
+		}
+	}
+irqret:
+	if (m_stat)
+		writel_relaxed(m_stat, gi3c->se.base + SE_GENI_M_IRQ_CLEAR);
+
+	if (dma) {
+		if (dm_tx_st)
+			writel_relaxed(dm_tx_st,
+				gi3c->se.base + SE_DMA_TX_IRQ_CLR);
+		if (dm_rx_st)
+			writel_relaxed(dm_rx_st,
+				gi3c->se.base + SE_DMA_RX_IRQ_CLR);
+	}
+	/* An error without the done bit set is handled via the timeout path. */
+	if (m_stat & M_CMD_DONE_EN || m_stat & M_CMD_ABORT_EN) {
+		writel_relaxed(0, gi3c->se.base + SE_GENI_TX_WATERMARK_REG);
+		complete(&gi3c->done);
+	} else if ((dm_tx_st & TX_DMA_DONE) ||
+		(dm_rx_st & RX_DMA_DONE) ||
+		(dm_rx_st & RX_RESET_DONE))
+		complete(&gi3c->done);
+
+	spin_unlock_irqrestore(&gi3c->spinlock, flags);
+	return IRQ_HANDLED;
+}
+
+static int i3c_geni_runtime_get_mutex_lock(struct geni_i3c_dev *gi3c)
+{
+	int ret;
+
+	mutex_lock(&gi3c->lock);
+
+	reinit_completion(&gi3c->done);
+	ret = pm_runtime_get_sync(gi3c->se.dev);
+	if (ret < 0) {
+		dev_err(gi3c->se.dev,
+			"error turning on SE resources:%d\n", ret);
+		pm_runtime_put_noidle(gi3c->se.dev);
+		/* Set device in suspended since resume failed */
+		pm_runtime_set_suspended(gi3c->se.dev);
+
+		mutex_unlock(&gi3c->lock);
+		return ret;
+	}
+
+	qcom_geni_i3c_conf(gi3c);
+
+	return 0; /* return 0 to indicate SUCCESS */
+}
+
+static void i3c_geni_runtime_put_mutex_unlock(struct geni_i3c_dev *gi3c)
+{
+	pm_runtime_mark_last_busy(gi3c->se.dev);
+	pm_runtime_put_autosuspend(gi3c->se.dev);
+	mutex_unlock(&gi3c->lock);
+}
+
+static int _i3c_geni_execute_command(struct geni_i3c_dev *gi3c,
+				     struct i3c_xfer_params *xfer)
+{
+	dma_addr_t tx_dma = 0;
+	dma_addr_t rx_dma = 0;
+	int ret, time_remaining = 0;
+	enum i3c_trans_dir rnw = gi3c->cur_rnw;
+	u32 len = gi3c->cur_len;
+
+	geni_se_select_mode(gi3c->se.base, xfer->mode);
+
+	gi3c->err = 0;
+	gi3c->cur_idx = 0;
+
+	if (rnw == READ_TRANSACTION) {
+		dev_dbg(gi3c->se.dev, "I3C cmd:0x%x param:0x%x READ len:%d\n",
+			xfer->m_cmd, xfer->m_param, len);
+		writel_relaxed(len, gi3c->se.base + SE_I3C_RX_TRANS_LEN);
+		geni_setup_m_cmd(gi3c->se.base, xfer->m_cmd, xfer->m_param);
+		if (xfer->mode == SE_DMA) {
+			ret = geni_se_rx_dma_prep(gi3c->se.i3c_rsc.wrapper_dev,
+					gi3c->se.base, gi3c->cur_buf,
+					len, &rx_dma);
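+			/* Fall back to FIFO mode if DMA mapping fails */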
+			if (ret) {
+				xfer->mode = FIFO_MODE;
+				geni_se_select_mode(gi3c->se.base, xfer->mode);
+			}
+		}
+	} else {
+		dev_dbg(gi3c->se.dev, "I3C cmd:0x%x param:0x%x WRITE len:%d\n",
+			xfer->m_cmd, xfer->m_param, len);
+		writel_relaxed(len, gi3c->se.base + SE_I3C_TX_TRANS_LEN);
+		geni_setup_m_cmd(gi3c->se.base, xfer->m_cmd, xfer->m_param);
+		if (xfer->mode == SE_DMA) {
+			ret = geni_se_tx_dma_prep(gi3c->se.i3c_rsc.wrapper_dev,
+					gi3c->se.base, gi3c->cur_buf,
+					len, &tx_dma);
+			if (ret) {
+				xfer->mode = FIFO_MODE;
+				geni_se_select_mode(gi3c->se.base, xfer->mode);
+			}
+		}
+		if (xfer->mode == FIFO_MODE && len > 0) /* arm TX watermark */
+			writel_relaxed(1, gi3c->se.base +
+				SE_GENI_TX_WATERMARK_REG);
+	}
+	time_remaining = wait_for_completion_timeout(&gi3c->done,
+						XFER_TIMEOUT);
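+	/* On timeout, abort the command and wait for the abort to complete. */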
+	if (!time_remaining) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&gi3c->spinlock, flags);
+		geni_i3c_err(gi3c, GENI_TIMEOUT);
+		gi3c->cur_buf = NULL;
+		gi3c->cur_len = gi3c->cur_idx = 0;
+		gi3c->cur_rnw = 0;
+		geni_abort_m_cmd(gi3c->se.base);
+		spin_unlock_irqrestore(&gi3c->spinlock, flags);
+		time_remaining = wait_for_completion_timeout(&gi3c->done,
+							XFER_TIMEOUT);
+	}
+	if (xfer->mode == SE_DMA) {
+		if (gi3c->err) {
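+			/* Reset the DMA engine that was used for this transfer */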
+			if (rnw == READ_TRANSACTION)
+				writel_relaxed(1, gi3c->se.base +
+					SE_DMA_RX_FSM_RST);
+			else
+				writel_relaxed(1, gi3c->se.base +
+					SE_DMA_TX_FSM_RST);
+			wait_for_completion_timeout(&gi3c->done, XFER_TIMEOUT);
+		}
+		geni_se_rx_dma_unprep(gi3c->se.i3c_rsc.wrapper_dev,
+				rx_dma, len);
+		geni_se_tx_dma_unprep(gi3c->se.i3c_rsc.wrapper_dev,
+				tx_dma, len);
+	}
+	ret = gi3c->err;
+	if (gi3c->err)
+		dev_err(gi3c->se.dev, "I3C transaction error :%d\n", gi3c->err);
+
+	gi3c->cur_buf = NULL;
+	gi3c->cur_len = gi3c->cur_idx = 0;
+	gi3c->cur_rnw = 0;
+	gi3c->err = 0;
+
+	return ret;
+}
+
+static int i3c_geni_execute_read_command(struct geni_i3c_dev *gi3c,
+					 struct i3c_xfer_params *xfer,
+					 u8 *buf, u32 len)
+{
+	gi3c->cur_rnw = READ_TRANSACTION;
+	gi3c->cur_buf = buf;
+	gi3c->cur_len = len;
+	return _i3c_geni_execute_command(gi3c, xfer);
+}
+
+static int i3c_geni_execute_write_command(struct geni_i3c_dev *gi3c,
+					  struct i3c_xfer_params *xfer,
+					  u8 *buf, u32 len)
+{
+	gi3c->cur_rnw = WRITE_TRANSACTION;
+	gi3c->cur_buf = buf;
+	gi3c->cur_len = len;
+	return _i3c_geni_execute_command(gi3c, xfer);
+}
+
+static void geni_i3c_perform_daa(struct geni_i3c_dev *gi3c)
+{
+	struct i3c_master_controller *m = &gi3c->ctrlr;
+	struct i3c_bus *bus = i3c_master_get_bus(m);
+	u8 last_dyn_addr = 0;
+	int ret;
+
+	while (1) {
+		u8 rx_buf[8], tx_buf[8];
+		struct i3c_xfer_params xfer = { FIFO_MODE };
+		struct i3c_device_info info = { 0 };
+		struct i3c_dev_desc *i3cdev;
+		bool new_device = true;
+		u64 pid;
+		u8 bcr, dcr, addr;
+
+		dev_dbg(gi3c->se.dev, "i3c entdaa read\n");
+
+		xfer.m_cmd = I2C_READ;
+		xfer.m_param = STOP_STRETCH | CONTINUOUS_MODE_DAA | USE_7E;
+
+		ret = i3c_geni_execute_read_command(gi3c, &xfer, rx_buf, 8);
+		if (ret)
+			break;
+
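+		/* ENTDAA returns 6 bytes of PID (MSB first), then BCR and DCR */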
+		dcr = rx_buf[7];
+		bcr = rx_buf[6];
+		pid = ((u64)rx_buf[0] << 40) |
+			((u64)rx_buf[1] << 32) |
+			((u64)rx_buf[2] << 24) |
+			((u64)rx_buf[3] << 16) |
+			((u64)rx_buf[4] <<  8) |
+			((u64)rx_buf[5]);
+
+		i3c_bus_for_each_i3cdev(bus, i3cdev) {
+			i3c_device_get_info(i3cdev->dev, &info);
+			if (pid == info.pid &&
+				dcr == info.dcr &&
+				bcr == info.bcr) {
+				new_device = false;
+				addr = (info.dyn_addr) ? info.dyn_addr :
+					info.static_addr;
+				break;
+			}
+		}
+
+		if (new_device) {
+			ret = i3c_master_get_free_addr(m,
+						last_dyn_addr + 1);
+			if (ret < 0)
+				goto daa_err;
+			addr = last_dyn_addr = (u8)ret;
+			set_new_addr_slot(gi3c->newaddrslots, addr);
+		}
+
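+		/*
+		 * The assigned-address byte carries the dynamic address in
+		 * bits 7:1 and an odd-parity bit in bit 0.
+		 */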
+		tx_buf[0] = (addr & I3C_ADDR_MASK) << 1;
+		tx_buf[0] |= !(hweight8(addr & I3C_ADDR_MASK) & 1);
+
+		dev_dbg(gi3c->se.dev, "i3c entdaa write\n");
+
+		xfer.m_cmd = I2C_WRITE;
+		xfer.m_param = STOP_STRETCH | BYPASS_ADDR_PHASE | USE_7E;
+
+		ret = i3c_geni_execute_write_command(gi3c, &xfer, tx_buf, 1);
+		if (ret)
+			break;
+	}
+daa_err:
+	return;
+}
+
+static int geni_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+					struct i3c_ccc_cmd *cmd)
+{
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	int i, ret;
+
+	if (!(cmd->id & I3C_CCC_DIRECT) && (cmd->ndests != 1))
+		return -EINVAL;
+
+	ret = i3c_geni_runtime_get_mutex_lock(gi3c);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < cmd->ndests; i++) {
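+		/* Stall (repeated START) between destinations and after ENTDAA */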
+		int stall = (i < (cmd->ndests - 1)) ||
+			(cmd->id == I3C_CCC_ENTDAA);
+		struct i3c_xfer_params xfer = { FIFO_MODE };
+
+		xfer.m_param  = (stall ? STOP_STRETCH : 0);
+		xfer.m_param |= (cmd->id << CCC_HDR_CMD_SHFT);
+		xfer.m_param |= IBI_NACK_TBL_CTRL;
+		if (cmd->id & I3C_CCC_DIRECT) {
+			xfer.m_param |= ((cmd->dests[i].addr & I3C_ADDR_MASK)
+					<< SLV_ADDR_SHFT);
+			if (cmd->rnw) {
+				if (i == 0)
+					xfer.m_cmd = I3C_DIRECT_CCC_READ;
+				else
+					xfer.m_cmd = I3C_PRIVATE_READ;
+			} else {
+				if (i == 0)
+					xfer.m_cmd =
+					   (cmd->dests[i].payload.len > 0) ?
+						I3C_DIRECT_CCC_WRITE :
+						I3C_DIRECT_CCC_ADDR_ONLY;
+				else
+					xfer.m_cmd = I3C_PRIVATE_WRITE;
+			}
+		} else {
+			if (cmd->dests[i].payload.len > 0)
+				xfer.m_cmd = I3C_BCAST_CCC_WRITE;
+			else
+				xfer.m_cmd = I3C_BCAST_CCC_ADDR_ONLY;
+		}
+
+		if (i == 0)
+			xfer.m_param |= USE_7E;
+
+		if (cmd->rnw)
+			ret = i3c_geni_execute_read_command(gi3c, &xfer,
+				cmd->dests[i].payload.data,
+				cmd->dests[i].payload.len);
+		else
+			ret = i3c_geni_execute_write_command(gi3c, &xfer,
+				cmd->dests[i].payload.data,
+				cmd->dests[i].payload.len);
+		if (ret)
+			break;
+
+		if (cmd->id == I3C_CCC_ENTDAA)
+			geni_i3c_perform_daa(gi3c);
+	}
+
+	dev_dbg(gi3c->se.dev, "i3c ccc: txn ret:%d\n", ret);
+
+	i3c_geni_runtime_put_mutex_unlock(gi3c);
+
+	return ret;
+}
+
+static int geni_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+				      struct i3c_priv_xfer *xfers,
+				      int nxfers)
+{
+	struct i3c_master_controller *m = i3c_dev_get_master(dev);
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	int i, ret;
+	bool use_7e = true;
+
+	if (nxfers <= 0)
+		return 0;
+
+	ret = i3c_geni_runtime_get_mutex_lock(gi3c);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < nxfers; i++) {
+		bool stall = (i < (nxfers - 1));
+		struct i3c_xfer_params xfer = { FIFO_MODE };
+
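+		/*
+		 * Hold the bus between all but the last transfer; the 0x7E
+		 * broadcast address is only needed on the first transfer
+		 * after a STOP.
+		 */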
+		xfer.m_param  = (stall ? STOP_STRETCH : 0);
+		xfer.m_param |= ((dev->info.dyn_addr & I3C_ADDR_MASK)
+				<< SLV_ADDR_SHFT);
+		xfer.m_param |= (use_7e) ? USE_7E : 0;
+
+		/* Update use_7e status for next loop iteration */
+		use_7e = !stall;
+
+		if (xfers[i].rnw) {
+			xfer.m_cmd = I3C_PRIVATE_READ;
+			ret = i3c_geni_execute_read_command(gi3c, &xfer,
+				(u8 *)xfers[i].data.in,
+				xfers[i].len);
+		} else {
+			xfer.m_cmd = I3C_PRIVATE_WRITE;
+			ret = i3c_geni_execute_write_command(gi3c, &xfer,
+				(u8 *)xfers[i].data.out,
+				xfers[i].len);
+		}
+
+		if (ret)
+			break;
+	}
+
+	dev_dbg(gi3c->se.dev, "i3c priv: txn ret:%d\n", ret);
+
+	i3c_geni_runtime_put_mutex_unlock(gi3c);
+
+	return ret;
+}
+
+static int geni_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+				     const struct i2c_msg *msgs, int num)
+{
+	struct i3c_master_controller *m = i2c_dev_get_master(dev);
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	int i, ret;
+
+	ret = i3c_geni_runtime_get_mutex_lock(gi3c);
+	if (ret)
+		return ret;
+
+	dev_dbg(gi3c->se.dev, "i2c xfer: num:%d, msgs[0].len:%d, flags:%d\n",
+		num, msgs[0].len, msgs[0].flags);
+	for (i = 0; i < num; i++) {
+		struct i3c_xfer_params xfer;
+
+		xfer.m_cmd    = (msgs[i].flags & I2C_M_RD) ? I2C_READ :
+							I2C_WRITE;
+		xfer.m_param  = (i < (num - 1)) ? STOP_STRETCH : 0;
+		xfer.m_param |= ((msgs[i].addr & I3C_ADDR_MASK)
+				<< SLV_ADDR_SHFT);
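+		/* Use SE DMA for transfers longer than 32 bytes, FIFO otherwise */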
+		xfer.mode     = msgs[i].len > 32 ? SE_DMA : FIFO_MODE;
+		if (msgs[i].flags & I2C_M_RD)
+			ret = i3c_geni_execute_read_command(gi3c, &xfer,
+						msgs[i].buf, msgs[i].len);
+		else
+			ret = i3c_geni_execute_write_command(gi3c, &xfer,
+						msgs[i].buf, msgs[i].len);
+		if (ret)
+			break;
+	}
+
+	dev_dbg(gi3c->se.dev, "i2c: txn ret:%d\n", ret);
+
+	i3c_geni_runtime_put_mutex_unlock(gi3c);
+
+	return ret;
+}
+
+static u32 geni_i3c_master_i2c_funcs(struct i3c_master_controller *m)
+{
+	return I2C_FUNC_I2C;
+}
+
+static int geni_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+	struct i3c_master_controller *m = i2c_dev_get_master(dev);
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	struct geni_i3c_i2c_dev_data *data;
+
+	data = devm_kzalloc(gi3c->se.dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	i2c_dev_set_master_data(dev, data);
+
+	return 0;
+}
+
+static void geni_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+	struct geni_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+
+	i2c_dev_set_master_data(dev, NULL);
+	kfree(data);
+}
+
+static int geni_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+	struct i3c_master_controller *m = i3c_dev_get_master(dev);
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	struct geni_i3c_i2c_dev_data *data;
+
+	data = devm_kzalloc(gi3c->se.dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	i3c_dev_set_master_data(dev, data);
+
+	return 0;
+}
+
+static int geni_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+					    u8 old_dyn_addr)
+{
+	return 0;
+}
+
+static void geni_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+	struct geni_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+	i3c_dev_set_master_data(dev, NULL);
+	kfree(data);
+}
+
+static int geni_i3c_master_entdaa_locked(struct geni_i3c_dev *gi3c)
+{
+	struct i3c_master_controller *m = &gi3c->ctrlr;
+	u8 addr;
+	int ret;
+
+	ret = i3c_master_entdaa_locked(m);
+	if (ret && ret != I3C_ERROR_M2)
+		return ret;
+
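+	/* Register every device that received a new dynamic address in DAA */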
+	for (addr = 0; addr <= I3C_ADDR_MASK; addr++) {
+		if (is_new_addr_slot_set(gi3c->newaddrslots, addr)) {
+			clear_new_addr_slot(gi3c->newaddrslots, addr);
+			i3c_master_add_i3c_dev_locked(m, addr);
+		}
+	}
+
+	return 0;
+}
+
+static int geni_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+
+	return geni_i3c_master_entdaa_locked(gi3c);
+}
+
+static int geni_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+	struct i3c_bus *bus = i3c_master_get_bus(m);
+	struct i3c_device_info info = { };
+	int ret;
+
+	ret = pm_runtime_get_sync(gi3c->se.dev);
+	if (ret < 0) {
+		dev_err(gi3c->se.dev, "%s: error turning on SE resources:%d\n",
+			__func__, ret);
+		pm_runtime_put_noidle(gi3c->se.dev);
+		/* Mark the device as suspended since resume failed */
+		pm_runtime_set_suspended(gi3c->se.dev);
+		return ret;
+	}
+
+	ret = geni_i3c_clk_map_idx(gi3c);
+	if (ret) {
+		dev_err(gi3c->se.dev,
+			"Invalid clk frequency %d Hz src or %ld Hz bus: %d\n",
+			gi3c->clk_src_freq, bus->scl_rate.i3c,
+			ret);
+		goto err_cleanup;
+	}
+
+	qcom_geni_i3c_conf(gi3c);
+
+	/* Get an address for the master. */
+	ret = i3c_master_get_free_addr(m, 0);
+	if (ret < 0)
+		goto err_cleanup;
+
+	info.dyn_addr = ret;
+	info.dcr = I3C_DCR_GENERIC_DEVICE;
+	info.bcr = I3C_BCR_I3C_MASTER | I3C_BCR_HDR_CAP;
+	info.pid = 0;
+
+	ret = i3c_master_set_info(&gi3c->ctrlr, &info);
+
+err_cleanup:
+	pm_runtime_mark_last_busy(gi3c->se.dev);
+	pm_runtime_put_autosuspend(gi3c->se.dev);
+
+	return ret;
+}
+
+static void geni_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+}
+
+static bool geni_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+					     const struct i3c_ccc_cmd *cmd)
+{
+	switch (cmd->id) {
+	case I3C_CCC_ENEC(true):
+	case I3C_CCC_ENEC(false):
+	case I3C_CCC_DISEC(true):
+	case I3C_CCC_DISEC(false):
+	case I3C_CCC_ENTAS(0, true):
+	case I3C_CCC_ENTAS(0, false):
+	case I3C_CCC_RSTDAA(true):
+	case I3C_CCC_RSTDAA(false):
+	case I3C_CCC_ENTDAA:
+	case I3C_CCC_SETMWL(true):
+	case I3C_CCC_SETMWL(false):
+	case I3C_CCC_SETMRL(true):
+	case I3C_CCC_SETMRL(false):
+	case I3C_CCC_DEFSLVS:
+	case I3C_CCC_ENTHDR(0):
+	case I3C_CCC_SETDASA:
+	case I3C_CCC_SETNEWDA:
+	case I3C_CCC_GETMWL:
+	case I3C_CCC_GETMRL:
+	case I3C_CCC_GETPID:
+	case I3C_CCC_GETBCR:
+	case I3C_CCC_GETDCR:
+	case I3C_CCC_GETSTATUS:
+	case I3C_CCC_GETACCMST:
+	case I3C_CCC_GETMXDS:
+	case I3C_CCC_GETHDRCAP:
+		return true;
+	default:
+		break;
+	}
+
+	return false;
+}
+
+static int geni_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+	return -ENOTSUPP;
+}
+
+static int geni_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+	return -ENOTSUPP;
+}
+
+static int geni_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+	const struct i3c_ibi_setup *req)
+{
+	return -ENOTSUPP;
+}
+
+static void geni_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+}
+
+static void geni_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+					     struct i3c_ibi_slot *slot)
+{
+}
+
+static const struct i3c_master_controller_ops geni_i3c_master_ops = {
+	.bus_init = geni_i3c_master_bus_init,
+	.bus_cleanup = geni_i3c_master_bus_cleanup,
+	.do_daa = geni_i3c_master_do_daa,
+	.attach_i3c_dev = geni_i3c_master_attach_i3c_dev,
+	.reattach_i3c_dev = geni_i3c_master_reattach_i3c_dev,
+	.detach_i3c_dev = geni_i3c_master_detach_i3c_dev,
+	.attach_i2c_dev = geni_i3c_master_attach_i2c_dev,
+	.detach_i2c_dev = geni_i3c_master_detach_i2c_dev,
+	.supports_ccc_cmd = geni_i3c_master_supports_ccc_cmd,
+	.send_ccc_cmd = geni_i3c_master_send_ccc_cmd,
+	.priv_xfers = geni_i3c_master_priv_xfers,
+	.i2c_xfers = geni_i3c_master_i2c_xfers,
+	.i2c_funcs = geni_i3c_master_i2c_funcs,
+	.enable_ibi = geni_i3c_master_enable_ibi,
+	.disable_ibi = geni_i3c_master_disable_ibi,
+	.request_ibi = geni_i3c_master_request_ibi,
+	.free_ibi = geni_i3c_master_free_ibi,
+	.recycle_ibi_slot = geni_i3c_master_recycle_ibi_slot,
+};
+
+static int i3c_geni_rsrcs_clk_init(struct geni_i3c_dev *gi3c)
+{
+	int ret;
+
+	gi3c->se.i3c_rsc.se_clk = devm_clk_get(gi3c->se.dev, "se-clk");
+	if (IS_ERR(gi3c->se.i3c_rsc.se_clk)) {
+		ret = PTR_ERR(gi3c->se.i3c_rsc.se_clk);
+		dev_err(gi3c->se.dev, "Error getting SE Core clk %d\n", ret);
+		return ret;
+	}
+
+	gi3c->se.i3c_rsc.m_ahb_clk = devm_clk_get(gi3c->se.dev, "m-ahb");
+	if (IS_ERR(gi3c->se.i3c_rsc.m_ahb_clk)) {
+		ret = PTR_ERR(gi3c->se.i3c_rsc.m_ahb_clk);
+		dev_err(gi3c->se.dev, "Error getting M AHB clk %d\n", ret);
+		return ret;
+	}
+
+	gi3c->se.i3c_rsc.s_ahb_clk = devm_clk_get(gi3c->se.dev, "s-ahb");
+	if (IS_ERR(gi3c->se.i3c_rsc.s_ahb_clk)) {
+		ret = PTR_ERR(gi3c->se.i3c_rsc.s_ahb_clk);
+		dev_err(gi3c->se.dev, "Error getting S AHB clk %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int i3c_geni_rsrcs_init(struct geni_i3c_dev *gi3c,
+			struct platform_device *pdev)
+{
+	struct resource *res;
+	struct platform_device *wrapper_pdev;
+	struct device_node *wrapper_ph_node;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	gi3c->se.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(gi3c->se.base))
+		return PTR_ERR(gi3c->se.base);
+
+	wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
+			"qcom,wrapper-core", 0);
+	if (IS_ERR_OR_NULL(wrapper_ph_node)) {
+		ret = PTR_ERR(wrapper_ph_node);
+		dev_err(&pdev->dev, "No wrapper core defined\n");
+		return ret;
+	}
+
+	wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
+	of_node_put(wrapper_ph_node);
+	if (IS_ERR_OR_NULL(wrapper_pdev)) {
+		ret = PTR_ERR(wrapper_pdev);
+		dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
+		return ret;
+	}
+
+	gi3c->se.i3c_rsc.wrapper_dev = &wrapper_pdev->dev;
+
+	ret = geni_se_resources_init(&gi3c->se.i3c_rsc, I3C_CORE2X_VOTE,
+				     (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
+	if (ret) {
+		dev_err(gi3c->se.dev, "geni_se_resources_init failed:%d\n", ret);
+		return ret;
+	}
+
+	ret = device_property_read_u32(&pdev->dev, "se-clock-frequency",
+		&gi3c->clk_src_freq);
+	if (ret) {
+		dev_info(&pdev->dev,
+			"SE clk freq not specified, defaulting to 100 MHz\n");
+		gi3c->clk_src_freq = 100000000;
+	}
+
+	ret = device_property_read_u32(&pdev->dev, "dfs-index",
+		&gi3c->dfs_idx);
+	if (ret)
+		gi3c->dfs_idx = 0xf;
+
+	gi3c->se.i3c_rsc.geni_pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR(gi3c->se.i3c_rsc.geni_pinctrl)) {
+		dev_err(&pdev->dev, "Error no pinctrl config specified\n");
+		ret = PTR_ERR(gi3c->se.i3c_rsc.geni_pinctrl);
+		return ret;
+	}
+	gi3c->se.i3c_rsc.geni_gpio_active =
+		pinctrl_lookup_state(gi3c->se.i3c_rsc.geni_pinctrl, "default");
+	if (IS_ERR(gi3c->se.i3c_rsc.geni_gpio_active)) {
+		dev_err(&pdev->dev, "No default config specified\n");
+		ret = PTR_ERR(gi3c->se.i3c_rsc.geni_gpio_active);
+		return ret;
+	}
+	gi3c->se.i3c_rsc.geni_gpio_sleep =
+		pinctrl_lookup_state(gi3c->se.i3c_rsc.geni_pinctrl, "sleep");
+	if (IS_ERR(gi3c->se.i3c_rsc.geni_gpio_sleep)) {
+		dev_err(&pdev->dev, "No sleep config specified\n");
+		ret = PTR_ERR(gi3c->se.i3c_rsc.geni_gpio_sleep);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int geni_i3c_probe(struct platform_device *pdev)
+{
+	struct geni_i3c_dev *gi3c;
+	u32 proto, tx_depth;
+	int ret;
+	u32 se_mode;
+
+	gi3c = devm_kzalloc(&pdev->dev, sizeof(*gi3c), GFP_KERNEL);
+	if (!gi3c)
+		return -ENOMEM;
+
+	gi3c->se.dev = &pdev->dev;
+
+	ret = i3c_geni_rsrcs_init(gi3c, pdev);
+	if (ret)
+		return ret;
+
+	ret = i3c_geni_rsrcs_clk_init(gi3c);
+	if (ret)
+		return ret;
+
+	gi3c->irq = platform_get_irq(pdev, 0);
+	if (gi3c->irq < 0) {
+		dev_err(&pdev->dev, "IRQ error for i3c-master-geni\n");
+		return gi3c->irq;
+	}
+
+	ret = geni_i3c_clk_map_idx(gi3c);
+	if (ret) {
+		dev_err(&pdev->dev, "Invalid source clk frequency %d Hz: %d\n",
+			gi3c->clk_src_freq, ret);
+		return ret;
+	}
+
+	init_completion(&gi3c->done);
+	mutex_init(&gi3c->lock);
+	spin_lock_init(&gi3c->spinlock);
+	platform_set_drvdata(pdev, gi3c);
+	ret = devm_request_irq(&pdev->dev, gi3c->irq, geni_i3c_irq,
+		IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), gi3c);
+	if (ret) {
+		dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
+			gi3c->irq, ret);
+		return ret;
+	}
+	/* Disable the interrupt so that the system can enter low-power mode */
+	disable_irq(gi3c->irq);
+
+	ret = se_geni_resources_on(&gi3c->se.i3c_rsc);
+	if (ret) {
+		dev_err(&pdev->dev, "Error turning on resources %d\n", ret);
+		return ret;
+	}
+
+	if (!gi3c->ipcl) {
+		char ipc_name[I2C_NAME_SIZE];
+
+		snprintf(ipc_name, I2C_NAME_SIZE, "i3c-%d", gi3c->ctrlr.bus.id);
+		gi3c->ipcl = ipc_log_context_create(2, ipc_name, 0);
+	}
+
+	proto = get_se_proto(gi3c->se.base);
+	if (proto != I3C) {
+		dev_err(&pdev->dev, "Invalid proto %d\n", proto);
+		se_geni_resources_off(&gi3c->se.i3c_rsc);
+		return -ENXIO;
+	}
+
+	se_mode = readl_relaxed(gi3c->se.base + GENI_IF_FIFO_DISABLE_RO);
+	if (se_mode) {
+		dev_err(&pdev->dev, "Non supported mode %d\n", se_mode);
+		se_geni_resources_off(&gi3c->se.i3c_rsc);
+		return -ENXIO;
+	}
+
+	tx_depth = get_tx_fifo_depth(gi3c->se.base);
+	gi3c->tx_wm = tx_depth - 1;
+	geni_se_init(gi3c->se.base, gi3c->tx_wm, tx_depth);
+	se_config_packing(gi3c->se.base, BITS_PER_BYTE, PACKING_BYTES_PW, true);
+	se_geni_resources_off(&gi3c->se.i3c_rsc);
+	GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+		"i3c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
+
+	pm_runtime_set_suspended(gi3c->se.dev);
+	pm_runtime_set_autosuspend_delay(gi3c->se.dev, I3C_AUTO_SUSPEND_DELAY);
+	pm_runtime_use_autosuspend(gi3c->se.dev);
+	pm_runtime_enable(gi3c->se.dev);
+
+	ret = i3c_master_register(&gi3c->ctrlr, &pdev->dev,
+		&geni_i3c_master_ops, false);
+	if (ret)
+		return ret;
+
+	GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "I3C probed\n");
+	return ret;
+}
+
+static int geni_i3c_remove(struct platform_device *pdev)
+{
+	struct geni_i3c_dev *gi3c = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	pm_runtime_disable(gi3c->se.dev);
+	ret = i3c_master_unregister(&gi3c->ctrlr);
+	if (gi3c->ipcl)
+		ipc_log_context_destroy(gi3c->ipcl);
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int geni_i3c_runtime_suspend(struct device *dev)
+{
+	struct geni_i3c_dev *gi3c = dev_get_drvdata(dev);
+
+	disable_irq(gi3c->irq);
+	se_geni_resources_off(&gi3c->se.i3c_rsc);
+	return 0;
+}
+
+static int geni_i3c_runtime_resume(struct device *dev)
+{
+	int ret;
+	struct geni_i3c_dev *gi3c = dev_get_drvdata(dev);
+
+	ret = se_geni_resources_on(&gi3c->se.i3c_rsc);
+	if (ret)
+		return ret;
+
+	enable_irq(gi3c->irq);
+
+	return 0;
+}
+#else
+static int geni_i3c_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int geni_i3c_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops geni_i3c_pm_ops = {
+	.runtime_suspend = geni_i3c_runtime_suspend,
+	.runtime_resume  = geni_i3c_runtime_resume,
+};
+
+static const struct of_device_id geni_i3c_dt_match[] = {
+	{ .compatible = "qcom,geni-i3c" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, geni_i3c_dt_match);
+
+static struct platform_driver geni_i3c_master = {
+	.probe  = geni_i3c_probe,
+	.remove = geni_i3c_remove,
+	.driver = {
+		.name = "geni_i3c_master",
+		.pm = &geni_i3c_pm_ops,
+		.of_match_table = geni_i3c_dt_match,
+	},
+};
+
+module_platform_driver(geni_i3c_master);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:geni_i3c_master");
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 45c9974..0e51803 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -544,7 +544,7 @@
 		drive->proc = proc_mkdir(drive->name, parent);
 		if (drive->proc) {
 			ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
-			proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
+			proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
 					drive->proc, &ide_settings_proc_fops,
 					drive);
 		}
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index af53a10..471caa5 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1490,6 +1490,7 @@
 	{"KXCJ1008", KXCJ91008},
 	{"KXCJ9000", KXCJ91008},
 	{"KIOX000A", KXCJ91008},
+	{"KIOX010A", KXCJ91008}, /* KXCJ91008 inside the display of a 2-in-1 */
 	{"KXTJ1009", KXTJ21009},
 	{"SMO8500",  KXCJ91008},
 	{ },
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 031d568..4e339cf 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -27,9 +27,18 @@
 #include <linux/iio/machine.h>
 #include <linux/iio/driver.h>
 
-#define AXP288_ADC_EN_MASK		0xF1
-#define AXP288_ADC_TS_PIN_GPADC		0xF2
-#define AXP288_ADC_TS_PIN_ON		0xF3
+/*
+ * This mask enables all ADCs except for the battery temp-sensor (TS), which is
+ * left as-is to avoid breaking charging on devices without a temp-sensor.
+ */
+#define AXP288_ADC_EN_MASK				0xF0
+#define AXP288_ADC_TS_ENABLE				0x01
+
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK		GENMASK(1, 0)
+#define AXP288_ADC_TS_CURRENT_OFF			(0 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING		(1 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND		(2 << 0)
+#define AXP288_ADC_TS_CURRENT_ON			(3 << 0)
 
 enum axp288_adc_id {
 	AXP288_ADC_TS,
@@ -44,6 +53,7 @@
 struct axp288_adc_info {
 	int irq;
 	struct regmap *regmap;
+	bool ts_enabled;
 };
 
 static const struct iio_chan_spec axp288_adc_channels[] = {
@@ -115,21 +125,33 @@
 	return IIO_VAL_INT;
 }
 
-static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
-				unsigned long address)
+/*
+ * The current-source used for the battery temp-sensor (TS) is shared
+ * with the GPADC. For proper fuel-gauge and charger operation the TS
+ * current-source needs to be permanently on. But to read the GPADC we
+ * need to temporary switch the TS current-source to ondemand, so that
+ * the GPADC can use it, otherwise we will always read an all 0 value.
+ */
+static int axp288_adc_set_ts(struct axp288_adc_info *info,
+			     unsigned int mode, unsigned long address)
 {
 	int ret;
 
-	/* channels other than GPADC do not need to switch TS pin */
+	/* No need to switch the current-source if the TS pin is disabled */
+	if (!info->ts_enabled)
+		return 0;
+
+	/* Channels other than GPADC do not need the current source */
 	if (address != AXP288_GP_ADC_H)
 		return 0;
 
-	ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+	ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+				 AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
 	if (ret)
 		return ret;
 
 	/* When switching to the GPADC pin give things some time to settle */
-	if (mode == AXP288_ADC_TS_PIN_GPADC)
+	if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
 		usleep_range(6000, 10000);
 
 	return 0;
@@ -145,14 +167,14 @@
 	mutex_lock(&indio_dev->mlock);
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+		if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
 					chan->address)) {
 			dev_err(&indio_dev->dev, "GPADC mode\n");
 			ret = -EINVAL;
 			break;
 		}
 		ret = axp288_adc_read_channel(val, chan->address, info->regmap);
-		if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+		if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
 						chan->address))
 			dev_err(&indio_dev->dev, "TS pin restore\n");
 		break;
@@ -164,13 +186,35 @@
 	return ret;
 }
 
-static int axp288_adc_set_state(struct regmap *regmap)
+static int axp288_adc_initialize(struct axp288_adc_info *info)
 {
-	/* ADC should be always enabled for internal FG to function */
-	if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
-		return -EIO;
+	int ret, adc_enable_val;
 
-	return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+	/*
+	 * Determine if the TS pin is enabled and set the TS current-source
+	 * accordingly.
+	 */
+	ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
+	if (ret)
+		return ret;
+
+	if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
+		info->ts_enabled = true;
+		ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+					 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+					 AXP288_ADC_TS_CURRENT_ON);
+	} else {
+		info->ts_enabled = false;
+		ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+					 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+					 AXP288_ADC_TS_CURRENT_OFF);
+	}
+	if (ret)
+		return ret;
+
+	/* Turn on the ADC for all channels except TS, leave TS as is */
+	return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
+				  AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
 }
 
 static const struct iio_info axp288_adc_iio_info = {
@@ -200,7 +244,7 @@
 	 * Set ADC to enabled state at all time, including system suspend.
 	 * otherwise internal fuel gauge functionality may be affected.
 	 */
-	ret = axp288_adc_set_state(axp20x->regmap);
+	ret = axp288_adc_initialize(info);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to enable ADC device\n");
 		return ret;
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index da2d16d..5dd104c 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -587,8 +587,11 @@
 	struct clk_init_data init;
 	const char *clk_parents[1];
 
-	init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_div",
-				   indio_dev->dev.of_node);
+	init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_div",
+				   dev_name(indio_dev->dev.parent));
+	if (!init.name)
+		return -ENOMEM;
+
 	init.flags = 0;
 	init.ops = &clk_divider_ops;
 	clk_parents[0] = __clk_get_name(priv->clkin);
@@ -606,8 +609,11 @@
 	if (WARN_ON(IS_ERR(priv->adc_div_clk)))
 		return PTR_ERR(priv->adc_div_clk);
 
-	init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_en",
-				   indio_dev->dev.of_node);
+	init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_en",
+				   dev_name(indio_dev->dev.parent));
+	if (!init.name)
+		return -ENOMEM;
+
 	init.flags = CLK_SET_RATE_PARENT;
 	init.ops = &clk_gate_ops;
 	clk_parents[0] = __clk_get_name(priv->adc_div_clk);
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 184d686..8b4568e 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -41,6 +41,7 @@
 
 #define ADS8688_VREF_MV			4096
 #define ADS8688_REALBITS		16
+#define ADS8688_MAX_CHANNELS		8
 
 /*
  * enum ads8688_range - ADS8688 reference voltage range
@@ -385,7 +386,7 @@
 {
 	struct iio_poll_func *pf = p;
 	struct iio_dev *indio_dev = pf->indio_dev;
-	u16 buffer[8];
+	u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
 	int i, j = 0;
 
 	for (i = 0; i < indio_dev->masklength; i++) {
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index a406ad3..3a20cb5 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -444,9 +444,8 @@
 	case IIO_CHAN_INFO_SCALE:
 		switch (chan->type) {
 		case IIO_TEMP:
-			*val = 1; /* 0.01 */
-			*val2 = 100;
-			break;
+			*val = 10;
+			return IIO_VAL_INT;
 		case IIO_PH:
 			*val = 1; /* 0.001 */
 			*val2 = 1000;
@@ -477,7 +476,7 @@
 			   int val, int val2, long mask)
 {
 	struct atlas_data *data = iio_priv(indio_dev);
-	__be32 reg = cpu_to_be32(val);
+	__be32 reg = cpu_to_be32(val / 10);
 
 	if (val2 != 0 || val < 0 || val > 20000)
 		return -EINVAL;
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 2ddbfc3..cba62ad2 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -124,7 +124,8 @@
 		mutex_unlock(&indio_dev->mlock);
 		if (ret < 0)
 			return ret;
-		*val = ret;
+		*val = (ret >> chan->scan_type.shift) &
+			GENMASK(chan->scan_type.realbits - 1, 0);
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_SCALE:
 		*val = st->vref_mv;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 0385ab4..f6fa9b1 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -579,10 +579,6 @@
 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
 		goto err;
-	if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
-	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
-			pd->unsafe_global_rkey))
-		goto err;
 
 	if (fill_res_name_pid(msg, res))
 		goto err;
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 25d43c8..558de0b 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -267,6 +267,9 @@
 	struct net_device *cookie_ndev = cookie;
 	bool match = false;
 
+	if (!rdma_ndev)
+		return false;
+
 	rcu_read_lock();
 	if (netif_is_bond_master(cookie_ndev) &&
 	    rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1fc7564..34ffca6 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -488,7 +488,7 @@
 		vmf = 1;
 		break;
 	case STATUS:
-		if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
+		if (flags & VM_WRITE) {
 			ret = -EPERM;
 			goto done;
 		}
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 9bd63ab..6f013a5 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1157,6 +1157,7 @@
 		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
 		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
 			break;
+		rvt_qp_wqe_unreserve(qp, wqe);
 		s_last = qp->s_last;
 		trace_hfi1_qp_send_completion(qp, wqe, s_last);
 		if (++s_last >= qp->s_size)
@@ -1209,6 +1210,7 @@
 		u32 s_last;
 
 		rvt_put_swqe(wqe);
+		rvt_qp_wqe_unreserve(qp, wqe);
 		s_last = qp->s_last;
 		trace_hfi1_qp_send_completion(qp, wqe, s_last);
 		if (++s_last >= qp->s_size)
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 5f56f3c..62a3832 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -278,6 +278,8 @@
 			goto op_err;
 		if (!ret)
 			goto rnr_nak;
+		if (wqe->length > qp->r_len)
+			goto inv_err;
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -445,7 +447,10 @@
 	goto err;
 
 inv_err:
-	send_status = IB_WC_REM_INV_REQ_ERR;
+	send_status =
+		sqp->ibqp.qp_type == IB_QPT_RC ?
+			IB_WC_REM_INV_REQ_ERR :
+			IB_WC_SUCCESS;
 	wc.status = IB_WC_LOC_QP_OP_ERR;
 	goto err;
 
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 70d39fc..54eb695 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -980,7 +980,6 @@
 	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 3dfb4cf..48692ad 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1141,6 +1141,8 @@
 
 				if (slen > len)
 					slen = len;
+				if (slen > ss->sge.sge_length)
+					slen = ss->sge.sge_length;
 				rvt_update_sge(ss, slen, false);
 				seg_pio_copy_mid(pbuf, addr, slen);
 				len -= slen;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index f2f11e6..02f36ab 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -857,7 +857,9 @@
 
 	err = uverbs_get_flags32(&access, attrs,
 				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
-				 IB_ACCESS_SUPPORTED);
+				 IB_ACCESS_LOCAL_WRITE |
+				 IB_ACCESS_REMOTE_WRITE |
+				 IB_ACCESS_REMOTE_READ);
 	if (err)
 		return err;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 0d3473b..21f4239 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -533,7 +533,7 @@
 	{
 		struct mthca_ucontext *context;
 
-		qp = kmalloc(sizeof *qp, GFP_KERNEL);
+		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 
@@ -599,7 +599,7 @@
 		if (pd->uobject)
 			return ERR_PTR(-EINVAL);
 
-		qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
+		qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index f8a7de7..563f71e 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -274,6 +274,8 @@
 			goto op_err;
 		if (!ret)
 			goto rnr_nak;
+		if (wqe->length > qp->r_len)
+			goto inv_err;
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -434,7 +436,10 @@
 	goto err;
 
 inv_err:
-	send_status = IB_WC_REM_INV_REQ_ERR;
+	send_status =
+		sqp->ibqp.qp_type == IB_QPT_RC ?
+			IB_WC_REM_INV_REQ_ERR :
+			IB_WC_SUCCESS;
 	wc.status = IB_WC_LOC_QP_OP_ERR;
 	goto err;
 
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index f8d029a..bce2b5c 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -513,7 +513,6 @@
 	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 9973ac8..3db2324 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -334,13 +334,16 @@
 
 	usnic_dbg("\n");
 
-	mutex_lock(&us_ibdev->usdev_lock);
 	if (ib_get_eth_speed(ibdev, port, &props->active_speed,
-			     &props->active_width)) {
-		mutex_unlock(&us_ibdev->usdev_lock);
+			     &props->active_width))
 		return -EINVAL;
-	}
 
+	/*
+	 * usdev_lock is acquired after (and not before) the ib_get_eth_speed
+	 * call because acquiring rtnl_lock inside ib_get_eth_speed while
+	 * holding usdev_lock could lead to a deadlock.
+	 */
+	mutex_lock(&us_ibdev->usdev_lock);
 	/* props being zeroed by the caller, avoid zeroing it here */
 
 	props->lid = 0;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 42b8685..3c633ab 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -427,7 +427,40 @@
 
 static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
 {
-	return (enum pvrdma_wr_opcode)op;
+	switch (op) {
+	case IB_WR_RDMA_WRITE:
+		return PVRDMA_WR_RDMA_WRITE;
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
+	case IB_WR_SEND:
+		return PVRDMA_WR_SEND;
+	case IB_WR_SEND_WITH_IMM:
+		return PVRDMA_WR_SEND_WITH_IMM;
+	case IB_WR_RDMA_READ:
+		return PVRDMA_WR_RDMA_READ;
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+		return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
+	case IB_WR_LSO:
+		return PVRDMA_WR_LSO;
+	case IB_WR_SEND_WITH_INV:
+		return PVRDMA_WR_SEND_WITH_INV;
+	case IB_WR_RDMA_READ_WITH_INV:
+		return PVRDMA_WR_RDMA_READ_WITH_INV;
+	case IB_WR_LOCAL_INV:
+		return PVRDMA_WR_LOCAL_INV;
+	case IB_WR_REG_MR:
+		return PVRDMA_WR_FAST_REG_MR;
+	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+		return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
+	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+		return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+	case IB_WR_REG_SIG_MR:
+		return PVRDMA_WR_REG_SIG_MR;
+	default:
+		return PVRDMA_WR_ERROR;
+	}
 }
 
 static inline enum ib_wc_status pvrdma_wc_status_to_ib(
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 60083c0..9aeb330 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -721,6 +721,12 @@
 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
 			wqe_hdr->ex.imm_data = wr->ex.imm_data;
 
+		if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
+			*bad_wr = wr;
+			ret = -EINVAL;
+			goto out;
+		}
+
 		switch (qp->ibqp.qp_type) {
 		case IB_QPT_GSI:
 		case IB_QPT_UD:
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 8be2723..fa98a52 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -640,6 +640,7 @@
 			rmr->access = wqe->wr.wr.reg.access;
 			rmr->lkey = wqe->wr.wr.reg.key;
 			rmr->rkey = wqe->wr.wr.reg.key;
+			rmr->iova = wqe->wr.wr.reg.mr->iova;
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
 		} else {
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index fc6c880..4111b79 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -844,11 +844,16 @@
 
 	memset(&cqe, 0, sizeof(cqe));
 
-	wc->wr_id		= wqe->wr_id;
-	wc->status		= qp->resp.status;
-	wc->qp			= &qp->ibqp;
+	if (qp->rcq->is_user) {
+		uwc->status             = qp->resp.status;
+		uwc->qp_num             = qp->ibqp.qp_num;
+		uwc->wr_id              = wqe->wr_id;
+	} else {
+		wc->status              = qp->resp.status;
+		wc->qp                  = &qp->ibqp;
+		wc->wr_id               = wqe->wr_id;
+	}
 
-	/* fields after status are not required for errors */
 	if (wc->status == IB_WC_SUCCESS) {
 		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
 				pkt->mask & RXE_WRITE_MASK) ?
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1abe3c6..b22d02c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -248,7 +248,6 @@
 	struct list_head     list;
 	struct net_device   *dev;
 	struct ipoib_neigh  *neigh;
-	struct ipoib_path   *path;
 	struct ipoib_tx_buf *tx_ring;
 	unsigned int	     tx_head;
 	unsigned int	     tx_tail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0428e01e..aa9dcfc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1312,7 +1312,6 @@
 
 	neigh->cm = tx;
 	tx->neigh = neigh;
-	tx->path = path;
 	tx->dev = dev;
 	list_add(&tx->list, &priv->cm.start_list);
 	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1371,7 +1370,7 @@
 				neigh->daddr + QPN_AND_OPTIONS_OFFSET);
 			goto free_neigh;
 		}
-		memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
+		memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
 
 		spin_unlock_irqrestore(&priv->lock, flags);
 		netif_tx_unlock_bh(dev);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 0b34e90..2c1114e 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2951,7 +2951,6 @@
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
 	struct srp_rdma_ch *ch;
-	int i, j;
 	u8 status;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2963,15 +2962,6 @@
 	if (status)
 		return FAILED;
 
-	for (i = 0; i < target->ch_count; i++) {
-		ch = &target->ch[i];
-		for (j = 0; j < target->req_ring_size; ++j) {
-			struct srp_request *req = &ch->req_ring[j];
-
-			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
-		}
-	}
-
 	return SUCCESS;
 }
 
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index f37cbad..f4bce5a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2009,6 +2009,14 @@
 	kfree_rcu(ch, rcu);
 }
 
+/*
+ * Shut down the SCSI target session, tell the connection manager to
+ * disconnect the associated RDMA channel, transition the QP to the error
+ * state and remove the channel from the channel list. This function is
+ * typically called from inside srpt_zerolength_write_done(). Concurrent
+ * srpt_zerolength_write() calls from inside srpt_close_ch() are possible
+ * as long as the channel is on sport->nexus_list.
+ */
 static void srpt_release_channel_work(struct work_struct *w)
 {
 	struct srpt_rdma_ch *ch;
@@ -2036,6 +2044,11 @@
 	else
 		ib_destroy_cm_id(ch->ib_cm.cm_id);
 
+	sport = ch->sport;
+	mutex_lock(&sport->mutex);
+	list_del_rcu(&ch->list);
+	mutex_unlock(&sport->mutex);
+
 	srpt_destroy_ch_ib(ch);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2046,11 +2059,6 @@
 			     sdev, ch->rq_size,
 			     srp_max_req_size, DMA_FROM_DEVICE);
 
-	sport = ch->sport;
-	mutex_lock(&sport->mutex);
-	list_del_rcu(&ch->list);
-	mutex_unlock(&sport->mutex);
-
 	wake_up(&sport->ch_releaseQ);
 
 	kref_put(&ch->kref, srpt_free_ch);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cfc8b94..aa4e431 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -252,6 +252,8 @@
 	{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
 	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
 	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -428,6 +430,7 @@
 	XPAD_XBOXONE_VENDOR(0x0e6f),		/* 0x0e6f X-Box One controllers */
 	XPAD_XBOX360_VENDOR(0x0f0d),		/* Hori Controllers */
 	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
+	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries Controllers */
 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index a7dc286..840e537 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -126,12 +126,8 @@
 {
 	struct omap4_keypad *keypad_data = dev_id;
 
-	if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) {
-		/* Disable interrupts */
-		kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
-				 OMAP4_VAL_IRQDISABLE);
+	if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS))
 		return IRQ_WAKE_THREAD;
-	}
 
 	return IRQ_NONE;
 }
@@ -173,11 +169,6 @@
 	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
 			 kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
 
-	/* enable interrupts */
-	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
-		OMAP4_DEF_IRQENABLE_EVENTEN |
-				OMAP4_DEF_IRQENABLE_LONGKEY);
-
 	return IRQ_HANDLED;
 }
 
@@ -214,9 +205,10 @@
 
 	disable_irq(keypad_data->irq);
 
-	/* Disable interrupts */
+	/* Disable interrupts and wake-up events */
 	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
 			 OMAP4_VAL_IRQDISABLE);
+	kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0);
 
 	/* clear pending interrupts */
 	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
@@ -365,7 +357,7 @@
 	}
 
 	error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
-				     omap4_keypad_irq_thread_fn, 0,
+				     omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
 				     "omap4-keypad", keypad_data);
 	if (error) {
 		dev_err(&pdev->dev, "failed to register interrupt\n");
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 1efcfdf..dd9dd4e 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -481,13 +481,14 @@
 	idev->close = bma150_irq_close;
 	input_set_drvdata(idev, bma150);
 
+	bma150->input = idev;
+
 	error = input_register_device(idev);
 	if (error) {
 		input_free_device(idev);
 		return error;
 	}
 
-	bma150->input = idev;
 	return 0;
 }
 
@@ -510,15 +511,15 @@
 
 	bma150_init_input_device(bma150, ipoll_dev->input);
 
+	bma150->input_polled = ipoll_dev;
+	bma150->input = ipoll_dev->input;
+
 	error = input_register_polled_device(ipoll_dev);
 	if (error) {
 		input_free_polled_device(ipoll_dev);
 		return error;
 	}
 
-	bma150->input_polled = ipoll_dev;
-	bma150->input = ipoll_dev->input;
-
 	return 0;
 }
 
diff --git a/drivers/input/misc/qti-haptics.c b/drivers/input/misc/qti-haptics.c
index fab47fc..5e5274a 100644
--- a/drivers/input/misc/qti-haptics.c
+++ b/drivers/input/misc/qti-haptics.c
@@ -179,6 +179,7 @@
 	int			brake_pattern_length;
 	bool			brake_en;
 	bool			lra_auto_res_disable;
+	enum wf_src		wf_src;
 };
 
 struct qti_hap_play_info {
@@ -193,11 +194,9 @@
 	enum actutor_type	act_type;
 	enum lra_res_sig_shape	lra_shape;
 	enum lra_auto_res_mode	lra_auto_res_mode;
-	enum wf_src		ext_src;
 	u16			vmax_mv;
 	u16			play_rate_us;
 	bool			lra_allow_variable_play_rate;
-	bool			use_ext_wf_src;
 };
 
 struct qti_hap_chip {
@@ -205,7 +204,6 @@
 	struct device			*dev;
 	struct regmap			*regmap;
 	struct input_dev		*input_dev;
-	struct pwm_device		*pwm_dev;
 	struct qti_hap_config		config;
 	struct qti_hap_play_info	play;
 	struct qti_hap_effect		*predefined;
@@ -228,6 +226,7 @@
 
 static int wf_repeat[8] = {1, 2, 4, 8, 16, 32, 64, 128};
 static int wf_s_repeat[4] = {1, 2, 4, 8};
+static const char * const wf_src_str[] = {"vmax", "buffer", "audio", "pwm"};
 
 static inline bool is_secure(u8 addr)
 {
@@ -398,6 +397,11 @@
 	int rc = 0;
 	size_t len;
 
+	if (effect->pattern == NULL) {
+		dev_dbg(chip->dev, "no pattern for effect %d\n", effect->id);
+		return 0;
+	}
+
 	if (play->playing_pos == effect->pattern_length) {
 		dev_dbg(chip->dev, "pattern playing done\n");
 		return 0;
@@ -493,11 +497,8 @@
 	int rc;
 
 	addr = REG_HAP_SEL;
-	mask = HAP_WF_SOURCE_MASK | HAP_WF_TRIGGER_BIT;
+	mask = HAP_WF_SOURCE_MASK;
 	val = src << HAP_WF_SOURCE_SHIFT;
-	if (src == EXT_WF_AUDIO || src == EXT_WF_PWM)
-		val |= HAP_WF_TRIGGER_BIT;
-
 	rc = qti_haptics_masked_write(chip, addr, mask, val);
 	if (rc < 0)
 		dev_err(chip->dev, "set HAP_SEL failed, rc=%d\n", rc);
@@ -704,20 +705,22 @@
 	if (rc < 0)
 		return rc;
 
-	rc = qti_haptics_config_wf_buffer(chip);
+	/* Set corresponding WF_SOURCE */
+	rc = qti_haptics_config_wf_src(chip, play->effect->wf_src);
 	if (rc < 0)
 		return rc;
 
-	rc = qti_haptics_config_wf_repeat(chip);
-	if (rc < 0)
-		return rc;
+	if (play->effect->wf_src == INT_WF_BUFFER) {
+		rc = qti_haptics_config_wf_buffer(chip);
+		if (rc < 0)
+			return rc;
 
-	/* Set WF_SOURCE to buffer */
-	rc = qti_haptics_config_wf_src(chip, INT_WF_BUFFER);
-	if (rc < 0)
-		return rc;
+		rc = qti_haptics_config_wf_repeat(chip);
+		if (rc < 0)
+			return rc;
 
-	play->playing_pattern = true;
+		play->playing_pattern = true;
+	}
 
 	return 0;
 }
@@ -812,6 +815,17 @@
 	struct qti_hap_effect *effect = play->effect;
 	int tmp;
 
+	/*
+	 * Report a play length of 0 when playing a LINE-IN signal;
+	 * such playback has to be stopped explicitly by the
+	 * requester.
+	 */
+	if (effect->wf_src == EXT_WF_PWM ||
+			effect->wf_src == EXT_WF_AUDIO) {
+		*length_us = 0;
+		return;
+	}
+
 	tmp = effect->pattern_length * effect->play_rate_us;
 	tmp *= wf_s_repeat[effect->wf_s_repeat_n];
 	tmp *= wf_repeat[effect->wf_repeat_n];
@@ -961,11 +975,15 @@
 				disable_irq_nosync(chip->play_irq);
 				chip->play_irq_en = false;
 			}
-			secs = play->length_us / USEC_PER_SEC;
-			nsecs = (play->length_us % USEC_PER_SEC) *
-				NSEC_PER_USEC;
-			hrtimer_start(&chip->stop_timer, ktime_set(secs, nsecs),
-					HRTIMER_MODE_REL);
+
+			if (play->length_us != 0) {
+				secs = play->length_us / USEC_PER_SEC;
+				nsecs = (play->length_us % USEC_PER_SEC) *
+					NSEC_PER_USEC;
+				hrtimer_start(&chip->stop_timer,
+						ktime_set(secs, nsecs),
+						HRTIMER_MODE_REL);
+			}
 		}
 	} else {
 		play->length_us = 0;
@@ -1085,19 +1103,15 @@
 	if (rc < 0)
 		return rc;
 
-	/* Set external waveform source if it's used */
-	if (config->use_ext_wf_src) {
-		rc = qti_haptics_config_wf_src(chip, config->ext_src);
-		if (rc < 0)
-			return rc;
-	}
-
 	/*
 	 * Skip configurations below for ERM actuator
 	 * as they're only for LRA actuators
 	 */
-	if (config->act_type == ACT_ERM)
-		return 0;
+	if (config->act_type == ACT_ERM) {
+		/* Disable AUTO_RES for ERM */
+		rc = qti_haptics_lra_auto_res_enable(chip, false);
+		return rc;
+	}
 
 	addr = REG_HAP_CFG2;
 	val = config->lra_shape;
@@ -1206,6 +1220,61 @@
 			effect->vmax_mv = (tmp > HAP_VMAX_MV_MAX) ?
 				HAP_VMAX_MV_MAX : tmp;
 
+		effect->play_rate_us = config->play_rate_us;
+		rc = of_property_read_u32(child_node, "qcom,wf-play-rate-us",
+				&tmp);
+		if (rc < 0)
+			dev_dbg(chip->dev, "Read qcom,wf-play-rate-us failed, rc=%d\n",
+					rc);
+		else
+			effect->play_rate_us = tmp;
+
+		if (config->act_type == ACT_LRA &&
+				!config->lra_allow_variable_play_rate &&
+				config->play_rate_us != effect->play_rate_us) {
+			dev_warn(chip->dev, "play rate should match with LRA resonance frequency\n");
+			effect->play_rate_us = config->play_rate_us;
+		}
+
+		effect->lra_auto_res_disable = of_property_read_bool(child_node,
+				"qcom,lra-auto-resonance-disable");
+
+		tmp = of_property_count_elems_of_size(child_node,
+				"qcom,wf-brake-pattern", sizeof(u8));
+		if (tmp > 0) {
+			if (tmp > HAP_BRAKE_PATTERN_MAX) {
+				dev_err(chip->dev, "wf-brake-pattern shouldn't be more than %d bytes\n",
+						HAP_BRAKE_PATTERN_MAX);
+				return -EINVAL;
+			}
+
+			rc = of_property_read_u8_array(child_node,
+					"qcom,wf-brake-pattern",
+					effect->brake, tmp);
+			if (rc < 0) {
+				dev_err(chip->dev, "Failed to get wf-brake-pattern, rc=%d\n",
+						rc);
+				return rc;
+			}
+
+			effect->brake_pattern_length = tmp;
+			verify_brake_setting(effect);
+		}
+
+		effect->wf_src = INT_WF_BUFFER;
+		if (of_property_read_bool(child_node, "qcom,wf-line-in-pwm"))
+			effect->wf_src = EXT_WF_PWM;
+		if (of_property_read_bool(child_node, "qcom,wf-line-in-audio"))
+			effect->wf_src = EXT_WF_AUDIO;
+
+		/*
+		 * Ignore the wf-pattern configuration if the effect is
+		 * supposed to play a waveform/signal from the LINE-IN
+		 * pin.
+		 */
+		if (effect->wf_src != INT_WF_BUFFER)
+			continue;
+
 		rc = of_property_count_elems_of_size(child_node,
 				"qcom,wf-pattern", sizeof(u8));
 		if (rc < 0) {
@@ -1231,22 +1300,6 @@
 			return rc;
 		}
 
-		effect->play_rate_us = config->play_rate_us;
-		rc = of_property_read_u32(child_node, "qcom,wf-play-rate-us",
-				&tmp);
-		if (rc < 0)
-			dev_dbg(chip->dev, "Read qcom,wf-play-rate-us failed, rc=%d\n",
-					rc);
-		else
-			effect->play_rate_us = tmp;
-
-		if (config->act_type == ACT_LRA &&
-				!config->lra_allow_variable_play_rate &&
-				config->play_rate_us != effect->play_rate_us) {
-			dev_warn(chip->dev, "play rate should match with LRA resonance frequency\n");
-			effect->play_rate_us = config->play_rate_us;
-		}
-
 		rc = of_property_read_u32(child_node, "qcom,wf-repeat-count",
 				&tmp);
 		if (rc < 0) {
@@ -1272,53 +1325,33 @@
 
 			effect->wf_s_repeat_n = j;
 		}
-
-		effect->lra_auto_res_disable = of_property_read_bool(child_node,
-				"qcom,lra-auto-resonance-disable");
-
-		tmp = of_property_count_elems_of_size(child_node,
-				"qcom,wf-brake-pattern", sizeof(u8));
-		if (tmp <= 0)
-			continue;
-
-		if (tmp > HAP_BRAKE_PATTERN_MAX) {
-			dev_err(chip->dev, "wf-brake-pattern shouldn't be more than %d bytes\n",
-					HAP_BRAKE_PATTERN_MAX);
-			return -EINVAL;
-		}
-
-		rc = of_property_read_u8_array(child_node,
-				"qcom,wf-brake-pattern", effect->brake, tmp);
-		if (rc < 0) {
-			dev_err(chip->dev, "Failed to get wf-brake-pattern, rc=%d\n",
-					rc);
-			return rc;
-		}
-
-		effect->brake_pattern_length = tmp;
-		verify_brake_setting(effect);
 	}
 
 	for (j = 0; j < i; j++) {
 		dev_dbg(chip->dev, "effect: %d\n", chip->predefined[j].id);
-		dev_dbg(chip->dev, "        vmax: %d mv\n",
+		dev_dbg(chip->dev, "    vmax: %d mv\n",
 				chip->predefined[j].vmax_mv);
-		dev_dbg(chip->dev, "        play_rate: %d us\n",
-				chip->predefined[j].play_rate_us);
-		for (m = 0; m < chip->predefined[j].pattern_length; m++)
-			dev_dbg(chip->dev, "        pattern[%d]: 0x%x\n",
-					m, chip->predefined[j].pattern[m]);
-		for (m = 0; m < chip->predefined[j].brake_pattern_length; m++)
-			dev_dbg(chip->dev, "        brake_pattern[%d]: 0x%x\n",
-					m, chip->predefined[j].brake[m]);
+		dev_dbg(chip->dev, "    waveform source: %s\n",
+				wf_src_str[chip->predefined[j].wf_src]);
 		dev_dbg(chip->dev, "    brake_en: %d\n",
 				chip->predefined[j].brake_en);
+		for (m = 0; m < chip->predefined[j].brake_pattern_length; m++)
+			dev_dbg(chip->dev, "    brake_pattern[%d]: 0x%x\n",
+					m, chip->predefined[j].brake[m]);
+		dev_dbg(chip->dev, "    lra_auto_res_disable: %d\n",
+				chip->predefined[j].lra_auto_res_disable);
+		if (chip->predefined[j].wf_src != INT_WF_BUFFER)
+			continue;
+
+		for (m = 0; m < chip->predefined[j].pattern_length; m++)
+			dev_dbg(chip->dev, "    pattern[%d]: 0x%x\n",
+					m, chip->predefined[j].pattern[m]);
+		dev_dbg(chip->dev, "    play_rate: %d us\n",
+				chip->predefined[j].play_rate_us);
 		dev_dbg(chip->dev, "    wf_repeat_n: %d\n",
 				chip->predefined[j].wf_repeat_n);
 		dev_dbg(chip->dev, "    wf_s_repeat_n: %d\n",
 				chip->predefined[j].wf_s_repeat_n);
-		dev_dbg(chip->dev, "    lra_auto_res_disable: %d\n",
-				chip->predefined[j].lra_auto_res_disable);
 	}
 
 	return 0;
@@ -1422,22 +1455,6 @@
 		config->play_rate_us = (tmp >= HAP_PLAY_RATE_US_MAX) ?
 			HAP_PLAY_RATE_US_MAX : tmp;
 
-	if (of_find_property(node, "qcom,external-waveform-source", NULL)) {
-		if (!of_property_read_string(node,
-				"qcom,external-waveform-source", &str)) {
-			if (strcmp(str, "audio") == 0) {
-				config->ext_src = EXT_WF_AUDIO;
-			} else if (strcmp(str, "pwm") == 0) {
-				config->ext_src = EXT_WF_PWM;
-			} else {
-				dev_err(chip->dev, "Invalid external waveform source: %s\n",
-						str);
-				return -EINVAL;
-			}
-		}
-		config->use_ext_wf_src = true;
-	}
-
 	if (of_find_property(node, "vdd-supply", NULL)) {
 		chip->vdd_supply = devm_regulator_get(chip->dev, "vdd");
 		if (IS_ERR(chip->vdd_supply)) {
@@ -1604,6 +1621,34 @@
 DEFINE_DEBUGFS_ATTRIBUTE(auto_res_debugfs_ops,  auto_res_dbgfs_read,
 		auto_res_dbgfs_write, "%llu\n");
 
+#define WF_SRC_BYTES	12
+static ssize_t wf_src_dbgfs_read(struct file *filep,
+		char __user *buf, size_t count, loff_t *ppos)
+{
+	struct qti_hap_effect *effect =
+		(struct qti_hap_effect *)filep->private_data;
+	char kbuf[WF_SRC_BYTES] = {0};
+	int length;
+
+	length = snprintf(kbuf, WF_SRC_BYTES, "%s",
+			wf_src_str[effect->wf_src]);
+
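+	/* Leave room for the trailing newline and NUL appended below */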
+	if (length > WF_SRC_BYTES - 2)
+		return -EINVAL;
+
+	kbuf[length++] = '\n';
+	kbuf[length++] = '\0';
+
+	return simple_read_from_buffer(buf, count, ppos, kbuf, length);
+}
+
+static const struct file_operations wf_src_dbgfs_ops = {
+	.read = wf_src_dbgfs_read,
+	.owner = THIS_MODULE,
+	.open = simple_open,
+};
+
 #define CHAR_PER_PATTERN 8
 static ssize_t brake_pattern_dbgfs_read(struct file *filep,
 		char __user *buf, size_t count, loff_t *ppos)
@@ -1787,20 +1832,6 @@
 		return -ENOMEM;
 	}
 
-	file = debugfs_create_file("wf_repeat_n", 0644, dir,
-			effect, &wf_repeat_n_debugfs_ops);
-	if (!file) {
-		pr_err("create wf-repeat debugfs node failed\n");
-		return -ENOMEM;
-	}
-
-	file = debugfs_create_file("wf_s_repeat_n", 0644, dir,
-			effect, &wf_s_repeat_n_debugfs_ops);
-	if (!file) {
-		pr_err("create wf-s-repeat debugfs node failed\n");
-		return -ENOMEM;
-	}
-
 	file = debugfs_create_file("lra_auto_res_en", 0644, dir,
 			effect, &auto_res_debugfs_ops);
 	if (!file) {
@@ -1815,6 +1846,16 @@
 		return -ENOMEM;
 	}
 
+	file = debugfs_create_file("wf_src", 0444, dir,
+			effect, &wf_src_dbgfs_ops);
+	if (!file) {
+		pr_err("create wf_src debugfs node failed\n");
+		return -ENOMEM;
+	}
+
+	if (effect->wf_src == EXT_WF_AUDIO || effect->wf_src == EXT_WF_PWM)
+		return 0;
+
 	file = debugfs_create_file("pattern", 0644, dir,
 			effect, &pattern_dbgfs_ops);
 	if (!file) {
@@ -1822,6 +1863,20 @@
 		return -ENOMEM;
 	}
 
+	file = debugfs_create_file("wf_repeat_n", 0644, dir,
+			effect, &wf_repeat_n_debugfs_ops);
+	if (!file) {
+		pr_err("create wf_repeat debugfs node failed\n");
+		return -ENOMEM;
+	}
+
+	file = debugfs_create_file("wf_s_repeat_n", 0644, dir,
+			effect, &wf_s_repeat_n_debugfs_ops);
+	if (!file) {
+		pr_err("create wf_s_repeat debugfs node failed\n");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 8ec483e..26ec603f 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -39,6 +39,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
+#include <linux/overflow.h>
 #include <linux/input/mt.h>
 #include "../input-compat.h"
 
@@ -405,7 +406,7 @@
 static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 				   const struct input_absinfo *abs)
 {
-	int min, max;
+	int min, max, range;
 
 	min = abs->minimum;
 	max = abs->maximum;
@@ -417,7 +418,7 @@
 		return -EINVAL;
 	}
 
-	if (abs->flat > max - min) {
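+	/*
+	 * "max - min" can overflow int (e.g. INT_MAX - INT_MIN), so compute
+	 * the range with check_sub_overflow() and only then compare flat.
+	 */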
+	if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
 		printk(KERN_DEBUG
 		       "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
 		       UINPUT_NAME, code, abs->flat, min, max);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index a94b649..628ef61 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1337,6 +1337,7 @@
 	{ "ELAN0000", 0 },
 	{ "ELAN0100", 0 },
 	{ "ELAN0600", 0 },
+	{ "ELAN0601", 0 },
 	{ "ELAN0602", 0 },
 	{ "ELAN0605", 0 },
 	{ "ELAN0608", 0 },
@@ -1345,6 +1346,7 @@
 	{ "ELAN060C", 0 },
 	{ "ELAN0611", 0 },
 	{ "ELAN0612", 0 },
+	{ "ELAN0617", 0 },
 	{ "ELAN0618", 0 },
 	{ "ELAN061C", 0 },
 	{ "ELAN061D", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 2d95e8d..a7f8b16 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1119,6 +1119,8 @@
  * Asus UX31               0x361f00        20, 15, 0e      clickpad
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+ * Fujitsu CELSIUS H760    0x570f02        40, 14, 0c      3 hw buttons (**)
+ * Fujitsu CELSIUS H780    0x5d0f02        41, 16, 0d      3 hw buttons (**)
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E546   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
@@ -1171,6 +1173,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
 		},
 	},
+	{
+		/* Fujitsu H780 also has a middle button */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
+		},
+	},
 #endif
 	{ }
 };
@@ -1767,6 +1776,18 @@
 module_param_named(elantech_smbus, elantech_smbus, int, 0644);
 MODULE_PARM_DESC(elantech_smbus, "Use a secondary bus for the Elantech device.");
 
+static const char * const i2c_blacklist_pnp_ids[] = {
+	/*
+	 * These are known to not be working properly as bits are missing
+	 * in elan_i2c.
+	 */
+	"LEN2131", /* ThinkPad P52 w/ NFC */
+	"LEN2132", /* ThinkPad P52 */
+	"LEN2133", /* ThinkPad P72 w/ NFC */
+	"LEN2134", /* ThinkPad P72 */
+	NULL
+};
+
 static int elantech_create_smbus(struct psmouse *psmouse,
 				 struct elantech_device_info *info,
 				 bool leave_breadcrumbs)
@@ -1802,10 +1823,12 @@
 
 	if (elantech_smbus == ELANTECH_SMBUS_NOT_SET) {
 		/*
-		 * New ICs are enabled by default.
+		 * New ICs are enabled by default, unless mentioned in
+		 * i2c_blacklist_pnp_ids.
 		 * Old ICs are up to the user to decide.
 		 */
-		if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+		if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+		    psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
 			return -ENXIO;
 	}
 
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2bd5bb1..b6da0c1 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -171,6 +171,7 @@
 	"LEN0046", /* X250 */
 	"LEN004a", /* W541 */
 	"LEN005b", /* P50 */
+	"LEN005e", /* T560 */
 	"LEN0071", /* T480 */
 	"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
 	"LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -178,6 +179,7 @@
 	"LEN0096", /* X280 */
 	"LEN0097", /* X280 -> ALPS trackpoint */
 	"LEN200f", /* T450s */
+	"SYN3052", /* HP EliteBook 840 G4 */
 	"SYN3221", /* HP 15-ay000 */
 	NULL
 };
diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
index 38bfaca..150f9ee 100644
--- a/drivers/input/tablet/wacom_serial4.c
+++ b/drivers/input/tablet/wacom_serial4.c
@@ -187,6 +187,7 @@
 	MODEL_DIGITIZER_II	= 0x5544, /* UD */
 	MODEL_GRAPHIRE		= 0x4554, /* ET */
 	MODEL_PENPARTNER	= 0x4354, /* CT */
+	MODEL_ARTPAD_II		= 0x4B54, /* KT */
 };
 
 static void wacom_handle_model_response(struct wacom *wacom)
@@ -245,6 +246,7 @@
 		wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
 		break;
 
+	case MODEL_ARTPAD_II:
 	case MODEL_DIGITIZER_II:
 		wacom->dev->name = "Wacom Digitizer II";
 		wacom->dev->id.version = MODEL_DIGITIZER_II;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 3232af5..a7ace07 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1586,10 +1586,10 @@
 	/* T7 config may have changed */
 	mxt_init_t7_power_cfg(data);
 
-release_raw:
-	kfree(cfg.raw);
 release_mem:
 	kfree(cfg.mem);
+release_raw:
+	kfree(cfg.raw);
 	return ret;
 }
 
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index 775e92d..1afb997 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -3526,14 +3526,12 @@
 	hrtimer_start(&info->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
 #else
 	logError(0, "%s Interrupt Mode\n", tag);
-	if (request_irq(info->client->irq, fts_interrupt_handler,
-		IRQF_TRIGGER_LOW, info->client->name, info)) {
+	if (request_threaded_irq(info->client->irq, NULL, fts_interrupt_handler,
+		IRQF_TRIGGER_LOW | IRQF_ONESHOT, info->client->name, info)) {
 		logError(1, "%s Request irq failed\n", tag);
 		kfree(info->event_dispatch_table);
 		error = -EBUSY;
-	} /*else {*/
-	/*error = fts_enableInterrupt();*/
-	/*}*/
+	}
 #endif
 	return error;
 }
@@ -3558,6 +3556,21 @@
 #else
 	enable_irq(info->client->irq);
 #endif
+	/* enable the touch IC irq */
+	fts_enableInterrupt();
+}
+
+static void fts_interrupt_disable(struct fts_ts_info *info)
+{
+	/* disable the touch IC irq */
+	fts_disableInterrupt();
+
+#ifdef FTS_USE_POLLING_MODE
+	hrtimer_cancel(&info->timer);
+#else
+	disable_irq(info->client->irq);
+#endif
 }
 
 static int fts_init(struct fts_ts_info *info)
@@ -4041,11 +4054,6 @@
 
 	__pm_wakeup_event(&info->wakeup_source, HZ);
 
-	if (fts_enable_reg(info, true) < 0) {
-		logError(1, "%s %s: ERROR Failed to enable regulators\n",
-			tag, __func__);
-	}
-
 	if (info->ts_pinctrl) {
 		/*
 		 * Pinctrl handle is optional. If pinctrl handle is found
@@ -4060,10 +4068,11 @@
 	}
 
 	info->resume_bit = 1;
+
+	fts_system_reset();
 #ifdef USE_NOISE_PARAM
 	readNoiseParameters(noise_params);
 #endif
-	fts_system_reset();
 
 #ifdef USE_NOISE_PARAM
 	writeNoiseParameters(noise_params);
@@ -4075,7 +4084,7 @@
 
 	info->sensor_sleep = false;
 
-	fts_enableInterrupt();
+	fts_interrupt_enable(info);
 }
 
 
@@ -4091,11 +4100,10 @@
 
 	fts_mode_handler(info, 0);
 
+	fts_interrupt_disable(info);
 	release_all_touches(info);
 	info->sensor_sleep = true;
 
-	fts_enableInterrupt();
-
 	if (info->ts_pinctrl) {
 		/*
 		 * Pinctrl handle is optional. If pinctrl handle is found
@@ -4109,7 +4117,6 @@
 		}
 	}
 
-	fts_enable_reg(info, false);
 }
 
 
diff --git a/drivers/input/touchscreen/st/fts.h b/drivers/input/touchscreen/st/fts.h
index 7575590..84553fe 100644
--- a/drivers/input/touchscreen/st/fts.h
+++ b/drivers/input/touchscreen/st/fts.h
@@ -66,7 +66,7 @@
 
 
 /**** FEATURES USED IN THE IC ***/
-#define PHONE_KEY /*enable the keys*/
+/* #define PHONE_KEY enable the keys */
 
 #define PHONE_GESTURE /*allow to use the gestures*/
 #ifdef PHONE_GESTURE
diff --git a/drivers/input/touchscreen/st/fts_gui.c b/drivers/input/touchscreen/st/fts_gui.c
index a6aa89c..a2bfe1a 100644
--- a/drivers/input/touchscreen/st/fts_gui.c
+++ b/drivers/input/touchscreen/st/fts_gui.c
@@ -122,8 +122,7 @@
 	ret = sscanf(buf, "%x %x %x %x %x %x %x %x %x ",
 		(data + 8), (data), (data + 1), (data + 2), (data + 3),
 		(data + 4), (data + 5), (data + 6), (data + 7));
-	if (ret != 9)
-		return -EINVAL;
+
 	byte_count = data[8];
 
 	/**
@@ -251,8 +250,7 @@
 	ret = sscanf(buf, "%x %x %x %x %x %x %x %x %x ",
 		(data + 8), (data), (data + 1), (data + 2), (data + 3),
 		(data + 4), (data + 5), (data + 6), (data + 7));
-	if (ret != 9)
-		return -EINVAL;
+
 	byte_count = data[8];
 
 	if (byte_count > 8) {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 33a1acb..7ca65c5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -405,6 +405,16 @@
 
 	  If unsure, say N here.
 
+config IOMMU_TLBSYNC_DEBUG
+	bool "TLB sync timeout debug"
+	depends on ARM_SMMU
+	help
+	  Collect the SMMU system state information right after the
+	  first TLB sync timeout failure by calling BUG().
+	  Use this only on debug builds.
+
+	  If unsure, say N here.
+
 config QCOM_LAZY_MAPPING
 	bool "Reference counted iommu-mapping support"
 	depends on ION
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index bee0dfb..27500ab 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -438,7 +438,14 @@
 
 	dev_data->alias = get_alias(dev);
 
-	if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
+	/*
+	 * By default we use passthrough mode for IOMMUv2-capable devices.
+	 * But if amd_iommu=force_isolation is set (e.g. to debug DMA to an
+	 * invalid address), we ignore the capability for the device so
+	 * it'll be forced to go into translation mode.
+	 */
+	if ((iommu_pass_through || !amd_iommu_force_isolation) &&
+	    dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
 		struct amd_iommu *iommu;
 
 		iommu = amd_iommu_rlookup_table[dev_data->devid];
@@ -1922,16 +1929,13 @@
 
 static void do_detach(struct iommu_dev_data *dev_data)
 {
+	struct protection_domain *domain = dev_data->domain;
 	struct amd_iommu *iommu;
 	u16 alias;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
 	alias = dev_data->alias;
 
-	/* decrease reference counters */
-	dev_data->domain->dev_iommu[iommu->index] -= 1;
-	dev_data->domain->dev_cnt                 -= 1;
-
 	/* Update data structures */
 	dev_data->domain = NULL;
 	list_del(&dev_data->list);
@@ -1941,6 +1945,16 @@
 
 	/* Flush the DTE entry */
 	device_flush_dte(dev_data);
+
+	/* Flush IOTLB */
+	domain_flush_tlb_pde(domain);
+
+	/* Wait for the flushes to finish */
+	domain_flush_complete(domain);
+
+	/* decrease reference counters - needs to happen after the flushes */
+	domain->dev_iommu[iommu->index] -= 1;
+	domain->dev_cnt                 -= 1;
 }
 
 /*
@@ -2548,13 +2562,13 @@
 			bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
 			iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
 
-			if (--mapped_pages)
+			if (--mapped_pages == 0)
 				goto out_free_iova;
 		}
 	}
 
 out_free_iova:
-	free_iova_fast(&dma_dom->iovad, address, npages);
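+	/* free_iova_fast() takes a PFN, not a DMA address */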
+	free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
 
 out_err:
 	return 0;
diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h
index 4695c25..a0e15d5 100644
--- a/drivers/iommu/arm-smmu-regs.h
+++ b/drivers/iommu/arm-smmu-regs.h
@@ -191,6 +191,8 @@
 #define TLBSTATUS_SACTIVE		(1 << 0)
 #define ARM_SMMU_CB_ATS1PR		0x800
 #define ARM_SMMU_CB_ATSR		0x8f0
+#define ARM_SMMU_STATS_SYNC_INV_TBU_ACK 0x25dc
+#define ARM_SMMU_TBU_PWR_STATUS         0x2204
 
 #define SCTLR_MEM_ATTR_SHIFT		16
 #define SCTLR_SHCFG_SHIFT		22
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index fc9fa1d..5391896 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -586,7 +586,11 @@
 
 	struct arm_smmu_strtab_cfg	strtab_cfg;
 
-	u32				sync_count;
+	/* Hi16xx adds an extra 32 bits of goodness to its MSI payload */
+	union {
+		u32			sync_count;
+		u64			padding;
+	};
 
 	/* IOMMU core code handle */
 	struct iommu_device		iommu;
@@ -684,7 +688,13 @@
 	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
 
 	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
-	writel(q->cons, q->cons_reg);
+
+	/*
+	 * Ensure that all CPU accesses (reads and writes) to the queue
+	 * are complete before we update the cons pointer.
+	 */
+	mb();
+	writel_relaxed(q->cons, q->cons_reg);
 }
 
 static int queue_sync_prod(struct arm_smmu_queue *q)
@@ -837,7 +847,13 @@
 			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
 		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
 		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
-		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata);
+		/*
+		 * Commands are written little-endian, but we want the SMMU to
+		 * receive MSIData, and thus write it back to memory, in CPU
+		 * byte order, so big-endian needs an extra byteswap here.
+		 */
+		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
+				     cpu_to_le32(ent->sync.msidata));
 		cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
 		break;
 	default:
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index cf0f4d8..d3936b3 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -135,6 +135,7 @@
 	ARM_MMU500,
 	CAVIUM_SMMUV2,
 	QCOM_SMMUV500,
+	QCOM_SMMUV2,
 };
 
 struct arm_smmu_impl_def_reg {
@@ -250,6 +251,7 @@
 #define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
 #define ARM_SMMU_OPT_STATIC_CB		(1 << 6)
 #define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 7)
+#define ARM_SMMU_OPT_NO_DYNAMIC_ASID	(1 << 8)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 	enum arm_smmu_implementation	model;
@@ -387,6 +389,7 @@
 	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
 	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
 	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
+	{ ARM_SMMU_OPT_NO_DYNAMIC_ASID, "qcom,no-dynamic-asid" },
 	{ 0, NULL},
 };
 
@@ -693,6 +696,20 @@
 	return smmu->arch_ops->device_group(dev, group);
 }
 
+static void arm_smmu_arch_write_sync(struct arm_smmu_device *smmu)
+{
+	u32 id;
+
+	if (!smmu)
+		return;
+
+	/* Read to complete prior write transactions */
+	id = readl_relaxed(ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_ID0);
+
+	/* Wait for read to complete before off */
+	rmb();
+}
+
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
 	if (dev_is_pci(dev)) {
@@ -940,6 +957,9 @@
 static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
 {
 	unsigned long flags;
+	struct arm_smmu_device *smmu = pwr->dev->driver_data;
+
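+	/* Drain posted register writes before the clocks can go down */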
+	arm_smmu_arch_write_sync(smmu);
 
 	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
 	if (pwr->clock_refs_count == 0) {
@@ -1080,6 +1100,7 @@
 				void __iomem *sync, void __iomem *status)
 {
 	unsigned int spin_cnt, delay;
+	u32 sync_inv_ack, tbu_pwr_status;
 
 	writel_relaxed(0, sync);
 	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
@@ -1090,9 +1111,15 @@
 		}
 		udelay(delay);
 	}
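+	/* Capture TBU sync/invalidate ack and power status for the report below */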
+	sync_inv_ack = scm_io_read((unsigned long)(smmu->phys_addr +
+				     ARM_SMMU_STATS_SYNC_INV_TBU_ACK));
+	tbu_pwr_status = scm_io_read((unsigned long)(smmu->phys_addr +
+				     ARM_SMMU_TBU_PWR_STATUS));
 	trace_tlbsync_timeout(smmu->dev, 0);
 	dev_err_ratelimited(smmu->dev,
-			    "TLB sync timed out -- SMMU may be deadlocked\n");
+			    "TLB sync timed out -- SMMU may be deadlocked ack 0x%x pwr 0x%x\n",
+			    sync_inv_ack, tbu_pwr_status);
+	BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
 	return -EINVAL;
 }
 
@@ -1376,6 +1403,62 @@
 	.free_pages_exact = arm_smmu_free_pages_exact,
 };
 
+static void print_ctx_regs(struct arm_smmu_device *smmu, struct arm_smmu_cfg
+			   *cfg, unsigned int fsr)
+{
+	u32 fsynr0;
+	void __iomem *cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
+	void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
+	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+
+	fsynr0 = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+
+	dev_err(smmu->dev, "FAR    = 0x%016llx\n",
+		readq_relaxed(cb_base + ARM_SMMU_CB_FAR));
+	dev_err(smmu->dev, "PAR    = 0x%pK\n",
+		readq_relaxed(cb_base + ARM_SMMU_CB_PAR));
+
+	dev_err(smmu->dev,
+		"FSR    = 0x%08x [%s%s%s%s%s%s%s%s%s%s]\n",
+		fsr,
+		(fsr & 0x02) ?  (fsynr0 & 0x10 ?
+				 "TF W " : "TF R ") : "",
+		(fsr & 0x04) ? "AFF " : "",
+		(fsr & 0x08) ? (fsynr0 & 0x10 ?
+				"PF W " : "PF R ") : "",
+		(fsr & 0x10) ? "EF " : "",
+		(fsr & 0x20) ? "TLBMCF " : "",
+		(fsr & 0x40) ? "TLBLKF " : "",
+		(fsr & 0x80) ? "MHF " : "",
+		(fsr & 0x100) ? "UUT " : "",
+		(fsr & 0x40000000) ? "SS " : "",
+		(fsr & 0x80000000) ? "MULTI " : "");
+
+	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+		dev_err(smmu->dev, "TTBR0  = 0x%pK\n",
+			readl_relaxed(cb_base + ARM_SMMU_CB_TTBR0));
+		dev_err(smmu->dev, "TTBR1  = 0x%pK\n",
+			readl_relaxed(cb_base + ARM_SMMU_CB_TTBR1));
+	} else {
+		dev_err(smmu->dev, "TTBR0  = 0x%pK\n",
+			readq_relaxed(cb_base + ARM_SMMU_CB_TTBR0));
+		if (stage1)
+			dev_err(smmu->dev, "TTBR1  = 0x%pK\n",
+				readq_relaxed(cb_base + ARM_SMMU_CB_TTBR1));
+	}
+
+	dev_err(smmu->dev, "SCTLR  = 0x%08x ACTLR  = 0x%08x\n",
+	       readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR),
+	       readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR));
+	dev_err(smmu->dev, "CBAR  = 0x%08x\n",
+	       readl_relaxed(gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)));
+	dev_err(smmu->dev, "MAIR0   = 0x%08x MAIR1   = 0x%08x\n",
+	       readl_relaxed(cb_base + ARM_SMMU_CB_S1_MAIR0),
+	       readl_relaxed(cb_base + ARM_SMMU_CB_S1_MAIR1));
+}
+
 static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
 					 dma_addr_t iova, u32 fsr)
 {
@@ -1463,29 +1546,17 @@
 		ret = IRQ_HANDLED;
 		resume = RESUME_TERMINATE;
 	} else {
-		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
-							      fsr);
 		if (__ratelimit(&_rs)) {
+			phys_addr_t phys_atos = arm_smmu_verify_fault(domain,
+								      iova,
+								      fsr);
+
 			dev_err(smmu->dev,
 				"Unhandled context fault: iova=0x%08lx, cb=%d, fsr=0x%x, fsynr0=0x%x, fsynr1=0x%x\n",
 				iova, cfg->cbndx, fsr, fsynr0, fsynr1);
-			dev_err(smmu->dev, "FAR    = %016lx\n",
-				(unsigned long)iova);
-			dev_err(smmu->dev,
-				"FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n",
-				fsr,
-				(fsr & 0x02) ?  (fsynr0 & 0x10 ?
-						"TF W " : "TF R ") : "",
-				(fsr & 0x04) ? "AFF " : "",
-				(fsr & 0x08) ? (fsynr0 & 0x10 ?
-						"PF W " : "PF R ") : "",
-				(fsr & 0x10) ? "EF " : "",
-				(fsr & 0x20) ? "TLBMCF " : "",
-				(fsr & 0x40) ? "TLBLKF " : "",
-				(fsr & 0x80) ? "MHF " : "",
-				(fsr & 0x100) ? "UUT " : "",
-				(fsr & 0x40000000) ? "SS " : "",
-				(fsr & 0x80000000) ? "MULTI " : "");
+
+			print_ctx_regs(smmu, cfg, fsr);
+
 			dev_err(smmu->dev,
 				"soft iova-to-phys=%pa\n", &phys_soft);
 			if (!phys_soft)
@@ -1784,7 +1855,7 @@
 	bool dynamic = is_dynamic_domain(domain);
 	int ret;
 
-	if (!dynamic) {
+	if (!dynamic || (smmu->options & ARM_SMMU_OPT_NO_DYNAMIC_ASID)) {
 		cfg->asid = cfg->cbndx + 1;
 	} else {
 		mutex_lock(&smmu->idr_mutex);
@@ -1828,7 +1899,6 @@
 	enum io_pgtable_fmt fmt;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
 	unsigned long quirks = 0;
 	bool dynamic;
 
@@ -1950,7 +2020,7 @@
 		goto out_unlock;
 	}
 
-	if (is_fast)
+	if (smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))
 		fmt = ARM_V8L_FAST;
 
 	if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
@@ -2426,7 +2496,10 @@
 
 	mutex_lock(&smmu->stream_map_mutex);
 	for_each_cfg_sme(fwspec, i, idx) {
-		WARN_ON(s2cr[idx].attach_count == 0);
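+		/* Bail out rather than underflow the attach count */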
+		if (WARN_ON(s2cr[idx].attach_count == 0)) {
+			mutex_unlock(&smmu->stream_map_mutex);
+			return;
+		}
 		s2cr[idx].attach_count -= 1;
 
 		if (s2cr[idx].attach_count > 0)
@@ -2948,14 +3021,6 @@
 	return ret;
 }
 
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-	if (smmu_domain->tlb_ops)
-		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
-}
-
 #define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
 static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			   struct scatterlist *sg, unsigned int nents, int prot)
@@ -3927,8 +3992,6 @@
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
 	.map_sg			= arm_smmu_map_sg,
-	.flush_iotlb_all	= arm_smmu_iotlb_sync,
-	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
 	.add_device		= arm_smmu_add_device,
@@ -4324,7 +4387,7 @@
 	pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
 	if (!pwr->bus_client) {
 		dev_err(dev, "Bus client registration failed\n");
-		return -EINVAL;
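+		/* The bus scaling driver may not have probed yet; retry later */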
+		return -EPROBE_DEFER;
 	}
 
 	return 0;
@@ -4609,6 +4672,7 @@
 ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
 ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
 		    &qsmmuv500_arch_ops);
+ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, NULL);
 
 static const struct of_device_id arm_smmu_of_match[] = {
 	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
@@ -4618,6 +4682,7 @@
 	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
 	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
 	{ .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
+	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
@@ -5018,6 +5083,11 @@
 #define DEBUG_PAR_PA_SHIFT		12
 #define DEBUG_PAR_FAULT_VAL		0x1
 
+#define DEBUG_AXUSER_REG		0x30
+#define DEBUG_AXUSER_CDMID_MASK         0xff
+#define DEBUG_AXUSER_CDMID_SHIFT        36
+#define DEBUG_AXUSER_CDMID_VAL          255
+
 #define TBU_DBG_TIMEOUT_US		100
 
 struct actlr_setting {
@@ -5277,9 +5347,13 @@
 redo:
 	/* Set address and stream-id */
 	val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+	val &= ~DEBUG_SID_HALT_SID_MASK;
 	val |= sid & DEBUG_SID_HALT_SID_MASK;
 	writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
 	writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
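+	/* Program a known CDMID into the AXUSER field of the debug transaction */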
+	val = (u64)(DEBUG_AXUSER_CDMID_VAL & DEBUG_AXUSER_CDMID_MASK) <<
+		DEBUG_AXUSER_CDMID_SHIFT;
+	writeq_relaxed(val, tbu->base + DEBUG_AXUSER_REG);
 
 	/*
 	 * Write-back Read and Write-Allocate
@@ -5336,6 +5410,9 @@
 	/* Reset hardware */
 	writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
 	writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
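+	/* Clear the programmed stream-id so it can't leak into later lookups */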
+	val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+	val &= ~DEBUG_SID_HALT_SID_MASK;
+	writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
 
 	/*
 	 * After a failed translation, the next successful translation will
@@ -5351,6 +5428,12 @@
 	qsmmuv500_tbu_resume(tbu);
 
 out_power_off:
+	/* Read to complete prior write transactions */
+	val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
+
+	/* Wait for read to complete before off */
+	rmb();
+
 	arm_smmu_power_off(tbu->pwr);
 
 	return phys;
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index e7994ba..27523fc 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/dma-contiguous.h>
@@ -540,12 +540,22 @@
 	void *addr;
 	unsigned long flags;
 	struct sg_mapping_iter miter;
-	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
+	size_t count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
 	bool is_coherent = is_dma_coherent(dev, attrs);
 	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, is_coherent, attrs);
 	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
 	struct page **pages;
 
+	/*
+	 * sg_alloc_table_from_pages accepts unsigned int value for count
+	 * so check count doesn't exceed UINT_MAX.
+	 */
+	if (count > UINT_MAX) {
+		dev_err(dev, "count: %zx exceeds UINT_MAX\n", count);
+		return NULL;
+	}
+
 	*handle = DMA_ERROR_CODE;
 
 	pages = __fast_smmu_alloc_pages(count, gfp);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a76c47f..2b8f5eb 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2069,7 +2069,7 @@
 	 * than default.  Unnecessary for PT mode.
 	 */
 	if (translation != CONTEXT_TT_PASS_THROUGH) {
-		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+		for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
 			ret = -ENOMEM;
 			pgd = phys_to_virt(dma_pte_addr(pgd));
 			if (!dma_pte_present(pgd))
@@ -2083,7 +2083,7 @@
 			translation = CONTEXT_TT_MULTI_LEVEL;
 
 		context_set_address_root(context, virt_to_phys(pgd));
-		context_set_address_width(context, iommu->agaw);
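+		/* Program the AW that matches the pgd level chosen above */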
+		context_set_address_width(context, agaw);
 	} else {
 		/*
 		 * In pass through mode, AW must be programmed to
@@ -5230,7 +5230,7 @@
 	struct iommu_resv_region *entry, *next;
 
 	list_for_each_entry_safe(entry, next, head, list) {
-		if (entry->type == IOMMU_RESV_RESERVED)
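+		/* The MSI region is the one allocated per get_resv_regions() call */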
+		if (entry->type == IOMMU_RESV_MSI)
 			kfree(entry);
 	}
 }
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index d137257..4378f2c 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -240,7 +240,7 @@
 
 static struct iommu_debug_attr secure_attr = {
 	.dma_type = 0,
-	.vmid = VMID_CP_CAMERA,
+	.vmid = VMID_CP_PIXEL,
 };
 
 static int iommu_debug_set_attrs(struct iommu_debug_device *ddev,
@@ -289,20 +289,20 @@
 	bool coherent;
 
 	if (ddev->domain) {
-		dev_err(dev, "Already attached.\n");
+		dev_err_ratelimited(dev, "Already attached.\n");
 		return -EBUSY;
 	}
 
 	iommu = of_iommu_configure(dev, dev->of_node);
 	if (!iommu) {
-		dev_err(dev, "Is not associated with an iommu\n");
+		dev_err_ratelimited(dev, "Is not associated with an iommu\n");
 		return -EINVAL;
 	}
 
 	coherent = of_dma_is_coherent(dev->of_node);
 
 	if (!dev->iommu_group) {
-		dev_err(dev, "Does not have an iommu group\n");
+		dev_err_ratelimited(dev, "Does not have an iommu group\n");
 		return -EINVAL;
 	}
 
@@ -310,7 +310,7 @@
 	domain = iommu_get_domain_for_dev(dev);
 	if (domain) {
 		if (domain->type != IOMMU_DOMAIN_DMA) {
-			dev_err(dev, "Attached, but its not a default domain?\n");
+			dev_err_ratelimited(dev, "Attached, but its not a default domain?\n");
 			return -EINVAL;
 		}
 		iommu_detach_group(domain, dev->iommu_group);
@@ -318,19 +318,19 @@
 
 	domain = iommu_domain_alloc(dev->bus);
 	if (!domain) {
-		dev_err(dev, "Allocating iommu domain failed\n");
+		dev_err_ratelimited(dev, "Allocating iommu domain failed\n");
 		return -EINVAL;
 	}
 
 	domain->is_debug_domain = true;
 
 	if (iommu_debug_set_attrs(ddev, domain, attrs)) {
-		dev_err(dev, "Setting attrs failed\n");
+		dev_err_ratelimited(dev, "Setting attrs failed\n");
 		goto out_free_domain;
 	}
 
 	if (iommu_attach_group(domain, dev->iommu_group)) {
-		dev_err(dev, "attach group failed\n");
+		dev_err_ratelimited(dev, "attach group failed\n");
 		goto out_free_domain;
 	}
 
@@ -341,7 +341,7 @@
 	set_dma_ops(dev, NULL);
 	arch_setup_dma_ops(dev, dma_base, size, iommu, coherent);
 	if (!get_dma_ops(dev)) {
-		dev_err(dev, "arch_setup_dma_ops failed, dma ops are null.\n");
+		dev_err_ratelimited(dev, "arch_setup_dma_ops failed, dma ops are null.\n");
 		goto out_detach_group;
 	}
 
@@ -362,13 +362,13 @@
 	struct device *dev = ddev->dev;
 
 	if (!dev->iommu_group) {
-		dev_err(dev, "Does not have an iommu group\n");
+		dev_err_ratelimited(dev, "Does not have an iommu group\n");
 		return;
 	}
 
 	domain = ddev->domain;
 	if (!domain) {
-		dev_err(dev, "Is not attached\n");
+		dev_err_ratelimited(dev, "Is not attached\n");
 		return;
 	}
 
@@ -770,14 +770,14 @@
 	for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
 		dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
 		if (dma_addr == DMA_ERROR_CODE) {
-			dev_err(dev, "Failed map on iter %d\n", i);
+			dev_err_ratelimited(dev, "Failed map on iter %d\n", i);
 			ret = -EINVAL;
 			goto out;
 		}
 	}
 
 	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
-		dev_err(dev,
+		dev_err_ratelimited(dev,
 			"dma_map_single unexpectedly (VA should have been exhausted)\n");
 		ret = -EINVAL;
 		goto out;
@@ -797,7 +797,7 @@
 	if (dma_addr != SZ_8K) {
 		dma_addr_t expected = SZ_8K;
 
-		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
+		dev_err_ratelimited(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
 			&dma_addr, &expected);
 		ret = -EINVAL;
 		goto out;
@@ -812,14 +812,14 @@
 	if (dma_addr != 0) {
 		dma_addr_t expected = 0;
 
-		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
+		dev_err_ratelimited(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
 			&dma_addr, &expected);
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
-		dev_err(dev,
+		dev_err_ratelimited(dev,
 			"dma_map_single unexpectedly after remaps (VA should have been exhausted)\n");
 		ret = -EINVAL;
 		goto out;
@@ -869,7 +869,7 @@
 	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
 	if (!virt) {
 		if (size > SZ_8K) {
-			dev_err(dev,
+			dev_err_ratelimited(dev,
 				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
 				_size_to_string(size));
 			return 0;
@@ -881,7 +881,7 @@
 	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
 		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
 		if (dma_addr == DMA_ERROR_CODE) {
-			dev_err(dev, "Failed map on iter %d\n", i);
+			dev_err_ratelimited(dev, "Failed map on iter %d\n", i);
 			ret = -EINVAL;
 			goto out;
 		}
@@ -914,7 +914,7 @@
 	}
 
 	if (unmapped != remapped) {
-		dev_err(dev,
+		dev_err_ratelimited(dev,
 			"Unexpected random remap count! Unmapped %d but remapped %d\n",
 			unmapped, remapped);
 		ret = -EINVAL;
@@ -959,7 +959,7 @@
 	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
 	if (!virt) {
 		if (size > SZ_8K) {
-			dev_err(dev,
+			dev_err_ratelimited(dev,
 				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
 				_size_to_string(size));
 			return 0;
@@ -988,7 +988,7 @@
 			phys_addr_t expected = phys;
 
 			if (__check_mapping(dev, domain, iova, expected)) {
-				dev_err(dev, "iter: %d\n", i);
+				dev_err_ratelimited(dev, "iter: %d\n", i);
 				ret = -EINVAL;
 				goto out;
 			}
@@ -999,7 +999,7 @@
 			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;
 
 			if (__check_mapping(dev, domain, theiova, expected)) {
-				dev_err(dev, "iter: %d\n", i);
+				dev_err_ratelimited(dev, "iter: %d\n", i);
 				ret = -EINVAL;
 				goto out;
 			}
@@ -1158,7 +1158,7 @@
 		pa = iommu_iova_to_phys(domain, iova);
 		pa2 = iommu_iova_to_phys_hard(domain, iova);
 		if (pa != pa2) {
-			dev_err(dev,
+			dev_err_ratelimited(dev,
 				"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
 				&pa, &pa2);
 			ret = -EINVAL;
@@ -1166,7 +1166,7 @@
 		}
 		pa2 = virt_to_phys(data);
 		if (pa != pa2) {
-			dev_err(dev,
+			dev_err_ratelimited(dev,
 				"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
 				&pa, &pa2);
 			ret = -EINVAL;
@@ -1175,7 +1175,8 @@
 		dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
 		for (j = 0; j < size; ++j) {
 			if (data[j] != 0xa5) {
-				dev_err(dev, "data[%d] != 0xa5\n", data[j]);
+				dev_err_ratelimited(dev,
+					       "data[%d] != 0xa5\n", data[j]);
 				ret = -EINVAL;
 				goto out;
 			}
@@ -1230,7 +1231,7 @@
 			pa = iommu_iova_to_phys(domain, iova);
 			pa2 = iommu_iova_to_phys_hard(domain, iova);
 			if (pa != pa2) {
-				dev_err(dev,
+				dev_err_ratelimited(dev,
 					"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
 					&pa, &pa2);
 				ret = -EINVAL;
@@ -1241,7 +1242,7 @@
 			pa = iommu_iova_to_phys(domain, iova);
 			pa2 = iommu_iova_to_phys_hard(domain, iova);
 			if (pa != pa2) {
-				dev_err(dev,
+				dev_err_ratelimited(dev,
 					"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
 					&pa, &pa2);
 				ret = -EINVAL;
@@ -1289,7 +1290,7 @@
 		goto out_release_mapping;
 	}
 
-	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
+	dev_err_ratelimited(dev, "testing with pgtables at %pa\n", &pt_phys);
 	if (iommu_enable_config_clocks(domain)) {
 		ds_printf(dev, s, "Couldn't enable clocks\n");
 		goto out_release_mapping;
@@ -1378,7 +1379,7 @@
 	int val, ret;
 
 	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
-		pr_err("Invalid format. Expected a hex or decimal integer");
+		pr_err_ratelimited("Invalid format. Expected a hex or decimal integer");
 		return -EFAULT;
 	}
 
@@ -1386,10 +1387,10 @@
 	if (val) {
 		ret = iommu_debug_dma_reconfigure(ddev, attrs, 0, SZ_1G * 4ULL);
 		if (!ret)
-			pr_err("Attached\n");
+			pr_err_ratelimited("Attached\n");
 	} else {
 		iommu_debug_dma_deconfigure(ddev);
-		pr_err("Detached\n");
+		pr_err_ratelimited("Detached\n");
 	}
 	mutex_unlock(&ddev->state_lock);
 	retval = count;
@@ -1416,20 +1417,10 @@
 				       size_t count, loff_t *offset)
 {
 	struct iommu_debug_device *ddev = file->private_data;
-	char c[2];
+	char buf[100];
 
-	if (*offset)
-		return 0;
-
-	c[0] = ddev->domain ? '1' : '0';
-	c[1] = '\n';
-	if (copy_to_user(ubuf, &c, 2)) {
-		pr_err("copy_to_user failed\n");
-		return -EFAULT;
-	}
-	*offset = 1;		/* non-zero means we're done */
-
-	return 2;
+	snprintf(buf, sizeof(buf), "%d\n", ddev->domain ? 1 : 0);
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_dma_attach_fops = {
@@ -1443,9 +1434,7 @@
 					       size_t count, loff_t *offset)
 {
 	char buf[100];
-	ssize_t retval;
-	size_t buflen;
-	int buf_len = sizeof(buf);
+	size_t buf_len = sizeof(buf);
 
 	if (*offset)
 		return 0;
@@ -1457,16 +1446,7 @@
 	else
 		snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);
 
-	buflen = strlen(buf);
-	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err("Couldn't copy_to_user\n");
-		retval = -EFAULT;
-	} else {
-		*offset = 1;	/* non-zero means we're done */
-		retval = buflen;
-	}
-
-	return retval;
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_test_virt_addr_fops = {
@@ -1494,13 +1474,13 @@
 	dma_addr_t iova;
 
 	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
-		pr_err("Invalid format for iova\n");
+		pr_err_ratelimited("Invalid format for iova\n");
 		ddev->iova = 0;
 		return -EINVAL;
 	}
 
 	ddev->iova = iova;
-	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+	pr_err_ratelimited("Saved iova=%pa for future PTE commands\n", &iova);
 	return count;
 }
 
@@ -1511,11 +1491,9 @@
 	struct iommu_debug_device *ddev = file->private_data;
 	uint64_t pte;
 	char buf[100];
-	ssize_t retval;
-	size_t buflen;
 
 	if (kptr_restrict != 0) {
-		pr_err("kptr_restrict needs to be disabled.\n");
+		pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
 		return -EPERM;
 	}
 
@@ -1524,7 +1502,7 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
@@ -1537,18 +1515,8 @@
 		strlcpy(buf, "FAIL\n", sizeof(buf));
 	else
 		snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);
-
-	buflen = strlen(buf);
-	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err("Couldn't copy_to_user\n");
-		retval = -EFAULT;
-	} else {
-		*offset = 1;	/* non-zero means we're done */
-		retval = buflen;
-	}
-
 	mutex_unlock(&ddev->state_lock);
-	return retval;
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_pte_fops = {
@@ -1565,13 +1533,13 @@
 	dma_addr_t iova;
 
 	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
-		pr_err("Invalid format for iova\n");
+		pr_err_ratelimited("Invalid format for iova\n");
 		ddev->iova = 0;
 		return -EINVAL;
 	}
 
 	ddev->iova = iova;
-	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
+	pr_err_ratelimited("Saved iova=%pa for future ATOS commands\n", &iova);
 	return count;
 }
 
@@ -1581,11 +1549,9 @@
 	struct iommu_debug_device *ddev = file->private_data;
 	phys_addr_t phys;
 	char buf[100];
-	ssize_t retval;
-	size_t buflen;
 
 	if (kptr_restrict != 0) {
-		pr_err("kptr_restrict needs to be disabled.\n");
+		pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
 		return -EPERM;
 	}
 
@@ -1594,7 +1560,7 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
@@ -1605,23 +1571,13 @@
 	if (!phys) {
 		strlcpy(buf, "FAIL\n", 100);
 		phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
-		dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
+		dev_err_ratelimited(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
 			&ddev->iova, &phys);
 	} else {
 		snprintf(buf, 100, "%pa\n", &phys);
 	}
-
-	buflen = strlen(buf);
-	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err("Couldn't copy_to_user\n");
-		retval = -EFAULT;
-	} else {
-		*offset = 1;	/* non-zero means we're done */
-		retval = buflen;
-	}
-
 	mutex_unlock(&ddev->state_lock);
-	return retval;
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_atos_fops = {
@@ -1636,11 +1592,9 @@
 	struct iommu_debug_device *ddev = file->private_data;
 	phys_addr_t phys;
 	char buf[100];
-	ssize_t retval;
-	size_t buflen;
 
 	if (kptr_restrict != 0) {
-		pr_err("kptr_restrict needs to be disabled.\n");
+		pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
 		return -EPERM;
 	}
 	if (*offset)
@@ -1648,7 +1602,7 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
@@ -1661,18 +1615,8 @@
 		strlcpy(buf, "FAIL\n", sizeof(buf));
 	else
 		snprintf(buf, sizeof(buf), "%pa\n", &phys);
-
-	buflen = strlen(buf);
-	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err("Couldn't copy_to_user\n");
-		retval = -EFAULT;
-	} else {
-		*offset = 1;	/* non-zero means we're done */
-		retval = buflen;
-	}
-
 	mutex_unlock(&ddev->state_lock);
-	return retval;
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_dma_atos_fops = {
@@ -1695,14 +1639,14 @@
 	struct iommu_debug_device *ddev = file->private_data;
 
 	if (count >= 100) {
-		pr_err("Value too large\n");
+		pr_err_ratelimited("Value too large\n");
 		return -EINVAL;
 	}
 
 	memset(buf, 0, 100);
 
 	if (copy_from_user(buf, ubuf, count)) {
-		pr_err("Couldn't copy from user\n");
+		pr_err_ratelimited("Couldn't copy from user\n");
 		retval = -EFAULT;
 	}
 
@@ -1735,27 +1679,27 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
 
 	ret = iommu_map(ddev->domain, iova, phys, size, prot);
 	if (ret) {
-		pr_err("iommu_map failed with %d\n", ret);
+		pr_err_ratelimited("iommu_map failed with %d\n", ret);
 		retval = -EIO;
 		goto out;
 	}
 
 	retval = count;
-	pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
+	pr_err_ratelimited("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
 	       &iova, &phys, size, prot);
 out:
 	mutex_unlock(&ddev->state_lock);
 	return retval;
 
 invalid_format:
-	pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
+	pr_err_ratelimited("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
 	return -EINVAL;
 }
 
@@ -1789,14 +1733,14 @@
 	struct device *dev = ddev->dev;
 
 	if (count >= sizeof(buf)) {
-		pr_err("Value too large\n");
+		pr_err_ratelimited("Value too large\n");
 		return -EINVAL;
 	}
 
 	memset(buf, 0, sizeof(buf));
 
 	if (copy_from_user(buf, ubuf, count)) {
-		pr_err("Couldn't copy from user\n");
+		pr_err_ratelimited("Couldn't copy from user\n");
 		return -EFAULT;
 	}
 
@@ -1836,7 +1780,7 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
@@ -1845,26 +1789,27 @@
 					DMA_TO_DEVICE, dma_attrs);
 
 	if (dma_mapping_error(dev, iova)) {
-		pr_err("Failed to perform dma_map_single\n");
+		pr_err_ratelimited("Failed to perform dma_map_single\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
 	retval = count;
-	pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
+	pr_err_ratelimited("Mapped 0x%p to %pa (len=0x%zx)\n",
 			v_addr, &iova, size);
 	ddev->iova = iova;
-		pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+		pr_err_ratelimited("Saved iova=%pa for future PTE commands\n",
+				&iova);
 out:
 	mutex_unlock(&ddev->state_lock);
 	return retval;
 
 invalid_format:
-	pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
+	pr_err_ratelimited("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
 	return retval;
 
 invalid_addr:
-	pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
+	pr_err_ratelimited("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
 	return retval;
 }
 
@@ -1873,8 +1818,6 @@
 {
 	struct iommu_debug_device *ddev = file->private_data;
 	char buf[100];
-	ssize_t retval;
-	size_t buflen;
 	dma_addr_t iova;
 
 	if (*offset)
@@ -1884,17 +1827,7 @@
 
 	iova = ddev->iova;
 	snprintf(buf, sizeof(buf), "%pa\n", &iova);
-
-	buflen = strlen(buf);
-	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err("Couldn't copy_to_user\n");
-		retval = -EFAULT;
-	} else {
-		*offset = 1;	/* non-zero means we're done */
-		retval = buflen;
-	}
-
-	return retval;
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_dma_map_fops = {
@@ -1916,19 +1849,19 @@
 	struct iommu_debug_device *ddev = file->private_data;
 
 	if (count >= 100) {
-		pr_err("Value too large\n");
+		pr_err_ratelimited("Value too large\n");
 		return -EINVAL;
 	}
 
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		return -EINVAL;
 	}
 
 	memset(buf, 0, 100);
 
 	if (copy_from_user(buf, ubuf, count)) {
-		pr_err("Couldn't copy from user\n");
+		pr_err_ratelimited("Couldn't copy from user\n");
 		retval = -EFAULT;
 		goto out;
 	}
@@ -1948,27 +1881,27 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
 
 	unmapped = iommu_unmap(ddev->domain, iova, size);
 	if (unmapped != size) {
-		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
+		pr_err_ratelimited("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
 		       size, unmapped);
 		retval = -EIO;
 		goto out;
 	}
 
 	retval = count;
-	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+	pr_err_ratelimited("Unmapped %pa (len=0x%zx)\n", &iova, size);
 out:
 	mutex_unlock(&ddev->state_lock);
 	return retval;
 
 invalid_format:
-	pr_err("Invalid format. Expected: iova,len\n");
+	pr_err_ratelimited("Invalid format. Expected: iova,len\n");
 	return -EINVAL;
 }
 
@@ -1992,14 +1925,14 @@
 	struct device *dev = ddev->dev;
 
 	if (count >= sizeof(buf)) {
-		pr_err("Value too large\n");
+		pr_err_ratelimited("Value too large\n");
 		return -EINVAL;
 	}
 
 	memset(buf, 0, sizeof(buf));
 
 	if (copy_from_user(buf, ubuf, count)) {
-		pr_err("Couldn't copy from user\n");
+		pr_err_ratelimited("Couldn't copy from user\n");
 		retval = -EFAULT;
 		goto out;
 	}
@@ -2036,20 +1969,20 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
 	dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
 
 	retval = count;
-	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+	pr_err_ratelimited("Unmapped %pa (len=0x%zx)\n", &iova, size);
 out:
 	mutex_unlock(&ddev->state_lock);
 	return retval;
 
 invalid_format:
-	pr_err("Invalid format. Expected: iova,len, dma attr\n");
+	pr_err_ratelimited("Invalid format. Expected: iova,len, dma attr\n");
 	return retval;
 }
 
@@ -2068,17 +2001,17 @@
 
 	/* we're expecting a single character plus (optionally) a newline */
 	if (count > 2) {
-		dev_err(dev, "Invalid value\n");
+		dev_err_ratelimited(dev, "Invalid value\n");
 		return -EINVAL;
 	}
 
 	if (!ddev->domain) {
-		dev_err(dev, "No domain. Did you already attach?\n");
+		dev_err_ratelimited(dev, "No domain. Did you already attach?\n");
 		return -EINVAL;
 	}
 
 	if (copy_from_user(&buf, ubuf, 1)) {
-		dev_err(dev, "Couldn't copy from user\n");
+		dev_err_ratelimited(dev, "Couldn't copy from user\n");
 		return -EFAULT;
 	}
 
@@ -2086,26 +2019,26 @@
 	switch (buf) {
 	case '0':
 		if (ddev->clk_count == 0) {
-			dev_err(dev, "Config clocks already disabled\n");
+			dev_err_ratelimited(dev, "Config clocks already disabled\n");
 			break;
 		}
 
 		if (--ddev->clk_count > 0)
 			break;
 
-		dev_err(dev, "Disabling config clocks\n");
+		dev_err_ratelimited(dev, "Disabling config clocks\n");
 		iommu_disable_config_clocks(ddev->domain);
 		break;
 	case '1':
 		if (ddev->clk_count++ > 0)
 			break;
 
-		dev_err(dev, "Enabling config clocks\n");
+		dev_err_ratelimited(dev, "Enabling config clocks\n");
 		if (iommu_enable_config_clocks(ddev->domain))
-			dev_err(dev, "Failed!\n");
+			dev_err_ratelimited(dev, "Failed!\n");
 		break;
 	default:
-		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
+		dev_err_ratelimited(dev, "Invalid value. Should be 0 or 1.\n");
 		mutex_unlock(&ddev->clk_lock);
 		return -EINVAL;
 	}
@@ -2127,13 +2060,13 @@
 	unsigned long flags;
 
 	if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
-		pr_err("Invalid flags format\n");
+		pr_err_ratelimited("Invalid flags format\n");
 		return -EFAULT;
 	}
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
@@ -2177,147 +2110,147 @@
 	ddev->dev = dev;
 	dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
 	if (!dir) {
-		pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s debugfs dir\n",
 		       dev_name(dev));
 		goto err;
 	}
 
 	if (!debugfs_create_file("nr_iters", 0400, dir, &iters_per_op,
 				&iommu_debug_nr_iters_ops)) {
-		pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
 				&iommu_debug_test_virt_addr_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("profiling", 0400, dir, ddev,
 				 &iommu_debug_profiling_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/profiling debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("secure_profiling", 0400, dir, ddev,
 				 &iommu_debug_secure_profiling_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("profiling_fast", 0400, dir, ddev,
 				 &iommu_debug_profiling_fast_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("profiling_fast_dma_api", 0400, dir, ddev,
 				 &iommu_debug_profiling_fast_dma_api_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("functional_fast_dma_api", 0400, dir, ddev,
 				 &iommu_debug_functional_fast_dma_api_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("functional_arm_dma_api", 0400, dir, ddev,
 				 &iommu_debug_functional_arm_dma_api_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
 				 &iommu_debug_dma_attach_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("attach", 0400, dir, ddev,
 				 &iommu_debug_attach_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/attach debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("secure_attach", 0400, dir, ddev,
 				 &iommu_debug_secure_attach_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("atos", 0200, dir, ddev,
 				 &iommu_debug_atos_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/atos debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
 				 &iommu_debug_dma_atos_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("map", 0200, dir, ddev,
 				 &iommu_debug_map_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/map debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("dma_map", 0600, dir, ddev,
 					 &iommu_debug_dma_map_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
 		       dev_name(dev));
 			goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("unmap", 0200, dir, ddev,
 				 &iommu_debug_unmap_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/unmap debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
 					 &iommu_debug_dma_unmap_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
 		       dev_name(dev));
 			goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("pte", 0600, dir, ddev,
 			&iommu_debug_pte_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/pte debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("config_clocks", 0200, dir, ddev,
 				 &iommu_debug_config_clocks_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
 				 &iommu_debug_trigger_fault_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
@@ -2337,7 +2270,7 @@
 	debugfs_tests_dir = debugfs_create_dir("tests",
 					       iommu_debugfs_top);
 	if (!debugfs_tests_dir) {
-		pr_err("Couldn't create iommu/tests debugfs directory\n");
+		pr_err_ratelimited("Couldn't create iommu/tests debugfs directory\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index f7787e7..0e0e88e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -103,6 +103,14 @@
 	int err;
 
 	ops = iommu_ops_from_fwnode(fwnode);
+	/*
+	 * Return -EPROBE_DEFER for the platform devices which are dependent
+	 * on the SMMU driver registration. Deferring from here helps in adding
+	 * the clients in proper iommu groups.
+	 */
+	if (!dev_is_pci(dev) && of_device_is_available(iommu_spec->np) && !ops)
+		return -EPROBE_DEFER;
+
 	if ((ops && !ops->of_xlate) ||
 	    !of_device_is_available(iommu_spec->np))
 		return NO_IOMMU;
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index d097373..3a97fe6 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -27,6 +27,7 @@
 	u32 iidr;
 	u32 mask;
 };
+extern bool from_suspend;
 
 #ifdef CONFIG_QCOM_SHOW_RESUME_IRQ
 extern int msm_show_resume_irq_mask;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c2df341..15579cb 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -93,9 +93,14 @@
  * The ITS structure - contains most of the infrastructure, with the
  * top-level MSI domain, the command queue, the collections, and the
  * list of devices writing to it.
+ *
+ * dev_alloc_lock has to be taken for device allocations, while the
+ * spinlock must be taken to parse data structures such as the device
+ * list.
  */
 struct its_node {
 	raw_spinlock_t		lock;
+	struct mutex		dev_alloc_lock;
 	struct list_head	entry;
 	void __iomem		*base;
 	phys_addr_t		phys_base;
@@ -152,6 +157,7 @@
 	void			*itt;
 	u32			nr_ites;
 	u32			device_id;
+	bool			shared;
 };
 
 static struct {
@@ -1575,6 +1581,9 @@
 		nr_irqs /= 2;
 	} while (nr_irqs > 0);
 
+	if (!nr_irqs)
+		err = -ENOSPC;
+
 	if (err)
 		goto out;
 
@@ -1945,6 +1954,29 @@
 		   get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
 }
 
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
+{
+	u32 count = 1000000;	/* 1s! */
+	bool clean;
+	u64 val;
+
+	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+	val &= ~GICR_VPENDBASER_Valid;
+	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+	do {
+		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+		clean = !(val & GICR_VPENDBASER_Dirty);
+		if (!clean) {
+			count--;
+			cpu_relax();
+			udelay(1);
+		}
+	} while (!clean && count);
+
+	return val;
+}
+
 static void its_cpu_init_lpis(void)
 {
 	void __iomem *rbase = gic_data_rdist_rd_base();
@@ -2018,6 +2050,30 @@
 	val |= GICR_CTLR_ENABLE_LPIS;
 	writel_relaxed(val, rbase + GICR_CTLR);
 
+	if (gic_rdists->has_vlpis) {
+		void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+		/*
+		 * It's possible for a CPU to receive VLPIs before it is
+		 * scheduled as a vPE, especially for the first CPU, and a
+		 * VLPI with an INTID larger than 2^(IDbits+1) will be
+		 * considered out of range and dropped by the GIC.
+		 * So we initialize IDbits to a known value to avoid VLPI drops.
+		 */
+		val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+		pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
+			smp_processor_id(), val);
+		gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+		/*
+		 * Also clear the Valid bit of GICR_VPENDBASER, in case stale
+		 * programming was left in place and could corrupt memory.
+		 */
+		val = its_clear_vpend_valid(vlpi_base);
+		WARN_ON(val & GICR_VPENDBASER_Dirty);
+	}
+
 	/* Make sure the GIC has seen the above */
 	dsb(sy);
 }
@@ -2267,13 +2323,14 @@
 	kfree(its_dev);
 }
 
-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
 {
 	int idx;
 
-	idx = find_first_zero_bit(dev->event_map.lpi_map,
-				  dev->event_map.nr_lpis);
-	if (idx == dev->event_map.nr_lpis)
+	idx = bitmap_find_free_region(dev->event_map.lpi_map,
+				      dev->event_map.nr_lpis,
+				      get_count_order(nvecs));
+	if (idx < 0)
 		return -ENOSPC;
 
 	*hwirq = dev->event_map.lpi_base + idx;
@@ -2289,6 +2346,7 @@
 	struct its_device *its_dev;
 	struct msi_domain_info *msi_info;
 	u32 dev_id;
+	int err = 0;
 
 	/*
 	 * We ignore "dev" entierely, and rely on the dev_id that has
@@ -2311,6 +2369,7 @@
 		return -EINVAL;
 	}
 
+	mutex_lock(&its->dev_alloc_lock);
 	its_dev = its_find_device(its, dev_id);
 	if (its_dev) {
 		/*
@@ -2318,18 +2377,22 @@
 		 * another alias (PCI bridge of some sort). No need to
 		 * create the device.
 		 */
+		its_dev->shared = true;
 		pr_debug("Reusing ITT for devID %x\n", dev_id);
 		goto out;
 	}
 
 	its_dev = its_create_device(its, dev_id, nvec, true);
-	if (!its_dev)
-		return -ENOMEM;
+	if (!its_dev) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
 out:
+	mutex_unlock(&its->dev_alloc_lock);
 	info->scratchpad[0].ptr = its_dev;
-	return 0;
+	return err;
 }
 
 static struct msi_domain_ops its_msi_domain_ops = {
@@ -2369,21 +2432,21 @@
 	int err;
 	int i;
 
-	for (i = 0; i < nr_irqs; i++) {
-		err = its_alloc_device_irq(its_dev, &hwirq);
-		if (err)
-			return err;
+	err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
+	if (err)
+		return err;
 
-		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
+	for (i = 0; i < nr_irqs; i++) {
+		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
 		if (err)
 			return err;
 
 		irq_domain_set_hwirq_and_chip(domain, virq + i,
-					      hwirq, &its_irq_chip, its_dev);
+					      hwirq + i, &its_irq_chip, its_dev);
 		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
 		pr_debug("ID:%d pID:%d vID:%d\n",
-			 (int)(hwirq - its_dev->event_map.lpi_base),
-			 (int) hwirq, virq + i);
+			 (int)(hwirq + i - its_dev->event_map.lpi_base),
+			 (int)(hwirq + i), virq + i);
 	}
 
 	return 0;
@@ -2433,6 +2496,7 @@
 {
 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_node *its = its_dev->its;
 	int i;
 
 	for (i = 0; i < nr_irqs; i++) {
@@ -2447,8 +2511,14 @@
 		irq_domain_reset_irq_data(data);
 	}
 
-	/* If all interrupts have been freed, start mopping the floor */
-	if (bitmap_empty(its_dev->event_map.lpi_map,
+	mutex_lock(&its->dev_alloc_lock);
+
+	/*
+	 * If all interrupts have been freed, start mopping the
+	 * floor. This is conditioned on the device not being shared.
+	 */
+	if (!its_dev->shared &&
+	    bitmap_empty(its_dev->event_map.lpi_map,
 			 its_dev->event_map.nr_lpis)) {
 		its_lpi_free(its_dev->event_map.lpi_map,
 			     its_dev->event_map.lpi_base,
@@ -2460,6 +2530,8 @@
 		its_free_device(its_dev);
 	}
 
+	mutex_unlock(&its->dev_alloc_lock);
+
 	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
 }
 
@@ -2622,26 +2694,11 @@
 static void its_vpe_deschedule(struct its_vpe *vpe)
 {
 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
-	u32 count = 1000000;	/* 1s! */
-	bool clean;
 	u64 val;
 
-	/* We're being scheduled out */
-	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-	val &= ~GICR_VPENDBASER_Valid;
-	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+	val = its_clear_vpend_valid(vlpi_base);
 
-	do {
-		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-		clean = !(val & GICR_VPENDBASER_Dirty);
-		if (!clean) {
-			count--;
-			cpu_relax();
-			udelay(1);
-		}
-	} while (!clean && count);
-
-	if (unlikely(!clean && !count)) {
+	if (unlikely(val & GICR_VPENDBASER_Dirty)) {
 		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
 		vpe->idai = false;
 		vpe->pending_last = true;
@@ -3384,6 +3441,7 @@
 	}
 
 	raw_spin_lock_init(&its->lock);
+	mutex_init(&its->dev_alloc_lock);
 	INIT_LIST_HEAD(&its->entry);
 	INIT_LIST_HEAD(&its->its_device_list);
 	typer = gic_read_typer(its_base + GITS_TYPER);
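
The switch from a per-vector find_first_zero_bit() loop to a single bitmap_find_free_region() call matters for PCI multi-MSI: the vectors of one allocation must be contiguous and naturally aligned, and bitmap_find_free_region() hands out a 2^order-aligned run of 2^order bits, which is why the count is rounded with get_count_order(nvecs). A self-contained sketch of that allocation policy (mini_find_free_region() is a toy stand-in, not the kernel helper):

	#include <stdio.h>

	#define BITS_PER_LONG (8 * (int)sizeof(long))

	static int test_bit(const unsigned long *map, int bit)
	{
		return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1UL;
	}

	static void set_bit_(unsigned long *map, int bit)
	{
		map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
	}

	/* Toy bitmap_find_free_region(): claim a naturally aligned run of
	 * (1 << order) free bits and return its start, or -1 if none fits. */
	static int mini_find_free_region(unsigned long *map, int nbits, int order)
	{
		int n = 1 << order, start, i;

		for (start = 0; start + n <= nbits; start += n) {
			for (i = 0; i < n && !test_bit(map, start + i); i++)
				;
			if (i == n) {
				for (i = 0; i < n; i++)
					set_bit_(map, start + i);
				return start;
			}
		}
		return -1;
	}

	int main(void)
	{
		unsigned long map[2] = { 0 };

		/* nvecs = 3 rounds up to order 2: a 4-wide, 4-aligned block */
		printf("region starts at bit %d\n",
		       mini_find_free_region(map, 128, 2));
		return 0;
	}
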
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index ad70e7c..fbfa7ff 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -24,7 +24,7 @@
 	unsigned long		*bm;
 };
 
-static struct mutex		mbi_lock;
+static DEFINE_MUTEX(mbi_lock);
 static phys_addr_t		mbi_phys_base;
 static struct mbi_range		*mbi_ranges;
 static unsigned int		mbi_range_nr;
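
The one-line mbi_lock change is a correctness fix, not style: a bare "static struct mutex" is merely zero-initialized and never passes through mutex_init(), so its wait list is not a valid empty list and mutex debugging trips on first use. DEFINE_MUTEX() performs the static equivalent of mutex_init(). For contrast:

	#include <linux/mutex.h>

	/* Broken: never initialized; the first mutex_lock() operates on a
	 * zeroed wait list (and trips CONFIG_DEBUG_MUTEXES checks). */
	static struct mutex bad_lock;

	/* Correct: statically initialized, equivalent to mutex_init(). */
	static DEFINE_MUTEX(good_lock);

	/* Runtime alternative, e.g. for dynamically allocated objects:
	 *	mutex_init(&obj->lock);
	 */
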
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 0e5c34c..16c7637 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -894,6 +894,9 @@
 static int gic_cpu_pm_notifier(struct notifier_block *self,
 			       unsigned long cmd, void *v)
 {
+	if (from_suspend)
+		return NOTIFY_OK;
+
 	if (cmd == CPU_PM_EXIT) {
 		if (gic_dist_security_disabled())
 			gic_enable_redist(true);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 25f32e1..3496b61 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -34,6 +34,9 @@
 #define SEL_INT_PENDING		(1 << 6)
 #define SEL_INT_NUM_MASK	0x3f
 
+#define MMP2_ICU_INT_ROUTE_PJ4_IRQ	(1 << 5)
+#define MMP2_ICU_INT_ROUTE_PJ4_FIQ	(1 << 6)
+
 struct icu_chip_data {
 	int			nr_irqs;
 	unsigned int		virq_base;
@@ -190,7 +193,8 @@
 static const struct mmp_intc_conf mmp2_conf = {
 	.conf_enable	= 0x20,
 	.conf_disable	= 0x0,
-	.conf_mask	= 0x7f,
+	.conf_mask	= MMP2_ICU_INT_ROUTE_PJ4_IRQ |
+			  MMP2_ICU_INT_ROUTE_PJ4_FIQ,
 };
 
 static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index ca82313..603aabd 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -13,12 +13,13 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/soc/qcom/irq.h>
 #include <linux/spinlock.h>
-#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
 #define PDC_MAX_IRQS		138
+#define PDC_MAX_GPIO_IRQS	256
 
 #define CLEAR_INTR(reg, intr)	(reg & ~(1 << intr))
 #define ENABLE_INTR(reg, intr)	(reg | (1 << intr))
@@ -169,7 +170,6 @@
 			return (region->parent_base + pin - region->pin_base);
 	}
 
-	WARN_ON(1);
 	return ~0UL;
 }
 
@@ -232,6 +232,60 @@
 	.free		= irq_domain_free_irqs_common,
 };
 
+static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
+			       unsigned int nr_irqs, void *data)
+{
+	struct qcom_irq_fwspec *qcom_fwspec = data;
+	struct irq_fwspec *fwspec = &qcom_fwspec->fwspec;
+	struct irq_fwspec parent_fwspec;
+	irq_hw_number_t hwirq, parent_hwirq;
+	unsigned int type;
+	int ret;
+
+	ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return -EINVAL;
+
+	parent_hwirq = get_parent_hwirq(hwirq);
+	if (parent_hwirq == ~0UL)
+		return -EINVAL;
+
+	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+					    &qcom_pdc_gic_chip, NULL);
+	if (ret)
+		return ret;
+
+	qcom_fwspec->mask = true;
+
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		type = IRQ_TYPE_EDGE_RISING;
+
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		type = IRQ_TYPE_LEVEL_HIGH;
+
+	parent_fwspec.fwnode      = domain->parent->fwnode;
+	parent_fwspec.param_count = 3;
+	parent_fwspec.param[0]    = 0;
+	parent_fwspec.param[1]    = parent_hwirq;
+	parent_fwspec.param[2]    = type;
+
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static int qcom_pdc_gpio_domain_select(struct irq_domain *d,
+				       struct irq_fwspec *fwspec,
+				       enum irq_domain_bus_token bus_token)
+{
+	return (bus_token == DOMAIN_BUS_WAKEUP);
+}
+
+static const struct irq_domain_ops qcom_pdc_gpio_ops = {
+	.select		= qcom_pdc_gpio_domain_select,
+	.alloc		= qcom_pdc_gpio_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
 static int pdc_setup_pin_mapping(struct device_node *np)
 {
 	int ret, n;
@@ -270,7 +324,7 @@
 
 static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
 {
-	struct irq_domain *parent_domain, *pdc_domain;
+	struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
 	int ret;
 
 	pdc_base = of_iomap(node, 0);
@@ -301,6 +355,18 @@
 		goto fail;
 	}
 
+	pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain, 0,
+						      PDC_MAX_GPIO_IRQS,
+						      of_fwnode_handle(node),
+						      &qcom_pdc_gpio_ops, NULL);
+	if (!pdc_gpio_domain) {
+		pr_err("GIC domain add failed for GPIO domain\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP);
+
 	return 0;
 
 fail:
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 0ff517d..a4ceb61 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -852,7 +852,7 @@
 	u16 ret;
 
 	if (contr == 0) {
-		strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
+		strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
 		return CAPI_NOERROR;
 	}
 
@@ -860,7 +860,7 @@
 
 	ctr = get_capi_ctr_by_nr(contr);
 	if (ctr && ctr->state == CAPI_CTR_RUNNING) {
-		strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
+		strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
 		ret = CAPI_NOERROR;
 	} else
 		ret = CAPI_REGNOTINSTALLED;
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4ac378e..40ca1e8 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -423,7 +423,7 @@
 	int i, j;
 
 	for (j = 0; j < AVM_MAXVERSION; j++)
-		cinfo->version[j] = "\0\0" + 1;
+		cinfo->version[j] = "";
 	for (i = 0, j = 0;
 	     j < AVM_MAXVERSION && i < cinfo->versionlen;
 	     j++, i += cinfo->versionbuf[i] + 1)
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8e5b031..64a6371 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1170,11 +1170,13 @@
 		if (cs->debug & L1_DEB_LAPD)
 			debugl1(cs, "-> PH_REQUEST_PULL");
 #endif
+		spin_lock_irqsave(&cs->lock, flags);
 		if (!cs->tx_skb) {
 			test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
 			st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
 		} else
 			test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
+		spin_unlock_irqrestore(&cs->lock, flags);
 		break;
 	case (HW_RESET | REQUEST):
 		spin_lock_irqsave(&cs->lock, flags);
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index b730037..9cff667 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1456,15 +1456,19 @@
 {
 	modem_info *info = (modem_info *) tty->driver_data;
 
+	mutex_lock(&modem_info_mutex);
 	if (!old_termios)
 		isdn_tty_change_speed(info);
 	else {
 		if (tty->termios.c_cflag == old_termios->c_cflag &&
 		    tty->termios.c_ispeed == old_termios->c_ispeed &&
-		    tty->termios.c_ospeed == old_termios->c_ospeed)
+		    tty->termios.c_ospeed == old_termios->c_ospeed) {
+			mutex_unlock(&modem_info_mutex);
 			return;
+		}
 		isdn_tty_change_speed(info);
 	}
+	mutex_unlock(&modem_info_mutex);
 }
 
 /*
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 211ed6c..5789787 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -170,8 +170,8 @@
 	spin_lock_irqsave(&timer->dev->lock, flags);
 	if (timer->id >= 0)
 		list_move_tail(&timer->list, &timer->dev->expired);
-	spin_unlock_irqrestore(&timer->dev->lock, flags);
 	wake_up_interruptible(&timer->dev->wait);
+	spin_unlock_irqrestore(&timer->dev->lock, flags);
 }
 
 static int
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index a2e74fe..fd64df5 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -318,7 +318,9 @@
 
 	/* Let the programs run for a couple of ms and check the engine status */
 	usleep_range(3000, 6000);
-	lp55xx_read(chip, LP5523_REG_STATUS, &status);
+	ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
+	if (ret)
+		return ret;
 	status &= LP5523_ENG_STATUS_MASK;
 
 	if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index df80c89..5d3faae 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -100,8 +100,9 @@
 		led_data->pwm = devm_pwm_get(dev, led->name);
 	if (IS_ERR(led_data->pwm)) {
 		ret = PTR_ERR(led_data->pwm);
-		dev_err(dev, "unable to request PWM for %s: %d\n",
-			led->name, ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "unable to request PWM for %s: %d\n",
+				led->name, ret);
 		return ret;
 	}
 
diff --git a/drivers/leds/leds-qti-tri-led.c b/drivers/leds/leds-qti-tri-led.c
index ad996da..db5d132 100644
--- a/drivers/leds/leds-qti-tri-led.c
+++ b/drivers/leds/leds-qti-tri-led.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/bitops.h>
@@ -371,6 +371,8 @@
 	if (rc < 0)
 		return rc;
 
+	cancel_work_sync(&led_cdev->set_brightness_work);
+
 	mutex_lock(&led->lock);
 	if (led->breathing == breath)
 		goto unlock;
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 2940cdc..95be6e3 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -1252,15 +1252,22 @@
 
 	ret = pblk_line_alloc_bitmaps(pblk, line);
 	if (ret)
-		return ret;
+		goto fail;
 
 	if (!pblk_line_init_bb(pblk, line, 0)) {
-		list_add(&line->list, &l_mg->free_list);
-		return -EINTR;
+		ret = -EINTR;
+		goto fail;
 	}
 
 	pblk_rl_free_lines_dec(&pblk->rl, line, true);
 	return 0;
+
+fail:
+	spin_lock(&l_mg->free_lock);
+	list_add(&line->list, &l_mg->free_list);
+	spin_unlock(&l_mg->free_lock);
+
+	return ret;
 }
 
 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 879227d..c3e038d 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -158,9 +158,11 @@
 		w_ctx = &entry->w_ctx;
 
 		/* Check if the lba has been overwritten */
-		ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
-		if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
-			w_ctx->lba = ADDR_EMPTY;
+		if (w_ctx->lba != ADDR_EMPTY) {
+			ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
+			if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
+				w_ctx->lba = ADDR_EMPTY;
+		}
 
 		/* Mark up the entry as submittable again */
 		flags = READ_ONCE(w_ctx->flags);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 909ecad..8f61366 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -49,7 +49,7 @@
 	struct bio *bio_out;
 	struct bvec_iter iter_in;
 	struct bvec_iter iter_out;
-	sector_t cc_sector;
+	u64 cc_sector;
 	atomic_t cc_pending;
 	union {
 		struct skcipher_request *req;
@@ -81,7 +81,7 @@
 	struct convert_context *ctx;
 	struct scatterlist sg_in[4];
 	struct scatterlist sg_out[4];
-	sector_t iv_sector;
+	u64 iv_sector;
 };
 
 struct crypt_config;
@@ -160,7 +160,7 @@
 		struct iv_lmk_private lmk;
 		struct iv_tcw_private tcw;
 	} iv_gen_private;
-	sector_t iv_offset;
+	u64 iv_offset;
 	unsigned int iv_size;
 	unsigned short int sector_size;
 	unsigned char sector_shift;
@@ -932,7 +932,7 @@
 	if (IS_ERR(bip))
 		return PTR_ERR(bip);
 
-	tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
+	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
 
 	bip->bip_iter.bi_size = tag_len;
 	bip->bip_iter.bi_sector = io->cc->start + io->sector;
@@ -2414,9 +2414,21 @@
 	 * capi:cipher_api_spec-iv:ivopts
 	 */
 	tmp = &cipher_in[strlen("capi:")];
-	cipher_api = strsep(&tmp, "-");
-	*ivmode = strsep(&tmp, ":");
-	*ivopts = tmp;
+
+	/* Separate IV options if present, it can contain another '-' in hash name */
+	*ivopts = strrchr(tmp, ':');
+	if (*ivopts) {
+		**ivopts = '\0';
+		(*ivopts)++;
+	}
+	/* Parse IV mode */
+	*ivmode = strrchr(tmp, '-');
+	if (*ivmode) {
+		**ivmode = '\0';
+		(*ivmode)++;
+	}
+	/* The rest is crypto API spec */
+	cipher_api = tmp;
 
 	if (*ivmode && !strcmp(*ivmode, "lmk"))
 		cc->tfms_count = 64;
@@ -2486,11 +2498,8 @@
 		goto bad_mem;
 
 	chainmode = strsep(&tmp, "-");
-	*ivopts = strsep(&tmp, "-");
-	*ivmode = strsep(&*ivopts, ":");
-
-	if (tmp)
-		DMWARN("Ignoring unexpected additional cipher options");
+	*ivmode = strsep(&tmp, ":");
+	*ivopts = tmp;
 
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
@@ -2789,7 +2798,7 @@
 	}
 
 	ret = -EINVAL;
-	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
+	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
 		ti->error = "Invalid device sector";
 		goto bad;
 	}
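
The reworked capi: parsing is needed because the crypto API name itself may contain a '-', so splitting at the first '-' (the old strsep() behaviour) truncates it; scanning from the right with strrchr() peels off the IV options and IV mode first and leaves the rest as the cipher spec. A small userspace illustration; the mapping string is an example of the failing shape, not taken from this patch:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[] = "capi:xchacha12,aes-adiantum-plain64";
		char *tmp = buf + strlen("capi:");
		char *ivopts, *ivmode;

		ivopts = strrchr(tmp, ':');	/* IV options, if any */
		if (ivopts)
			*ivopts++ = '\0';

		ivmode = strrchr(tmp, '-');	/* IV mode follows the last '-' */
		if (ivmode)
			*ivmode++ = '\0';

		/* the cipher API spec keeps its internal '-' intact */
		printf("cipher=%s ivmode=%s ivopts=%s\n",
		       tmp, ivmode ? ivmode : "-", ivopts ? ivopts : "-");
		return 0;
	}

The old strsep()-based code would have split this example into cipher "xchacha12,aes" and IV mode "adiantum-plain64", both wrong.
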
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 2fb7bb4..fddffe2 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -141,7 +141,7 @@
 	unsigned long long tmpll;
 	char dummy;
 
-	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
+	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
 		ti->error = "Invalid device sector";
 		return -EINVAL;
 	}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 32aabe2..b86d243 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -213,7 +213,7 @@
 	devname = dm_shift_arg(&as);
 
 	r = -EINVAL;
-	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
+	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
 		ti->error = "Invalid device sector";
 		goto bad;
 	}
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 2fc4213..671c243 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -56,15 +56,17 @@
 	atomic_t nr_jobs;
 
 /*
- * We maintain three lists of jobs:
+ * We maintain four lists of jobs:
  *
  * i)   jobs waiting for pages
  * ii)  jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that have completed.
+ * iii) jobs that don't need to do any IO and just run a callback
+ * iv) jobs that have completed.
  *
- * All three of these are protected by job_lock.
+ * All four of these are protected by job_lock.
  */
 	spinlock_t job_lock;
+	struct list_head callback_jobs;
 	struct list_head complete_jobs;
 	struct list_head io_jobs;
 	struct list_head pages_jobs;
@@ -625,6 +627,7 @@
 	struct dm_kcopyd_client *kc = container_of(work,
 					struct dm_kcopyd_client, kcopyd_work);
 	struct blk_plug plug;
+	unsigned long flags;
 
 	/*
 	 * The order that these are called is *very* important.
@@ -633,6 +636,10 @@
 	 * list.  io jobs call wake when they complete and it all
 	 * starts again.
 	 */
+	spin_lock_irqsave(&kc->job_lock, flags);
+	list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
+
 	blk_start_plug(&plug);
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
@@ -650,7 +657,7 @@
 	struct dm_kcopyd_client *kc = job->kc;
 	atomic_inc(&kc->nr_jobs);
 	if (unlikely(!job->source.count))
-		push(&kc->complete_jobs, job);
+		push(&kc->callback_jobs, job);
 	else if (job->pages == &zero_page_list)
 		push(&kc->io_jobs, job);
 	else
@@ -858,7 +865,7 @@
 	job->read_err = read_err;
 	job->write_err = write_err;
 
-	push(&kc->complete_jobs, job);
+	push(&kc->callback_jobs, job);
 	wake(kc);
 }
 EXPORT_SYMBOL(dm_kcopyd_do_callback);
@@ -888,6 +895,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	spin_lock_init(&kc->job_lock);
+	INIT_LIST_HEAD(&kc->callback_jobs);
 	INIT_LIST_HEAD(&kc->complete_jobs);
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
@@ -939,6 +947,7 @@
 	/* Wait for completion of all jobs submitted by this client. */
 	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
 
+	BUG_ON(!list_empty(&kc->callback_jobs));
 	BUG_ON(!list_empty(&kc->complete_jobs));
 	BUG_ON(!list_empty(&kc->io_jobs));
 	BUG_ON(!list_empty(&kc->pages_jobs));
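
The new callback_jobs list closes a stall window: dm_kcopyd_do_callback() and zero-length copies used to push jobs straight onto complete_jobs, which the kcopyd worker might be draining at that moment, so a steady stream of such jobs could keep the worker spinning on the same list indefinitely. Splicing the list once at the top of the work function bounds each pass to the jobs present when it started. The generic shape of that drain pattern (struct kc and process_jobs() here are illustrative):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct kc {
		spinlock_t lock;
		struct list_head incoming;	/* cf. callback_jobs */
	};

	void process_jobs(struct list_head *jobs);

	/* "Snapshot, then drain": work re-queued by callbacks lands back on
	 * kc->incoming and is only seen on the next pass, so one pass is
	 * bounded by the jobs present when it started. */
	static void worker_pass(struct kc *kc)
	{
		LIST_HEAD(work);
		unsigned long flags;

		spin_lock_irqsave(&kc->lock, flags);
		list_splice_tail_init(&kc->incoming, &work);
		spin_unlock_irqrestore(&kc->lock, flags);

		process_jobs(&work);
	}
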
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 391537b..f0b088a 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -45,7 +45,7 @@
 	}
 
 	ret = -EINVAL;
-	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
+	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
 		ti->error = "Invalid device sector";
 		goto bad;
 	}
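
The tmpll != (sector_t)tmpll guard recurring through these dm targets (dm-crypt, dm-delay, dm-flakey above; dm-raid1 and dm-unstripe below) exists because the table argument is parsed as an unsigned long long but stored in a sector_t, which is only 32 bits on 32-bit kernels without CONFIG_LBDAF; the round-trip cast rejects values that would silently truncate. In isolation, assuming a 32-bit sector_t to show the failing case:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t sector_t;	/* 32-bit sector_t, as without LBDAF */

	int main(void)
	{
		unsigned long long tmpll = 0x100000001ULL;

		/* the cast truncates to 1, so the comparison catches it */
		if (tmpll != (sector_t)tmpll)
			fprintf(stderr, "Invalid device sector\n");
		return 0;
	}
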
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 79eab10..5a51151 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -943,7 +943,8 @@
 	char dummy;
 	int ret;
 
-	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
+	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
+	    offset != (sector_t)offset) {
 		ti->error = "Invalid offset";
 		return -EINVAL;
 	}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ae4b33d..36805b12 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -19,6 +19,7 @@
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
+#include <linux/semaphore.h>
 
 #include "dm.h"
 
@@ -105,6 +106,9 @@
 	/* The on disk metadata handler */
 	struct dm_exception_store *store;
 
+	/* Maximum number of in-flight COW jobs. */
+	struct semaphore cow_count;
+
 	struct dm_kcopyd_client *kcopyd_client;
 
 	/* Wait for events based on state_bits */
@@ -145,6 +149,19 @@
 #define RUNNING_MERGE          0
 #define SHUTDOWN_MERGE         1
 
+/*
+ * Maximum number of chunks being copied on write.
+ *
+ * The value was decided experimentally as a trade-off between memory
+ * consumption, stalling the kernel's workqueues and maintaining a high enough
+ * throughput.
+ */
+#define DEFAULT_COW_THRESHOLD 2048
+
+static int cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
+
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
 		"A percentage of time allocated for copy on write");
 
@@ -1190,6 +1207,8 @@
 		goto bad_hash_tables;
 	}
 
+	sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+
 	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
 	if (IS_ERR(s->kcopyd_client)) {
 		r = PTR_ERR(s->kcopyd_client);
@@ -1575,6 +1594,7 @@
 		rb_link_node(&pe->out_of_order_node, parent, p);
 		rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
 	}
+	up(&s->cow_count);
 }
 
 /*
@@ -1598,6 +1618,7 @@
 	dest.count = src.count;
 
 	/* Hand over to kcopyd */
+	down(&s->cow_count);
 	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
 }
 
@@ -1617,6 +1638,7 @@
 	pe->full_bio = bio;
 	pe->full_bio_end_io = bio->bi_end_io;
 
+	down(&s->cow_count);
 	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
 						   copy_callback, pe);
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 485626d..b065df3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1938,6 +1938,9 @@
 	 */
 	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+
+	/* io_pages is used for readahead */
+	q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 20b0776..ed3cace 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1678,7 +1678,7 @@
 	return r;
 }
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
 {
 	int r;
 	uint32_t ref_count;
@@ -1686,7 +1686,7 @@
 	down_read(&pmd->root_lock);
 	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
 	if (!r)
-		*result = (ref_count != 0);
+		*result = (ref_count > 1);
 	up_read(&pmd->root_lock);
 
 	return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 35e954e..f6be0d7 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -195,7 +195,7 @@
 
 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
 
 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1f225a1..cd4220e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -257,6 +257,7 @@
 
 	spinlock_t lock;
 	struct bio_list deferred_flush_bios;
+	struct bio_list deferred_flush_completions;
 	struct list_head prepared_mappings;
 	struct list_head prepared_discards;
 	struct list_head prepared_discards_pt2;
@@ -956,6 +957,39 @@
 	mempool_free(m, &m->tc->pool->mapping_pool);
 }
 
+static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
+{
+	struct pool *pool = tc->pool;
+	unsigned long flags;
+
+	/*
+	 * If the bio has the REQ_FUA flag set we must commit the metadata
+	 * before signaling its completion.
+	 */
+	if (!bio_triggers_commit(tc, bio)) {
+		bio_endio(bio);
+		return;
+	}
+
+	/*
+	 * Complete bio with an error if earlier I/O caused changes to the
+	 * metadata that can't be committed, e.g, due to I/O errors on the
+	 * metadata device.
+	 */
+	if (dm_thin_aborted_changes(tc->td)) {
+		bio_io_error(bio);
+		return;
+	}
+
+	/*
+	 * Batch together any bios that trigger commits and then issue a
+	 * single commit for them in process_deferred_bios().
+	 */
+	spin_lock_irqsave(&pool->lock, flags);
+	bio_list_add(&pool->deferred_flush_completions, bio);
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
@@ -988,7 +1022,7 @@
 	 */
 	if (bio) {
 		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
-		bio_endio(bio);
+		complete_overwrite_bio(tc, bio);
 	} else {
 		inc_all_io_entry(tc->pool, m->cell->holder);
 		remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1048,7 +1082,7 @@
 	 * passdown we have to check that these blocks are now unused.
 	 */
 	int r = 0;
-	bool used = true;
+	bool shared = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
@@ -1058,11 +1092,11 @@
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
-			r = dm_pool_block_is_used(pool->pmd, b, &used);
+			r = dm_pool_block_is_shared(pool->pmd, b, &shared);
 			if (r)
 				goto out;
 
-			if (!used)
+			if (!shared)
 				break;
 		}
 
@@ -1071,11 +1105,11 @@
 
 		/* find end of run */
 		for (e = b + 1; e != end; e++) {
-			r = dm_pool_block_is_used(pool->pmd, e, &used);
+			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
 			if (r)
 				goto out;
 
-			if (used)
+			if (shared)
 				break;
 		}
 
@@ -2317,7 +2351,7 @@
 {
 	unsigned long flags;
 	struct bio *bio;
-	struct bio_list bios;
+	struct bio_list bios, bio_completions;
 	struct thin_c *tc;
 
 	tc = get_first_thin(pool);
@@ -2328,26 +2362,36 @@
 	}
 
 	/*
-	 * If there are any deferred flush bios, we must commit
-	 * the metadata before issuing them.
+	 * If there are any deferred flush bios, we must commit the metadata
+	 * before issuing them or signaling their completion.
 	 */
 	bio_list_init(&bios);
+	bio_list_init(&bio_completions);
+
 	spin_lock_irqsave(&pool->lock, flags);
 	bio_list_merge(&bios, &pool->deferred_flush_bios);
 	bio_list_init(&pool->deferred_flush_bios);
+
+	bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
+	bio_list_init(&pool->deferred_flush_completions);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	if (bio_list_empty(&bios) &&
+	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
 	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
 		return;
 
 	if (commit(pool)) {
+		bio_list_merge(&bios, &bio_completions);
+
 		while ((bio = bio_list_pop(&bios)))
 			bio_io_error(bio);
 		return;
 	}
 	pool->last_commit_jiffies = jiffies;
 
+	while ((bio = bio_list_pop(&bio_completions)))
+		bio_endio(bio);
+
 	while ((bio = bio_list_pop(&bios)))
 		generic_make_request(bio);
 }
@@ -2954,6 +2998,7 @@
 	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
 	spin_lock_init(&pool->lock);
 	bio_list_init(&pool->deferred_flush_bios);
+	bio_list_init(&pool->deferred_flush_completions);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
 	INIT_LIST_HEAD(&pool->prepared_discards);
 	INIT_LIST_HEAD(&pool->prepared_discards_pt2);
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
index 954b7ab..e673dac 100644
--- a/drivers/md/dm-unstripe.c
+++ b/drivers/md/dm-unstripe.c
@@ -78,7 +78,7 @@
 		goto err;
 	}
 
-	if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) {
+	if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
 		ti->error = "Invalid striped device offset";
 		goto err;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1d54109..fa47249 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1863,6 +1863,20 @@
 		reschedule_retry(r1_bio);
 }
 
+static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
+{
+	sector_t sync_blocks = 0;
+	sector_t s = r1_bio->sector;
+	long sectors_to_go = r1_bio->sectors;
+
+	/* make sure these bits don't get cleared. */
+	do {
+		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
+		s += sync_blocks;
+		sectors_to_go -= sync_blocks;
+	} while (sectors_to_go > 0);
+}
+
 static void end_sync_write(struct bio *bio)
 {
 	int uptodate = !bio->bi_status;
@@ -1874,15 +1888,7 @@
 	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
 
 	if (!uptodate) {
-		sector_t sync_blocks = 0;
-		sector_t s = r1_bio->sector;
-		long sectors_to_go = r1_bio->sectors;
-		/* make sure these bits doesn't get cleared. */
-		do {
-			md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
-			s += sync_blocks;
-			sectors_to_go -= sync_blocks;
-		} while (sectors_to_go > 0);
+		abort_sync_write(mddev, r1_bio);
 		set_bit(WriteErrorSeen, &rdev->flags);
 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
 			set_bit(MD_RECOVERY_NEEDED, &
@@ -2172,8 +2178,10 @@
 		     (i == r1_bio->read_disk ||
 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
 			continue;
-		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
+			abort_sync_write(mddev, r1_bio);
 			continue;
+		}
 
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 811427e..9df1334 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1208,7 +1208,9 @@
 		struct bio *split = bio_split(bio, max_sectors,
 					      gfp, &conf->bio_split);
 		bio_chain(split, bio);
+		allow_barrier(conf);
 		generic_make_request(bio);
+		wait_barrier(conf);
 		bio = split;
 		r10_bio->master_bio = bio;
 		r10_bio->sectors = max_sectors;
@@ -1513,7 +1515,9 @@
 		struct bio *split = bio_split(bio, r10_bio->sectors,
 					      GFP_NOIO, &conf->bio_split);
 		bio_chain(split, bio);
+		allow_barrier(conf);
 		generic_make_request(bio);
+		wait_barrier(conf);
 		bio = split;
 		r10_bio->master_bio = bio;
 	}
@@ -4626,7 +4630,6 @@
 	atomic_inc(&r10_bio->remaining);
 	read_bio->bi_next = NULL;
 	generic_make_request(read_bio);
-	sector_nr += nr_sectors;
 	sectors_done += nr_sectors;
 	if (sector_nr <= last)
 		goto read_more;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index e6e925a..6518b01 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@
 }
 
 static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
-			  sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+		struct r5conf *conf,
+		sector_t stripe_sect,
+		int noblock)
 {
 	struct stripe_head *sh;
 
-	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+	sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
 	if (!sh)
 		return NULL;  /* no more stripe available */
 
@@ -2150,7 +2152,7 @@
 						stripe_sect);
 
 		if (!sh) {
-			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
 			/*
 			 * cannot get stripe from raid5_get_active_stripe
 			 * try replay some stripes
@@ -2159,20 +2161,29 @@
 				r5c_recovery_replay_stripes(
 					cached_stripe_list, ctx);
 				sh = r5c_recovery_alloc_stripe(
-					conf, stripe_sect);
+					conf, stripe_sect, 1);
 			}
 			if (!sh) {
+				int new_size = conf->min_nr_stripes * 2;
 				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
 					mdname(mddev),
-					conf->min_nr_stripes * 2);
-				raid5_set_cache_size(mddev,
-						     conf->min_nr_stripes * 2);
-				sh = r5c_recovery_alloc_stripe(conf,
-							       stripe_sect);
+					new_size);
+				ret = raid5_set_cache_size(mddev, new_size);
+				if (conf->min_nr_stripes <= new_size / 2) {
+					pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+						mdname(mddev),
+						ret,
+						new_size,
+						conf->min_nr_stripes,
+						conf->max_nr_stripes);
+					return -ENOMEM;
+				}
+				sh = r5c_recovery_alloc_stripe(
+					conf, stripe_sect, 0);
 			}
 			if (!sh) {
 				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
-				       mdname(mddev));
+					mdname(mddev));
 				return -ENOMEM;
 			}
 			list_add_tail(&sh->lru, cached_stripe_list);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e4e98f4..45a3551 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6357,6 +6357,7 @@
 int
 raid5_set_cache_size(struct mddev *mddev, int size)
 {
+	int result = 0;
 	struct r5conf *conf = mddev->private;
 
 	if (size <= 16 || size > 32768)
@@ -6373,11 +6374,14 @@
 
 	mutex_lock(&conf->cache_size_mutex);
 	while (size > conf->max_nr_stripes)
-		if (!grow_one_stripe(conf, GFP_KERNEL))
+		if (!grow_one_stripe(conf, GFP_KERNEL)) {
+			conf->min_nr_stripes = conf->max_nr_stripes;
+			result = -ENOMEM;
 			break;
+		}
 	mutex_unlock(&conf->cache_size_mutex);
 
-	return 0;
+	return result;
 }
 EXPORT_SYMBOL(raid5_set_cache_size);
 
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index a537e51..a7ea27d 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -442,7 +442,7 @@
 				(adap->needs_hpd &&
 				 (!adap->is_configured && !adap->is_configuring)) ||
 				kthread_should_stop() ||
-				(!adap->transmitting &&
+				(!adap->transmit_in_progress &&
 				 !list_empty(&adap->transmit_queue)),
 				msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
 			timeout = err == 0;
@@ -450,7 +450,7 @@
 			/* Otherwise we just wait for something to happen. */
 			wait_event_interruptible(adap->kthread_waitq,
 				kthread_should_stop() ||
-				(!adap->transmitting &&
+				(!adap->transmit_in_progress &&
 				 !list_empty(&adap->transmit_queue)));
 		}
 
@@ -475,6 +475,7 @@
 			pr_warn("cec-%s: message %*ph timed out\n", adap->name,
 				adap->transmitting->msg.len,
 				adap->transmitting->msg.msg);
+			adap->transmit_in_progress = false;
 			adap->tx_timeouts++;
 			/* Just give up on this. */
 			cec_data_cancel(adap->transmitting,
@@ -486,7 +487,7 @@
 		 * If we are still transmitting, or there is nothing new to
 		 * transmit, then just continue waiting.
 		 */
-		if (adap->transmitting || list_empty(&adap->transmit_queue))
+		if (adap->transmit_in_progress || list_empty(&adap->transmit_queue))
 			goto unlock;
 
 		/* Get a new message to transmit */
@@ -532,6 +533,8 @@
 		if (adap->ops->adap_transmit(adap, data->attempts,
 					     signal_free_time, &data->msg))
 			cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
+		else
+			adap->transmit_in_progress = true;
 
 unlock:
 		mutex_unlock(&adap->lock);
@@ -562,14 +565,17 @@
 	data = adap->transmitting;
 	if (!data) {
 		/*
-		 * This can happen if a transmit was issued and the cable is
+		 * This might happen if a transmit was issued and the cable is
 		 * unplugged while the transmit is ongoing. Ignore this
 		 * transmit in that case.
 		 */
-		dprintk(1, "%s was called without an ongoing transmit!\n",
-			__func__);
-		goto unlock;
+		if (!adap->transmit_in_progress)
+			dprintk(1, "%s was called without an ongoing transmit!\n",
+				__func__);
+		adap->transmit_in_progress = false;
+		goto wake_thread;
 	}
+	adap->transmit_in_progress = false;
 
 	msg = &data->msg;
 
@@ -635,7 +641,6 @@
 	 * for transmitting or to retry the current message.
 	 */
 	wake_up_interruptible(&adap->kthread_waitq);
-unlock:
 	mutex_unlock(&adap->lock);
 }
 EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
@@ -1483,8 +1488,11 @@
 		if (adap->monitor_all_cnt)
 			WARN_ON(call_op(adap, adap_monitor_all_enable, false));
 		mutex_lock(&adap->devnode.lock);
-		if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
+		if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) {
 			WARN_ON(adap->ops->adap_enable(adap, false));
+			adap->transmit_in_progress = false;
+			wake_up_interruptible(&adap->kthread_waitq);
+		}
 		mutex_unlock(&adap->devnode.lock);
 		if (phys_addr == CEC_PHYS_ADDR_INVALID)
 			return;
@@ -1492,6 +1500,7 @@
 
 	mutex_lock(&adap->devnode.lock);
 	adap->last_initiator = 0xff;
+	adap->transmit_in_progress = false;
 
 	if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
 	    adap->ops->adap_enable(adap, true)) {
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index 6e31142..0496d93 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -601,8 +601,9 @@
 			break;
 		/* Was the message ACKed? */
 		ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v;
-		if (!ack && !pin->tx_ignore_nack_until_eom &&
-		    pin->tx_bit / 10 < pin->tx_msg.len && !pin->tx_post_eom) {
+		if (!ack && (!pin->tx_ignore_nack_until_eom ||
+		    pin->tx_bit / 10 == pin->tx_msg.len - 1) &&
+		    !pin->tx_post_eom) {
 			/*
 			 * Note: the CEC spec is ambiguous regarding
 			 * what action to take when a NACK appears
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index f40ab57..2036b94 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -1738,7 +1738,7 @@
 		unsigned s;	\
 	\
 		for (s = 0; s < len; s++) {	\
-			u8 chr = font8x16[text[s] * 16 + line];	\
+			u8 chr = font8x16[(u8)text[s] * 16 + line];	\
 	\
 			if (hdiv == 2 && tpg->hflip) { \
 				pos[3] = (chr & (0x01 << 6) ? fg : bg);	\
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 16c7b20..6889c25 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -800,6 +800,9 @@
 		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
 		q->memory = memory;
 		q->waiting_for_buffers = !q->is_output;
+	} else if (q->memory != memory) {
+		dprintk(1, "memory model mismatch\n");
+		return -EINVAL;
 	}
 
 	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
@@ -1930,9 +1933,13 @@
 			return -EINVAL;
 		}
 	}
+
+	mutex_lock(&q->mmap_lock);
+
 	if (vb2_fileio_is_active(q)) {
 		dprintk(1, "mmap: file io in progress\n");
-		return -EBUSY;
+		ret = -EBUSY;
+		goto unlock;
 	}
 
 	/*
@@ -1940,7 +1947,7 @@
 	 */
 	ret = __find_plane_by_offset(q, off, &buffer, &plane);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	vb = q->bufs[buffer];
 
@@ -1953,11 +1960,13 @@
 	if (length < (vma->vm_end - vma->vm_start)) {
 		dprintk(1,
 			"MMAP invalid, as it would overflow buffer length\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
-	mutex_lock(&q->mmap_lock);
 	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+
+unlock:
 	mutex_unlock(&q->mmap_lock);
 	if (ret)
 		return ret;
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 1c933b2..3ef5df1 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -968,7 +968,8 @@
 	return r->operand[7];
 }
 
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
+		    unsigned int *len)
 {
 	struct avc_command_frame *c = (void *)fdtv->avc_data;
 	struct avc_response_frame *r = (void *)fdtv->avc_data;
@@ -1009,7 +1010,8 @@
 	return ret;
 }
 
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
+		unsigned int *len)
 {
 	struct avc_command_frame *c = (void *)fdtv->avc_data;
 	struct avc_response_frame *r = (void *)fdtv->avc_data;
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
index 876cdec..009905a 100644
--- a/drivers/media/firewire/firedtv.h
+++ b/drivers/media/firewire/firedtv.h
@@ -124,8 +124,10 @@
 		    struct dvb_diseqc_master_cmd *diseqcmd);
 void avc_remote_ctrl_work(struct work_struct *work);
 int avc_register_remote_control(struct firedtv *fdtv);
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
+		    unsigned int *len);
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
+		unsigned int *len);
 int avc_ca_reset(struct firedtv *fdtv);
 int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
 int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
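
The (u8)text[s] cast in the tpg hunk above and the char to unsigned char prototype changes in firedtv fix the same class of bug: plain char may be signed, so bytes of 0x80 and above sign-extend to negative values and turn array offsets negative. A minimal demonstration:

	#include <stdio.h>

	int main(void)
	{
		char c = '\xe9';	/* 0xe9; negative where char is signed */
		int bad  = c * 16;		/* -368: out-of-bounds offset */
		int good = (unsigned char)c * 16;	/* 3728: valid offset */

		printf("bad=%d good=%d\n", bad, good);
		return 0;
	}
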
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 82af974..63c9ac2 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -61,6 +61,7 @@
 	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
 	depends on SND_SOC
 	select SND_PCM
+	select HDMI
 	---help---
 	  V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
 
@@ -610,6 +611,7 @@
 	tristate "Sony IMX274 sensor support"
 	depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
 	depends on MEDIA_CAMERA_SUPPORT
+	select REGMAP_I2C
 	---help---
 	  This is a V4L2 sensor driver for the Sony IMX274
 	  CMOS image sensor.
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 5b008b0..aa8b04c 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -578,7 +578,7 @@
 	.type = V4L2_DV_BT_656_1120,
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
-	V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+	V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
 		V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
 			V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
 		V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index f3899cc..88349b5 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -130,7 +130,7 @@
 	.type = V4L2_DV_BT_656_1120,
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
-	V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
+	V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
 		ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
 		V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
 			V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index c786981..f01964c 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -766,7 +766,7 @@
 	.type = V4L2_DV_BT_656_1120,
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
-	V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+	V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
 		V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
 			V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
 		V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
@@ -777,7 +777,7 @@
 	.type = V4L2_DV_BT_656_1120,
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
-	V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+	V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000,
 		V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
 			V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
 		V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 71fe565..bb43a75 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -663,7 +663,7 @@
 	.type = V4L2_DV_BT_656_1120,
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
-	V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+	V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
 		V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
 			V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
 		V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
@@ -674,7 +674,7 @@
 	.type = V4L2_DV_BT_656_1120,
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
-	V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+	V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000,
 		V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
 			V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
 		V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index f8c70f1..8cc3bdb 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -636,16 +636,19 @@
 
 static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
 {
+	unsigned int uint_val;
 	int err;
 
-	err = regmap_read(priv->regmap, addr, (unsigned int *)val);
+	err = regmap_read(priv->regmap, addr, &uint_val);
 	if (err)
 		dev_err(&priv->client->dev,
 			"%s : i2c read failed, addr = %x\n", __func__, addr);
 	else
 		dev_dbg(&priv->client->dev,
 			"%s : addr 0x%x, val=0x%x\n", __func__,
-			addr, *val);
+			addr, uint_val);
+
+	*val = uint_val;
 	return err;
 }
 
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 30b15e9..8e7a2a5 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2020,6 +2020,7 @@
 	struct ov5640_dev *sensor = to_ov5640_dev(sd);
 	const struct ov5640_mode_info *new_mode;
 	struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
+	struct v4l2_mbus_framefmt *fmt;
 	int ret;
 
 	if (format->pad != 0)
@@ -2037,22 +2038,20 @@
 	if (ret)
 		goto out;
 
-	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-		struct v4l2_mbus_framefmt *fmt =
-			v4l2_subdev_get_try_format(sd, cfg, 0);
+	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+		fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+	else
+		fmt = &sensor->fmt;
 
-		*fmt = *mbus_fmt;
-		goto out;
-	}
+	*fmt = *mbus_fmt;
 
 	if (new_mode != sensor->current_mode) {
 		sensor->current_mode = new_mode;
 		sensor->pending_mode_change = true;
 	}
-	if (mbus_fmt->code != sensor->fmt.code) {
-		sensor->fmt = *mbus_fmt;
+	if (mbus_fmt->code != sensor->fmt.code)
 		sensor->pending_fmt_change = true;
-	}
+
 out:
 	mutex_unlock(&sensor->lock);
 	return ret;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index ff25ea9..26070fb 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -59,7 +59,7 @@
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
 	/* Pixel clock from REF_01 p. 20. Min/max height/width are unknown */
-	V4L2_INIT_BT_TIMINGS(1, 10000, 1, 10000, 0, 165000000,
+	V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 13000000, 165000000,
 			V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
 			V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
 			V4L2_DV_BT_CAP_PROGRESSIVE |
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 498ad23..f5ee280 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -49,7 +49,7 @@
 	.type = V4L2_DV_BT_656_1120,
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
-	V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
+	V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1080, 25000000, 148500000,
 		V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
 };
 
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 06d29d8..f27d294 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -510,7 +510,12 @@
 
 static void video_i2c_release(struct video_device *vdev)
 {
-	kfree(video_get_drvdata(vdev));
+	struct video_i2c_data *data = video_get_drvdata(vdev);
+
+	v4l2_device_unregister(&data->v4l2_dev);
+	mutex_destroy(&data->lock);
+	mutex_destroy(&data->queue_lock);
+	kfree(data);
 }
 
 static int video_i2c_probe(struct i2c_client *client,
@@ -608,10 +613,6 @@
 	struct video_i2c_data *data = i2c_get_clientdata(client);
 
 	video_unregister_device(&data->vdev);
-	v4l2_device_unregister(&data->v4l2_dev);
-
-	mutex_destroy(&data->lock);
-	mutex_destroy(&data->queue_lock);
 
 	return 0;
 }
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 39804d8..fd5c52b 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -23,6 +23,7 @@
 #include <linux/moduleparam.h>
 #include <linux/kmod.h>
 #include <linux/kernel.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
@@ -41,6 +42,18 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CX23885_VERSION);
 
+/*
+ * Some platforms have been found to require periodic resetting of the DMA
+ * engine. Ryzen and XEON platforms are known to be affected. The symptom
+ * encountered is "mpeg risc op code error". Only Ryzen platforms employ
+ * this workaround if the option equals 1. The workaround can be explicitly
+ * disabled for all platforms by setting to 0, the workaround can be forced
+ * on for any platform by setting to 2.
+ */
+static unsigned int dma_reset_workaround = 1;
+module_param(dma_reset_workaround, int, 0644);
+MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
+
 static unsigned int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "enable debug messages");
@@ -603,8 +616,13 @@
 
 static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
 {
-	uint32_t reg1_val = cx_read(TC_REQ); /* read-only */
-	uint32_t reg2_val = cx_read(TC_REQ_SET);
+	uint32_t reg1_val, reg2_val;
+
+	if (!dev->need_dma_reset)
+		return;
+
+	reg1_val = cx_read(TC_REQ); /* read-only */
+	reg2_val = cx_read(TC_REQ_SET);
 
 	if (reg1_val && reg2_val) {
 		cx_write(TC_REQ, reg1_val);
@@ -2058,6 +2076,37 @@
 	/* TODO: 23-19 */
 }
 
+static struct {
+	int vendor, dev;
+} const broken_dev_id[] = {
+	/* According to
+	 * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
+	 * 0x1451 is the PCI ID for the IOMMU found on Ryzen
+	 */
+	{ PCI_VENDOR_ID_AMD, 0x1451 },
+};
+
+static bool cx23885_does_need_dma_reset(void)
+{
+	int i;
+	struct pci_dev *pdev = NULL;
+
+	if (dma_reset_workaround == 0)
+		return false;
+	else if (dma_reset_workaround == 2)
+		return true;
+
+	for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
+		pdev = pci_get_device(broken_dev_id[i].vendor,
+				      broken_dev_id[i].dev, NULL);
+		if (pdev) {
+			pci_dev_put(pdev);
+			return true;
+		}
+	}
+	return false;
+}
+
 static int cx23885_initdev(struct pci_dev *pci_dev,
 			   const struct pci_device_id *pci_id)
 {
@@ -2069,6 +2118,8 @@
 	if (NULL == dev)
 		return -ENOMEM;
 
+	dev->need_dma_reset = cx23885_does_need_dma_reset();
+
 	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
 	if (err < 0)
 		goto fail_free;
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index d54c7ee..cf965ef 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -451,6 +451,8 @@
 	/* Analog raw audio */
 	struct cx23885_audio_dev   *audio_dev;
 
+	/* Does the system require periodic DMA resets? */
+	unsigned int		need_dma_reset:1;
 };
 
 static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
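
Since dma_reset_workaround is registered with mode 0644, it can be set at load time, for example with "modprobe cx23885 dma_reset_workaround=2" to force the reset behaviour on, and read or changed afterwards through /sys/module/cx23885/parameters/dma_reset_workaround. Note that cx23885_does_need_dma_reset() is evaluated at probe time, so a runtime change only affects devices probed afterwards; the modprobe invocation is standard module-parameter usage, not taken from this patch.
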
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index d26c2d8..d20d3df 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -991,16 +991,15 @@
 		else
 			coda_write(dev, CODA_STD_H264,
 				   CODA_CMD_ENC_SEQ_COD_STD);
-		if (ctx->params.h264_deblk_enabled) {
-			value = ((ctx->params.h264_deblk_alpha &
-				  CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
-				 CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
-				((ctx->params.h264_deblk_beta &
-				  CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
-				 CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
-		} else {
-			value = 1 << CODA_264PARAM_DISABLEDEBLK_OFFSET;
-		}
+		value = ((ctx->params.h264_disable_deblocking_filter_idc &
+			  CODA_264PARAM_DISABLEDEBLK_MASK) <<
+			 CODA_264PARAM_DISABLEDEBLK_OFFSET) |
+			((ctx->params.h264_slice_alpha_c0_offset_div2 &
+			  CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
+			 CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
+			((ctx->params.h264_slice_beta_offset_div2 &
+			  CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
+			 CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
 		coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
 		break;
 	case V4L2_PIX_FMT_JPEG:
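
The rework above stops treating deblocking as a boolean and instead programs the full disable_deblocking_filter_idc plus the two signed slice offsets. A condensed sketch of the register packing follows; pack_h264_deblk is a hypothetical helper, while the CODA_264PARAM_* masks and offsets are the ones defined in the coda_regs.h hunk further down:

    /* Assemble the CODA_CMD_ENC_SEQ_264_PARA deblocking bits.
     * disable_idc: 0 = enabled, 1 = disabled, 2 = disabled at slice
     * boundaries; alpha/beta are the signed -6..6 offsets divided by 2.
     */
    static u32 pack_h264_deblk(u8 disable_idc, s8 alpha_div2, s8 beta_div2)
    {
    	return ((disable_idc & CODA_264PARAM_DISABLEDEBLK_MASK) <<
    			CODA_264PARAM_DISABLEDEBLK_OFFSET) |
    	       (((u8)alpha_div2 & CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
    			CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
    	       (((u8)beta_div2 & CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
    			CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
    }

Masking a negative s8 offset to the 4-bit field width leaves its two's-complement encoding, which is how the signed alpha/beta offsets are expressed in the hardware field.
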
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index bf7b841..19d92ed 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1793,14 +1793,13 @@
 		ctx->params.h264_max_qp = ctrl->val;
 		break;
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
-		ctx->params.h264_deblk_alpha = ctrl->val;
+		ctx->params.h264_slice_alpha_c0_offset_div2 = ctrl->val;
 		break;
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
-		ctx->params.h264_deblk_beta = ctrl->val;
+		ctx->params.h264_slice_beta_offset_div2 = ctrl->val;
 		break;
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
-		ctx->params.h264_deblk_enabled = (ctrl->val ==
-				V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+		ctx->params.h264_disable_deblocking_filter_idc = ctrl->val;
 		break;
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
 		/* TODO: switch between baseline and constrained baseline */
@@ -1882,13 +1881,13 @@
 	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
 		V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51);
 	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
-		V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, 0, 15, 1, 0);
+		V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0);
 	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
-		V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, 0, 15, 1, 0);
+		V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0);
 	v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
 		V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
-		V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0,
-		V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+		V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
+		0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
 	v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
 		V4L2_CID_MPEG_VIDEO_H264_PROFILE,
 		V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 19ac0b9..2469ca1 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -115,9 +115,9 @@
 	u8			h264_inter_qp;
 	u8			h264_min_qp;
 	u8			h264_max_qp;
-	u8			h264_deblk_enabled;
-	u8			h264_deblk_alpha;
-	u8			h264_deblk_beta;
+	u8			h264_disable_deblocking_filter_idc;
+	s8			h264_slice_alpha_c0_offset_div2;
+	s8			h264_slice_beta_offset_div2;
 	u8			h264_profile_idc;
 	u8			h264_level_idc;
 	u8			mpeg4_intra_qp;
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 5e7b00a..e675e38 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -292,7 +292,7 @@
 #define		CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET	8
 #define		CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK	0x0f
 #define		CODA_264PARAM_DISABLEDEBLK_OFFSET		6
-#define		CODA_264PARAM_DISABLEDEBLK_MASK		0x01
+#define		CODA_264PARAM_DISABLEDEBLK_MASK		0x03
 #define		CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET	5
 #define		CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_MASK	0x01
 #define		CODA_264PARAM_CHROMAQPOFFSET_OFFSET		0
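
The mask widening above is needed because disable_deblocking_filter_idc is a three-valued field (0 = enabled, 1 = disabled, 2 = disabled at slice boundaries), which no longer fits in one bit. The signed offsets keep their 4-bit masks: -6..6 fits in 4-bit two's complement. A tiny self-checking example of that encoding (plain C, illustrative only):

    #include <assert.h>

    int main(void)
    {
    	signed char alpha = -6;

    	/* 4-bit two's complement: -6 -> 0xA, +6 -> 0x6 */
    	assert(((unsigned char)alpha & 0x0f) == 0x0a);
    	assert(((unsigned char)6 & 0x0f) == 0x06);
    	return 0;
    }
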
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 18c035e..df1ae6b 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -740,7 +740,7 @@
 	if (ret) {
 		v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
 			 def_output);
-		return ret;
+		goto fail_kfree_amp;
 	}
 
 	printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
@@ -748,12 +748,15 @@
 	if (ret) {
 		v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
 			 def_mode);
-		return ret;
+		goto fail_kfree_amp;
 	}
 	vpbe_dev->initialized = 1;
 	/* TBD handling of bootargs for default output and mode */
 	return 0;
 
+fail_kfree_amp:
+	mutex_lock(&vpbe_dev->lock);
+	kfree(vpbe_dev->amp);
 fail_kfree_encoders:
 	kfree(vpbe_dev->encoders);
 fail_dev_unregister:
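
The vpbe fix extends the usual kernel unwind ladder: each failure point jumps to a label that releases exactly what has been acquired so far, and the labels run in reverse order of acquisition. (The mutex_lock() before the new label matches the locking state the pre-existing labels below it appear to expect.) A generic sketch of the idiom, with illustrative names (dev_ctx, enable_hw):

    #include <linux/slab.h>

    static int setup(struct dev_ctx *ctx)
    {
    	int ret;

    	ctx->a = kzalloc(sizeof(*ctx->a), GFP_KERNEL);
    	if (!ctx->a)
    		return -ENOMEM;

    	ctx->b = kzalloc(sizeof(*ctx->b), GFP_KERNEL);
    	if (!ctx->b) {
    		ret = -ENOMEM;
    		goto fail_free_a;
    	}

    	ret = enable_hw(ctx);		/* hypothetical final step */
    	if (ret)
    		goto fail_free_b;

    	return 0;

    fail_free_b:
    	kfree(ctx->b);
    fail_free_a:
    	kfree(ctx->a);
    	return ret;
    }
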
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
index 2c388d0..63e88dd 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_HW_INTF_H_
@@ -73,4 +73,8 @@
 	void                        *hw_priv;
 };
 
+/* hardware event callback function type */
+typedef int (*cam_hw_mgr_event_cb_func)(void *priv, uint32_t evt_id,
+	void *evt_data);
+
 #endif /* _CAM_HW_INTF_H_ */
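
The new typedef gives lower HW layers a uniform way to call back into their hardware manager without a compile-time dependency on it. A minimal sketch of how such a callback gets wired and invoked (struct hw_block and hw_block_notify are illustrative; only the typedef comes from the hunk above):

    struct hw_block {
    	cam_hw_mgr_event_cb_func  event_cb;	/* from cam_hw_intf.h */
    	void                     *cb_priv;	/* hw manager context */
    };

    static int hw_block_notify(struct hw_block *hw, uint32_t evt_id,
    	void *evt_data)
    {
    	if (!hw->event_cb)
    		return -EINVAL;

    	/* bottom half hands the event back to the hw manager */
    	return hw->event_cb(hw->cb_priv, evt_id, evt_data);
    }

In the IFE changes below, cam_ife_hw_mgr_event_handler is installed as this callback via vfe_acquire.event_cb, with the manager context carried in vfe_acquire.priv.
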
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index 261c457..1dcf4ee 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -31,6 +31,7 @@
  * @sfr_buf: buffer for subsystem failure reason[SFR]
  * @sec_heap: secondary heap hfi memory for firmware
  * @qdss: qdss mapped memory for fw
+ * @io_mem: io memory info
  * @icp_base: icp base address
  */
 struct hfi_mem_info {
@@ -42,6 +43,7 @@
 	struct hfi_mem sec_heap;
 	struct hfi_mem shmem;
 	struct hfi_mem qdss;
+	struct hfi_mem io_mem;
 	void __iomem *icp_base;
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
index d969e48..f67a704 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_HFI_REG_H_
@@ -35,8 +35,11 @@
 #define HFI_REG_UNCACHED_HEAP_PTR               0x5C
 #define HFI_REG_UNCACHED_HEAP_SIZE              0x60
 #define HFI_REG_QDSS_IOVA                       0x6C
-#define HFI_REG_QDSS_IOVA_SIZE                  0x70
 #define HFI_REG_SFR_PTR                         0x68
+#define HFI_REG_QDSS_IOVA_SIZE                  0x70
+#define HFI_REG_IO_REGION_IOVA                  0x74
+#define HFI_REG_IO_REGION_SIZE                  0x78
+
 /* end of ICP CSR registers */
 
 /* flags for ICP CSR registers */
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index 055d911..b0f625c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -665,6 +665,10 @@
 		icp_base + HFI_REG_QDSS_IOVA);
 	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
 		icp_base + HFI_REG_QDSS_IOVA_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.iova,
+		icp_base + HFI_REG_IO_REGION_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.len,
+		icp_base + HFI_REG_IO_REGION_SIZE);
 
 	return rc;
 }
@@ -853,6 +857,10 @@
 		icp_base + HFI_REG_QDSS_IOVA);
 	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
 		icp_base + HFI_REG_QDSS_IOVA_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.iova,
+		icp_base + HFI_REG_IO_REGION_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.len,
+		icp_base + HFI_REG_IO_REGION_SIZE);
 
 	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 98d10c5..083bb98 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -368,7 +368,7 @@
 	uint32_t ubwc_ipe_cfg[ICP_UBWC_MAX] = {0};
 	uint32_t ubwc_bps_cfg[ICP_UBWC_MAX] = {0};
 	uint32_t index = 0;
-	int rc = 0;
+	int rc = 0, ddr_type = 0;
 
 	if (!device_priv) {
 		CAM_ERR(CAM_ICP, "Invalid arguments");
@@ -474,7 +474,9 @@
 
 		if (a5_soc->ubwc_config_ext) {
 			/* Invoke kernel API to determine DDR type */
-			if (of_fdt_get_ddrtype() == DDR_TYPE_LPDDR5)
+			ddr_type = of_fdt_get_ddrtype();
+			if ((ddr_type == DDR_TYPE_LPDDR5) ||
+				(ddr_type == DDR_TYPE_LPDDR5X))
 				index = 1;
 
 			ubwc_cfg_ext = &a5_soc->uconfig.ubwc_cfg_ext;
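
The widened check selects the second UBWC configuration bank for both LPDDR5 and LPDDR5X parts. Expressed as a small helper (illustrative; the DDR_TYPE_* constants come from the of_fdt DDR-type API the comment mentions):

    static unsigned int ubwc_cfg_index(int ddr_type)
    {
    	switch (ddr_type) {
    	case DDR_TYPE_LPDDR5:
    	case DDR_TYPE_LPDDR5X:
    		return 1;	/* second bank, tuned for LPDDR5(X) */
    	default:
    		return 0;
    	}
    }
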
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 2b05804..2ccb9ce 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -2177,6 +2177,25 @@
 	return rc;
 }
 
+static int cam_icp_get_io_mem_info(void)
+{
+	int rc;
+	size_t len;
+	dma_addr_t iova;
+
+	rc = cam_smmu_get_io_region_info(icp_hw_mgr.iommu_hdl,
+		&iova, &len);
+	if (rc)
+		return rc;
+
+	icp_hw_mgr.hfi_mem.io_mem.iova_len = len;
+	icp_hw_mgr.hfi_mem.io_mem.iova_start = iova;
+
+	CAM_DBG(CAM_ICP, "iova: %llx, len: %zu", iova, len);
+
+	return rc;
+}
+
 static int cam_icp_allocate_hfi_mem(void)
 {
 	int rc;
@@ -2237,7 +2256,15 @@
 		goto sec_heap_alloc_failed;
 	}
 
+	rc = cam_icp_get_io_mem_info();
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to get I/O region info");
+		goto get_io_mem_failed;
+	}
+
 	return rc;
+get_io_mem_failed:
+	cam_mem_mgr_free_memory_region(&icp_hw_mgr.hfi_mem.sec_heap);
 sec_heap_alloc_failed:
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
 sfr_buf_alloc_failed:
@@ -2456,6 +2483,14 @@
 
 	hfi_mem.qdss.iova = icp_hw_mgr.hfi_mem.qdss_buf.iova;
 	hfi_mem.qdss.len = icp_hw_mgr.hfi_mem.qdss_buf.len;
+
+	hfi_mem.io_mem.iova = icp_hw_mgr.hfi_mem.io_mem.iova_start;
+	hfi_mem.io_mem.len = icp_hw_mgr.hfi_mem.io_mem.iova_len;
+
+	CAM_DBG(CAM_ICP, "IO region IOVA = %X length = %lld",
+			hfi_mem.io_mem.iova,
+			hfi_mem.io_mem.len);
+
 	return cam_hfi_resume(&hfi_mem,
 		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
 		hw_mgr->a5_jtag_debug);
@@ -2836,6 +2871,9 @@
 	hfi_mem.qdss.iova = icp_hw_mgr.hfi_mem.qdss_buf.iova;
 	hfi_mem.qdss.len = icp_hw_mgr.hfi_mem.qdss_buf.len;
 
+	hfi_mem.io_mem.iova = icp_hw_mgr.hfi_mem.io_mem.iova_start;
+	hfi_mem.io_mem.len = icp_hw_mgr.hfi_mem.io_mem.iova_len;
+
 	return cam_hfi_init(0, &hfi_mem,
 		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
 		hw_mgr->a5_jtag_debug);
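
The IO-region plumbing in this file is query-and-forward: cam_icp_get_io_mem_info() asks the SMMU once, during HFI memory setup, for the preconfigured IO region, and the same iova/len pair is later copied into hfi_mem.io_mem for both cam_hfi_init() and cam_hfi_resume(), which program HFI_REG_IO_REGION_IOVA/SIZE. A condensed sketch (fill_io_region is a hypothetical helper collapsing those steps):

    static int fill_io_region(struct hfi_mem_info *hfi_mem, int iommu_hdl)
    {
    	size_t len;
    	dma_addr_t iova;
    	int rc;

    	rc = cam_smmu_get_io_region_info(iommu_hdl, &iova, &len);
    	if (rc)
    		return rc;

    	hfi_mem->io_mem.iova = iova;	/* -> HFI_REG_IO_REGION_IOVA */
    	hfi_mem->io_mem.len  = len;	/* -> HFI_REG_IO_REGION_SIZE */
    	return 0;
    }

Nothing is allocated for the region itself, so the only unwind needed on failure is releasing the secondary heap acquired just before, as the new get_io_mem_failed label does.
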
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index d20572d..9d15e72 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -74,6 +74,8 @@
  * @fw_buf: Memory info of firmware
  * @qdss_buf: Memory info of qdss
  * @sfr_buf: Memory info for sfr buffer
+ * @shmem: Memory info for shared region
+ * @io_mem: Memory info for io region
  */
 struct icp_hfi_mem_info {
 	struct cam_mem_mgr_memory_desc qtbl;
@@ -85,6 +87,7 @@
 	struct cam_mem_mgr_memory_desc qdss_buf;
 	struct cam_mem_mgr_memory_desc sfr_buf;
 	struct cam_smmu_region_info shmem;
+	struct cam_smmu_region_info io_mem;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 7d61fd0..9914613 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -3161,7 +3161,7 @@
 	ctx_isp->reported_req_id = 0;
 	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
 		CAM_ISP_CTX_ACTIVATED_APPLIED :
-		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
+		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_APPLIED :
 		CAM_ISP_CTX_ACTIVATED_SOF;
 
 	/*
@@ -3183,13 +3183,8 @@
 	CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id);
 
 	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->wait_req_list);
 
-	if (req_isp->num_fence_map_out) {
-		list_add_tail(&req->list, &ctx->active_req_list);
-		ctx_isp->active_req_cnt++;
-	} else {
-		list_add_tail(&req->list, &ctx->wait_req_list);
-	}
 end:
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 228931c..3b3eb8e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -36,7 +36,7 @@
 	(CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON + 1)
 
 #define CAM_ISP_GENERIC_BLOB_TYPE_MAX               \
-	(CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2 + 1)
+	(CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG + 1)
 
 static uint32_t blob_type_hw_cmd_map[CAM_ISP_GENERIC_BLOB_TYPE_MAX] = {
 	CAM_ISP_HW_CMD_GET_HFR_UPDATE,
@@ -50,6 +50,11 @@
 
 static struct cam_ife_hw_mgr g_ife_hw_mgr;
 
+static int cam_ife_hw_mgr_event_handler(
+	void                                *priv,
+	uint32_t                             evt_id,
+	void                                *evt_info);
+
 static int cam_ife_notify_safe_lut_scm(bool safe_trigger)
 {
 	uint32_t camera_hw_version, rc = 0;
@@ -693,7 +698,7 @@
 		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_BUS_RD;
 		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
 		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
-		vfe_acquire.vfe_out.ctx = ife_ctx;
+		vfe_acquire.priv = ife_ctx;
 		vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
 		vfe_acquire.vfe_out.is_dual = ife_src_res->is_dual_vfe;
 		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
@@ -798,11 +803,12 @@
 			continue;
 
 		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
-		vfe_acquire.vfe_out.ctx = ife_ctx;
+		vfe_acquire.priv = ife_ctx;
 		vfe_acquire.vfe_out.out_port_info = out_port;
 		vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT;
 		vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
 		vfe_acquire.vfe_out.is_dual = 0;
+		vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler;
 		hw_intf = ife_src_res->hw_res[0]->hw_intf;
 		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
 			&vfe_acquire,
@@ -839,7 +845,8 @@
 static int cam_ife_hw_mgr_acquire_res_ife_out_pixel(
 	struct cam_ife_hw_mgr_ctx       *ife_ctx,
 	struct cam_ife_hw_mgr_res       *ife_src_res,
-	struct cam_isp_in_port_info     *in_port)
+	struct cam_isp_in_port_info     *in_port,
+	bool                             acquire_lcr)
 {
 	int rc = -1;
 	uint32_t  i, j, k;
@@ -860,8 +867,13 @@
 		if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
 			continue;
 
-		CAM_DBG(CAM_ISP, "res_type 0x%x",
-			 out_port->res_type);
+		if ((acquire_lcr &&
+			out_port->res_type != CAM_ISP_IFE_OUT_RES_LCR) ||
+			(!acquire_lcr &&
+			out_port->res_type == CAM_ISP_IFE_OUT_RES_LCR))
+			continue;
+
+		CAM_DBG(CAM_ISP, "res_type 0x%x", out_port->res_type);
 
 		ife_out_res = &ife_ctx->res_list_ife_out[k];
 		ife_out_res->is_dual_vfe = in_port->usage_type;
@@ -869,10 +881,11 @@
 		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
 		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
 		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
-		vfe_acquire.vfe_out.ctx = ife_ctx;
+		vfe_acquire.priv = ife_ctx;
 		vfe_acquire.vfe_out.out_port_info =  out_port;
 		vfe_acquire.vfe_out.is_dual       = ife_src_res->is_dual_vfe;
 		vfe_acquire.vfe_out.unique_id     = ife_ctx->ctx_index;
+		vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler;
 
 		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
 			if (!ife_src_res->hw_res[j])
@@ -948,9 +961,12 @@
 		case CAM_ISP_HW_VFE_IN_CAMIF:
 		case CAM_ISP_HW_VFE_IN_PDLIB:
 		case CAM_ISP_HW_VFE_IN_RD:
+			rc = cam_ife_hw_mgr_acquire_res_ife_out_pixel(ife_ctx,
+				ife_src_res, in_port, false);
+			break;
 		case CAM_ISP_HW_VFE_IN_LCR:
 			rc = cam_ife_hw_mgr_acquire_res_ife_out_pixel(ife_ctx,
-				ife_src_res, in_port);
+				ife_src_res, in_port, true);
 			break;
 		case CAM_ISP_HW_VFE_IN_RDI0:
 		case CAM_ISP_HW_VFE_IN_RDI1:
@@ -1096,13 +1112,14 @@
 
 err:
 	/* release resource at the entry function */
-	CAM_DBG(CAM_ISP, "Exit rc(0x%x)", rc);
+	CAM_DBG(CAM_ISP, "Exit rc %d", rc);
 	return rc;
 }
 
 static int cam_ife_hw_mgr_acquire_res_ife_src(
 	struct cam_ife_hw_mgr_ctx     *ife_ctx,
-	struct cam_isp_in_port_info   *in_port)
+	struct cam_isp_in_port_info   *in_port,
+	bool                           acquire_lcr)
 {
 	int rc                = -1;
 	int i;
@@ -1115,7 +1132,10 @@
 	ife_hw_mgr = ife_ctx->hw_mgr;
 
 	list_for_each_entry(csid_res, &ife_ctx->res_list_ife_csid, list) {
-		if (csid_res->num_children)
+		if (csid_res->num_children && !acquire_lcr)
+			continue;
+
+		if (acquire_lcr && csid_res->res_id != CAM_IFE_PIX_PATH_RES_IPP)
 			continue;
 
 		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
@@ -1131,10 +1151,17 @@
 		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
 		vfe_acquire.vfe_in.cdm_ops = ife_ctx->cdm_ops;
 		vfe_acquire.vfe_in.in_port = in_port;
+		vfe_acquire.priv = ife_ctx;
+		vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler;
 
 		switch (csid_res->res_id) {
 		case CAM_IFE_PIX_PATH_RES_IPP:
-			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_CAMIF;
+			if (!acquire_lcr)
+				vfe_acquire.vfe_in.res_id =
+					CAM_ISP_HW_VFE_IN_CAMIF;
+			else
+				vfe_acquire.vfe_in.res_id =
+					CAM_ISP_HW_VFE_IN_LCR;
 			if (csid_res->is_dual_vfe)
 				vfe_acquire.vfe_in.sync_mode =
 				CAM_ISP_HW_SYNC_MASTER;
@@ -1206,10 +1233,6 @@
 
 		}
 
-		/* It should be one to one mapping between
-		 * csid resource and ife source resource
-		 */
-		csid_res->child[0] = ife_src_res;
 		ife_src_res->parent = csid_res;
 		csid_res->child[csid_res->num_children++] = ife_src_res;
 		CAM_DBG(CAM_ISP,
@@ -1366,6 +1389,7 @@
 	/* CID(DT_ID) value of acquire device, require for path */
 	cid_res_temp->res_id = csid_acquire.node_res->res_id;
 	cid_res_temp->is_dual_vfe = in_port->usage_type;
+	ife_ctx->is_dual = (bool)in_port->usage_type;
 
 	if (in_port->num_out_res)
 		cid_res_temp->is_secure = out_port->secure_mode;
@@ -1716,12 +1740,14 @@
 	int                         *ipp_count,
 	int                         *rdi_count,
 	int                         *ppp_count,
-	int                         *ife_rd_count)
+	int                         *ife_rd_count,
+	int                         *lcr_count)
 {
 	int ipp_num        = 0;
 	int rdi_num        = 0;
 	int ppp_num        = 0;
 	int ife_rd_num     = 0;
+	int lcr_num        = 0;
 	uint32_t i;
 	struct cam_isp_out_port_info      *out_port;
 	struct cam_ife_hw_mgr             *ife_hw_mgr;
@@ -1737,6 +1763,8 @@
 				rdi_num++;
 			else if (out_port->res_type == CAM_ISP_IFE_OUT_RES_2PD)
 				ppp_num++;
+			else if (out_port->res_type == CAM_ISP_IFE_OUT_RES_LCR)
+				lcr_num++;
 			else {
 				CAM_DBG(CAM_ISP, "out_res_type %d",
 				out_port->res_type);
@@ -1749,9 +1777,10 @@
 	*rdi_count = rdi_num;
 	*ppp_count = ppp_num;
 	*ife_rd_count = ife_rd_num;
+	*lcr_count = lcr_num;
 
-	CAM_DBG(CAM_ISP, "rdi: %d ipp: %d ppp: %d ife_rd: %d",
-		rdi_num, ipp_num, ppp_num, ife_rd_num);
+	CAM_DBG(CAM_ISP, "rdi: %d ipp: %d ppp: %d ife_rd: %d lcr: %d",
+		rdi_num, ipp_num, ppp_num, ife_rd_num, lcr_num);
 
 	return 0;
 }
@@ -1767,6 +1796,7 @@
 	int rdi_count                             = 0;
 	int ppp_count                             = 0;
 	int ife_rd_count                          = 0;
+	int lcr_count                             = 0;
 
 	is_dual_vfe = in_port->usage_type;
 
@@ -1777,21 +1807,23 @@
 		goto err;
 	}
 
-	cam_ife_hw_mgr_preprocess_port(ife_ctx, in_port,
-		&ipp_count, &rdi_count, &ppp_count, &ife_rd_count);
+	cam_ife_hw_mgr_preprocess_port(ife_ctx, in_port, &ipp_count,
+		&rdi_count, &ppp_count, &ife_rd_count, &lcr_count);
 
-	if (!ipp_count && !rdi_count && !ppp_count && !ife_rd_count) {
-		CAM_ERR(CAM_ISP, "No PIX or RDI or PPP or IFE RD resource");
+	if (!ipp_count && !rdi_count && !ppp_count && !ife_rd_count
+		&& !lcr_count) {
+		CAM_ERR(CAM_ISP,
+			"No PIX or RDI or PPP or IFE RD or LCR resource");
 		return -EINVAL;
 	}
 
-	if (ipp_count) {
+	if (ipp_count || lcr_count) {
 		/* get ife csid IPP resource */
 		rc = cam_ife_hw_mgr_acquire_res_ife_csid_pxl(ife_ctx,
 			in_port, true);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
-				"Acquire IFE CSID IPP resource Failed");
+				"Acquire IFE CSID IPP/LCR resource Failed");
 			goto err;
 		}
 	}
@@ -1822,13 +1854,29 @@
 	if (ife_rd_count) {
 		rc = cam_ife_hw_mgr_acquire_res_ife_rd_src(ife_ctx, in_port);
 		rc = cam_ife_hw_mgr_acquire_res_bus_rd(ife_ctx, in_port);
-	} else {
-		rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Acquire IFE RD SRC resource Failed");
+			goto err;
+		}
+	} else if (ipp_count || ppp_count || rdi_count) {
+		rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx,
+			in_port, false);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Acquire IFE IPP/PPP SRC resource Failed");
+			goto err;
+		}
 	}
 
-	if (rc) {
-		CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed");
-		goto err;
+	if (lcr_count) {
+		rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port, true);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Acquire IFE LCR SRC resource Failed");
+			goto err;
+		}
 	}
 
 	CAM_DBG(CAM_ISP, "Acquiring IFE OUT resource...");
@@ -1838,7 +1886,7 @@
 		goto err;
 	}
 
-	*num_pix_port += ipp_count + ppp_count + ife_rd_count;
+	*num_pix_port += ipp_count + ppp_count + ife_rd_count + lcr_count;
 	*num_rdi_port += rdi_count;
 
 	return 0;
@@ -2650,17 +2698,19 @@
 		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
 	}
 
-	cam_tasklet_stop(ctx->common.tasklet_info);
-
 	cam_ife_mgr_pause_hw(ctx);
 
-	if (stop_isp->stop_only)
+	if (stop_isp->stop_only) {
+		cam_tasklet_stop(ctx->common.tasklet_info);
 		goto end;
+	}
 
 	if (cam_cdm_stream_off(ctx->cdm_handle))
 		CAM_ERR(CAM_ISP, "CDM stream off failed %d", ctx->cdm_handle);
 
 	cam_ife_hw_mgr_deinit_hw(ctx);
+	cam_tasklet_stop(ctx->common.tasklet_info);
+
 	CAM_DBG(CAM_ISP,
 		"Stop success for ctx id:%d rc :%d", ctx->ctx_index, rc);
 
@@ -2680,7 +2730,7 @@
 }
 
 static int cam_ife_mgr_reset_vfe_hw(struct cam_ife_hw_mgr *hw_mgr,
-			uint32_t hw_idx)
+	uint32_t hw_idx)
 {
 	uint32_t i = 0;
 	struct cam_hw_intf             *vfe_hw_intf;
@@ -3550,6 +3600,54 @@
 	return rc;
 }
 
+static int cam_isp_blob_core_cfg_update(
+	uint32_t                               blob_type,
+	struct cam_isp_generic_blob_info      *blob_info,
+	struct cam_isp_core_config            *core_config,
+	struct cam_hw_prepare_update_args     *prepare)
+{
+	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
+	struct cam_ife_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	uint64_t                               clk_rate = 0;
+	int                                    rc = -EINVAL, i;
+	struct cam_vfe_core_config_args        vfe_core_config;
+
+	ctx = prepare->ctxt_to_hw_map;
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			clk_rate = 0;
+			if (!hw_mgr_res->hw_res[i] ||
+				hw_mgr_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF)
+				continue;
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				vfe_core_config.node_res =
+					hw_mgr_res->hw_res[i];
+
+				memcpy(&vfe_core_config.core_config,
+					core_config,
+					sizeof(struct cam_isp_core_config));
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_CORE_CONFIG,
+					&vfe_core_config,
+					sizeof(
+					struct cam_vfe_core_config_args));
+				if (rc)
+					CAM_ERR(CAM_ISP, "Core cfg parse fail");
+			} else {
+				CAM_WARN(CAM_ISP, "NULL hw_intf!");
+			}
+		}
+	}
+
+	return rc;
+}
+
 static int cam_isp_blob_clock_update(
 	uint32_t                               blob_type,
 	struct cam_isp_generic_blob_info      *blob_info,
@@ -3625,7 +3723,8 @@
 					clk_rate = max(clock_config->rdi_hz[j],
 						clk_rate);
 			else
-				if (hw_mgr_res->hw_res[i]) {
+				if (hw_mgr_res->res_id != CAM_ISP_HW_VFE_IN_LCR
+					&& hw_mgr_res->hw_res[i]) {
 					CAM_ERR(CAM_ISP, "Invalid res_id %u",
 						hw_mgr_res->res_id);
 					rc = -EINVAL;
@@ -3666,8 +3765,8 @@
 	struct cam_hw_prepare_update_args *prepare = NULL;
 
 	if (!blob_data || (blob_size == 0) || !blob_info) {
-		CAM_ERR(CAM_ISP, "Invalid info blob %pK %d prepare %pK",
-			blob_data, blob_size, prepare);
+		CAM_ERR(CAM_ISP, "Invalid args data %pK size %d info %pK",
+			blob_data, blob_size, blob_info);
 		return -EINVAL;
 	}
 
@@ -3687,8 +3786,29 @@
 	CAM_DBG(CAM_ISP, "FS2: BLOB Type: %d", blob_type);
 	switch (blob_type) {
 	case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: {
-		struct cam_isp_resource_hfr_config    *hfr_config =
-			(struct cam_isp_resource_hfr_config *)blob_data;
+		struct cam_isp_resource_hfr_config    *hfr_config;
+
+		if (blob_size < sizeof(struct cam_isp_resource_hfr_config)) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+			return -EINVAL;
+		}
+
+		hfr_config = (struct cam_isp_resource_hfr_config *)blob_data;
+
+		if (hfr_config->num_ports > CAM_ISP_IFE_OUT_RES_MAX) {
+			CAM_ERR(CAM_ISP, "Invalid num_ports %u in hfr config",
+				hfr_config->num_ports);
+			return -EINVAL;
+		}
+
+		if (blob_size < (sizeof(uint32_t) * 2 + hfr_config->num_ports *
+			sizeof(struct cam_isp_port_hfr_config))) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
+				blob_size, sizeof(uint32_t) * 2 +
+				sizeof(struct cam_isp_port_hfr_config) *
+				hfr_config->num_ports);
+			return -EINVAL;
+		}
 
 		rc = cam_isp_blob_hfr_update(blob_type, blob_info,
 			hfr_config, prepare);
@@ -3697,8 +3817,29 @@
 	}
 		break;
 	case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: {
-		struct cam_isp_clock_config    *clock_config =
-			(struct cam_isp_clock_config *)blob_data;
+		struct cam_isp_clock_config    *clock_config;
+
+		if (blob_size < sizeof(struct cam_isp_clock_config)) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+			return -EINVAL;
+		}
+
+		clock_config = (struct cam_isp_clock_config *)blob_data;
+
+		if (clock_config->num_rdi > CAM_IFE_RDI_NUM_MAX) {
+			CAM_ERR(CAM_ISP, "Invalid num_rdi %u in clock config",
+				clock_config->num_rdi);
+			return -EINVAL;
+		}
+
+		if (blob_size < (sizeof(uint32_t) * 2 + sizeof(uint64_t) *
+			(clock_config->num_rdi + 2))) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
+				blob_size,
+				sizeof(uint32_t) * 2 + sizeof(uint64_t) *
+				(clock_config->num_rdi + 2));
+			return -EINVAL;
+		}
 
 		rc = cam_isp_blob_clock_update(blob_type, blob_info,
 			clock_config, prepare);
@@ -3707,10 +3848,31 @@
 	}
 		break;
 	case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: {
-		struct cam_isp_bw_config    *bw_config =
-			(struct cam_isp_bw_config *)blob_data;
+		struct cam_isp_bw_config    *bw_config;
 		struct cam_isp_prepare_hw_update_data   *prepare_hw_data;
 
+		if (blob_size < sizeof(struct cam_isp_bw_config)) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+			return -EINVAL;
+		}
+
+		bw_config = (struct cam_isp_bw_config *)blob_data;
+
+		if (bw_config->num_rdi > CAM_IFE_RDI_NUM_MAX) {
+			CAM_ERR(CAM_ISP, "Invalid num_rdi %u in bw config",
+				bw_config->num_rdi);
+			return -EINVAL;
+		}
+
+		if (blob_size < (sizeof(uint32_t) * 2 + (bw_config->num_rdi + 2)
+			* sizeof(struct cam_isp_bw_vote))) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
+				blob_size,
+				sizeof(uint32_t) * 2 + (bw_config->num_rdi + 2)
+				* sizeof(struct cam_isp_bw_vote));
+			return -EINVAL;
+		}
+
 		if (!prepare || !prepare->priv ||
 			(bw_config->usage_type >= CAM_IFE_HW_NUM_MAX)) {
 			CAM_ERR(CAM_ISP, "Invalid inputs");
@@ -3728,8 +3890,29 @@
 	}
 		break;
 	case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG: {
-		struct cam_ubwc_config *ubwc_config =
-			(struct cam_ubwc_config *)blob_data;
+		struct cam_ubwc_config *ubwc_config;
+
+		if (blob_size < sizeof(struct cam_ubwc_config)) {
+			CAM_ERR(CAM_ISP, "Invalid blob_size %u", blob_size);
+			return -EINVAL;
+		}
+
+		ubwc_config = (struct cam_ubwc_config *)blob_data;
+
+		if (ubwc_config->num_ports > CAM_VFE_MAX_UBWC_PORTS) {
+			CAM_ERR(CAM_ISP, "Invalid num_ports %u in ubwc config",
+				ubwc_config->num_ports);
+			return -EINVAL;
+		}
+
+		if (blob_size < (sizeof(uint32_t) * 2 + ubwc_config->num_ports *
+			sizeof(struct cam_ubwc_plane_cfg_v1) * 2)) {
+			CAM_ERR(CAM_ISP, "Invalid blob_size %u expected %u",
+				blob_size,
+				sizeof(uint32_t) * 2 + ubwc_config->num_ports *
+				sizeof(struct cam_ubwc_plane_cfg_v1) * 2);
+			return -EINVAL;
+		}
 
 		rc = cam_isp_blob_ubwc_update(blob_type, blob_info,
 			ubwc_config, prepare);
@@ -3739,8 +3922,29 @@
 		break;
 
 	case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2: {
-		struct cam_ubwc_config_v2 *ubwc_config =
-			(struct cam_ubwc_config_v2 *)blob_data;
+		struct cam_ubwc_config_v2 *ubwc_config;
+
+		if (blob_size < sizeof(struct cam_ubwc_config_v2)) {
+			CAM_ERR(CAM_ISP, "Invalid blob_size %u", blob_size);
+			return -EINVAL;
+		}
+
+		ubwc_config = (struct cam_ubwc_config_v2 *)blob_data;
+
+		if (ubwc_config->num_ports > CAM_VFE_MAX_UBWC_PORTS) {
+			CAM_ERR(CAM_ISP, "Invalid num_ports %u in ubwc config",
+				ubwc_config->num_ports);
+			return -EINVAL;
+		}
+
+		if (blob_size < (sizeof(uint32_t) * 2 + ubwc_config->num_ports *
+			sizeof(struct cam_ubwc_plane_cfg_v2) * 2)) {
+			CAM_ERR(CAM_ISP, "Invalid blob_size %u expected %u",
+				blob_size,
+				sizeof(uint32_t) * 2 + ubwc_config->num_ports *
+				sizeof(struct cam_ubwc_plane_cfg_v2) * 2);
+			return -EINVAL;
+		}
 
 		rc = cam_isp_blob_ubwc_update_v2(blob_type, blob_info,
 			ubwc_config, prepare);
@@ -3749,8 +3953,16 @@
 	}
 		break;
 	case CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG: {
-		struct cam_isp_csid_clock_config    *clock_config =
-			(struct cam_isp_csid_clock_config *)blob_data;
+		struct cam_isp_csid_clock_config    *clock_config;
+
+		if (blob_size < sizeof(struct cam_isp_csid_clock_config)) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
+				blob_size,
+				sizeof(struct cam_isp_csid_clock_config));
+			return -EINVAL;
+		}
+
+		clock_config = (struct cam_isp_csid_clock_config *)blob_data;
 
 		rc = cam_isp_blob_csid_clock_update(blob_type, blob_info,
 			clock_config, prepare);
@@ -3759,14 +3971,32 @@
 	}
 		break;
 	case CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG: {
-		struct cam_fe_config *fe_config =
-			(struct cam_fe_config *)blob_data;
+		struct cam_fe_config *fe_config;
+
+		if (blob_size < sizeof(struct cam_fe_config)) {
+			CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
+				blob_size, sizeof(struct cam_fe_config));
+			return -EINVAL;
+		}
+
+		fe_config = (struct cam_fe_config *)blob_data;
+
 		rc = cam_isp_blob_fe_update(blob_type, blob_info,
 			fe_config, prepare);
 		if (rc)
 			CAM_ERR(CAM_ISP, "FS Update Failed rc: %d", rc);
 	}
 		break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG: {
+		struct cam_isp_core_config *core_config =
+			(struct cam_isp_core_config *)blob_data;
+
+		rc = cam_isp_blob_core_cfg_update(blob_type, blob_info,
+			core_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Core cfg update fail: %d", rc);
+	}
+		break;
 
 	default:
 		CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
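
Every variable-length blob now goes through the same three-step bounds check before the cast: the fixed header must fit (the real code uses sizeof(struct ...) for this step), the element count must be bounded, and only then is the full header-plus-payload size compared against blob_size. Checking the count first also keeps the num * elem_size multiplication from overflowing. A generic sketch of the pattern (validate_blob is illustrative; the assumed layout of two u32 header words followed by num elements matches the HFR/clock/BW/UBWC cases above):

    static int validate_blob(uint32_t blob_size, uint32_t num,
    	uint32_t max_num, size_t elem_size)
    {
    	if (blob_size < sizeof(uint32_t) * 2)	/* header fits? */
    		return -EINVAL;

    	if (num > max_num)			/* count bounded? */
    		return -EINVAL;

    	/* bounded num keeps this multiplication from overflowing */
    	if (blob_size < sizeof(uint32_t) * 2 + num * elem_size)
    		return -EINVAL;

    	return 0;
    }

Of the cases above, only the new IFE_CORE_CONFIG blob is still cast without a size check; a matching blob_size < sizeof(struct cam_isp_core_config) guard would fit the same pattern.
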
@@ -4116,12 +4346,12 @@
 }
 
 static int cam_ife_mgr_cmd_get_sof_timestamp(
-	struct cam_ife_hw_mgr_ctx      *ife_ctx,
-	uint64_t                       *time_stamp,
-	uint64_t                       *boot_time_stamp)
+	struct cam_ife_hw_mgr_ctx            *ife_ctx,
+	uint64_t                             *time_stamp,
+	uint64_t                             *boot_time_stamp)
 {
-	int rc = -EINVAL;
-	uint32_t i;
+	int                                   rc = -EINVAL;
+	uint32_t                              i;
 	struct cam_ife_hw_mgr_res            *hw_mgr_res;
 	struct cam_hw_intf                   *hw_intf;
 	struct cam_csid_get_time_stamp_args   csid_get_time;
@@ -4166,7 +4396,7 @@
 	}
 
 	if (rc)
-		CAM_ERR(CAM_ISP, "Getting sof time stamp failed");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Getting sof time stamp failed");
 
 	return rc;
 }
@@ -4225,7 +4455,7 @@
 
 		CAM_DBG(CAM_ISP, "RESET: Calling VFE reset");
 
-		for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
+		for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
 			if (recovery_data->affected_core[i])
 				cam_ife_mgr_reset_vfe_hw(ife_hw_mgr, i);
 		}
@@ -4266,10 +4496,10 @@
 }
 
 static int cam_ife_hw_mgr_do_error_recovery(
-		struct cam_hw_event_recovery_data  *ife_mgr_recovery_data)
+	struct cam_hw_event_recovery_data  *ife_mgr_recovery_data)
 {
-	int32_t rc = 0;
-	struct crm_workq_task        *task = NULL;
+	int32_t                             rc = 0;
+	struct crm_workq_task              *task = NULL;
 	struct cam_hw_event_recovery_data  *recovery_data = NULL;
 
 	recovery_data = kzalloc(sizeof(struct cam_hw_event_recovery_data),
@@ -4284,7 +4514,7 @@
 
 	task = cam_req_mgr_workq_get_task(g_ife_hw_mgr.workq);
 	if (!task) {
-		CAM_ERR(CAM_ISP, "No empty task frame");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No empty task frame");
 		kfree(recovery_data);
 		return -ENOMEM;
 	}
@@ -4303,44 +4533,43 @@
  * is associated with this context. if YES
  *  a. It fills the other cores associated with this context.in
  *      affected_core[]
- *  b. Return 1 if ctx is affected, 0 otherwise
+ *  b. Return true if the ctx is affected, false otherwise
  */
-static int cam_ife_hw_mgr_is_ctx_affected(
+static bool cam_ife_hw_mgr_is_ctx_affected(
 	struct cam_ife_hw_mgr_ctx   *ife_hwr_mgr_ctx,
-	uint32_t *affected_core, uint32_t size)
+	uint32_t                    *affected_core,
+	uint32_t                     size)
 {
-	int32_t rc = 0;
-	uint32_t i = 0, j = 0;
-	uint32_t max_idx =  ife_hwr_mgr_ctx->num_base;
-	uint32_t ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
 
-	CAM_DBG(CAM_ISP, "max_idx = %d", max_idx);
+	bool                  rc = false;
+	uint32_t              i = 0, j = 0;
+	uint32_t              max_idx =  ife_hwr_mgr_ctx->num_base;
+	uint32_t              ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
 
-	if ((max_idx >= CAM_IFE_HW_NUM_MAX) ||
-		(size > CAM_IFE_HW_NUM_MAX)) {
-		CAM_ERR(CAM_ISP, "invalid parameter = %d", max_idx);
+	CAM_DBG(CAM_ISP, "Enter:max_idx = %d", max_idx);
+
+	if ((max_idx >= CAM_IFE_HW_NUM_MAX) || (size > CAM_IFE_HW_NUM_MAX)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "invalid parameter = %d", max_idx);
 		return rc;
 	}
 
 	for (i = 0; i < max_idx; i++) {
 		if (affected_core[ife_hwr_mgr_ctx->base[i].idx])
-			rc = 1;
+			rc = true;
 		else {
 			ctx_affected_core_idx[j] = ife_hwr_mgr_ctx->base[i].idx;
-			CAM_DBG(CAM_ISP, "Add affected IFE %d for recovery",
-				ctx_affected_core_idx[j]);
 			j = j + 1;
 		}
 	}
 
-	if (rc == 1) {
+	if (rc) {
 		while (j) {
 			if (affected_core[ctx_affected_core_idx[j-1]] != 1)
 				affected_core[ctx_affected_core_idx[j-1]] = 1;
 			j = j - 1;
 		}
 	}
-
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -4352,7 +4581,6 @@
  *   b. Notify CTX with fatal error
  */
 static int  cam_ife_hw_mgr_find_affected_ctx(
-	struct cam_ife_hw_mgr_ctx             *curr_ife_hwr_mgr_ctx,
 	struct cam_isp_hw_error_event_data    *error_event_data,
 	uint32_t                               curr_core_idx,
 	struct cam_hw_event_recovery_data     *recovery_data)
@@ -4371,7 +4599,7 @@
 
 	recovery_data->no_of_context = 0;
 	affected_core[curr_core_idx] = 1;
-	ife_hwr_mgr = curr_ife_hwr_mgr_ctx->hw_mgr;
+	ife_hwr_mgr = &g_ife_hw_mgr;
 
 	list_for_each_entry(ife_hwr_mgr_ctx,
 		&ife_hwr_mgr->used_ctx_list, list) {
@@ -4412,333 +4640,114 @@
 	return 0;
 }
 
-static int cam_ife_hw_mgr_get_err_type(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_handle_hw_err(
+	void                                *evt_info)
 {
-	struct cam_isp_resource_node         *hw_res_left = NULL;
-	struct cam_isp_resource_node         *hw_res_right = NULL;
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
-	uint32_t  status = 0;
-	uint32_t  core_idx;
-
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-
-	if (!evt_payload) {
-		CAM_ERR(CAM_ISP, "No payload");
-		return IRQ_HANDLED;
-	}
-
-	core_idx = evt_payload->core_index;
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
-
-	list_for_each_entry(isp_ife_camif_res,
-		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
-
-		if ((isp_ife_camif_res->res_type ==
-			CAM_IFE_HW_MGR_RES_UNINIT) ||
-			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
-			continue;
-
-		hw_res_left = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
-		hw_res_right =
-			isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];
-
-		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
-			isp_ife_camif_res->is_dual_vfe);
-
-		/* ERROR check for Left VFE */
-		if (!hw_res_left) {
-			CAM_DBG(CAM_ISP, "VFE(L) Device is NULL");
-			break;
-		}
-
-		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
-			hw_res_left->hw_intf->hw_idx);
-
-		if (core_idx == hw_res_left->hw_intf->hw_idx) {
-			status = hw_res_left->bottom_half_handler(
-				hw_res_left, evt_payload);
-		}
-
-		if (status)
-			break;
-
-		/* ERROR check for Right  VFE */
-		if (!hw_res_right) {
-			CAM_DBG(CAM_ISP, "VFE(R) Device is NULL");
-			continue;
-		}
-		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
-			hw_res_right->hw_intf->hw_idx);
-
-		if (core_idx == hw_res_right->hw_intf->hw_idx) {
-			status = hw_res_right->bottom_half_handler(
-				hw_res_right, evt_payload);
-		}
-
-		if (status)
-			break;
-	}
-	CAM_DBG(CAM_ISP, "Exit (status = %d)!", status);
-	return status;
-}
-
-static int  cam_ife_hw_mgr_handle_camif_error(
-	void                              *handler_priv,
-	void                              *payload)
-{
-	int32_t  error_status;
+	struct cam_isp_hw_event_info        *event_info = evt_info;
 	uint32_t core_idx;
-	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload      *evt_payload;
-	struct cam_isp_hw_error_event_data       error_event_data = {0};
-	struct cam_hw_event_recovery_data        recovery_data = {0};
-	int rc = 0;
+	struct cam_isp_hw_error_event_data   error_event_data = {0};
+	struct cam_hw_event_recovery_data    recovery_data = {0};
+	int                                  rc = -EINVAL;
 
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-	core_idx = evt_payload->core_index;
+	if (event_info->err_type == CAM_VFE_IRQ_STATUS_VIOLATION)
+		error_event_data.error_type = CAM_ISP_HW_ERROR_VIOLATION;
+	else if (event_info->res_type == CAM_ISP_RESOURCE_VFE_IN)
+		error_event_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
+	else if (event_info->res_type == CAM_ISP_RESOURCE_VFE_OUT)
+		error_event_data.error_type = CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
 
-	error_status = cam_ife_hw_mgr_get_err_type(ife_hwr_mgr_ctx,
-		evt_payload);
-	if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending)) {
-		rc = error_status;
-		goto end;
-	}
+	core_idx = event_info->hw_idx;
 
-	switch (error_status) {
-	case CAM_ISP_HW_ERROR_OVERFLOW:
-	case CAM_ISP_HW_ERROR_P2I_ERROR:
-	case CAM_ISP_HW_ERROR_VIOLATION:
-		CAM_ERR(CAM_ISP, "Enter: error_type (%d)", error_status);
-		rc = error_status;
-		if (g_ife_hw_mgr.debug_cfg.enable_recovery)
-			error_event_data.recovery_enabled = true;
+	if (g_ife_hw_mgr.debug_cfg.enable_recovery)
+		error_event_data.recovery_enabled = true;
+	else
+		error_event_data.recovery_enabled = false;
 
-		error_event_data.error_type =
-				CAM_ISP_HW_ERROR_OVERFLOW;
+	rc = cam_ife_hw_mgr_find_affected_ctx(&error_event_data,
+		core_idx, &recovery_data);
 
-		cam_ife_hw_mgr_find_affected_ctx(ife_hwr_mgr_ctx,
-			&error_event_data,
-			core_idx,
-			&recovery_data);
+	if (event_info->res_type == CAM_ISP_RESOURCE_VFE_OUT)
+		return rc;
 
-		if (!g_ife_hw_mgr.debug_cfg.enable_recovery) {
-			CAM_DBG(CAM_ISP, "recovery is not enabled");
-			break;
-		}
-
+	if (g_ife_hw_mgr.debug_cfg.enable_recovery) {
 		CAM_DBG(CAM_ISP, "IFE Mgr recovery is enabled");
+
 		/* Trigger for recovery */
-		recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
+		if (event_info->err_type == CAM_VFE_IRQ_STATUS_VIOLATION)
+			recovery_data.error_type = CAM_ISP_HW_ERROR_VIOLATION;
+		else
+			recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
 		cam_ife_hw_mgr_do_error_recovery(&recovery_data);
-		break;
-	default:
-		CAM_DBG(CAM_ISP, "No error (%d)", error_status);
-		break;
+	} else {
+		CAM_DBG(CAM_ISP, "recovery is not enabled");
+		rc = 0;
 	}
 
-end:
 	return rc;
 }
 
-/*
- * DUAL VFE is valid for PIX processing path
- * This function assumes hw_res[0] is master in case
- * of dual VFE.
- * RDI path does not support DUAl VFE
- */
-static int cam_ife_hw_mgr_handle_reg_update(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_handle_hw_rup(
+	void                                    *ctx,
+	void                                    *evt_info)
 {
-	struct cam_isp_resource_node            *hw_res;
-	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload      *evt_payload;
-	struct cam_ife_hw_mgr_res               *ife_src_res = NULL;
+	struct cam_isp_hw_event_info            *event_info = evt_info;
+	struct cam_ife_hw_mgr_ctx               *ife_hw_mgr_ctx = ctx;
 	cam_hw_event_cb_func                     ife_hwr_irq_rup_cb;
 	struct cam_isp_hw_reg_update_event_data  rup_event_data;
-	uint32_t  core_idx;
-	uint32_t  rup_status = -EINVAL;
 
-	CAM_DBG(CAM_ISP, "Enter");
-
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-
-	if (!handler_priv || !payload) {
-		CAM_ERR(CAM_ISP, "Invalid Parameter");
-		return -EPERM;
-	}
-
-	core_idx = evt_payload->core_index;
 	ife_hwr_irq_rup_cb =
-		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
 
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
-	list_for_each_entry(ife_src_res,
-			&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_VFE_IN_CAMIF:
+		if (ife_hw_mgr_ctx->is_dual)
+			if (event_info->hw_idx != 1)
+				break;
 
-		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			continue;
-
-		CAM_DBG(CAM_ISP, "resource id = %d, curr_core_idx = %d",
-			 ife_src_res->res_id, core_idx);
-		switch (ife_src_res->res_id) {
-		case CAM_ISP_HW_VFE_IN_PDLIB:
+		if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 			break;
-		case CAM_ISP_HW_VFE_IN_CAMIF:
-		case CAM_ISP_HW_VFE_IN_RD:
-			if (ife_src_res->is_dual_vfe)
-				/* It checks for slave core RUP ACK*/
-				hw_res = ife_src_res->hw_res[1];
-			else
-				hw_res = ife_src_res->hw_res[0];
+		ife_hwr_irq_rup_cb(ife_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+		break;
 
-			if (!hw_res) {
-				CAM_ERR(CAM_ISP, "CAMIF device is NULL");
-				break;
-			}
-			CAM_DBG(CAM_ISP,
-				"current_core_id = %d , core_idx res = %d",
-				 core_idx, hw_res->hw_intf->hw_idx);
-
-			if (core_idx == hw_res->hw_intf->hw_idx) {
-				rup_status = hw_res->bottom_half_handler(
-					hw_res, evt_payload);
-			}
-
-			if (ife_src_res->is_dual_vfe) {
-				hw_res = ife_src_res->hw_res[0];
-				if (core_idx == hw_res->hw_intf->hw_idx) {
-					hw_res->bottom_half_handler(
-						hw_res, evt_payload);
-				}
-			}
-
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-
-			if (!rup_status) {
-				ife_hwr_irq_rup_cb(
-					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_REG_UPDATE,
-					&rup_event_data);
-			}
+	case CAM_ISP_HW_VFE_IN_RDI0:
+	case CAM_ISP_HW_VFE_IN_RDI1:
+	case CAM_ISP_HW_VFE_IN_RDI2:
+	case CAM_ISP_HW_VFE_IN_RDI3:
+		if (!ife_hw_mgr_ctx->is_rdi_only_context)
 			break;
-
-		case CAM_ISP_HW_VFE_IN_RDI0:
-		case CAM_ISP_HW_VFE_IN_RDI1:
-		case CAM_ISP_HW_VFE_IN_RDI2:
-		case CAM_ISP_HW_VFE_IN_RDI3:
-			hw_res = ife_src_res->hw_res[0];
-
-			if (!hw_res) {
-				CAM_ERR(CAM_ISP, "RDI Device is NULL");
-				break;
-			}
-
-			if (core_idx == hw_res->hw_intf->hw_idx)
-				rup_status = hw_res->bottom_half_handler(
-					hw_res, evt_payload);
-
-			if (ife_hwr_mgr_ctx->is_rdi_only_context == 0 &&
-				!ife_hwr_mgr_ctx->is_fe_enable)
-				continue;
-
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-			if (!rup_status) {
-				/* Send the Reg update hw event */
-				ife_hwr_irq_rup_cb(
-					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_REG_UPDATE,
-					&rup_event_data);
-			}
+		if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 			break;
-		default:
-			CAM_ERR(CAM_ISP, "Invalid resource id (%d)",
-				ife_src_res->res_id);
-		}
+		ife_hwr_irq_rup_cb(ife_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+		break;
 
+	case CAM_ISP_HW_VFE_IN_PDLIB:
+	case CAM_ISP_HW_VFE_IN_LCR:
+		break;
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
+		break;
 	}
 
-	if (!rup_status)
-		CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
-
-	return 0;
-}
-
-static int cam_ife_hw_mgr_handle_reg_update_in_bus(
-	void                              *handler_priv,
-	void                              *payload)
-{
-	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
-	struct cam_vfe_bus_irq_evt_payload      *evt_payload;
-	cam_hw_event_cb_func                     ife_hwr_irq_rup_cb;
-	struct cam_isp_hw_reg_update_event_data  rup_event_data;
-	uint32_t                                 core_idx;
-	struct cam_ife_hw_mgr_res               *isp_ife_out_res;
-	struct cam_isp_resource_node            *hw_res_left;
-	uint32_t                                 rup_status = -EINVAL;
-	int                                      i = 0;
-
-	CAM_DBG(CAM_ISP, "Enter");
-
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-
-	if (!handler_priv || !payload) {
-		CAM_ERR(CAM_ISP, "Invalid Parameter");
-		return -EPERM;
-	}
-
-	core_idx = evt_payload->core_index;
-	ife_hwr_irq_rup_cb =
-		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
-
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
-	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
-		isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
-		if (isp_ife_out_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			continue;
-
-		hw_res_left = isp_ife_out_res->hw_res[0];
-		if (hw_res_left && (evt_payload->core_index ==
-			hw_res_left->hw_intf->hw_idx)) {
-			rup_status = hw_res_left->bottom_half_handler(
-				hw_res_left, evt_payload);
-
-			if (rup_status == 0)
-				break;
-		}
-	}
-
-	if (!rup_status) {
-		if (!atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-			ife_hwr_irq_rup_cb(
-				ife_hwr_mgr_ctx->common.cb_priv,
-				CAM_ISP_HW_EVENT_REG_UPDATE,
-				&rup_event_data);
-	}
-
-	CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
+	CAM_DBG(CAM_ISP, "RUP done for VFE source %d",
+		event_info->res_id);
 
 	return 0;
 }
 
 static int cam_ife_hw_mgr_check_irq_for_dual_vfe(
-	struct cam_ife_hw_mgr_ctx   *ife_hw_mgr_ctx,
-	uint32_t                     core_idx0,
-	uint32_t                     core_idx1,
-	uint32_t                     hw_event_type)
+	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx,
+	uint32_t                              hw_event_type)
 {
-	int32_t rc = -1;
-	uint32_t *event_cnt = NULL;
+	int32_t                               rc = -1;
+	uint32_t                             *event_cnt = NULL;
+	uint32_t                              core_idx0 = 0;
+	uint32_t                              core_idx1 = 1;
+
+	if (!ife_hw_mgr_ctx->is_dual)
+		return 0;
 
 	switch (hw_event_type) {
 	case CAM_ISP_HW_EVENT_SOF:
@@ -4754,8 +4763,7 @@
 		return 0;
 	}
 
-	if (event_cnt[core_idx0] ==
-			event_cnt[core_idx1]) {
+	if (event_cnt[core_idx0] == event_cnt[core_idx1]) {
 
 		event_cnt[core_idx0] = 0;
 		event_cnt[core_idx1] = 0;
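
For dual VFE, SOF/EPOCH events are forwarded to the context only once both cores have posted them: each IRQ first bumps its own core's counter, and the event fires when the two counters match, at which point both reset. A stripped-down sketch of that pairing (dual_event_complete is an illustrative name; the caller is assumed to have incremented its core's counter first, as the hunks above do):

    static bool dual_event_complete(uint32_t cnt[2])
    {
    	if (cnt[0] == cnt[1]) {		/* both cores have posted */
    		cnt[0] = 0;
    		cnt[1] = 0;
    		return true;
    	}
    	return false;			/* still waiting on one core */
    }
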
@@ -4782,707 +4790,235 @@
 	return rc;
 }
 
-static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_handle_hw_epoch(
+	void                                 *ctx,
+	void                                 *evt_info)
 {
-	int32_t rc = -EINVAL;
-	struct cam_isp_resource_node         *hw_res_left;
-	struct cam_isp_resource_node         *hw_res_right;
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
-	cam_hw_event_cb_func                  ife_hwr_irq_epoch_cb;
+	struct cam_isp_hw_event_info         *event_info = evt_info;
+	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx = ctx;
+	cam_hw_event_cb_func                  ife_hw_irq_epoch_cb;
 	struct cam_isp_hw_epoch_event_data    epoch_done_event_data;
-	uint32_t  core_idx;
-	uint32_t  epoch_status = -EINVAL;
-	uint32_t  core_index0;
-	uint32_t  core_index1;
+	int                                   rc = 0;
 
-	CAM_DBG(CAM_ISP, "Enter");
+	ife_hw_irq_epoch_cb =
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EPOCH];
 
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-	ife_hwr_irq_epoch_cb =
-		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EPOCH];
-	core_idx = evt_payload->core_index;
-
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_EPOCH;
-
-	list_for_each_entry(isp_ife_camif_res,
-		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
-		if ((isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			|| (isp_ife_camif_res->res_id >
-			CAM_ISP_HW_VFE_IN_RD)) {
-			continue;
-		}
-
-		hw_res_left = isp_ife_camif_res->hw_res[0];
-		hw_res_right = isp_ife_camif_res->hw_res[1];
-
-		switch (isp_ife_camif_res->is_dual_vfe) {
-		/* Handling Single VFE Scenario */
-		case 0:
-			/* EPOCH check for Left side VFE */
-			if (!hw_res_left) {
-				CAM_ERR(CAM_ISP, "Left Device is NULL");
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_VFE_IN_CAMIF:
+		ife_hw_mgr_ctx->epoch_cnt[event_info->hw_idx]++;
+		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hw_mgr_ctx,
+			CAM_ISP_HW_EVENT_EPOCH);
+		if (!rc) {
+			if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 				break;
-			}
-
-			if (core_idx == hw_res_left->hw_intf->hw_idx) {
-				epoch_status = hw_res_left->bottom_half_handler(
-					hw_res_left, evt_payload);
-				if (atomic_read(
-					&ife_hwr_mgr_ctx->overflow_pending))
-					break;
-				if (!epoch_status)
-					ife_hwr_irq_epoch_cb(
-						ife_hwr_mgr_ctx->common.cb_priv,
-						CAM_ISP_HW_EVENT_EPOCH,
-						&epoch_done_event_data);
-			}
-
-			break;
-
-		/* Handling Dual VFE Scenario */
-		case 1:
-			/* SOF check for Left side VFE (Master)*/
-
-			if ((!hw_res_left) || (!hw_res_right)) {
-				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
-				break;
-			}
-			if (core_idx == hw_res_left->hw_intf->hw_idx) {
-				epoch_status = hw_res_left->bottom_half_handler(
-					hw_res_left, evt_payload);
-
-				if (!epoch_status)
-					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
-				else
-					break;
-			}
-
-			/* SOF check for Right side VFE */
-			if (core_idx == hw_res_right->hw_intf->hw_idx) {
-				epoch_status =
-					hw_res_right->bottom_half_handler(
-					hw_res_right, evt_payload);
-
-				if (!epoch_status)
-					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
-				else
-					break;
-			}
-
-			core_index0 = hw_res_left->hw_intf->hw_idx;
-			core_index1 = hw_res_right->hw_intf->hw_idx;
-
-			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
-					ife_hwr_mgr_ctx,
-					core_index0,
-					core_index1,
-					evt_payload->evt_id);
-
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-			if (!rc)
-				ife_hwr_irq_epoch_cb(
-					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_EPOCH,
-					&epoch_done_event_data);
-
-			break;
-
-		/* Error */
-		default:
-			CAM_ERR(CAM_ISP, "error with hw_res");
-
+			ife_hw_irq_epoch_cb(ife_hw_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_EPOCH, &epoch_done_event_data);
 		}
-	}
-
-	if (!epoch_status)
-		CAM_DBG(CAM_ISP, "Exit epoch_status = %d", epoch_status);
-
-	return 0;
-}
-
-static int cam_ife_hw_mgr_process_camif_sof(
-	struct cam_ife_hw_mgr_res            *isp_ife_camif_res,
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx,
-	struct cam_vfe_top_irq_evt_payload   *evt_payload)
-{
-	struct cam_isp_resource_node         *hw_res_left = NULL;
-	struct cam_isp_resource_node         *hw_res_right = NULL;
-	int32_t rc = -EINVAL;
-	uint32_t  core_idx;
-	uint32_t  sof_status = 0;
-	uint32_t  core_index0;
-	uint32_t  core_index1;
-
-	CAM_DBG(CAM_ISP, "Enter");
-	core_idx = evt_payload->core_index;
-	hw_res_left = isp_ife_camif_res->hw_res[0];
-	hw_res_right = isp_ife_camif_res->hw_res[1];
-	CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
-		isp_ife_camif_res->is_dual_vfe);
-
-	switch (isp_ife_camif_res->is_dual_vfe) {
-	/* Handling Single VFE Scenario */
-	case 0:
-		/* SOF check for Left side VFE */
-		if (!hw_res_left) {
-			CAM_ERR(CAM_ISP, "VFE Device is NULL");
-			break;
-		}
-		CAM_DBG(CAM_ISP, "curr_core_idx = %d,core idx hw = %d",
-			core_idx, hw_res_left->hw_intf->hw_idx);
-
-		if (core_idx == hw_res_left->hw_intf->hw_idx) {
-			sof_status = hw_res_left->bottom_half_handler(
-				hw_res_left, evt_payload);
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-			if (!sof_status)
-				rc = 0;
-		}
-
 		break;
 
-	/* Handling Dual VFE Scenario */
-	case 1:
-		/* SOF check for Left side VFE */
-
-		if (!hw_res_left) {
-			CAM_ERR(CAM_ISP, "VFE Device is NULL");
-			break;
-		}
-		CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx= %d",
-				 core_idx,
-				hw_res_left->hw_intf->hw_idx);
-
-		if (core_idx == hw_res_left->hw_intf->hw_idx) {
-			sof_status = hw_res_left->bottom_half_handler(
-				hw_res_left, evt_payload);
-			if (!sof_status)
-				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
-			else
-				break;
-		}
-
-		/* SOF check for Right side VFE */
-		if (!hw_res_right) {
-			CAM_ERR(CAM_ISP, "VFE Device is NULL");
-			break;
-		}
-		CAM_DBG(CAM_ISP, "curr_core_idx = %d, ews hw idx= %d",
-				 core_idx,
-				hw_res_right->hw_intf->hw_idx);
-		if (core_idx == hw_res_right->hw_intf->hw_idx) {
-			sof_status = hw_res_right->bottom_half_handler(
-				hw_res_right, evt_payload);
-			if (!sof_status)
-				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
-			else
-				break;
-		}
-
-		core_index0 = hw_res_left->hw_intf->hw_idx;
-		core_index1 = hw_res_right->hw_intf->hw_idx;
-
-		if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-			break;
-
-		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hwr_mgr_ctx,
-			core_index0, core_index1, evt_payload->evt_id);
-
+	case CAM_ISP_HW_VFE_IN_RDI0:
+	case CAM_ISP_HW_VFE_IN_RDI1:
+	case CAM_ISP_HW_VFE_IN_RDI2:
+	case CAM_ISP_HW_VFE_IN_RDI3:
+	case CAM_ISP_HW_VFE_IN_PDLIB:
+	case CAM_ISP_HW_VFE_IN_LCR:
 		break;
 
 	default:
-		CAM_ERR(CAM_ISP, "error with hw_res");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
 		break;
 	}
 
-	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)", sof_status);
+	CAM_DBG(CAM_ISP, "Epoch for VFE source %d", event_info->res_id);
 
-	return rc;
+	return 0;
 }
 
-static int cam_ife_hw_mgr_handle_sof(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_handle_hw_sof(
+	void                                 *ctx,
+	void                                 *evt_info)
 {
-	struct cam_isp_resource_node         *hw_res = NULL;
-	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	struct cam_ife_hw_mgr_res            *ife_src_res = NULL;
+	struct cam_isp_hw_event_info         *event_info = evt_info;
+	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx = ctx;
 	cam_hw_event_cb_func                  ife_hw_irq_sof_cb;
 	struct cam_isp_hw_sof_event_data      sof_done_event_data;
-	uint32_t  sof_status = 0;
-	bool sof_sent = false;
+	int                                   rc = 0;
 
-	CAM_DBG(CAM_ISP, "Enter");
-
-	ife_hw_mgr_ctx = handler_priv;
-	evt_payload = payload;
-	if (!evt_payload) {
-		CAM_ERR(CAM_ISP, "no payload");
-		return IRQ_HANDLED;
-	}
 	ife_hw_irq_sof_cb =
 		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
 
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_VFE_IN_CAMIF:
+	case CAM_ISP_HW_VFE_IN_RD:
+		ife_hw_mgr_ctx->sof_cnt[event_info->hw_idx]++;
+		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hw_mgr_ctx,
+			CAM_ISP_HW_EVENT_SOF);
+		if (!rc) {
+			cam_ife_mgr_cmd_get_sof_timestamp(ife_hw_mgr_ctx,
+				&sof_done_event_data.timestamp,
+				&sof_done_event_data.boot_time);
 
-	list_for_each_entry(ife_src_res,
-		&ife_hw_mgr_ctx->res_list_ife_src, list) {
+			if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
+				break;
 
-		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			continue;
-
-		switch (ife_src_res->res_id) {
-		case CAM_ISP_HW_VFE_IN_RDI0:
-		case CAM_ISP_HW_VFE_IN_RDI1:
-		case CAM_ISP_HW_VFE_IN_RDI2:
-		case CAM_ISP_HW_VFE_IN_RDI3:
-			hw_res = ife_src_res->hw_res[0];
-			sof_status = hw_res->bottom_half_handler(
-				hw_res, evt_payload);
-
-			/* check if it is rdi only context */
-			if (ife_hw_mgr_ctx->is_fe_enable ||
-				ife_hw_mgr_ctx->is_rdi_only_context) {
-				if (!sof_status && !sof_sent) {
-					cam_ife_mgr_cmd_get_sof_timestamp(
-						ife_hw_mgr_ctx,
-						&sof_done_event_data.timestamp,
-						&sof_done_event_data.boot_time);
-
-					ife_hw_irq_sof_cb(
-						ife_hw_mgr_ctx->common.cb_priv,
-						CAM_ISP_HW_EVENT_SOF,
-						&sof_done_event_data);
-					CAM_DBG(CAM_ISP, "RDI sof_status = %d",
-						sof_status);
-
-					sof_sent = true;
-				}
-
-			}
-			break;
-
-		case CAM_ISP_HW_VFE_IN_CAMIF:
-		case CAM_ISP_HW_VFE_IN_RD:
-			sof_status = cam_ife_hw_mgr_process_camif_sof(
-				ife_src_res, ife_hw_mgr_ctx, evt_payload);
-			if (!sof_status && !sof_sent) {
-				cam_ife_mgr_cmd_get_sof_timestamp(
-					ife_hw_mgr_ctx,
-					&sof_done_event_data.timestamp,
-					&sof_done_event_data.boot_time);
-
-				ife_hw_irq_sof_cb(
-					ife_hw_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_SOF,
-					&sof_done_event_data);
-				CAM_DBG(CAM_ISP, "sof_status = %d",
-					sof_status);
-
-				sof_sent = true;
-			}
-			break;
-		case CAM_ISP_HW_VFE_IN_PDLIB:
-			break;
-		default:
-			CAM_ERR(CAM_ISP, "Invalid resource id :%d",
-				ife_src_res->res_id);
-			break;
+			ife_hw_irq_sof_cb(ife_hw_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
 		}
+		break;
+
+	case CAM_ISP_HW_VFE_IN_RDI0:
+	case CAM_ISP_HW_VFE_IN_RDI1:
+	case CAM_ISP_HW_VFE_IN_RDI2:
+	case CAM_ISP_HW_VFE_IN_RDI3:
+		if (!ife_hw_mgr_ctx->is_rdi_only_context)
+			break;
+		cam_ife_mgr_cmd_get_sof_timestamp(ife_hw_mgr_ctx,
+			&sof_done_event_data.timestamp,
+			&sof_done_event_data.boot_time);
+		if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
+			break;
+		ife_hw_irq_sof_cb(ife_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+		break;
+
+	case CAM_ISP_HW_VFE_IN_PDLIB:
+	case CAM_ISP_HW_VFE_IN_LCR:
+		break;
+
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
+		break;
 	}
 
+	CAM_DBG(CAM_ISP, "SOF for VFE source %d", event_info->res_id);
+
 	return 0;
 }
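
For reference, the dual-VFE gating used by the CAMIF SOF case above (and by the EOF case below) can be pictured as the following sketch. It is an illustrative reconstruction only, assuming cam_ife_hw_mgr_check_irq_for_dual_vfe() gates the single context-level callback on both cores having raised the event; the function's actual body is not part of this hunk.

/*
 * Illustrative sketch only; not the real
 * cam_ife_hw_mgr_check_irq_for_dual_vfe() body.
 */
static int demo_dual_vfe_event_ready(struct cam_ife_hw_mgr_ctx *ctx,
	uint32_t master_idx, uint32_t slave_idx, uint32_t *evt_cnt)
{
	if (!ctx->is_dual)
		return 0;            /* single VFE: report right away */

	if (!evt_cnt[master_idx] || !evt_cnt[slave_idx])
		return -EAGAIN;      /* the other core has not fired yet */

	evt_cnt[master_idx]--;       /* consume one event from each core */
	evt_cnt[slave_idx]--;
	return 0;                    /* both fired: report a single event */
}
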
 
-static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_handle_hw_eof(
+	void                                 *ctx,
+	void                                 *evt_info)
 {
-	int32_t rc = -EINVAL;
-	struct cam_isp_resource_node         *hw_res_left = NULL;
-	struct cam_isp_resource_node         *hw_res_right = NULL;
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
-	cam_hw_event_cb_func                  ife_hwr_irq_eof_cb;
+	struct cam_isp_hw_event_info         *event_info = evt_info;
+	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx = ctx;
+	cam_hw_event_cb_func                  ife_hw_irq_eof_cb;
 	struct cam_isp_hw_eof_event_data      eof_done_event_data;
-	uint32_t  core_idx;
-	uint32_t  eof_status = 0;
-	uint32_t  core_index0;
-	uint32_t  core_index1;
+	int                                   rc = 0;
 
-	CAM_DBG(CAM_ISP, "Enter");
+	ife_hw_irq_eof_cb =
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EOF];
 
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-	if (!evt_payload) {
-		pr_err("%s: no payload\n", __func__);
-		return IRQ_HANDLED;
-	}
-	core_idx = evt_payload->core_index;
-	ife_hwr_irq_eof_cb =
-		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EOF];
-
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_EOF;
-
-	list_for_each_entry(isp_ife_camif_res,
-		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
-
-		if (isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			continue;
-
-		hw_res_left = isp_ife_camif_res->hw_res[0];
-		hw_res_right = isp_ife_camif_res->hw_res[1];
-
-		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
-				isp_ife_camif_res->is_dual_vfe);
-		switch (isp_ife_camif_res->is_dual_vfe) {
-		/* Handling Single VFE Scenario */
-		case 0:
-			/* EOF check for Left side VFE */
-			if (!hw_res_left) {
-				pr_err("%s: VFE Device is NULL\n",
-					__func__);
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_VFE_IN_CAMIF:
+		ife_hw_mgr_ctx->eof_cnt[event_info->hw_idx]++;
+		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hw_mgr_ctx,
+			CAM_ISP_HW_EVENT_EOF);
+		if (!rc) {
+			if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 				break;
-			}
-			CAM_DBG(CAM_ISP, "curr_core_idx = %d, core idx hw = %d",
-					core_idx, hw_res_left->hw_intf->hw_idx);
-
-			if (core_idx == hw_res_left->hw_intf->hw_idx) {
-				eof_status = hw_res_left->bottom_half_handler(
-					hw_res_left, evt_payload);
-				if (atomic_read(
-					&ife_hwr_mgr_ctx->overflow_pending))
-					break;
-				if (!eof_status)
-					ife_hwr_irq_eof_cb(
-						ife_hwr_mgr_ctx->common.cb_priv,
-						CAM_ISP_HW_EVENT_EOF,
-						&eof_done_event_data);
-			}
-
-			break;
-		/* Handling dual VFE Scenario */
-		case 1:
-			if ((!hw_res_left) || (!hw_res_right)) {
-				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
-				break;
-			}
-			if (core_idx == hw_res_left->hw_intf->hw_idx) {
-				eof_status = hw_res_left->bottom_half_handler(
-					hw_res_left, evt_payload);
-
-				if (!eof_status)
-					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
-				else
-					break;
-			}
-
-			/* EOF check for Right side VFE */
-			if (core_idx == hw_res_right->hw_intf->hw_idx) {
-				eof_status = hw_res_right->bottom_half_handler(
-					hw_res_right, evt_payload);
-
-				if (!eof_status)
-					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
-				else
-					break;
-			}
-
-			core_index0 = hw_res_left->hw_intf->hw_idx;
-			core_index1 = hw_res_right->hw_intf->hw_idx;
-
-			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
-					ife_hwr_mgr_ctx,
-					core_index0,
-					core_index1,
-					evt_payload->evt_id);
-
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-
-			if (!rc)
-				ife_hwr_irq_eof_cb(
-					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_EOF,
-					&eof_done_event_data);
-
-			break;
-
-		default:
-			CAM_ERR(CAM_ISP, "error with hw_res");
+			ife_hw_irq_eof_cb(ife_hw_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_EOF, &eof_done_event_data);
 		}
+		break;
+
+	case CAM_ISP_HW_VFE_IN_RDI0:
+	case CAM_ISP_HW_VFE_IN_RDI1:
+	case CAM_ISP_HW_VFE_IN_RDI2:
+	case CAM_ISP_HW_VFE_IN_RDI3:
+	case CAM_ISP_HW_VFE_IN_PDLIB:
+	case CAM_ISP_HW_VFE_IN_LCR:
+		break;
+
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
+		break;
 	}
 
-	CAM_DBG(CAM_ISP, "Exit (eof_status = %d)", eof_status);
+	CAM_DBG(CAM_ISP, "EOF for out_res->res_id: 0x%x",
+		event_info->res_id);
 
 	return 0;
 }
 
-
-static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
-	void                              *handler_priv,
-	void                              *payload)
-
+static int cam_ife_hw_mgr_handle_hw_buf_done(
+	void                                *ctx,
+	void                                *evt_info)
 {
-	int32_t                              buf_done_status = 0;
-	int32_t                              i;
-	int32_t                              rc = 0;
 	cam_hw_event_cb_func                 ife_hwr_irq_wm_done_cb;
-	struct cam_isp_resource_node        *hw_res_left = NULL;
-	struct cam_ife_hw_mgr_ctx           *ife_hwr_mgr_ctx = NULL;
-	struct cam_vfe_bus_irq_evt_payload  *evt_payload = payload;
-	struct cam_ife_hw_mgr_res           *isp_ife_out_res = NULL;
-	struct cam_hw_event_recovery_data    recovery_data;
+	struct cam_ife_hw_mgr_ctx           *ife_hw_mgr_ctx = ctx;
 	struct cam_isp_hw_done_event_data    buf_done_event_data = {0};
-	struct cam_isp_hw_error_event_data   error_event_data = {0};
-	uint32_t  error_resc_handle[CAM_IFE_HW_OUT_RES_MAX];
-	uint32_t  num_of_error_handles = 0;
+	struct cam_isp_hw_event_info        *event_info = evt_info;
 
-	CAM_DBG(CAM_ISP, "Enter");
-
-	ife_hwr_mgr_ctx = evt_payload->ctx;
 	ife_hwr_irq_wm_done_cb =
-		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
 
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
+	buf_done_event_data.num_handles = 1;
+	buf_done_event_data.resource_handle[0] = event_info->res_id;
 
-	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
-		isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
+	if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
+		return 0;
 
-		if (isp_ife_out_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			continue;
-
-		hw_res_left = isp_ife_out_res->hw_res[0];
-
-		/*
-		 * DUAL VFE: Index 0 is always a master. In case of composite
-		 * Error, if the error is not in master, it needs to be checked
-		 * in slave (for debuging purpose only) For other cases:
-		 * Index zero is valid
-		 */
-
-		if (hw_res_left && (evt_payload->core_index ==
-			hw_res_left->hw_intf->hw_idx))
-			buf_done_status = hw_res_left->bottom_half_handler(
-				hw_res_left, evt_payload);
-		else
-			continue;
-
-		switch (buf_done_status) {
-		case CAM_VFE_IRQ_STATUS_ERR_COMP:
-			/*
-			 * Write interface can pipeline upto 2 buffer done
-			 * strobes from each write client. If any of the client
-			 * triggers a third buffer done strobe before a
-			 * composite interrupt based on the first buffer doneis
-			 * triggered an error irq is set. This scenario can
-			 * only happen if a client is 3 frames ahead of the
-			 * other clients enabled in the same composite mask.
-			 */
-		case CAM_VFE_IRQ_STATUS_COMP_OWRT:
-			/*
-			 * It is an indication that bandwidth is not sufficient
-			 * to generate composite done irq within the VBI time.
-			 */
-
-			error_resc_handle[num_of_error_handles++] =
-					isp_ife_out_res->res_id;
-
-			if (num_of_error_handles > 0) {
-				error_event_data.error_type =
-					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
-				goto err;
-			}
-
-			break;
-		case CAM_VFE_IRQ_STATUS_ERR:
-			break;
-		case CAM_VFE_IRQ_STATUS_SUCCESS:
-			buf_done_event_data.num_handles = 1;
-			buf_done_event_data.resource_handle[0] =
-				isp_ife_out_res->res_id;
-
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-			/* Report for Successful buf_done event if any */
-			if (buf_done_event_data.num_handles > 0 &&
-				ife_hwr_irq_wm_done_cb) {
-				CAM_DBG(CAM_ISP, "notify isp context");
-				ife_hwr_irq_wm_done_cb(
-					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_DONE,
-					&buf_done_event_data);
-			}
-
-			break;
-		default:
-			/* Do NOTHING */
-			error_resc_handle[num_of_error_handles++] =
-				isp_ife_out_res->res_id;
-			if (num_of_error_handles > 0) {
-				error_event_data.error_type =
-					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
-				goto err;
-			}
-			break;
-		}
-		if (!buf_done_status)
-			CAM_DBG(CAM_ISP,
-				"buf_done status:(%d),out_res->res_id: 0x%x",
-				buf_done_status, isp_ife_out_res->res_id);
+	if (buf_done_event_data.num_handles > 0 && ife_hwr_irq_wm_done_cb) {
+		CAM_DBG(CAM_ISP, "Notify ISP context");
+		ife_hwr_irq_wm_done_cb(ife_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_DONE, &buf_done_event_data);
 	}
 
-	return rc;
+	CAM_DBG(CAM_ISP, "Buf done for out_res->res_id: 0x%x",
+		event_info->res_id);
 
-err:
-	/*
-	 * Report for error if any.
-	 * For the first phase, Error is reported as overflow, for all
-	 * the affected context and any successful buf_done event is not
-	 * reported.
-	 */
-	rc = cam_ife_hw_mgr_find_affected_ctx(ife_hwr_mgr_ctx,
-		&error_event_data, evt_payload->core_index,
-		&recovery_data);
-
-	/*
-	 * We can temporarily return from here as
-	 * for the first phase, we are going to reset entire HW.
-	 */
-
-	CAM_DBG(CAM_ISP, "Exit buf_done_status Error = %d",
-		buf_done_status);
-	return rc;
+	return 0;
 }
 
-int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv,
-	void *evt_payload_priv)
+static int cam_ife_hw_mgr_event_handler(
+	void                                *priv,
+	uint32_t                             evt_id,
+	void                                *evt_info)
 {
-	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx = handler_priv;
-	struct cam_vfe_bus_irq_evt_payload      *evt_payload;
-	int rc = -EINVAL;
+	int                                  rc = 0;
 
-	if (!handler_priv)
-		return rc;
+	if (!evt_info)
+		return -EINVAL;
 
-	evt_payload = evt_payload_priv;
-	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
+	if (!priv && (evt_id != CAM_ISP_HW_EVENT_ERROR))
+		return -EINVAL;
 
-	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core index:0x%x",
-		evt_payload, evt_payload->core_index);
-	CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
-	CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
-	/* WM Done */
-	return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-}
+	CAM_DBG(CAM_ISP, "Event ID 0x%x", evt_id);
 
-int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
-{
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx = handler_priv;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	int rc = -EINVAL;
+	switch (evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		rc = cam_ife_hw_mgr_handle_hw_sof(priv, evt_info);
+		break;
 
-	if (!evt_payload_priv)
-		return rc;
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		rc = cam_ife_hw_mgr_handle_hw_rup(priv, evt_info);
+		break;
 
-	evt_payload = evt_payload_priv;
-	if (!handler_priv)
-		return rc;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		rc = cam_ife_hw_mgr_handle_hw_epoch(priv, evt_info);
+		break;
 
-	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
+	case CAM_ISP_HW_EVENT_EOF:
+		rc = cam_ife_hw_mgr_handle_hw_eof(priv, evt_info);
+		break;
 
-	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
-		(void *)evt_payload,
-		evt_payload->core_index);
-	CAM_DBG(CAM_ISP,
-		"irq_status_0 = 0x%x, irq_status_1 = 0x%x, irq_status_2 = 0x%x ",
-		evt_payload->irq_reg_val[0],
-		evt_payload->irq_reg_val[1],
-		evt_payload->irq_reg_val[2]);
+	case CAM_ISP_HW_EVENT_DONE:
+		rc = cam_ife_hw_mgr_handle_hw_buf_done(priv, evt_info);
+		break;
 
-	/*
-	 * If overflow/overwrite/error/violation are pending
-	 * for this context it needs to be handled remaining
-	 * interrupts are ignored.
-	 */
-	rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
-		evt_payload_priv);
+	case CAM_ISP_HW_EVENT_ERROR:
+		rc = cam_ife_hw_mgr_handle_hw_err(evt_info);
+		break;
 
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"Encountered Error (%d), ignoring other irqs",
-			rc);
-		goto put_payload;
+	default:
+		CAM_ERR(CAM_ISP, "Invalid event ID %d", evt_id);
+		break;
 	}
 
-	CAM_DBG(CAM_ISP, "Calling EOF");
-	cam_ife_hw_mgr_handle_eof_for_camif_hw_res(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-
-	CAM_DBG(CAM_ISP, "Calling SOF");
-	/* SOF IRQ */
-	cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-
-	if (evt_payload->hw_version != CAM_CPAS_TITAN_480_V100) {
-		CAM_DBG(CAM_ISP, "Calling RUP");
-		/* REG UPDATE */
-		cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-	}
-
-	CAM_DBG(CAM_ISP, "Calling EPOCH");
-	/* EPOCH IRQ */
-	cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-
-put_payload:
-	cam_vfe_put_evt_payload(evt_payload->core_info, &evt_payload);
-	return IRQ_HANDLED;
-}
-
-
-int cam_ife_mgr_do_tasklet_reg_update(
-	void *handler_priv, void *evt_payload_priv)
-{
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx = handler_priv;
-	struct cam_vfe_bus_irq_evt_payload   *evt_payload;
-	int                                   rc = -EINVAL;
-
-	evt_payload = evt_payload_priv;
-
-	if (!evt_payload_priv || !handler_priv) {
-		CAM_ERR(CAM_ISP, "Invalid handle:%pK or event payload:%pK",
-			handler_priv, evt_payload_priv);
-		return rc;
-	}
-	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
-
-	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
-		(void *)evt_payload,
-		evt_payload->core_index);
-	CAM_DBG(CAM_ISP,
-		"bus_irq_status_0: = 0x%x, bus_irq_status_1: = 0x%x, calling RUP",
-		evt_payload->irq_reg_val[0],
-		evt_payload->irq_reg_val[1]);
-	/* REG UPDATE */
-	rc = cam_ife_hw_mgr_handle_reg_update_in_bus(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-
-	if (rc)
-		CAM_ERR(CAM_ISP,
-			"Encountered Error, rc = %d", rc);
-
 	return rc;
 }
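
On the producer side, the VFE layers are now expected to invoke this dispatcher through the event_cb/priv pair captured at acquire time (see the cam_vfe_acquire_args change later in this diff). A minimal sketch, where the holder struct and its field layout are invented for illustration:

/* Illustrative only: demo_vfe_res_priv is a hypothetical holder. */
struct demo_vfe_res_priv {
	struct cam_hw_intf       *hw_intf;
	cam_hw_mgr_event_cb_func  event_cb;  /* from cam_vfe_acquire_args */
	void                     *priv;      /* hw mgr ctx, ditto */
};

static void demo_notify_sof(struct demo_vfe_res_priv *res)
{
	struct cam_isp_hw_event_info evt_info = {
		.res_type = CAM_ISP_RESOURCE_VFE_IN,
		.res_id   = CAM_ISP_HW_VFE_IN_CAMIF,
		.hw_idx   = res->hw_intf->hw_idx,
	};

	if (res->event_cb)
		res->event_cb(res->priv, CAM_ISP_HW_EVENT_SOF, &evt_info);
}
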
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index f9e44c6..711f279 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -124,6 +124,7 @@
  * @config_done_complete    indicator for configuration complete
  * @init_done               indicate whether init hw is done
  * @is_fe_enable            indicate whether fetch engine\read path is enabled
+ * @is_dual                 indicate whether context is in dual VFE mode
  */
 struct cam_ife_hw_mgr_ctx {
 	struct list_head                list;
@@ -160,6 +161,7 @@
 	struct completion               config_done_complete;
 	bool                            init_done;
 	bool                            is_fe_enable;
+	bool                            is_dual;
 };
 
 /**
@@ -210,38 +212,4 @@
  */
 int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl);
 
-/**
- * cam_ife_mgr_do_tasklet_buf_done()
- *
- * @brief:              Main tasklet handle function for the buf done event
- *
- * @handler_priv:       Tasklet information handle
- * @evt_payload_priv:   Event payload for the handler funciton
- *
- */
-int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv, void *evt_payload_priv);
-
-/**
- * cam_ife_mgr_do_tasklet()
- *
- * @brief:              Main tasklet handle function for mux resource events
- *
- * @handler_priv:       Tasklet information handle
- * @evt_payload_priv:   Event payload for the handler funciton
- *
- */
-int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv);
-
-/**
- * cam_ife_mgr_do_tasklet_reg_update()
- *
- * @brief:              Tasklet handle function for reg update
- *
- * @handler_priv:       Tasklet information handle
- * @evt_payload_priv:   Event payload for the handler funciton
- *
- */
-int cam_ife_mgr_do_tasklet_reg_update(void *handler_priv,
-	void *evt_payload_priv);
-
 #endif /* _CAM_IFE_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h
index 1e24a37..69e24bc 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_ISP_HW_MGR_H_
@@ -9,7 +9,7 @@
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_tasklet_util.h"
 
-#define CAM_ISP_HW_NUM_MAX                       4
+#define CAM_ISP_HW_NUM_MAX                       7
 
 /**
  * struct cam_isp_hw_mgr_ctx - common acquired context for managers
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
index 293b4e2..5145dad 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -24,6 +24,7 @@
  * @list:                   list_head member for each entry in queue
  * @payload:                Payload structure for the event. This will be
  *                          passed to the handler function
+ * @handler_priv:           Private data passed at event subscription
  * @bottom_half_handler:    Function pointer for event handler in bottom
  *                          half context
  *
@@ -31,6 +32,7 @@
 struct cam_tasklet_queue_cmd {
 	struct list_head                   list;
 	void                              *payload;
+	void                              *handler_priv;
 	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler;
 };
 
@@ -203,6 +205,7 @@
 	CAM_DBG(CAM_ISP, "Enqueue tasklet cmd");
 	tasklet_cmd->bottom_half_handler = bottom_half_handler;
 	tasklet_cmd->payload = evt_payload_priv;
+	tasklet_cmd->handler_priv = handler_priv;
 	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
 	list_add_tail(&tasklet_cmd->list,
 		&tasklet->used_cmd_list);
@@ -317,7 +320,7 @@
 	tasklet_info = (struct cam_tasklet_info *)data;
 
 	while (!cam_tasklet_dequeue_cmd(tasklet_info, &tasklet_cmd)) {
-		tasklet_cmd->bottom_half_handler(tasklet_info->ctx_priv,
+		tasklet_cmd->bottom_half_handler(tasklet_cmd->handler_priv,
 			tasklet_cmd->payload);
 		cam_tasklet_put_cmd(tasklet_info, (void **)(&tasklet_cmd));
 	}
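
The handler_priv plumbing above matters because a single tasklet can drain commands that were subscribed with different private data; the bottom half must replay whatever the top half captured at enqueue time rather than a tasklet-wide context. A minimal sketch with illustrative names:

struct demo_cmd {
	void *handler_priv;                    /* captured at enqueue */
	void *payload;
	int (*bh)(void *priv, void *payload);
};

static void demo_bottom_half(struct demo_cmd *cmd, void *tasklet_ctx_priv)
{
	/* Old behavior: cmd->bh(tasklet_ctx_priv, cmd->payload), which
	 * breaks once subscribers pass distinct private data.
	 */
	cmd->bh(cmd->handler_priv, cmd->payload);
}
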
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 9056718..e91092a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -1052,7 +1052,7 @@
 	CAM_DBG(CAM_ISP, "CSID:%d init CSID HW",
 		csid_hw->hw_intf->hw_idx);
 
-	clk_lvl = cam_ife_csid_get_vote_level(soc_info, csid_hw->clk_rate);
+	clk_lvl = cam_soc_util_get_vote_level(soc_info, csid_hw->clk_rate);
 	CAM_DBG(CAM_ISP, "CSID clock lvl %u", clk_lvl);
 
 	rc = cam_ife_csid_enable_soc_resources(soc_info, clk_lvl);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index a35d8951..263a464 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/slab.h>
 #include "cam_ife_csid_soc.h"
@@ -110,7 +110,7 @@
 }
 
 int cam_ife_csid_enable_soc_resources(
-	struct cam_hw_soc_info *soc_info, uint32_t clk_lvl)
+	struct cam_hw_soc_info *soc_info, enum cam_vote_level clk_level)
 {
 	int rc = 0;
 	struct cam_csid_soc_private       *soc_private;
@@ -135,7 +135,7 @@
 	}
 
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
-		clk_lvl, true);
+		clk_level, true);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "enable platform failed");
 		goto stop_cpas;
@@ -228,24 +228,3 @@
 
 	return rc;
 }
-
-uint32_t cam_ife_csid_get_vote_level(struct cam_hw_soc_info *soc_info,
-	uint64_t clock_rate)
-{
-	int i = 0;
-
-	if (!clock_rate)
-		return CAM_SVS_VOTE;
-
-	for (i = 0; i < CAM_MAX_VOTE; i++) {
-		if (soc_info->clk_rate[i][soc_info->num_clk - 1] >=
-			clock_rate) {
-			CAM_DBG(CAM_ISP,
-				"Clock rate %lld, selected clock level %d",
-				clock_rate, i);
-			return i;
-		}
-	}
-
-	return CAM_TURBO_VOTE;
-}
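
The deleted helper duplicated a generic lookup; its caller now uses cam_soc_util_get_vote_level() instead (see the cam_ife_csid_core.c hunk above). Assuming the common soc-util helper follows the same logic as the deleted body, the selection reduces to:

/* Sketch matching the deleted body; the common soc-util helper is
 * assumed to behave equivalently.
 */
static enum cam_vote_level demo_get_vote_level(
	struct cam_hw_soc_info *soc_info, uint64_t clock_rate)
{
	int i;

	if (!clock_rate)
		return CAM_SVS_VOTE;

	/* Lowest level whose fastest clock satisfies the request. */
	for (i = 0; i < CAM_MAX_VOTE; i++)
		if (soc_info->clk_rate[i][soc_info->num_clk - 1] >=
			clock_rate)
			return i;

	return CAM_TURBO_VOTE;
}
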
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 534ce84..a3321d6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -8,9 +8,9 @@
 
 #include <linux/completion.h>
 #include "cam_hw.h"
-#include <uapi/media/cam_isp.h>
 #include "cam_soc_util.h"
 #include "cam_irq_controller.h"
+#include "cam_hw_intf.h"
 #include <uapi/media/cam_isp.h>
 
 /*
@@ -98,6 +98,7 @@
 	CAM_ISP_HW_CMD_FE_UPDATE_IN_RD,
 	CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD,
 	CAM_ISP_HW_CMD_UBWC_UPDATE_V2,
+	CAM_ISP_HW_CMD_CORE_CONFIG,
 	CAM_ISP_HW_CMD_MAX,
 };
 
@@ -153,6 +154,24 @@
 };
 
 /*
+ * struct cam_isp_hw_event_info:
+ *
+ * @Brief:          Structure to pass event details to hw mgr
+ *
+ * @res_type:       Type of IFE resource
+ * @res_id:         Unique resource ID
+ * @hw_idx:         IFE hw index
+ * @err_type:       Error type if any
+ *
+ */
+struct cam_isp_hw_event_info {
+	enum cam_isp_resource_type     res_type;
+	uint32_t                       res_id;
+	uint32_t                       hw_idx;
+	uint32_t                       err_type;
+};
+
+/*
  * struct cam_isp_hw_cmd_buf_update:
  *
  * @Brief:           Contain the new created command buffer information
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index 8c985bc..1c1f867 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -18,6 +18,8 @@
 #define VFE_VBIF_BASE_IDX             1
 #define VFE_BUS_BASE_IDX              1
 
+#define CAM_VFE_MAX_UBWC_PORTS        4
+
 enum cam_isp_hw_vfe_in_mux {
 	CAM_ISP_HW_VFE_IN_CAMIF       = 0,
 	CAM_ISP_HW_VFE_IN_TESTGEN     = 1,
@@ -112,7 +114,6 @@
  *                           (Default is Master in case of Single VFE)
  * @dual_slave_core:         If Master and Slave exists, HW Index of Slave
  * @cdm_ops:                 CDM operations
- * @ctx:                     Context data
  */
 struct cam_vfe_hw_vfe_out_acquire_args {
 	struct cam_isp_resource_node      *rsrc_node;
@@ -123,7 +124,6 @@
 	uint32_t                           is_master;
 	uint32_t                           dual_slave_core;
 	struct cam_cdm_utils_ops          *cdm_ops;
-	void                              *ctx;
 };
 
 /*
@@ -153,6 +153,8 @@
  * @tasklet:                 Tasklet to associate with this resource. This is
  *                           used to schedule bottom of IRQ events associated
  *                           with this resource.
+ * @priv:                    Context data
+ * @event_cb:                Callback to the hw mgr on hw events
  * @vfe_out:                 Acquire args for VFE_OUT
  * @vfe_bus_rd               Acquire args for VFE_BUS_READ
  * @vfe_in:                  Acquire args for VFE_IN
@@ -160,6 +162,8 @@
 struct cam_vfe_acquire_args {
 	enum cam_isp_resource_type           rsrc_type;
 	void                                *tasklet;
+	void                                *priv;
+	cam_hw_mgr_event_cb_func             event_cb;
 	union {
 		struct cam_vfe_hw_vfe_out_acquire_args  vfe_out;
 		struct cam_vfe_hw_vfe_out_acquire_args  vfe_bus_rd;
@@ -179,6 +183,17 @@
 };
 
 /*
+ * struct cam_vfe_core_config_args:
+ *
+ * @node_res:                Resource to apply the core config on
+ * @core_config:             Core config for IFE
+ */
+struct cam_vfe_core_config_args {
+	struct cam_isp_resource_node      *node_res;
+	struct cam_isp_core_config         core_config;
+};
+
+/*
  * struct cam_vfe_bw_update_args:
  *
  * @node_res:             Resource to get the BW
@@ -227,24 +242,14 @@
  *                           related to VFE_TOP resources
  *
  * @list:                    list_head node for the payload
- * @core_index:              Index of VFE HW that generated this IRQ event
- * @core_info:               Private data of handler in bottom half context
- * @evt_id:                  IRQ event
  * @irq_reg_val:             IRQ and Error register values, read when IRQ was
  *                           handled
- * @error_type:              Identify different errors
  * @ts:                      Timestamp
- * @hw_version:              CPAS hw version
  */
 struct cam_vfe_top_irq_evt_payload {
-	struct list_head           list;
-	uint32_t                   core_index;
-	void                      *core_info;
-	uint32_t                   evt_id;
-	uint32_t                   irq_reg_val[CAM_IFE_IRQ_REGISTERS_MAX];
-	uint32_t                   error_type;
-	struct cam_isp_timestamp   ts;
-	uint32_t                   hw_version;
+	struct list_head            list;
+	uint32_t                    irq_reg_val[CAM_IFE_IRQ_REGISTERS_MAX];
+	struct cam_isp_timestamp    ts;
 };
 
 /*
@@ -261,7 +266,6 @@
  *                           handled
  * @error_type:              Identify different errors
  * @ts:                      Timestamp
- * @ctx:                     Context data received during acquire
  */
 struct cam_vfe_bus_irq_evt_payload {
 	struct list_head            list;
@@ -271,32 +275,8 @@
 	uint32_t                    overflow_status;
 	uint32_t                    image_size_violation_status;
 	uint32_t                    evt_id;
-	uint32_t                    irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_MAX];
-	uint32_t                    error_type;
+	uint32_t                    irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX];
 	struct cam_isp_timestamp    ts;
-	void                       *ctx;
-};
-
-/*
- * struct cam_vfe_irq_handler_priv:
- *
- * @Brief:                   This structure is used as private data to
- *                           register with IRQ controller. It has information
- *                           needed by top half and bottom half.
- *
- * @core_index:              Index of VFE HW that generated this IRQ event
- * @core_info:               Private data of handler in bottom half context
- * @mem_base:                Mapped base address of the register space
- * @reset_complete:          Completion structure to be signaled if Reset IRQ
- *                           is Set
- * @hw_version:              CPAS hw version
- */
-struct cam_vfe_irq_handler_priv {
-	uint32_t                     core_index;
-	void                        *core_info;
-	void __iomem                *mem_base;
-	struct completion           *reset_complete;
-	uint32_t                     hw_version;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index abab72f..3c8a7e2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -19,80 +19,23 @@
 #include "cam_cpas_api.h"
 
 static const char drv_name[] = "vfe";
-static uint32_t irq_reg_offset[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x00000054,
-	0x00000058,
-	0x0000005C,
-	0x00000074,
-};
 
-static uint32_t camif_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x00000000,
-	0x00000007,
-	0x00000000,
-};
+#define CAM_VFE_17X_CLEAR_0_REG_OFFSET              0x00000064
+#define CAM_VFE_17X_CLEAR_1_REG_OFFSET              0x00000068
+#define CAM_VFE_17X_IRQ_CMD_REG_OFFSET              0x00000058
+#define CAM_VFE_17X_TOP_RESET_MASK                  0x80000000
 
-static uint32_t camif_fe_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x10000056,
-	0x00000000,
-};
+#define CAM_VFE_48X_CLEAR_0_REG_OFFSET              0x00000048
+#define CAM_VFE_48X_CLEAR_1_REG_OFFSET              0x0000004C
+#define CAM_VFE_48X_CLEAR_2_REG_OFFSET              0x00000050
+#define CAM_VFE_48X_IRQ_CMD_REG_OFFSET              0x00000038
+#define CAM_VFE_48X_TOP_RESET_MASK                  0x00000001
 
-static uint32_t camif_irq_err_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0xFBE00200,
-	0x00000000,
-	0x303FFF80,
-};
-
-static uint32_t rdi_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x38E00000,
-	0xFFF0,
-	0x00000000,
-};
-
-static int cam_vfe_get_evt_payload(struct cam_vfe_hw_core_info *core_info,
-	struct cam_vfe_top_irq_evt_payload    **evt_payload)
-{
-	spin_lock(&core_info->spin_lock);
-	if (list_empty(&core_info->free_payload_list)) {
-		*evt_payload = NULL;
-		spin_unlock(&core_info->spin_lock);
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload, core info 0x%x\n",
-			core_info->cpas_handle);
-		return -ENODEV;
-	}
-
-	*evt_payload = list_first_entry(&core_info->free_payload_list,
-		struct cam_vfe_top_irq_evt_payload, list);
-	list_del_init(&(*evt_payload)->list);
-	spin_unlock(&core_info->spin_lock);
-
-	return 0;
-}
-
-int cam_vfe_put_evt_payload(void             *core_info,
-	struct cam_vfe_top_irq_evt_payload  **evt_payload)
-{
-	struct cam_vfe_hw_core_info        *vfe_core_info = core_info;
-	unsigned long                       flags;
-
-	if (!core_info) {
-		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
-		return -EINVAL;
-	}
-	if (*evt_payload == NULL) {
-		CAM_ERR(CAM_ISP, "No payload to put");
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&vfe_core_info->spin_lock, flags);
-	(*evt_payload)->error_type = 0;
-	list_add_tail(&(*evt_payload)->list, &vfe_core_info->free_payload_list);
-	*evt_payload = NULL;
-	spin_unlock_irqrestore(&vfe_core_info->spin_lock, flags);
-
-
-	return 0;
-}
+#define CAM_VFE_LITE_48X_CLEAR_0_REG_OFFSET         0x00000034
+#define CAM_VFE_LITE_48X_CLEAR_1_REG_OFFSET         0x00000038
+#define CAM_VFE_LITE_48X_CLEAR_2_REG_OFFSET         0x0000003C
+#define CAM_VFE_LITE_48X_IRQ_CMD_REG_OFFSET         0x00000024
+#define CAM_VFE_LITE_48X_TOP_RESET_MASK             0x00020000
 
 int cam_vfe_get_hw_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size)
 {
@@ -121,9 +64,12 @@
 	struct cam_irq_th_payload         *th_payload)
 {
 	int32_t                            rc = -EINVAL;
-	struct cam_vfe_irq_handler_priv   *handler_priv;
+	struct cam_hw_info                *vfe_hw;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	void __iomem                      *mem_base;
 
-	handler_priv = th_payload->handler_priv;
+	vfe_hw = th_payload->handler_priv;
+	soc_info = &vfe_hw->soc_info;
 
 	CAM_DBG(CAM_ISP, "Enter");
 
@@ -131,26 +77,56 @@
 	 * Clear All IRQs to avoid spurious IRQs immediately
 	 * after Reset Done.
 	 */
+	CAM_DBG(CAM_ISP, "TOP_IRQ_STATUS_0 = 0x%x",
+		th_payload->evt_status_arr[0]);
 
-	switch (handler_priv->hw_version) {
+	mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+
+	switch (soc_info->hw_version) {
 	case CAM_CPAS_TITAN_480_V100:
-		if (th_payload->evt_status_arr[0] & 0x1) {
-			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x48);
-			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x4C);
-			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x50);
-			cam_io_w(0x1, handler_priv->mem_base + 0x38);
-			CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
-			complete(handler_priv->reset_complete);
-			rc = 0;
+		if (strnstr(soc_info->compatible, "lite",
+			strlen(soc_info->compatible)) == NULL) {
+			if (th_payload->evt_status_arr[0] & 0x1) {
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_48X_CLEAR_0_REG_OFFSET);
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_48X_CLEAR_1_REG_OFFSET);
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_48X_CLEAR_2_REG_OFFSET);
+				cam_io_w(0x00000001, mem_base +
+					CAM_VFE_48X_IRQ_CMD_REG_OFFSET);
+				CAM_DBG(CAM_ISP,
+					"Calling Complete for RESET CMD");
+				complete(&vfe_hw->hw_complete);
+				rc = 0;
+			}
+		} else {
+			if (th_payload->evt_status_arr[0] & (1<<17)) {
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_LITE_48X_CLEAR_0_REG_OFFSET);
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_LITE_48X_CLEAR_1_REG_OFFSET);
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_LITE_48X_CLEAR_2_REG_OFFSET);
+				cam_io_w(0x00000001, mem_base +
+					CAM_VFE_LITE_48X_IRQ_CMD_REG_OFFSET);
+				CAM_DBG(CAM_ISP,
+					"Calling Complete for RESET CMD");
+				complete(&vfe_hw->hw_complete);
+				rc = 0;
+			}
 		}
 		break;
 	default:
 		if (th_payload->evt_status_arr[0] & (1<<31)) {
-			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x64);
-			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x68);
-			cam_io_w(0x00000001, handler_priv->mem_base + 0x58);
+			cam_io_w(0xFFFFFFFF, mem_base +
+				CAM_VFE_17X_CLEAR_0_REG_OFFSET);
+			cam_io_w(0xFFFFFFFF, mem_base +
+				CAM_VFE_17X_CLEAR_1_REG_OFFSET);
+			cam_io_w(0x00000001, mem_base +
+				CAM_VFE_17X_IRQ_CMD_REG_OFFSET);
 			CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
-			complete(handler_priv->reset_complete);
+			complete(&vfe_hw->hw_complete);
 			rc = 0;
 		}
 		break;
@@ -160,74 +136,6 @@
 	return rc;
 }
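
Each branch above performs the same two-step clear: stage all-ones into the per-status CLEAR registers, then fire the IRQ CMD register, which presumably latches the staged clears. Condensed for the 17x offsets defined at the top of this file:

/* Condensed sketch of the 17x branch above. */
static void demo_vfe17x_clear_all_irqs(void __iomem *mem_base)
{
	/* Stage clears for both status registers... */
	cam_io_w(0xFFFFFFFF, mem_base + CAM_VFE_17X_CLEAR_0_REG_OFFSET);
	cam_io_w(0xFFFFFFFF, mem_base + CAM_VFE_17X_CLEAR_1_REG_OFFSET);
	/* ...then apply them with a single IRQ CMD write. */
	cam_io_w(0x00000001, mem_base + CAM_VFE_17X_IRQ_CMD_REG_OFFSET);
}
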
 
-static int cam_vfe_irq_err_top_half(uint32_t    evt_id,
-	struct cam_irq_th_payload   *th_payload)
-{
-	int32_t                              rc;
-	int                                  i;
-	struct cam_vfe_irq_handler_priv     *handler_priv;
-	struct cam_vfe_top_irq_evt_payload  *evt_payload;
-	struct cam_vfe_hw_core_info         *core_info;
-	bool                                 error_flag = false;
-
-	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
-		th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
-
-	handler_priv = th_payload->handler_priv;
-	core_info =  handler_priv->core_info;
-	/*
-	 *  need to handle overflow condition here, otherwise irq storm
-	 *  will block everything
-	 */
-	if (th_payload->evt_status_arr[1] ||
-		(th_payload->evt_status_arr[0] & camif_irq_err_reg_mask[0])) {
-		CAM_ERR(CAM_ISP,
-			"Encountered Error: vfe:%d:  Irq_status0=0x%x Status1=0x%x",
-			handler_priv->core_index, th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1]);
-		CAM_ERR(CAM_ISP,
-			"Stopping further IRQ processing from this HW index=%d",
-			handler_priv->core_index);
-		cam_irq_controller_disable_irq(core_info->vfe_irq_controller,
-			core_info->irq_err_handle);
-		cam_irq_controller_clear_and_mask(evt_id,
-			core_info->vfe_irq_controller);
-		error_flag = true;
-	}
-
-	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue");
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1]);
-		return rc;
-	}
-
-	cam_isp_hw_get_timestamp(&evt_payload->ts);
-
-	evt_payload->core_index = handler_priv->core_index;
-	evt_payload->core_info  = handler_priv->core_info;
-	evt_payload->evt_id  = evt_id;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
-
-	for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
-		evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
-			irq_reg_offset[i]);
-	}
-
-	if (error_flag)
-		CAM_INFO(CAM_ISP, "Violation status = %x",
-			evt_payload->irq_reg_val[2]);
-
-	th_payload->evt_payload_priv = evt_payload;
-
-	return rc;
-}
-
 int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
 {
 	struct cam_hw_info                *vfe_hw = hw_priv;
@@ -341,7 +249,7 @@
 	mutex_lock(&vfe_hw->hw_mutex);
 	if (!vfe_hw->open_count) {
 		mutex_unlock(&vfe_hw->hw_mutex);
-		CAM_ERR(CAM_ISP, "Error! Unbalanced deinit");
+		CAM_ERR(CAM_ISP, "Error. Unbalanced deinit");
 		return -EFAULT;
 	}
 	vfe_hw->open_count--;
@@ -407,57 +315,53 @@
 	soc_info = &vfe_hw->soc_info;
 	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
 
-	core_info->irq_payload.core_index = soc_info->index;
-	core_info->irq_payload.mem_base =
-		vfe_hw->soc_info.reg_map[VFE_CORE_BASE_IDX].mem_base;
-	core_info->irq_payload.hw_version = soc_info->hw_version;
-	core_info->irq_payload.core_info = core_info;
-	core_info->irq_payload.reset_complete = &vfe_hw->hw_complete;
-
 	memset(top_reset_irq_reg_mask, 0, sizeof(top_reset_irq_reg_mask));
 
-	switch (vfe_hw->soc_info.hw_version) {
+	switch (soc_info->hw_version) {
 	case CAM_CPAS_TITAN_480_V100:
 		if (strnstr(soc_info->compatible, "lite",
 			strlen(soc_info->compatible)) == NULL)
 			top_reset_irq_reg_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
-				= 0x00000001;
+				= CAM_VFE_48X_TOP_RESET_MASK;
 		else
 			top_reset_irq_reg_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
-				= 0x00020000;
+				= CAM_VFE_LITE_48X_TOP_RESET_MASK;
 		break;
 	default:
 		top_reset_irq_reg_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
-			= 0x80000000;
+			= CAM_VFE_17X_TOP_RESET_MASK;
 		break;
 	}
 
-	core_info->irq_handle = cam_irq_controller_subscribe_irq(
+	core_info->reset_irq_handle = cam_irq_controller_subscribe_irq(
 		core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_0,
-		top_reset_irq_reg_mask, &core_info->irq_payload,
+		top_reset_irq_reg_mask, vfe_hw,
 		cam_vfe_reset_irq_top_half, NULL, NULL, NULL);
-	if (core_info->irq_handle < 0) {
+	if (core_info->reset_irq_handle < 1) {
 		CAM_ERR(CAM_ISP, "subscribe irq controller failed");
+		core_info->reset_irq_handle = 0;
 		return -EFAULT;
 	}
 
 	reinit_completion(&vfe_hw->hw_complete);
 
-	CAM_DBG(CAM_ISP, "calling RESET on vfe %d", soc_info->index);
+	CAM_DBG(CAM_ISP, "calling RESET on VFE:%d", soc_info->index);
+
 	core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv,
 		reset_core_args, arg_size);
-	CAM_DBG(CAM_ISP, "waiting for vfe reset complete");
+
 	/* Wait for Completion or Timeout of 500ms */
 	rc = wait_for_completion_timeout(&vfe_hw->hw_complete, 500);
-	if (!rc)
-		CAM_ERR(CAM_ISP, "Error! Reset Timeout");
 
-	CAM_DBG(CAM_ISP, "reset complete done (%d)", rc);
+	if (!rc)
+		CAM_ERR(CAM_ISP, "Reset Timeout");
+	else
+		CAM_DBG(CAM_ISP, "reset complete done (%d)", rc);
 
 	rc = cam_irq_controller_unsubscribe_irq(
-		core_info->vfe_irq_controller, core_info->irq_handle);
+		core_info->vfe_irq_controller, core_info->reset_irq_handle);
 	if (rc)
-		CAM_ERR(CAM_ISP, "Error! Unsubscribe failed");
+		CAM_ERR(CAM_ISP, "Error. Unsubscribe failed");
 
 	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
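
Stripped of error handling, the reset path above is a subscribe/arm/kick/wait/unsubscribe handshake. A condensed sketch; note that wait_for_completion_timeout() takes jiffies, so the literal 500 above equals 500 ms only when HZ=1000, and the sketch uses msecs_to_jiffies() to make that intent explicit:

static int demo_vfe_reset_sync(struct cam_hw_info *vfe_hw,
	struct cam_vfe_hw_core_info *core_info, uint32_t *reset_mask,
	void *reset_args, uint32_t arg_size)
{
	unsigned long remaining;
	int handle;

	handle = cam_irq_controller_subscribe_irq(
		core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_0,
		reset_mask, vfe_hw, cam_vfe_reset_irq_top_half,
		NULL, NULL, NULL);
	if (handle < 1)
		return -EFAULT;

	reinit_completion(&vfe_hw->hw_complete);
	core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv,
		reset_args, arg_size);

	/* Reset-done top half completes vfe_hw->hw_complete. */
	remaining = wait_for_completion_timeout(&vfe_hw->hw_complete,
		msecs_to_jiffies(500));

	cam_irq_controller_unsubscribe_irq(
		core_info->vfe_irq_controller, handle);

	return remaining ? 0 : -ETIMEDOUT;
}
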
@@ -472,55 +376,6 @@
 	time_stamp->mono_time.tv_usec   = ts.tv_nsec/1000;
 }
 
-static int cam_vfe_irq_top_half(uint32_t    evt_id,
-	struct cam_irq_th_payload   *th_payload)
-{
-	int32_t                              rc;
-	int                                  i;
-	struct cam_vfe_irq_handler_priv     *handler_priv;
-	struct cam_vfe_top_irq_evt_payload  *evt_payload;
-	struct cam_vfe_hw_core_info         *core_info;
-
-	handler_priv = th_payload->handler_priv;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		CAM_DBG(CAM_ISP, "IRQ status_%d = 0x%x",
-		i, th_payload->evt_status_arr[i]);
-
-
-	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue");
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1]);
-		return rc;
-	}
-
-	core_info =  handler_priv->core_info;
-	cam_isp_hw_get_timestamp(&evt_payload->ts);
-
-	evt_payload->core_index = handler_priv->core_index;
-	evt_payload->core_info  = handler_priv->core_info;
-	evt_payload->evt_id  = evt_id;
-	evt_payload->hw_version = handler_priv->hw_version;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
-
-	evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
-		irq_reg_offset[i]);
-
-	CAM_DBG(CAM_ISP,
-		"Violation status = 0x%x", evt_payload->irq_reg_val[i]);
-
-	th_payload->evt_payload_priv = evt_payload;
-
-	CAM_DBG(CAM_ISP, "Exit");
-	return rc;
-}
-
 int cam_vfe_reserve(void *hw_priv, void *reserve_args, uint32_t arg_size)
 {
 	struct cam_vfe_hw_core_info       *core_info = NULL;
@@ -528,7 +383,6 @@
 	struct cam_vfe_acquire_args       *acquire;
 	int rc = -ENODEV;
 
-
 	if (!hw_priv || !reserve_args || (arg_size !=
 		sizeof(struct cam_vfe_acquire_args))) {
 		CAM_ERR(CAM_ISP, "Invalid input arguments");
@@ -542,8 +396,7 @@
 	if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_IN) {
 		rc = core_info->vfe_top->hw_ops.reserve(
 			core_info->vfe_top->top_priv,
-			acquire,
-			sizeof(*acquire));
+			acquire, sizeof(*acquire));
 	} else if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_OUT) {
 		rc = core_info->vfe_bus->hw_ops.reserve(
 			core_info->vfe_bus->bus_priv, acquire,
@@ -553,16 +406,14 @@
 			rc = core_info->vfe_rd_bus->hw_ops.reserve(
 				core_info->vfe_rd_bus->bus_priv, acquire,
 				sizeof(*acquire));
-	} else {
+	} else
 		CAM_ERR(CAM_ISP, "Invalid res type:%d", acquire->rsrc_type);
-	}
 
 	mutex_unlock(&vfe_hw->hw_mutex);
 
 	return rc;
 }
 
-
 int cam_vfe_release(void *hw_priv, void *release_args, uint32_t arg_size)
 {
 	struct cam_vfe_hw_core_info       *core_info = NULL;
@@ -621,91 +472,33 @@
 	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
 	isp_res = (struct cam_isp_resource_node  *)start_args;
 	core_info->tasklet_info = isp_res->tasklet_info;
-	core_info->irq_payload.hw_version = soc_info->hw_version;
 
 	mutex_lock(&vfe_hw->hw_mutex);
 	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
-		if (isp_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
-			isp_res->irq_handle =
-				cam_irq_controller_subscribe_irq(
-					core_info->vfe_irq_controller,
-					CAM_IRQ_PRIORITY_1,
-					camif_irq_reg_mask,
-					&core_info->irq_payload,
-					cam_vfe_irq_top_half,
-					cam_ife_mgr_do_tasklet,
-					isp_res->tasklet_info,
-					&tasklet_bh_api);
-			if (isp_res->irq_handle < 1)
-				rc = -ENOMEM;
-		} else if (isp_res->res_id == CAM_ISP_HW_VFE_IN_RD) {
-			isp_res->irq_handle =
-				cam_irq_controller_subscribe_irq(
-					core_info->vfe_irq_controller,
-					CAM_IRQ_PRIORITY_1,
-					camif_fe_irq_reg_mask,
-					&core_info->irq_payload,
-					cam_vfe_irq_top_half,
-					cam_ife_mgr_do_tasklet,
-					isp_res->tasklet_info,
-					&tasklet_bh_api);
-			if (isp_res->irq_handle < 1)
-				rc = -ENOMEM;
-		} else if (isp_res->rdi_only_ctx) {
-			isp_res->irq_handle =
-				cam_irq_controller_subscribe_irq(
-					core_info->vfe_irq_controller,
-					CAM_IRQ_PRIORITY_1,
-					rdi_irq_reg_mask,
-					&core_info->irq_payload,
-					cam_vfe_irq_top_half,
-					cam_ife_mgr_do_tasklet,
-					isp_res->tasklet_info,
-					&tasklet_bh_api);
-			if (isp_res->irq_handle < 1)
-				rc = -ENOMEM;
-		}
+		rc = core_info->vfe_top->hw_ops.start(
+			core_info->vfe_top->top_priv, isp_res,
+			sizeof(struct cam_isp_resource_node));
 
-		if (rc == 0) {
-			rc = core_info->vfe_top->hw_ops.start(
-				core_info->vfe_top->top_priv, isp_res,
-				sizeof(struct cam_isp_resource_node));
-			if (rc)
-				CAM_ERR(CAM_ISP, "Start failed. type:%d",
-					isp_res->res_type);
-		} else {
-			CAM_ERR(CAM_ISP,
-				"Error! subscribe irq controller failed");
-		}
+		if (rc)
+			CAM_ERR(CAM_ISP, "Failed to start VFE IN");
 	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
 		rc = core_info->vfe_bus->hw_ops.start(isp_res, NULL, 0);
+
+		if (rc)
+			CAM_ERR(CAM_ISP, "Failed to start VFE OUT");
 	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_BUS_RD) {
-		if (core_info->vfe_rd_bus)
+		if (core_info->vfe_rd_bus) {
 			rc = core_info->vfe_rd_bus->hw_ops.start(isp_res,
 				NULL, 0);
+
+			if (rc)
+				CAM_ERR(CAM_ISP, "Failed to start BUS RD");
+		}
 	} else {
 		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
 		rc = -EFAULT;
 	}
 
-	if (!core_info->irq_err_handle) {
-		core_info->irq_err_handle =
-			cam_irq_controller_subscribe_irq(
-				core_info->vfe_irq_controller,
-				CAM_IRQ_PRIORITY_0,
-				camif_irq_err_reg_mask,
-				&core_info->irq_payload,
-				cam_vfe_irq_err_top_half,
-				cam_ife_mgr_do_tasklet,
-				core_info->tasklet_info,
-				&tasklet_bh_api);
-		if (core_info->irq_err_handle < 1) {
-			CAM_ERR(CAM_ISP, "Error handle subscribe failure");
-			rc = -ENOMEM;
-			core_info->irq_err_handle = 0;
-		}
-	}
-
 	mutex_unlock(&vfe_hw->hw_mutex);
 
 	return rc;
@@ -729,10 +522,6 @@
 
 	mutex_lock(&vfe_hw->hw_mutex);
 	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
-		cam_irq_controller_unsubscribe_irq(
-			core_info->vfe_irq_controller, isp_res->irq_handle);
-		isp_res->irq_handle = 0;
-
 		rc = core_info->vfe_top->hw_ops.stop(
 			core_info->vfe_top->top_priv, isp_res,
 			sizeof(struct cam_isp_resource_node));
@@ -746,11 +535,11 @@
 		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
 	}
 
-	if (core_info->irq_err_handle) {
+	if (core_info->reset_irq_handle > 0) {
 		cam_irq_controller_unsubscribe_irq(
 			core_info->vfe_irq_controller,
-			core_info->irq_err_handle);
-		core_info->irq_err_handle = 0;
+			core_info->reset_irq_handle);
+		core_info->reset_irq_handle = 0;
 	}
 
 	mutex_unlock(&vfe_hw->hw_mutex);
@@ -792,6 +581,7 @@
 	case CAM_ISP_HW_CMD_CLOCK_UPDATE:
 	case CAM_ISP_HW_CMD_BW_UPDATE:
 	case CAM_ISP_HW_CMD_BW_CONTROL:
+	case CAM_ISP_HW_CMD_CORE_CONFIG:
 		rc = core_info->vfe_top->hw_ops.process_cmd(
 			core_info->vfe_top->top_priv, cmd_type, cmd_args,
 			arg_size);
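
The new CAM_ISP_HW_CMD_CORE_CONFIG routed to vfe_top above would be issued from the hw mgr roughly as below; the packaging follows cam_vfe_core_config_args defined earlier in this diff, while the hw_intf plumbing is illustrative:

static int demo_send_core_config(struct cam_hw_intf *hw_intf,
	struct cam_isp_resource_node *node_res,
	struct cam_isp_core_config *core_config)
{
	struct cam_vfe_core_config_args args;

	args.node_res    = node_res;
	args.core_config = *core_config;

	return hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
		CAM_ISP_HW_CMD_CORE_CONFIG, &args, sizeof(args));
}
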
@@ -855,7 +645,6 @@
 	struct cam_vfe_hw_info                     *vfe_hw_info)
 {
 	int rc = -EINVAL;
-	int i;
 
 	CAM_DBG(CAM_ISP, "Enter");
 
@@ -868,8 +657,8 @@
 		return rc;
 	}
 
-	rc = cam_vfe_top_init(vfe_hw_info->top_version,
-		soc_info, hw_intf, vfe_hw_info->top_hw_info,
+	rc = cam_vfe_top_init(vfe_hw_info->top_version, soc_info, hw_intf,
+		vfe_hw_info->top_hw_info, core_info->vfe_irq_controller,
 		&core_info->vfe_top);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Error, cam_vfe_top_init failed rc = %d", rc);
@@ -877,9 +666,8 @@
 	}
 
 	rc = cam_vfe_bus_init(vfe_hw_info->bus_version, BUS_TYPE_WR,
-		soc_info, hw_intf,
-		vfe_hw_info->bus_hw_info, core_info->vfe_irq_controller,
-		&core_info->vfe_bus);
+		soc_info, hw_intf, vfe_hw_info->bus_hw_info,
+		core_info->vfe_irq_controller, &core_info->vfe_bus);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Error, cam_vfe_bus_init failed rc = %d", rc);
 		goto deinit_top;
@@ -899,13 +687,6 @@
 			core_info->vfe_rd_bus, hw_intf->hw_idx);
 	}
 
-	INIT_LIST_HEAD(&core_info->free_payload_list);
-	for (i = 0; i < CAM_VFE_EVT_MAX; i++) {
-		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
-		list_add_tail(&core_info->evt_payload[i].list,
-			&core_info->free_payload_list);
-	}
-
 	spin_lock_init(&core_info->spin_lock);
 
 	return rc;
@@ -924,15 +705,10 @@
 	struct cam_vfe_hw_info                       *vfe_hw_info)
 {
 	int                rc = -EINVAL;
-	int                i;
 	unsigned long      flags;
 
 	spin_lock_irqsave(&core_info->spin_lock, flags);
 
-	INIT_LIST_HEAD(&core_info->free_payload_list);
-	for (i = 0; i < CAM_VFE_EVT_MAX; i++)
-		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
-
 	rc = cam_vfe_bus_deinit(vfe_hw_info->bus_version,
 		&core_info->vfe_bus);
 	if (rc)
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
index 7b5d8e4..43afd03 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_CORE_H_
@@ -51,13 +51,8 @@
 	struct cam_vfe_bus                 *vfe_bus;
 	struct cam_vfe_bus                 *vfe_rd_bus;
 	void                               *tasklet_info;
-	struct cam_vfe_top_irq_evt_payload  evt_payload[CAM_VFE_EVT_MAX];
-	struct list_head                    free_payload_list;
-	struct cam_vfe_irq_handler_priv     irq_payload;
-	uint32_t                            cpas_handle;
-	int                                 irq_handle;
-	int                                 irq_err_handle;
 	spinlock_t                          spin_lock;
+	int                                 reset_irq_handle;
 };
 
 int cam_vfe_get_hw_caps(void *device_priv,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index 7f1001f..a3f1220 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -51,6 +51,10 @@
 		return rc;
 	}
 
+	if (strnstr(soc_info->compatible, "lite",
+		strlen(soc_info->compatible)) != NULL)
+		goto end;
+
 	switch (soc_info->hw_version) {
 	case CAM_CPAS_TITAN_480_V100:
 		num_ubwc_cfg = of_property_count_u32_elems(of_node,
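
The early-out added above keeps VFE-lite variants from parsing UBWC settings altogether, presumably because lite cores expose no UBWC-capable write ports. The gate reduces to a simple compatible-string probe:

/* Equivalent predicate for the check added above. */
static bool demo_vfe_is_lite(struct cam_hw_soc_info *soc_info)
{
	return strnstr(soc_info->compatible, "lite",
		strlen(soc_info->compatible)) != NULL;
}
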
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
index 82ffa44..db61bfb 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
@@ -75,6 +75,8 @@
 	.eof_irq_mask                    = 0x00000002,
 	.error_irq_mask0                 = 0x0003FC00,
 	.error_irq_mask1                 = 0xEFFF7E80,
+	.subscribe_irq_mask0             = 0x00000017,
+	.subscribe_irq_mask1             = 0x00000000,
 	.enable_diagnostic_hw            = 0x1,
 };
 
@@ -94,8 +96,10 @@
 	.lite_epoch0_irq_mask            = 0x00100000,
 	.dual_pd_reg_upd_irq_mask        = 0x04000000,
 	.lite_eof_irq_mask               = 0x00080000,
-	.lite_error_irq_mask0            = 0x00400000,
-	.lite_error_irq_mask1            = 0x00004100,
+	.lite_err_irq_mask0              = 0x00400000,
+	.lite_err_irq_mask1              = 0x00004100,
+	.lite_subscribe_irq_mask0        = 0x001C0000,
+	.lite_subscribe_irq_mask1        = 0x0,
 	.extern_reg_update_shift         = 4,
 	.dual_pd_path_sel_shift          = 24,
 };
@@ -149,6 +153,13 @@
 	.reg_update_cmd           = 0x000004AC,
 };
 
+static struct cam_vfe_rdi_common_reg_data vfe175_rdi_reg_data = {
+	.subscribe_irq_mask0      = 0x780001E0,
+	.subscribe_irq_mask1      = 0x0,
+	.error_irq_mask0          = 0x0,
+	.error_irq_mask1          = 0x3C,
+};
+
 static struct cam_vfe_rdi_reg_data  vfe_175_rdi_0_data = {
 	.reg_update_cmd_data      = 0x2,
 	.sof_irq_mask             = 0x8000000,
@@ -180,8 +191,9 @@
 		.reg_data       = &vfe175_camif_lite_reg_data,
 		},
 	.rdi_hw_info = {
-		.common_reg = &vfe175_top_common_reg,
-		.rdi_reg    = &vfe175_rdi_reg,
+		.common_reg      = &vfe175_top_common_reg,
+		.rdi_reg         = &vfe175_rdi_reg,
+		.common_reg_data = &vfe175_rdi_reg_data,
 		.reg_data = {
 			&vfe_175_rdi_0_data,
 			&vfe_175_rdi_1_data,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
index 7712496..393c774 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
@@ -92,7 +92,7 @@
 	.reg_update_cmd           = 0x000004AC,
 	.vfe_diag_config          = 0x00000C48,
 	.vfe_diag_sensor_status   = 0x00000C4C,
-	.fe_cfg                = 0x00000084,
+	.fe_cfg                   = 0x00000084,
 };
 
 static struct cam_vfe_fe_reg_data vfe_175_130_fe_reg_data = {
@@ -123,7 +123,7 @@
 	.error_irq_mask0                 = 0x0003FC00,
 	.error_irq_mask1                 = 0xEFFF7E80,
 	.enable_diagnostic_hw            = 0x1,
-	.fe_mux_data                  = 0x2,
+	.fe_mux_data                     = 0x2,
 	.hbi_cnt_shift                   = 0x8,
 };
 
@@ -144,8 +144,8 @@
 	.lite_epoch0_irq_mask            = 0x00100000,
 	.dual_pd_reg_upd_irq_mask        = 0x04000000,
 	.lite_eof_irq_mask               = 0x00080000,
-	.lite_error_irq_mask0            = 0x00400000,
-	.lite_error_irq_mask1            = 0x00004100,
+	.lite_err_irq_mask0              = 0x00400000,
+	.lite_err_irq_mask1              = 0x00004100,
 	.extern_reg_update_shift         = 4,
 	.dual_pd_path_sel_shift          = 24,
 };
@@ -199,6 +199,13 @@
 	.reg_update_cmd           = 0x000004AC,
 };
 
+static struct cam_vfe_rdi_common_reg_data vfe175_130_rdi_reg_data = {
+	.subscribe_irq_mask0      = 0x780001E0,
+	.subscribe_irq_mask1      = 0x0,
+	.error_irq_mask0          = 0x0,
+	.error_irq_mask1          = 0x3C,
+};
+
 static struct cam_vfe_rdi_reg_data  vfe_175_130_rdi_0_data = {
 	.reg_update_cmd_data      = 0x2,
 	.sof_irq_mask             = 0x8000000,
@@ -230,8 +237,9 @@
 		.reg_data       = &vfe175_130_camif_lite_reg_data,
 		},
 	.rdi_hw_info = {
-		.common_reg = &vfe175_130_top_common_reg,
-		.rdi_reg    = &vfe175_130_rdi_reg,
+		.common_reg      = &vfe175_130_top_common_reg,
+		.rdi_reg         = &vfe175_130_rdi_reg,
+		.common_reg_data = &vfe175_130_rdi_reg_data,
 		.reg_data = {
 			&vfe_175_130_rdi_0_data,
 			&vfe_175_130_rdi_1_data,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
index 3f86f61..e855a54 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
@@ -57,10 +57,11 @@
 
 static struct cam_vfe_camif_ver3_reg_data vfe_480_camif_reg_data = {
 	.pp_extern_reg_update_shift      = 4,
-	.lcr_extern_reg_update_shift     = 16,
 	.dual_pd_extern_reg_update_shift = 17,
 	.extern_reg_update_mask          = 1,
 	.dual_ife_pix_en_shift           = 3,
+	.operating_mode_shift            = 11,
+	.input_mux_sel_shift             = 5,
 	.pixel_pattern_shift             = 24,
 	.pixel_pattern_mask              = 0x7,
 	.dsp_mode_shift                  = 24,
@@ -72,10 +73,10 @@
 	.sof_irq_mask                    = 0x00000001,
 	.epoch0_irq_mask                 = 0x00000004,
 	.epoch1_irq_mask                 = 0x00000008,
-	.reg_update_irq_mask             = 0x00000001,
 	.eof_irq_mask                    = 0x00000002,
-	.error_irq_mask0                 = 0x0003FC00,
-	.error_irq_mask2                 = 0xEFFF7E80,
+	.error_irq_mask0                 = 0x82000200,
+	.error_irq_mask2                 = 0x30301F80,
+	.subscribe_irq_mask1             = 0x00000007,
 	.enable_diagnostic_hw            = 0x1,
 	.pp_camif_cfg_en_shift           = 0,
 	.pp_camif_cfg_ife_out_en_shift   = 8,
@@ -104,6 +105,7 @@
 	.diag_config              = 0x00000064,
 	.diag_sensor_status_0     = 0x00000068,
 	.diag_sensor_status_1     = 0x00000098,
+	.bus_overflow_status      = 0x0000AA68,
 };
 
 static struct cam_vfe_camif_lite_ver3_reg vfe480_camif_rdi[3] = {
@@ -152,37 +154,40 @@
 	{
 		.extern_reg_update_shift         = 0,
 		.reg_update_cmd_data             = 0x2,
-		.epoch_line_cfg                  = 0x00140014,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x10,
 		.epoch0_irq_mask                 = 0x40,
 		.epoch1_irq_mask                 = 0x80,
 		.eof_irq_mask                    = 0x20,
 		.error_irq_mask0                 = 0x20000000,
 		.error_irq_mask2                 = 0x20000,
+		.subscribe_irq_mask1             = 0x30,
 		.enable_diagnostic_hw            = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
 		.reg_update_cmd_data             = 0x4,
-		.epoch_line_cfg                  = 0x00140014,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x100,
 		.epoch0_irq_mask                 = 0x400,
 		.epoch1_irq_mask                 = 0x800,
 		.eof_irq_mask                    = 0x200,
 		.error_irq_mask0                 = 0x10000000,
 		.error_irq_mask2                 = 0x40000,
+		.subscribe_irq_mask1             = 0x300,
 		.enable_diagnostic_hw            = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
 		.reg_update_cmd_data             = 0x8,
-		.epoch_line_cfg                  = 0x00140014,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x1000,
 		.epoch0_irq_mask                 = 0x4000,
 		.epoch1_irq_mask                 = 0x8000,
 		.eof_irq_mask                    = 0x2000,
 		.error_irq_mask0                 = 0x8000000,
 		.error_irq_mask2                 = 0x80000,
+		.subscribe_irq_mask1             = 0x3000,
 		.enable_diagnostic_hw            = 0x1,
 	},
 };
@@ -198,6 +203,7 @@
 	.lite_debug_0               = 0xA1F4,
 	.lite_test_bus_ctrl         = 0xA1F8,
 	.camif_lite_spare           = 0xA1FC,
+	.reg_update_cmd             = 0x0034,
 };
 
 static struct cam_vfe_camif_lite_ver3_reg_data vfe480_camif_lcr_reg_data = {
@@ -208,7 +214,9 @@
 	.epoch0_irq_mask            = 0x400000,
 	.epoch1_irq_mask            = 0x800000,
 	.eof_irq_mask               = 0x200000,
-	.error_irq_mask0            = 0x18000,
+	.error_irq_mask0            = 0x0,
+	.error_irq_mask2            = 0x18000,
+	.subscribe_irq_mask1        = 0x300000,
 	.enable_diagnostic_hw       = 0x1,
 };
 
@@ -223,17 +231,22 @@
 	.lite_debug_0               = 0xA5F4,
 	.lite_test_bus_ctrl         = 0xA5F8,
 	.camif_lite_spare           = 0xA5FC,
+	.reg_update_cmd             = 0x0034,
 };
 
 static struct cam_vfe_camif_lite_ver3_reg_data vfe480_camif_pd_reg_data = {
 	.extern_reg_update_shift    = 17,
+	.operating_mode_shift       = 13,
+	.input_mux_sel_shift        = 31,
 	.reg_update_cmd_data        = 0x20,
 	.epoch_line_cfg             = 0x00140014,
 	.sof_irq_mask               = 0x10000,
 	.epoch0_irq_mask            = 0x40000,
 	.epoch1_irq_mask            = 0x80000,
 	.eof_irq_mask               = 0x20000,
-	.error_irq_mask0            = 0x6000,
+	.error_irq_mask0            = 0x40000000,
+	.error_irq_mask2            = 0x6000,
+	.subscribe_irq_mask1        = 0x30000,
 	.enable_diagnostic_hw       = 0x1,
 };
 
@@ -1143,127 +1156,145 @@
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI0,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_3,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI1,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_4,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI2,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_5,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_FULL,
 			.max_width     = 4096,
 			.max_height    = 4096,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS4,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS16,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_FD,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_PDAF,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  =
 				CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  =
 				CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP,
 			.max_width     = 4096,
 			.max_height    = 4096,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_2PD,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_1,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_LCR,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_2,
 		},
 	},
+	.comp_done_shift = 6,
+	.top_irq_shift   = 7,
 };
 
 static struct cam_irq_register_set vfe480_bus_rd_irq_reg[1] = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
index beb93c3..c9d66ed 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
@@ -47,6 +47,7 @@
 	.reg_update_cmd           = 0x00000020,
 	.diag_config              = 0x00000050,
 	.diag_sensor_status_0     = 0x00000054,
+	.bus_overflow_status      = 0x00001A68,
 };
 
 static struct cam_vfe_camif_lite_ver3_reg vfe48x_camif_rdi[4] = {
@@ -107,50 +108,54 @@
 static struct cam_vfe_camif_lite_ver3_reg_data vfe48x_camif_rdi_reg_data[4] = {
 	{
 		.extern_reg_update_shift         = 0,
-		.reg_update_cmd_data             = 0x11,
-		.epoch_line_cfg                  = 0x00140014,
+		.reg_update_cmd_data             = 0x1,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x1,
 		.epoch0_irq_mask                 = 0x4,
 		.epoch1_irq_mask                 = 0x8,
 		.eof_irq_mask                    = 0x02,
 		.error_irq_mask0                 = 0x1,
 		.error_irq_mask2                 = 0x100,
+		.subscribe_irq_mask1             = 0x3,
 		.enable_diagnostic_hw            = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
-		.reg_update_cmd_data             = 0x22,
-		.epoch_line_cfg                  = 0x00140014,
+		.reg_update_cmd_data             = 0x2,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x10,
 		.epoch0_irq_mask                 = 0x40,
 		.epoch1_irq_mask                 = 0x80,
 		.eof_irq_mask                    = 0x20,
 		.error_irq_mask0                 = 0x2,
 		.error_irq_mask2                 = 0x200,
+		.subscribe_irq_mask1             = 0x30,
 		.enable_diagnostic_hw            = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
-		.reg_update_cmd_data             = 0x44,
-		.epoch_line_cfg                  = 0x00140014,
+		.reg_update_cmd_data             = 0x4,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x100,
 		.epoch0_irq_mask                 = 0x400,
 		.epoch1_irq_mask                 = 0x800,
 		.eof_irq_mask                    = 0x200,
 		.error_irq_mask0                 = 0x4,
 		.error_irq_mask2                 = 0x400,
+		.subscribe_irq_mask1             = 0x300,
 		.enable_diagnostic_hw            = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
-		.reg_update_cmd_data             = 0x88,
-		.epoch_line_cfg                  = 0x00140014,
+		.reg_update_cmd_data             = 0x8,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x1000,
 		.epoch0_irq_mask                 = 0x4000,
 		.epoch1_irq_mask                 = 0x8000,
 		.eof_irq_mask                    = 0x2000,
 		.error_irq_mask0                 = 0x8,
 		.error_irq_mask2                 = 0x800,
+		.subscribe_irq_mask1             = 0x3000,
 		.enable_diagnostic_hw            = 0x1,
 	},
 };
@@ -356,23 +361,29 @@
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI0,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI1,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_1,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI2,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_2,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI3,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_3,
 		},
 	},
+	.comp_done_shift = 4,
+	.top_irq_shift   = 4,
 };
 
 static struct cam_vfe_hw_info cam_vfe_lite48x_hw_info = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c
index 2a94d69..d508113 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c
@@ -82,7 +82,6 @@
 	struct cam_vfe_bus_rd_ver1_reg_offset_bus_client  *hw_regs;
 	void                *ctx;
 
-	uint32_t             irq_enabled;
 	bool                 init_cfg_done;
 	bool                 hfr_cfg_done;
 
@@ -138,8 +137,8 @@
 	struct cam_isp_resource_node  vfe_bus_rd[
 		CAM_VFE_BUS_RD_VER1_VFE_BUSRD_MAX];
 
-	uint32_t                            irq_handle;
-	uint32_t                            error_irq_handle;
+	int                                 irq_handle;
+	int                                 error_irq_handle;
 };
 
 static int cam_vfe_bus_process_cmd(
@@ -254,7 +253,6 @@
 	rm_res_local->tasklet_info = tasklet;
 
 	rsrc_data = rm_res_local->res_priv;
-	rsrc_data->irq_enabled = subscribe_irq;
 	rsrc_data->ctx = ctx;
 	rsrc_data->is_dual = is_dual;
 	/* Set RM offset value to default */
@@ -273,7 +271,6 @@
 	struct cam_vfe_bus_rd_ver1_rm_resource_data *rsrc_data =
 		rm_res->res_priv;
 
-	rsrc_data->irq_enabled = 0;
 	rsrc_data->offset = 0;
 	rsrc_data->width = 0;
 	rsrc_data->height = 0;
@@ -507,7 +504,7 @@
 		rc = cam_vfe_bus_acquire_rm(ver1_bus_rd_priv,
 			bus_rd_acquire_args->out_port_info,
 			acq_args->tasklet,
-			bus_rd_acquire_args->ctx,
+			acq_args->priv,
 			bus_rd_res_id,
 			i,
 			subscribe_irq,
@@ -943,7 +940,7 @@
 	void *init_hw_args, uint32_t arg_size)
 {
 	struct cam_vfe_bus_rd_ver1_priv    *bus_priv = hw_priv;
-	uint32_t                            top_irq_reg_mask[2] = {0};
+	uint32_t                            top_irq_reg_mask[3] = {0};
 	uint32_t                            offset = 0, val = 0;
 	struct cam_vfe_bus_rd_ver1_reg_offset_common  *common_reg;
 
@@ -964,8 +961,9 @@
 		NULL,
 		NULL);
 
-	if (bus_priv->irq_handle <= 0) {
+	if (bus_priv->irq_handle < 1) {
 		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+		bus_priv->irq_handle = 0;
 		return -EFAULT;
 	}
 	/* no clock gating at bus input */
@@ -1003,10 +1001,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.bus_irq_controller,
 			bus_priv->error_irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe error irq rc=%d", rc);
-
 		bus_priv->error_irq_handle = 0;
 	}
 
@@ -1014,10 +1008,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.vfe_irq_controller,
 			bus_priv->irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe irq rc=%d", rc);
-
 		bus_priv->irq_handle = 0;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 8df2926..02d3ad3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -98,15 +98,14 @@
 	uint32_t                                    secure_mode;
 	uint32_t                                    num_sec_out;
 	uint32_t                                    addr_no_sync;
+	cam_hw_mgr_event_cb_func                    event_cb;
 };
 
 struct cam_vfe_bus_ver2_wm_resource_data {
 	uint32_t             index;
 	struct cam_vfe_bus_ver2_common_data            *common_data;
 	struct cam_vfe_bus_ver2_reg_offset_bus_client  *hw_regs;
-	void                                *ctx;
 
-	uint32_t             irq_enabled;
 	bool                 init_cfg_done;
 	bool                 hfr_cfg_done;
 
@@ -147,7 +146,6 @@
 	struct cam_vfe_bus_ver2_common_data         *common_data;
 	struct cam_vfe_bus_ver2_reg_offset_comp_grp *hw_regs;
 
-	uint32_t                         irq_enabled;
 	uint32_t                         comp_grp_local_idx;
 	uint32_t                         unique_id;
 
@@ -158,9 +156,6 @@
 	uint32_t                         addr_sync_mode;
 
 	uint32_t                         acquire_dev_cnt;
-	uint32_t                         irq_trigger_cnt;
-
-	void                            *ctx;
 };
 
 struct cam_vfe_bus_ver2_vfe_out_data {
@@ -180,6 +175,7 @@
 	uint32_t                         max_height;
 	struct cam_cdm_utils_ops        *cdm_util_ops;
 	uint32_t                         secure_mode;
+	void                            *priv;
 };
 
 struct cam_vfe_bus_ver2_priv {
@@ -195,8 +191,8 @@
 	struct list_head                    free_dual_comp_grp;
 	struct list_head                    used_comp_grp;
 
-	uint32_t                            irq_handle;
-	uint32_t                            error_irq_handle;
+	int                                 irq_handle;
+	int                                 error_irq_handle;
 	void                               *tasklet_info;
 };
 
@@ -253,8 +249,6 @@
 	struct cam_vfe_bus_irq_evt_payload     **evt_payload)
 {
 	struct cam_vfe_bus_ver2_common_data *common_data = NULL;
-	uint32_t  *ife_irq_regs = NULL;
-	uint32_t   status_reg0, status_reg1, status_reg2;
 	unsigned long flags;
 
 	if (!core_info) {
@@ -265,17 +259,6 @@
 		CAM_ERR(CAM_ISP, "No payload to put");
 		return -EINVAL;
 	}
-	(*evt_payload)->error_type = 0;
-	ife_irq_regs = (*evt_payload)->irq_reg_val;
-	status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
-	status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
-	status_reg2 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2];
-
-	if (status_reg0 || status_reg1 || status_reg2) {
-		CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x status2 0x%x",
-			status_reg0, status_reg1, status_reg2);
-		return 0;
-	}
 
 	common_data = core_info;
 
@@ -814,6 +797,78 @@
 	return wm_idx;
 }
 
+static void cam_vfe_bus_get_comp_vfe_out_res_id_list(
+	uint32_t comp_mask, uint32_t *out_list, int *num_out)
+{
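+	/*
+	 * Each bus client owns one bit in the composite done-mask; the
+	 * two-plane outputs (FULL, FD, FULL_DISP) test a two-bit mask,
+	 * and RDI3 is reported only when FULL's second bit (4) is clear.
+	 */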
+	int count = 0;
+
+	if (comp_mask & 0x1)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_0;
+
+	if (comp_mask & 0x2)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_1;
+
+	if (comp_mask & 0x4)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_2;
+
+	if ((comp_mask & 0x8) && (((comp_mask >> 4) & 0x1) == 0))
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_3;
+
+	if (comp_mask & 0x18)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FULL;
+
+	if (comp_mask & 0x20)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS4;
+
+	if (comp_mask & 0x40)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS16;
+
+	if (comp_mask & 0x180)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FD;
+
+	if (comp_mask & 0x200)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RAW_DUMP;
+
+	if (comp_mask & 0x800)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_HDR_BE;
+
+	if (comp_mask & 0x1000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST;
+
+	if (comp_mask & 0x2000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_TL_BG;
+
+	if (comp_mask & 0x4000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_BF;
+
+	if (comp_mask & 0x8000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_AWB_BG;
+
+	if (comp_mask & 0x10000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_BHIST;
+
+	if (comp_mask & 0x20000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_RS;
+
+	if (comp_mask & 0x40000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_CS;
+
+	if (comp_mask & 0x80000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_IHIST;
+
+	if (comp_mask & 0x300000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FULL_DISP;
+
+	if (comp_mask & 0x400000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS4_DISP;
+
+	if (comp_mask & 0x800000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS16_DISP;
+
+	*num_out = count;
+}
+
 static enum cam_vfe_bus_packer_format
 	cam_vfe_bus_get_packer_fmt(uint32_t out_fmt, int wm_index)
 {
@@ -866,10 +921,8 @@
 	struct cam_vfe_bus_ver2_priv          *ver2_bus_priv,
 	struct cam_isp_out_port_info          *out_port_info,
 	void                                  *tasklet,
-	void                                  *ctx,
 	enum cam_vfe_bus_ver2_vfe_out_type     vfe_out_res_id,
 	enum cam_vfe_bus_plane_type            plane,
-	uint32_t                               subscribe_irq,
 	struct cam_isp_resource_node         **wm_res,
 	uint32_t                              *client_done_mask,
 	uint32_t                               is_dual)
@@ -899,8 +952,6 @@
 	wm_res_local->tasklet_info = tasklet;
 
 	rsrc_data = wm_res_local->res_priv;
-	rsrc_data->irq_enabled = subscribe_irq;
-	rsrc_data->ctx = ctx;
 	rsrc_data->format = out_port_info->format;
 	rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format,
 		wm_idx);
@@ -1096,7 +1147,6 @@
 	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data =
 		wm_res->res_priv;
 
-	rsrc_data->irq_enabled = 0;
 	rsrc_data->offset = 0;
 	rsrc_data->width = 0;
 	rsrc_data->height = 0;
@@ -1131,14 +1181,15 @@
 	return 0;
 }
 
-static int cam_vfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
+static int cam_vfe_bus_start_wm(
+	struct cam_isp_resource_node *wm_res,
+	uint32_t                     *bus_irq_reg_mask)
 {
 	int rc = 0, val = 0;
 	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data =
 		wm_res->res_priv;
 	struct cam_vfe_bus_ver2_common_data        *common_data =
 		rsrc_data->common_data;
-	uint32_t                   bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
 	uint32_t camera_hw_version;
 
 	cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
@@ -1155,23 +1206,7 @@
 		cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
 			rsrc_data->hw_regs->stride));
 
-	/* Subscribe IRQ */
-	if (rsrc_data->irq_enabled) {
-		CAM_DBG(CAM_ISP, "Subscribe WM%d IRQ", rsrc_data->index);
-		bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG1] =
-			(1 << rsrc_data->index);
-		wm_res->irq_handle = cam_irq_controller_subscribe_irq(
-			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
-			bus_irq_reg_mask, wm_res,
-			wm_res->top_half_handler,
-			cam_ife_mgr_do_tasklet_buf_done,
-			wm_res->tasklet_info, &tasklet_bh_api);
-		if (wm_res->irq_handle < 0) {
-			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for WM %d",
-				rsrc_data->index);
-			return -EFAULT;
-		}
-	}
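+	/*
+	 * IRQ subscription now happens once per vfe_out resource; just
+	 * record this WM's done-bit in the caller's aggregate mask.
+	 */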
+	bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG1] = (1 << rsrc_data->index);
 
 	/* enable UBWC if needed */
 	if (rsrc_data->en_ubwc) {
@@ -1249,13 +1284,6 @@
 		common_data->mem_base + rsrc_data->hw_regs->cfg);
 
 	/* Disable all register access, rely on global reset */
-	CAM_DBG(CAM_ISP, "WM res %d irq_enabled %d",
-		rsrc_data->index, rsrc_data->irq_enabled);
-	/* Unsubscribe IRQ */
-	if (rsrc_data->irq_enabled)
-		rc = cam_irq_controller_unsubscribe_irq(
-			common_data->bus_irq_controller,
-			wm_res->irq_handle);
 
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	rsrc_data->init_cfg_done = false;
@@ -1267,63 +1295,36 @@
 static int cam_vfe_bus_handle_wm_done_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	int32_t                                     rc;
-	int                                         i;
-	struct cam_isp_resource_node               *wm_res = NULL;
-	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data = NULL;
-	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
-
-	wm_res = th_payload->handler_priv;
-	if (!wm_res) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error: No resource");
-		return -ENODEV;
-	}
-
-	rsrc_data = wm_res->res_priv;
-
-	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
-	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
-
-	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1],
-			th_payload->evt_status_arr[2]);
-
-		return rc;
-	}
-
-	cam_isp_hw_get_timestamp(&evt_payload->ts);
-
-	evt_payload->ctx = rsrc_data->ctx;
-	evt_payload->core_index = rsrc_data->common_data->core_index;
-	evt_payload->evt_id  = evt_id;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
-
-	th_payload->evt_payload_priv = evt_payload;
-
-	CAM_DBG(CAM_ISP, "Exit");
-	return rc;
+	return -EPERM;
 }
 
-static int cam_vfe_bus_handle_wm_done_bottom_half(void *wm_node,
+static int cam_vfe_bus_handle_wm_done_bottom_half(void *handler_priv,
 	void *evt_payload_priv)
 {
 	int rc = CAM_VFE_IRQ_STATUS_ERR;
-	struct cam_isp_resource_node          *wm_res = wm_node;
+	struct cam_isp_resource_node          *wm_res = handler_priv;
 	struct cam_vfe_bus_irq_evt_payload    *evt_payload = evt_payload_priv;
 	struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data =
 		(wm_res == NULL) ? NULL : wm_res->res_priv;
 	uint32_t  *cam_ife_irq_regs;
 	uint32_t   status_reg;
 
-	if (!evt_payload || !rsrc_data)
+	if (!evt_payload || !wm_res || !rsrc_data)
 		return rc;
 
+	CAM_DBG(CAM_ISP, "evt_payload addr = %llx core_index = 0x%x",
+		(uint64_t)evt_payload, evt_payload->core_index);
+	CAM_DBG(CAM_ISP, "bus_irq_status_0 = 0x%x", evt_payload->irq_reg_val[0]);
+	CAM_DBG(CAM_ISP, "bus_irq_status_1 = 0x%x", evt_payload->irq_reg_val[1]);
+	CAM_DBG(CAM_ISP, "bus_irq_status_2 = 0x%x", evt_payload->irq_reg_val[2]);
+	CAM_DBG(CAM_ISP, "bus_irq_comp_err = 0x%x", evt_payload->irq_reg_val[3]);
+	CAM_DBG(CAM_ISP, "bus_irq_comp_owrt = 0x%x",
+		evt_payload->irq_reg_val[4]);
+	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_err = 0x%x",
+		evt_payload->irq_reg_val[5]);
+	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt = 0x%x",
+		evt_payload->irq_reg_val[6]);
+
 	cam_ife_irq_regs = evt_payload->irq_reg_val;
 	status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
 
@@ -1331,30 +1332,29 @@
 		cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1] &=
 			~BIT(rsrc_data->index);
 		rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+		evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
 	}
 	CAM_DBG(CAM_ISP, "status_reg %x rc %d wm_idx %d",
 		status_reg, rc, rsrc_data->index);
 
-	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
-		cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
-			&evt_payload);
-
 	return rc;
 }
 
 
-static int cam_vfe_bus_err_bottom_half(void *ctx_priv,
+static int cam_vfe_bus_err_bottom_half(void *handler_priv,
 	void *evt_payload_priv)
 {
-	struct cam_vfe_bus_irq_evt_payload *evt_payload;
+	struct cam_vfe_bus_irq_evt_payload *evt_payload = evt_payload_priv;
+	struct cam_vfe_bus_ver2_priv *bus_priv = handler_priv;
 	struct cam_vfe_bus_ver2_common_data *common_data;
+	struct cam_isp_hw_event_info evt_info;
 	uint32_t val = 0;
 
-	if (!ctx_priv || !evt_payload_priv)
+	if (!handler_priv || !evt_payload_priv)
 		return -EINVAL;
 
 	evt_payload = evt_payload_priv;
-	common_data = evt_payload->ctx;
+	common_data = &bus_priv->common_data;
 
 	val = evt_payload->debug_status_0;
 	CAM_ERR(CAM_ISP, "Bus Violation: debug_status_0 = 0x%x", val);
@@ -1432,6 +1432,15 @@
 		CAM_INFO(CAM_ISP, "DISP YC 16:1 violation");
 
 	cam_vfe_bus_put_evt_payload(common_data, &evt_payload);
+
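+	/* Report the bus violation to the HW manager via its callback */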
+	evt_info.hw_idx = common_data->core_index;
+	evt_info.res_type = CAM_ISP_RESOURCE_VFE_OUT;
+	evt_info.res_id = CAM_VFE_BUS_VER2_VFE_OUT_MAX;
+
+	if (common_data->event_cb)
+		common_data->event_cb(NULL, CAM_ISP_HW_EVENT_ERROR,
+			(void *)&evt_info);
+
 	return 0;
 }
 
@@ -1457,8 +1466,8 @@
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 	INIT_LIST_HEAD(&wm_res->list);
 
-	wm_res->start = cam_vfe_bus_start_wm;
-	wm_res->stop = cam_vfe_bus_stop_wm;
+	wm_res->start = NULL;
+	wm_res->stop = NULL;
 	wm_res->top_half_handler = cam_vfe_bus_handle_wm_done_top_half;
 	wm_res->bottom_half_handler = cam_vfe_bus_handle_wm_done_bottom_half;
 	wm_res->hw_intf = ver2_bus_priv->common_data.hw_intf;
@@ -1525,7 +1534,6 @@
 	struct cam_vfe_bus_ver2_priv        *ver2_bus_priv,
 	struct cam_isp_out_port_info        *out_port_info,
 	void                                *tasklet,
-	void                                *ctx,
 	uint32_t                             unique_id,
 	uint32_t                             is_dual,
 	uint32_t                             is_master,
@@ -1607,7 +1615,6 @@
 
 	CAM_DBG(CAM_ISP, "Comp Grp type %u", rsrc_data->comp_grp_type);
 
-	rsrc_data->ctx = ctx;
 	rsrc_data->acquire_dev_cnt++;
 	*comp_grp = comp_grp_local;
 
@@ -1684,7 +1691,9 @@
 	return 0;
 }
 
-static int cam_vfe_bus_start_comp_grp(struct cam_isp_resource_node *comp_grp)
+static int cam_vfe_bus_start_comp_grp(
+	struct cam_isp_resource_node *comp_grp,
+	uint32_t                     *bus_irq_reg_mask)
 {
 	int rc = 0;
 	uint32_t addr_sync_cfg;
@@ -1692,7 +1701,6 @@
 		comp_grp->res_priv;
 	struct cam_vfe_bus_ver2_common_data        *common_data =
 		rsrc_data->common_data;
-	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
 
 	CAM_DBG(CAM_ISP, "comp group id:%d streaming state:%d",
 		rsrc_data->comp_grp_type, comp_grp->res_state);
@@ -1762,28 +1770,8 @@
 			(1 << (rsrc_data->comp_grp_type + 5));
 	}
 
-	/*
-	 * For Dual composite subscribe IRQ only for master
-	 * For regular composite, subscribe IRQ always
-	 */
-	CAM_DBG(CAM_ISP, "Subscribe COMP_GRP%d IRQ", rsrc_data->comp_grp_type);
-	if (((rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
-		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) &&
-		(rsrc_data->is_master)) ||
-		(rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 &&
-		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)) {
-		comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
-			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
-			bus_irq_reg_mask, comp_grp,
-			comp_grp->top_half_handler,
-			cam_ife_mgr_do_tasklet_buf_done,
-			comp_grp->tasklet_info, &tasklet_bh_api);
-		if (comp_grp->irq_handle < 0) {
-			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
-				rsrc_data->comp_grp_type);
-			return -EFAULT;
-		}
-	}
+	CAM_DBG(CAM_ISP, "VFE start COMP_GRP%d", rsrc_data->comp_grp_type);
+
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
 	return rc;
@@ -1792,21 +1780,7 @@
 static int cam_vfe_bus_stop_comp_grp(struct cam_isp_resource_node *comp_grp)
 {
 	int rc = 0;
-	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data =
-		comp_grp->res_priv;
-	struct cam_vfe_bus_ver2_common_data        *common_data =
-		rsrc_data->common_data;
 
-	/* Unsubscribe IRQ */
-	if (((rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
-		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) &&
-		(rsrc_data->is_master)) ||
-		(rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 &&
-		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)) {
-		rc = cam_irq_controller_unsubscribe_irq(
-			common_data->bus_irq_controller,
-			comp_grp->irq_handle);
-	}
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
 	return rc;
@@ -1815,55 +1789,11 @@
 static int cam_vfe_bus_handle_comp_done_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	int32_t                                     rc;
-	int                                         i;
-	struct cam_isp_resource_node               *comp_grp = NULL;
-	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data = NULL;
-	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
-
-	comp_grp = th_payload->handler_priv;
-	if (!comp_grp) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
-		return -ENODEV;
-	}
-
-	rsrc_data = comp_grp->res_priv;
-
-	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
-	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
-	CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
-
-	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue");
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1],
-			th_payload->evt_status_arr[2]);
-
-		return rc;
-	}
-
-	cam_isp_hw_get_timestamp(&evt_payload->ts);
-
-	evt_payload->ctx = rsrc_data->ctx;
-	evt_payload->core_index = rsrc_data->common_data->core_index;
-	evt_payload->evt_id  = evt_id;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
-
-	th_payload->evt_payload_priv = evt_payload;
-
-	CAM_DBG(CAM_ISP, "Exit");
-	return rc;
+	return -EPERM;
 }
 
-static int cam_vfe_bus_handle_comp_done_bottom_half(
-	void                *handler_priv,
-	void                *evt_payload_priv)
+static int cam_vfe_bus_handle_comp_done_bottom_half(void *handler_priv,
+	void *evt_payload_priv, uint32_t *comp_mask)
 {
 	int rc = CAM_VFE_IRQ_STATUS_ERR;
 	struct cam_isp_resource_node          *comp_grp = handler_priv;
@@ -1898,6 +1828,7 @@
 		if ((status_reg & BIT(11)) &&
 			(comp_err_reg & rsrc_data->composite_mask)) {
 			/* Check for Regular composite error */
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
 			rc = CAM_VFE_IRQ_STATUS_ERR_COMP;
 			break;
 		}
@@ -1906,19 +1837,14 @@
 		/* Check for Regular composite Overwrite */
 		if ((status_reg & BIT(12)) &&
 			(comp_err_reg & rsrc_data->composite_mask)) {
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
 			rc = CAM_VFE_IRQ_STATUS_COMP_OWRT;
 			break;
 		}
 
 		/* Regular Composite SUCCESS */
 		if (status_reg & BIT(comp_grp_id + 5)) {
-			rsrc_data->irq_trigger_cnt++;
-			if (rsrc_data->irq_trigger_cnt ==
-				rsrc_data->acquire_dev_cnt) {
-				cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0] &=
-					~BIT(comp_grp_id + 5);
-				rsrc_data->irq_trigger_cnt = 0;
-			}
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
 			rc = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
 
@@ -1942,6 +1868,7 @@
 		if ((status_reg & BIT(6)) &&
 			(comp_err_reg & rsrc_data->composite_mask)) {
 			/* Check for DUAL composite error */
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
 			rc = CAM_VFE_IRQ_STATUS_ERR_COMP;
 			break;
 		}
@@ -1950,19 +1877,14 @@
 		comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_DUAL_COMP_OWRT];
 		if ((status_reg & BIT(7)) &&
 			(comp_err_reg & rsrc_data->composite_mask)) {
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
 			rc = CAM_VFE_IRQ_STATUS_COMP_OWRT;
 			break;
 		}
 
 		/* DUAL Composite SUCCESS */
 		if (status_reg & BIT(comp_grp_id)) {
-			rsrc_data->irq_trigger_cnt++;
-			if (rsrc_data->irq_trigger_cnt ==
-				rsrc_data->acquire_dev_cnt) {
-				cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2] &=
-					~BIT(comp_grp_id);
-				rsrc_data->irq_trigger_cnt = 0;
-			}
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
 			rc = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
 
@@ -1974,9 +1896,7 @@
 		break;
 	}
 
-	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
-		cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
-			&evt_payload);
+	*comp_mask = rsrc_data->composite_mask;
 
 	return rc;
 }
@@ -2011,11 +1931,10 @@
 		&& rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)
 		list_add_tail(&comp_grp->list, &ver2_bus_priv->free_comp_grp);
 
-	comp_grp->start = cam_vfe_bus_start_comp_grp;
-	comp_grp->stop = cam_vfe_bus_stop_comp_grp;
+	comp_grp->start = NULL;
+	comp_grp->stop = NULL;
 	comp_grp->top_half_handler = cam_vfe_bus_handle_comp_done_top_half;
-	comp_grp->bottom_half_handler =
-		cam_vfe_bus_handle_comp_done_bottom_half;
+	comp_grp->bottom_half_handler = NULL;
 	comp_grp->hw_intf = ver2_bus_priv->common_data.hw_intf;
 
 	return 0;
@@ -2071,7 +1990,6 @@
 	enum cam_vfe_bus_ver2_vfe_out_type      vfe_out_res_id;
 	uint32_t                                format;
 	int                                     num_wm;
-	uint32_t                                subscribe_irq;
 	uint32_t                                client_done_mask;
 	struct cam_vfe_bus_ver2_priv           *ver2_bus_priv = bus_priv;
 	struct cam_vfe_acquire_args            *acq_args = acquire_args;
@@ -2108,8 +2026,10 @@
 	}
 
 	rsrc_data = rsrc_node->res_priv;
-	secure_caps = cam_vfe_bus_can_be_secure(
-		rsrc_data->out_type);
+	rsrc_data->common_data->event_cb = acq_args->event_cb;
+	rsrc_data->priv = acq_args->priv;
+
+	secure_caps = cam_vfe_bus_can_be_secure(rsrc_data->out_type);
 	mode = out_acquire_args->out_port_info->secure_mode;
 	mutex_lock(&rsrc_data->common_data->bus_mutex);
 	if (secure_caps) {
@@ -2152,7 +2072,6 @@
 		rc = cam_vfe_bus_acquire_comp_grp(ver2_bus_priv,
 			out_acquire_args->out_port_info,
 			acq_args->tasklet,
-			out_acquire_args->ctx,
 			out_acquire_args->unique_id,
 			out_acquire_args->is_dual,
 			out_acquire_args->is_master,
@@ -2165,10 +2084,6 @@
 				vfe_out_res_id, rc);
 			return rc;
 		}
-
-		subscribe_irq = 0;
-	} else {
-		subscribe_irq = 1;
 	}
 
 	/* Reserve WM */
@@ -2176,10 +2091,8 @@
 		rc = cam_vfe_bus_acquire_wm(ver2_bus_priv,
 			out_acquire_args->out_port_info,
 			acq_args->tasklet,
-			out_acquire_args->ctx,
 			vfe_out_res_id,
 			i,
-			subscribe_irq,
 			&rsrc_data->wm_res[i],
 			&client_done_mask,
 			out_acquire_args->is_dual);
@@ -2284,6 +2197,7 @@
 	int rc = 0, i;
 	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
 	struct cam_vfe_bus_ver2_common_data   *common_data = NULL;
+	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX];
 
 	if (!vfe_out) {
 		CAM_ERR(CAM_ISP, "Invalid input");
@@ -2301,11 +2215,29 @@
 		return -EACCES;
 	}
 
+	memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask));
 	for (i = 0; i < rsrc_data->num_wm; i++)
-		rc = cam_vfe_bus_start_wm(rsrc_data->wm_res[i]);
+		rc = cam_vfe_bus_start_wm(rsrc_data->wm_res[i],
+			bus_irq_reg_mask);
 
-	if (rsrc_data->comp_grp)
-		rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp);
+	if (rsrc_data->comp_grp) {
+		memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask));
+		rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp,
+			bus_irq_reg_mask);
+	}
+
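+	/*
+	 * Subscribe once for the whole vfe_out: the mask carries the comp
+	 * grp bits when a composite group exists, else the WM done-bits.
+	 */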
+	vfe_out->irq_handle = cam_irq_controller_subscribe_irq(
+		common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+		bus_irq_reg_mask, vfe_out, vfe_out->top_half_handler,
+		vfe_out->bottom_half_handler, vfe_out->tasklet_info,
+		&tasklet_bh_api);
+
+	if (vfe_out->irq_handle < 1) {
+		CAM_ERR(CAM_ISP, "Subscribe IRQ failed for res_id %d",
+			vfe_out->res_id);
+		vfe_out->irq_handle = 0;
+		return -EFAULT;
+	}
 
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 	return rc;
@@ -2316,6 +2248,7 @@
 {
 	int rc = 0, i;
 	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
+	struct cam_vfe_bus_ver2_common_data   *common_data = NULL;
 
 	if (!vfe_out) {
 		CAM_ERR(CAM_ISP, "Invalid input");
@@ -2323,6 +2256,7 @@
 	}
 
 	rsrc_data = vfe_out->res_priv;
+	common_data = rsrc_data->common_data;
 
 	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
 		vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
@@ -2336,6 +2270,13 @@
 	for (i = 0; i < rsrc_data->num_wm; i++)
 		rc = cam_vfe_bus_stop_wm(rsrc_data->wm_res[i]);
 
+	if (vfe_out->irq_handle) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			common_data->bus_irq_controller,
+			vfe_out->irq_handle);
+		vfe_out->irq_handle = 0;
+	}
+
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	return rc;
 }
@@ -2343,16 +2284,66 @@
 static int cam_vfe_bus_handle_vfe_out_done_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                     rc;
+	int                                         i;
+	struct cam_isp_resource_node               *vfe_out = NULL;
+	struct cam_vfe_bus_ver2_vfe_out_data       *rsrc_data = NULL;
+	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
+
+	vfe_out = th_payload->handler_priv;
+	if (!vfe_out) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
+		return -ENODEV;
+	}
+
+	rsrc_data = vfe_out->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
+
+	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1],
+			th_payload->evt_status_arr[2]);
+
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->core_index = rsrc_data->common_data->core_index;
+	evt_payload->evt_id  = evt_id;
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_bus_handle_vfe_out_done_bottom_half(
-	void                *handler_priv,
-	void                *evt_payload_priv)
+	void                                     *handler_priv,
+	void                                     *evt_payload_priv)
 {
 	int rc = -EINVAL;
-	struct cam_isp_resource_node         *vfe_out = handler_priv;
-	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+	struct cam_isp_resource_node             *vfe_out = handler_priv;
+	struct cam_vfe_bus_ver2_vfe_out_data     *rsrc_data = vfe_out->res_priv;
+	struct cam_isp_hw_event_info              evt_info;
+	void                                     *ctx = NULL;
+	uint32_t                                  evt_id = 0;
+	uint32_t                                  comp_mask = 0;
+	int                                       num_out = 0, i = 0;
+	struct cam_vfe_bus_irq_evt_payload       *evt_payload =
+		evt_payload_priv;
+	uint32_t                   out_list[CAM_VFE_BUS_VER2_VFE_OUT_MAX] = {0};
 
 	/*
 	 * If this resource has a composite group, we only handle the
 	 * composite done; a composite group is acquired when num_wm > 1.
 	 * Otherwise there is a single individual buf_done from WM[0].
 	 */
 	if (rsrc_data->comp_grp) {
-		rc = rsrc_data->comp_grp->bottom_half_handler(
-			rsrc_data->comp_grp, evt_payload_priv);
+		rc = cam_vfe_bus_handle_comp_done_bottom_half(
+			rsrc_data->comp_grp, evt_payload_priv, &comp_mask);
 	} else {
 		rc = rsrc_data->wm_res[0]->bottom_half_handler(
 			rsrc_data->wm_res[0], evt_payload_priv);
 	}
 
+	ctx = rsrc_data->priv;
+
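+	/* On buf-done success, send one event per out resource in the mask */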
+	switch (rc) {
+	case CAM_VFE_IRQ_STATUS_SUCCESS:
+		evt_id = evt_payload->evt_id;
+
+		evt_info.res_type = vfe_out->res_type;
+		evt_info.hw_idx   = vfe_out->hw_intf->hw_idx;
+		if (rsrc_data->comp_grp) {
+			cam_vfe_bus_get_comp_vfe_out_res_id_list(
+				comp_mask, out_list, &num_out);
+			for (i = 0; i < num_out; i++) {
+				evt_info.res_id = out_list[i];
+				if (rsrc_data->common_data->event_cb)
+					rsrc_data->common_data->event_cb(ctx,
+						evt_id, (void *)&evt_info);
+			}
+		} else {
+			evt_info.res_id = vfe_out->res_id;
+			if (rsrc_data->common_data->event_cb)
+				rsrc_data->common_data->event_cb(ctx, evt_id,
+					(void *)&evt_info);
+		}
+		break;
+	default:
+		break;
+	}
+
+	cam_vfe_bus_put_evt_payload(rsrc_data->common_data, &evt_payload);
 	CAM_DBG(CAM_ISP, "vfe_out %d rc %d", rsrc_data->out_type, rc);
 
 	return rc;
@@ -2426,6 +2446,7 @@
 		cam_vfe_bus_handle_vfe_out_done_bottom_half;
 	vfe_out->process_cmd = cam_vfe_bus_process_cmd;
 	vfe_out->hw_intf = ver2_bus_priv->common_data.hw_intf;
+	vfe_out->irq_handle = 0;
 
 	return 0;
 }
@@ -2449,6 +2470,7 @@
 	vfe_out->top_half_handler = NULL;
 	vfe_out->bottom_half_handler = NULL;
 	vfe_out->hw_intf = NULL;
+	vfe_out->irq_handle = 0;
 
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
 	INIT_LIST_HEAD(&vfe_out->list);
@@ -2502,7 +2524,6 @@
 
 	evt_payload->core_index = bus_priv->common_data.core_index;
 	evt_payload->evt_id  = evt_id;
-	evt_payload->ctx = &bus_priv->common_data;
 	evt_payload->debug_status_0 = cam_io_r_mb(
 		bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->debug_status_0);
@@ -3381,8 +3402,9 @@
 		NULL,
 		NULL);
 
-	if ((int)bus_priv->irq_handle <= 0) {
+	if (bus_priv->irq_handle < 1) {
 		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+		bus_priv->irq_handle = 0;
 		return -EFAULT;
 	}
 
@@ -3397,9 +3419,10 @@
 			bus_priv->tasklet_info,
 			&tasklet_bh_api);
 
-		if ((int)bus_priv->error_irq_handle <= 0) {
+		if (bus_priv->error_irq_handle < 1) {
 			CAM_ERR(CAM_ISP, "Failed to subscribe BUS error IRQ %d",
 				bus_priv->error_irq_handle);
+			bus_priv->error_irq_handle = 0;
 			return -EFAULT;
 		}
 	}
@@ -3441,10 +3464,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.bus_irq_controller,
 			bus_priv->error_irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe error irq rc=%d", rc);
-
 		bus_priv->error_irq_handle = 0;
 	}
 
@@ -3452,10 +3471,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.vfe_irq_controller,
 			bus_priv->irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe irq rc=%d", rc);
-
 		bus_priv->irq_handle = 0;
 	}
 
@@ -3507,11 +3522,6 @@
 			rc = cam_irq_controller_unsubscribe_irq(
 				bus_priv->common_data.bus_irq_controller,
 				bus_priv->error_irq_handle);
-			if (rc)
-				CAM_ERR(CAM_ISP,
-					"Failed to unsubscribe error irq rc=%d",
-					rc);
-
 			bus_priv->error_irq_handle = 0;
 		}
 		break;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
index 3358c1e..6d5a514 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
@@ -45,11 +45,6 @@
 	0x00000000,
 };
 
-static uint32_t rup_irq_mask[2] = {
-	0x0000003F,
-	0x00000000,
-};
-
 enum cam_vfe_bus_ver3_packer_format {
 	PACKER_FMT_VER3_PLAIN_128,
 	PACKER_FMT_VER3_PLAIN_8,
@@ -84,14 +79,16 @@
 	uint32_t                                    secure_mode;
 	uint32_t                                    num_sec_out;
 	uint32_t                                    addr_no_sync;
+	uint32_t                                    comp_done_shift;
 	bool                                        is_lite;
+	cam_hw_mgr_event_cb_func                    event_cb;
+	int                        rup_irq_handle[CAM_VFE_BUS_VER3_SRC_GRP_MAX];
 };
 
 struct cam_vfe_bus_ver3_wm_resource_data {
 	uint32_t             index;
 	struct cam_vfe_bus_ver3_common_data            *common_data;
 	struct cam_vfe_bus_ver3_reg_offset_bus_client  *hw_regs;
-	void                                           *ctx;
 
 	bool                 init_cfg_done;
 	bool                 hfr_cfg_done;
@@ -132,25 +129,22 @@
 struct cam_vfe_bus_ver3_comp_grp_data {
 	enum cam_vfe_bus_ver3_comp_grp_type          comp_grp_type;
 	struct cam_vfe_bus_ver3_common_data         *common_data;
-	struct cam_vfe_bus_ver3_reg_offset_comp_grp *hw_regs;
-
-	uint32_t                                     irq_enabled;
 
 	uint32_t                                     is_master;
 	uint32_t                                     is_dual;
 	uint32_t                                     dual_slave_core;
 	uint32_t                                     intra_client_mask;
 	uint32_t                                     addr_sync_mode;
+	uint32_t                                     composite_mask;
 
 	uint32_t                                     acquire_dev_cnt;
 	uint32_t                                     irq_trigger_cnt;
 	uint32_t                                     ubwc_static_ctrl;
-
-	void                                        *ctx;
 };
 
 struct cam_vfe_bus_ver3_vfe_out_data {
 	uint32_t                              out_type;
+	uint32_t                              source_group;
 	struct cam_vfe_bus_ver3_common_data  *common_data;
 
 	uint32_t                         num_wm;
@@ -161,17 +155,22 @@
 	uint32_t                         dual_hw_alternate_vfe_id;
 	struct list_head                 vfe_out_list;
 
+	uint32_t                         is_master;
+	uint32_t                         is_dual;
+
 	uint32_t                         format;
 	uint32_t                         max_width;
 	uint32_t                         max_height;
 	struct cam_cdm_utils_ops        *cdm_util_ops;
 	uint32_t                         secure_mode;
+	void                            *priv;
 };
 
 struct cam_vfe_bus_ver3_priv {
 	struct cam_vfe_bus_ver3_common_data common_data;
 	uint32_t                            num_client;
 	uint32_t                            num_out;
+	uint32_t                            top_irq_shift;
 
 	struct cam_isp_resource_node  bus_client[CAM_VFE_BUS_VER3_MAX_CLIENTS];
 	struct cam_isp_resource_node  comp_grp[CAM_VFE_BUS_VER3_COMP_GRP_MAX];
@@ -180,9 +179,8 @@
 	struct list_head                    free_comp_grp;
 	struct list_head                    used_comp_grp;
 
-	uint32_t                            irq_handle;
-	uint32_t                            error_irq_handle;
-	uint32_t                            rup_irq_handle;
+	int                                 irq_handle;
+	int                                 error_irq_handle;
 	void                               *tasklet_info;
 };
 
@@ -217,8 +215,6 @@
 	struct cam_vfe_bus_irq_evt_payload     **evt_payload)
 {
 	struct cam_vfe_bus_ver3_common_data *common_data = NULL;
-	uint32_t  *ife_irq_regs = NULL;
-	uint32_t   status_reg0, status_reg1;
 	unsigned long flags;
 
 	if (!core_info) {
@@ -229,16 +225,6 @@
 		CAM_ERR(CAM_ISP, "No payload to put");
 		return -EINVAL;
 	}
-	(*evt_payload)->error_type = 0;
-	ife_irq_regs = (*evt_payload)->irq_reg_val;
-	status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
-	status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS1];
-
-	if (status_reg0 || status_reg1) {
-		CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x",
-			status_reg0, status_reg1);
-		return 0;
-	}
 
 	common_data = core_info;
 
@@ -320,7 +306,6 @@
 	case CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP:
 		return true;
 
-	case CAM_VFE_BUS_VER3_VFE_OUT_PDAF:
 	case CAM_VFE_BUS_VER3_VFE_OUT_2PD:
 	case CAM_VFE_BUS_VER3_VFE_OUT_LCR:
 	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE:
@@ -351,8 +336,6 @@
 		return CAM_VFE_BUS_VER3_VFE_OUT_FD;
 	case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
 		return CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP;
-	case CAM_ISP_IFE_OUT_RES_PDAF:
-		return CAM_VFE_BUS_VER3_VFE_OUT_PDAF;
 	case CAM_ISP_IFE_OUT_RES_2PD:
 		return CAM_VFE_BUS_VER3_VFE_OUT_2PD;
 	case CAM_ISP_IFE_OUT_RES_RDI_0:
@@ -484,17 +467,6 @@
 			break;
 		}
 		break;
-	case CAM_VFE_BUS_VER3_VFE_OUT_PDAF:
-		switch (format) {
-		case CAM_FORMAT_PLAIN8:
-		case CAM_FORMAT_PLAIN16_10:
-		case CAM_FORMAT_PLAIN16_12:
-		case CAM_FORMAT_PLAIN16_14:
-			return 1;
-		default:
-			break;
-		}
-		break;
 	case CAM_VFE_BUS_VER3_VFE_OUT_2PD:
 		switch (format) {
 		case CAM_FORMAT_PLAIN16_8:
@@ -647,19 +619,10 @@
 			break;
 		}
 		break;
-	case CAM_VFE_BUS_VER3_VFE_OUT_PDAF:
-		switch (plane) {
-		case PLANE_Y:
-			wm_idx = 21;
-			break;
-		default:
-			break;
-		}
-		break;
 	case CAM_VFE_BUS_VER3_VFE_OUT_2PD:
 		switch (plane) {
 		case PLANE_Y:
-			wm_idx = 11;
+			wm_idx = 21;
 			break;
 		default:
 			break;
@@ -791,6 +754,100 @@
 	return wm_idx;
 }
 
+static int cam_vfe_bus_ver3_get_comp_vfe_out_res_id_list(
+	uint32_t comp_mask, uint32_t *out_list, int *num_out, bool is_lite)
+{
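+	/*
+	 * Same mapping for bus ver3: one done-bit per bus client, with
+	 * a separate, compact RDI0-3 layout on IFE-lite targets.
+	 */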
+	int count = 0;
+
+	if (is_lite)
+		goto vfe_lite;
+
+	if (comp_mask & 0x3)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FULL;
+
+	if (comp_mask & 0x4)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS4;
+
+	if (comp_mask & 0x8)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS16;
+
+	if (comp_mask & 0x30)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FULL_DISP;
+
+	if (comp_mask & 0x40)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS4_DISP;
+
+	if (comp_mask & 0x80)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS16_DISP;
+
+	if (comp_mask & 0x300)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FD;
+
+	if (comp_mask & 0x400)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RAW_DUMP;
+
+	if (comp_mask & 0x1000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_HDR_BE;
+
+	if (comp_mask & 0x2000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST;
+
+	if (comp_mask & 0x4000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_TL_BG;
+
+	if (comp_mask & 0x8000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_AWB_BG;
+
+	if (comp_mask & 0x10000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_BHIST;
+
+	if (comp_mask & 0x20000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_RS;
+
+	if (comp_mask & 0x40000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_CS;
+
+	if (comp_mask & 0x80000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_IHIST;
+
+	if (comp_mask & 0x100000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_BF;
+
+	if (comp_mask & 0x200000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_2PD;
+
+	if (comp_mask & 0x400000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_LCR;
+
+	if (comp_mask & 0x800000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_0;
+
+	if (comp_mask & 0x1000000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_1;
+
+	if (comp_mask & 0x2000000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_2;
+
+	*num_out = count;
+	return 0;
+
+vfe_lite:
+	if (comp_mask & 0x1)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_0;
+
+	if (comp_mask & 0x2)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_1;
+
+	if (comp_mask & 0x4)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_2;
+
+	if (comp_mask & 0x8)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_3;
+
+	*num_out = count;
+	return 0;
+}
+
 static enum cam_vfe_bus_ver3_packer_format
 	cam_vfe_bus_ver3_get_packer_fmt(uint32_t out_fmt, int wm_index)
 {
@@ -844,35 +901,34 @@
 {
 	int32_t                                     rc;
 	int                                         i;
-	struct cam_vfe_bus_ver3_priv               *bus_priv;
+	struct cam_isp_resource_node               *vfe_out = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data       *rsrc_data = NULL;
 	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
 
-	bus_priv = th_payload->handler_priv;
-	if (!bus_priv) {
+	vfe_out = th_payload->handler_priv;
+	if (!vfe_out) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
 		return -ENODEV;
 	}
 
-	CAM_DBG(CAM_ISP, "bus_IRQ status_0 = 0x%x, bus_IRQ status_1 = 0x%x",
-		th_payload->evt_status_arr[0],
-		th_payload->evt_status_arr[1]);
+	rsrc_data = vfe_out->res_priv;
 
-	rc  = cam_vfe_bus_ver3_get_evt_payload(&bus_priv->common_data,
+	CAM_DBG(CAM_ISP, "BUS_IRQ status_0 = 0x%x",
+		th_payload->evt_status_arr[0]);
+
+	rc  = cam_vfe_bus_ver3_get_evt_payload(rsrc_data->common_data,
 		&evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
 			"No tasklet_cmd is free in queue");
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"IRQ status_0 = 0x%x status_1 = 0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1]);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status_0 = 0x%x",
+			th_payload->evt_status_arr[0]);
 
 		return rc;
 	}
 
-	evt_payload->core_index = bus_priv->common_data.core_index;
+	evt_payload->core_index = rsrc_data->common_data->core_index;
 	evt_payload->evt_id  = evt_id;
-	evt_payload->ctx = &bus_priv->common_data;
 	for (i = 0; i < th_payload->num_registers; i++)
 		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
 	th_payload->evt_payload_priv = evt_payload;
@@ -885,7 +941,10 @@
 {
 	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
 	struct cam_vfe_bus_irq_evt_payload   *payload;
-	uint32_t                              irq_status0;
+	struct cam_isp_resource_node         *vfe_out = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL;
+	struct cam_isp_hw_event_info          evt_info;
+	uint32_t                              irq_status;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP, "Invalid params");
@@ -893,18 +952,103 @@
 	}
 
 	payload = evt_payload_priv;
-	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
+	vfe_out = handler_priv;
+	rsrc_data = vfe_out->res_priv;
 
-	if (irq_status0 & 0x3F) {
-		CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
-		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	if (!rsrc_data->common_data->event_cb) {
+		CAM_ERR(CAM_ISP, "Callback to HW MGR not found");
+		return ret;
 	}
-	CAM_DBG(CAM_ISP,
-		"event ID:%d, bus_irq_status_0 = 0x%x returning status = %d",
-		payload->evt_id, irq_status0, ret);
 
-	if (ret == CAM_VFE_IRQ_STATUS_SUCCESS)
-		cam_vfe_bus_ver3_put_evt_payload(payload->ctx, &payload);
+	irq_status = payload->irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
+
+	evt_info.hw_idx = rsrc_data->common_data->core_index;
+	evt_info.res_type = CAM_ISP_RESOURCE_VFE_IN;
+
+	if (!rsrc_data->common_data->is_lite) {
+		if (irq_status & 0x1) {
+			CAM_DBG(CAM_ISP, "Received CAMIF RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_CAMIF;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x2) {
+			CAM_DBG(CAM_ISP, "Received PDLIB RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_PDLIB;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x4)
+			CAM_DBG(CAM_ISP, "Received LCR RUP");
+
+		if (irq_status & 0x8) {
+			CAM_DBG(CAM_ISP, "Received RDI0 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI0;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x10) {
+			CAM_DBG(CAM_ISP, "Received RDI1 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI1;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x20) {
+			CAM_DBG(CAM_ISP, "Received RDI2 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI2;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+	} else {
+		if (irq_status & 0x1) {
+			CAM_DBG(CAM_ISP, "Received RDI0 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI0;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x2) {
+			CAM_DBG(CAM_ISP, "Received RDI1 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI1;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x4) {
+			CAM_DBG(CAM_ISP, "Received RDI2 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI2;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x8) {
+			CAM_DBG(CAM_ISP, "Received RDI3 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI3;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+	}
+
+	ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+
+	CAM_DBG(CAM_ISP,
+		"event ID:%d, bus_irq_status_0 = 0x%x rc = %d",
+		payload->evt_id, irq_status, ret);
+
+	cam_vfe_bus_ver3_put_evt_payload(rsrc_data->common_data, &payload);
 
 	return ret;
 }
@@ -913,10 +1057,10 @@
 	struct cam_vfe_bus_ver3_priv          *ver3_bus_priv,
 	struct cam_isp_out_port_info          *out_port_info,
 	void                                  *tasklet,
-	void                                  *ctx,
 	enum cam_vfe_bus_ver3_vfe_out_type     vfe_out_res_id,
 	enum cam_vfe_bus_plane_type            plane,
 	struct cam_isp_resource_node         **wm_res,
+	uint32_t                              *client_done_mask,
 	uint32_t                               is_dual,
 	enum cam_vfe_bus_ver3_comp_grp_type   *comp_grp_id)
 {
@@ -945,7 +1089,6 @@
 	wm_res_local->tasklet_info = tasklet;
 
 	rsrc_data = wm_res_local->res_priv;
-	rsrc_data->ctx = ctx;
 	rsrc_data->format = out_port_info->format;
 	rsrc_data->pack_fmt = cam_vfe_bus_ver3_get_packer_fmt(rsrc_data->format,
 		wm_idx);
@@ -1097,15 +1240,12 @@
 		rsrc_data->height = 0;
 		rsrc_data->stride = 1;
 		rsrc_data->en_cfg = (0x1 << 16) | 0x1;
-	} else if (rsrc_data->index == 11 || rsrc_data->index == 21) {
-		/* WM 21/11 PDAF/2PD */
+	} else if (rsrc_data->index == 21) {
+		/* WM 21 PD */
 		rsrc_data->width = 0;
 		rsrc_data->height = 0;
 		rsrc_data->stride = 1;
 		rsrc_data->en_cfg = (0x1 << 16) | 0x1;
-		if (vfe_out_res_id == CAM_VFE_BUS_VER3_VFE_OUT_PDAF)
-			/* LSB aligned */
-			rsrc_data->pack_fmt |= 0x10;
 	} else if (rsrc_data->index == 10) {
 		/* WM 10 Raw dump */
 		rsrc_data->stride = rsrc_data->width;
@@ -1115,10 +1255,8 @@
 	} else if (rsrc_data->index == 22) {
 		switch (rsrc_data->format) {
 		case CAM_FORMAT_PLAIN16_16:
-			rsrc_data->width = 0;
-			rsrc_data->height = 0;
-			rsrc_data->stride = 1;
-			rsrc_data->en_cfg = (0x1 << 16) | 0x1;
+			rsrc_data->stride = ALIGNUP(rsrc_data->width * 2, 8);
+			rsrc_data->en_cfg = 0x1;
 			/* LSB aligned */
 			rsrc_data->pack_fmt |= 0x10;
 			break;
@@ -1127,16 +1265,22 @@
 				rsrc_data->format);
 			return -EINVAL;
 		}
-	} else {
+	} else if ((rsrc_data->index == 2) || (rsrc_data->index == 3) ||
+		(rsrc_data->index == 6) || (rsrc_data->index == 7)) {
 		/* Write master 2-3 and 6-7 DS ports */
 
 		rsrc_data->height = rsrc_data->height / 2;
 		rsrc_data->width  = rsrc_data->width / 2;
 		rsrc_data->en_cfg = 0x1;
+
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid WM:%d requested", rsrc_data->index);
+		return -EINVAL;
 	}
 
 	*wm_res = wm_res_local;
 	*comp_grp_id = rsrc_data->hw_regs->comp_group;
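+	/* Record this WM in the caller's client done mask */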
+	*client_done_mask |= (1 << wm_idx);
 
 	CAM_DBG(CAM_ISP,
 		"WM:%d processed width:%d height:%d format:0x%x en_ubwc:%d",
@@ -1329,6 +1473,15 @@
 	return 0;
 }
 
+static void cam_vfe_bus_ver3_add_wm_to_comp_grp(
+	struct cam_isp_resource_node    *comp_grp,
+	uint32_t                         composite_mask)
+{
+	struct cam_vfe_bus_ver3_comp_grp_data  *rsrc_data = comp_grp->res_priv;
+
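+	/* Accumulate the acquired WM bits into this group's composite mask */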
+	rsrc_data->composite_mask |= composite_mask;
+}
+
 static bool cam_vfe_bus_ver3_match_comp_grp(
 	struct cam_vfe_bus_ver3_priv           *ver3_bus_priv,
 	struct cam_isp_resource_node          **comp_grp,
@@ -1368,7 +1521,6 @@
 	struct cam_vfe_bus_ver3_priv        *ver3_bus_priv,
 	struct cam_isp_out_port_info        *out_port_info,
 	void                                *tasklet,
-	void                                *ctx,
 	uint32_t                             is_dual,
 	uint32_t                             is_master,
 	enum cam_vfe_bus_ver3_vfe_core_id    dual_slave_core,
@@ -1429,7 +1581,6 @@
 
 	CAM_DBG(CAM_ISP, "Acquire comp_grp:%u", rsrc_data->comp_grp_type);
 
-	rsrc_data->ctx = ctx;
 	rsrc_data->acquire_dev_cnt++;
 	*comp_grp = comp_grp_local;
 
@@ -1482,6 +1633,7 @@
 
 		in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER3_VFE_CORE_MAX;
 		in_rsrc_data->addr_sync_mode = 0;
+		in_rsrc_data->composite_mask = 0;
 
 		comp_grp->tasklet_info = NULL;
 		comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
@@ -1493,19 +1645,19 @@
 }
 
 static int cam_vfe_bus_ver3_start_comp_grp(
-	struct cam_isp_resource_node *comp_grp)
+	struct cam_isp_resource_node *comp_grp, uint32_t *bus_irq_reg_mask)
 {
 	int rc = 0;
 	uint32_t val;
 	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = NULL;
 	struct cam_vfe_bus_ver3_common_data *common_data = NULL;
-	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_MAX] = {0};
 
 	rsrc_data = comp_grp->res_priv;
 	common_data = rsrc_data->common_data;
 
-	CAM_DBG(CAM_ISP, "comp_grp_type:%d streaming state:%d",
-		rsrc_data->comp_grp_type, comp_grp->res_state);
+	CAM_DBG(CAM_ISP, "comp_grp_type:%d streaming state:%d mask:0x%x",
+		rsrc_data->comp_grp_type, comp_grp->res_state,
+		rsrc_data->composite_mask);
 
 	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
 		return 0;
@@ -1514,25 +1666,34 @@
 		if (rsrc_data->is_master) {
 			val = cam_io_r_mb(common_data->mem_base +
 				common_data->common_reg->comp_cfg_0);
+
 			val |= (0x1 << (rsrc_data->comp_grp_type + 14));
+
 			cam_io_w_mb(val, common_data->mem_base +
 				common_data->common_reg->comp_cfg_0);
 
 			val = cam_io_r_mb(common_data->mem_base +
 				common_data->common_reg->comp_cfg_1);
+
 			val |= (0x1 << rsrc_data->comp_grp_type);
+
 			cam_io_w_mb(val, common_data->mem_base +
 				common_data->common_reg->comp_cfg_1);
 		} else {
 			val = cam_io_r_mb(common_data->mem_base +
 				common_data->common_reg->comp_cfg_0);
+
 			val |= (0x1 << rsrc_data->comp_grp_type);
+			val |= (0x1 << (rsrc_data->comp_grp_type + 14));
+
 			cam_io_w_mb(val, common_data->mem_base +
 				common_data->common_reg->comp_cfg_0);
 
 			val = cam_io_r_mb(common_data->mem_base +
 				common_data->common_reg->comp_cfg_1);
+
 			val |= (0x1 << rsrc_data->comp_grp_type);
+
 			cam_io_w_mb(val, common_data->mem_base +
 				common_data->common_reg->comp_cfg_1);
 		}
@@ -1547,28 +1708,13 @@
 	}
 
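+	/* Buf done bit position is per-target, taken from comp_done_shift */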
 	bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0] =
-		(0x1 << (rsrc_data->comp_grp_type + 6));
+		(0x1 << (rsrc_data->comp_grp_type +
+		rsrc_data->common_data->comp_done_shift));
 
-	/*
-	 * For Dual composite subscribe IRQ only for master
-	 * For regular composite, subscribe IRQ always
-	 */
-	CAM_DBG(CAM_ISP, "Subscribe comp_grp_type:%d IRQ",
-		rsrc_data->comp_grp_type);
-	if ((rsrc_data->is_dual && rsrc_data->is_master) ||
-		(!rsrc_data->is_dual)) {
-		comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
-			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
-			bus_irq_reg_mask, comp_grp,
-			comp_grp->top_half_handler,
-			cam_ife_mgr_do_tasklet_buf_done,
-			comp_grp->tasklet_info, &tasklet_bh_api);
-		if (comp_grp->irq_handle < 0) {
-			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
-				rsrc_data->comp_grp_type);
-			return -EFAULT;
-		}
-	}
+	CAM_DBG(CAM_ISP, "VFE start COMP_GRP:%d bus_irq_mask_0 0x%x",
+		rsrc_data->comp_grp_type,
+		bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0]);
+
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
 	return rc;
@@ -1577,76 +1723,21 @@
 static int cam_vfe_bus_ver3_stop_comp_grp(
 	struct cam_isp_resource_node          *comp_grp)
 {
-	int rc = 0;
-	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = NULL;
-	struct cam_vfe_bus_ver3_common_data   *common_data = NULL;
-
-	rsrc_data = comp_grp->res_priv;
-	common_data = rsrc_data->common_data;
-
-	/* Unsubscribe IRQ */
-	if ((rsrc_data->is_dual && rsrc_data->is_master) ||
-		(!rsrc_data->is_dual)) {
-		rc = cam_irq_controller_unsubscribe_irq(
-			common_data->bus_irq_controller,
-			comp_grp->irq_handle);
-	}
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
-	return rc;
+	return 0;
 }
 
 static int cam_vfe_bus_ver3_handle_comp_done_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	int32_t                                     rc;
-	int                                         i;
-	struct cam_isp_resource_node               *comp_grp = NULL;
-	struct cam_vfe_bus_ver3_comp_grp_data      *rsrc_data = NULL;
-	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
-
-	comp_grp = th_payload->handler_priv;
-	if (!comp_grp) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
-		return -ENODEV;
-	}
-
-	rsrc_data = comp_grp->res_priv;
-
-	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
-	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
-
-	rc  = cam_vfe_bus_ver3_get_evt_payload(rsrc_data->common_data,
-		&evt_payload);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue");
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"IRQ status_0 = 0x%x status_1 = 0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1]);
-
-		return rc;
-	}
-
-	cam_isp_hw_get_timestamp(&evt_payload->ts);
-
-	evt_payload->ctx = rsrc_data->ctx;
-	evt_payload->core_index = rsrc_data->common_data->core_index;
-	evt_payload->evt_id  = evt_id;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
-
-	th_payload->evt_payload_priv = evt_payload;
-
-	CAM_DBG(CAM_ISP, "Exit");
-	return rc;
+	return -EPERM;
 }
 
 static int cam_vfe_bus_ver3_handle_comp_done_bottom_half(
 	void                *handler_priv,
-	void                *evt_payload_priv)
+	void                *evt_payload_priv,
+	uint32_t            *comp_mask)
 {
 	int rc = CAM_VFE_IRQ_STATUS_ERR;
 	struct cam_isp_resource_node          *comp_grp = handler_priv;
@@ -1669,23 +1760,15 @@
 	cam_ife_irq_regs = evt_payload->irq_reg_val;
 	status0_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
 
-	if (status0_reg & BIT(rsrc_data->comp_grp_type + 6)) {
-		rsrc_data->irq_trigger_cnt++;
-		if (rsrc_data->irq_trigger_cnt ==
-			rsrc_data->acquire_dev_cnt) {
-			cam_ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0] &=
-					~BIT(rsrc_data->comp_grp_type + 6);
-			rsrc_data->irq_trigger_cnt = 0;
-		}
+	if (status0_reg & BIT(rsrc_data->comp_grp_type +
+		rsrc_data->common_data->comp_done_shift)) {
+		evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
 		rc = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
-	CAM_DBG(CAM_ISP, "status_0_reg = 0x%x, bit index = %d rc %d",
-		status0_reg, (rsrc_data->comp_grp_type + 6), rc);
+	CAM_DBG(CAM_ISP, "status_0_reg = 0x%x rc %d", status0_reg, rc);
 
-	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
-		cam_vfe_bus_ver3_put_evt_payload(rsrc_data->common_data,
-			&evt_payload);
+	*comp_mask = rsrc_data->composite_mask;
 
 	return rc;
 }
@@ -1698,6 +1781,7 @@
 {
 	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = NULL;
 	struct cam_vfe_soc_private *vfe_soc_private = soc_info->soc_private;
+	int ddr_type = 0;
 
 	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver3_comp_grp_data),
 		GFP_KERNEL);
@@ -1717,7 +1801,9 @@
 		rsrc_data->comp_grp_type != CAM_VFE_BUS_VER3_COMP_GRP_1)
 		rsrc_data->ubwc_static_ctrl = 0;
 	else {
-		if (of_fdt_get_ddrtype() == DDR_TYPE_LPDDR5)
+		ddr_type = of_fdt_get_ddrtype();
+		if ((ddr_type == DDR_TYPE_LPDDR5) ||
+			(ddr_type == DDR_TYPE_LPDDR5X))
 			rsrc_data->ubwc_static_ctrl =
 				vfe_soc_private->ubwc_static_ctrl[1];
 		else
@@ -1727,11 +1813,7 @@
 
 	list_add_tail(&comp_grp->list, &ver3_bus_priv->free_comp_grp);
 
-	comp_grp->start = cam_vfe_bus_ver3_start_comp_grp;
-	comp_grp->stop = cam_vfe_bus_ver3_stop_comp_grp;
 	comp_grp->top_half_handler = cam_vfe_bus_ver3_handle_comp_done_top_half;
-	comp_grp->bottom_half_handler =
-		cam_vfe_bus_ver3_handle_comp_done_bottom_half;
 	comp_grp->hw_intf = ver3_bus_priv->common_data.hw_intf;
 
 	return 0;
@@ -1793,6 +1875,7 @@
 	struct cam_vfe_bus_ver3_vfe_out_data   *rsrc_data = NULL;
 	uint32_t                                secure_caps = 0, mode;
 	enum cam_vfe_bus_ver3_comp_grp_type     comp_grp_id;
+	uint32_t                                client_done_mask = 0;
 
 	if (!bus_priv || !acquire_args) {
 		CAM_ERR(CAM_ISP, "Invalid Param");
@@ -1822,6 +1905,9 @@
 	}
 
 	rsrc_data = rsrc_node->res_priv;
+	rsrc_data->common_data->event_cb = acq_args->event_cb;
+	rsrc_data->priv = acq_args->priv;
+
 	secure_caps = cam_vfe_bus_ver3_can_be_secure(
 		rsrc_data->out_type);
 	mode = out_acquire_args->out_port_info->secure_mode;
@@ -1861,10 +1947,10 @@
 		rc = cam_vfe_bus_ver3_acquire_wm(ver3_bus_priv,
 			out_acquire_args->out_port_info,
 			acq_args->tasklet,
-			out_acquire_args->ctx,
 			vfe_out_res_id,
 			i,
 			&rsrc_data->wm_res[i],
+			&client_done_mask,
 			out_acquire_args->is_dual,
 			&comp_grp_id);
 		if (rc) {
@@ -1880,7 +1966,6 @@
 	rc = cam_vfe_bus_ver3_acquire_comp_grp(ver3_bus_priv,
 		out_acquire_args->out_port_info,
 		acq_args->tasklet,
-		out_acquire_args->ctx,
 		out_acquire_args->is_dual,
 		out_acquire_args->is_master,
 		out_acquire_args->dual_slave_core,
@@ -1894,6 +1979,11 @@
 		return rc;
 	}
 
+	rsrc_data->is_dual = out_acquire_args->is_dual;
+	rsrc_data->is_master = out_acquire_args->is_master;
+
+	cam_vfe_bus_ver3_add_wm_to_comp_grp(rsrc_data->comp_grp,
+		client_done_mask);
 
 	rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	out_acquire_args->rsrc_node = rsrc_node;
@@ -1984,6 +2074,9 @@
 	int rc = 0, i;
 	struct cam_vfe_bus_ver3_vfe_out_data  *rsrc_data = NULL;
 	struct cam_vfe_bus_ver3_common_data   *common_data = NULL;
+	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_MAX];
+	uint32_t rup_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_MAX];
+	uint32_t source_group = 0;
 
 	if (!vfe_out) {
 		CAM_ERR(CAM_ISP, "Invalid input");
@@ -1992,6 +2085,7 @@
 
 	rsrc_data = vfe_out->res_priv;
 	common_data = rsrc_data->common_data;
+	source_group = rsrc_data->source_group;
 
 	CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type);
 
@@ -2004,9 +2098,57 @@
 	for (i = 0; i < rsrc_data->num_wm; i++)
 		rc = cam_vfe_bus_ver3_start_wm(rsrc_data->wm_res[i]);
 
-	if (rsrc_data->comp_grp)
-		rc = cam_vfe_bus_ver3_start_comp_grp(rsrc_data->comp_grp);
+	memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask));
+	rc = cam_vfe_bus_ver3_start_comp_grp(rsrc_data->comp_grp,
+		bus_irq_reg_mask);
 
+	if (rsrc_data->is_dual && !rsrc_data->is_master)
+		goto end;
+
+	vfe_out->irq_handle = cam_irq_controller_subscribe_irq(
+		common_data->bus_irq_controller,
+		CAM_IRQ_PRIORITY_1,
+		bus_irq_reg_mask,
+		vfe_out,
+		vfe_out->top_half_handler,
+		vfe_out->bottom_half_handler,
+		vfe_out->tasklet_info,
+		&tasklet_bh_api);
+
+	if (vfe_out->irq_handle < 1) {
+		CAM_ERR(CAM_ISP, "Subscribe IRQ failed for VFE out_res %d",
+			vfe_out->res_id);
+		vfe_out->irq_handle = 0;
+		return -EFAULT;
+	}
+
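+	/* The RUP IRQ is shared per source group; subscribe only once */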
+	if (!common_data->rup_irq_handle[source_group]) {
+		memset(rup_irq_reg_mask, 0, sizeof(rup_irq_reg_mask));
+		rup_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0] |=
+			0x1 << source_group;
+
+		CAM_DBG(CAM_ISP, "bus_irq_mask_0 for rup 0x%x",
+			rup_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0]);
+
+		common_data->rup_irq_handle[source_group] =
+			cam_irq_controller_subscribe_irq(
+				common_data->bus_irq_controller,
+				CAM_IRQ_PRIORITY_1,
+				rup_irq_reg_mask,
+				vfe_out,
+				cam_vfe_bus_ver3_handle_rup_top_half,
+				cam_vfe_bus_ver3_handle_rup_bottom_half,
+				vfe_out->tasklet_info,
+				&tasklet_bh_api);
+
+		if (common_data->rup_irq_handle[source_group] < 1) {
+			CAM_ERR(CAM_ISP, "Failed to subscribe RUP IRQ");
+			common_data->rup_irq_handle[source_group] = 0;
+			return -EFAULT;
+		}
+	}
+
+end:
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 	return rc;
 }
@@ -2016,6 +2158,7 @@
 {
 	int rc = 0, i;
 	struct cam_vfe_bus_ver3_vfe_out_data  *rsrc_data = NULL;
+	struct cam_vfe_bus_ver3_common_data   *common_data = NULL;
 
 	if (!vfe_out) {
 		CAM_ERR(CAM_ISP, "Invalid input");
@@ -2023,6 +2166,7 @@
 	}
 
 	rsrc_data = vfe_out->res_priv;
+	common_data = rsrc_data->common_data;
 
 	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
 		vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
@@ -2030,12 +2174,25 @@
 		return rc;
 	}
 
-	if (rsrc_data->comp_grp)
-		rc = cam_vfe_bus_ver3_stop_comp_grp(rsrc_data->comp_grp);
+	rc = cam_vfe_bus_ver3_stop_comp_grp(rsrc_data->comp_grp);
 
 	for (i = 0; i < rsrc_data->num_wm; i++)
 		rc = cam_vfe_bus_ver3_stop_wm(rsrc_data->wm_res[i]);
 
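+	/* Release the RUP and buf done IRQ subscriptions taken at start */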
+	if (common_data->rup_irq_handle[rsrc_data->source_group]) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			common_data->bus_irq_controller,
+			common_data->rup_irq_handle[rsrc_data->source_group]);
+		common_data->rup_irq_handle[rsrc_data->source_group] = 0;
+	}
+
+	if (vfe_out->irq_handle) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			common_data->bus_irq_controller,
+			vfe_out->irq_handle);
+		vfe_out->irq_handle = 0;
+	}
+
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	return rc;
 }
@@ -2043,31 +2200,94 @@
 static int cam_vfe_bus_ver3_handle_vfe_out_done_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                     rc;
+	int                                         i;
+	struct cam_isp_resource_node               *vfe_out = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data       *rsrc_data = NULL;
+	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
+
+	vfe_out = th_payload->handler_priv;
+	if (!vfe_out) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
+		return -ENODEV;
+	}
+
+	rsrc_data = vfe_out->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_bus_ver3_get_evt_payload(rsrc_data->common_data,
+		&evt_payload);
+
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0 = 0x%x status_1 = 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
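+	/* Timestamp in the top half, closest to the actual IRQ */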
+
+	evt_payload->core_index = rsrc_data->common_data->core_index;
+	evt_payload->evt_id = evt_id;
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_bus_ver3_handle_vfe_out_done_bottom_half(
 	void                *handler_priv,
 	void                *evt_payload_priv)
 {
-	int rc = -EINVAL;
+	int                                   rc = -EINVAL, num_out = 0, i = 0;
 	struct cam_isp_resource_node         *vfe_out = handler_priv;
 	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = vfe_out->res_priv;
 	struct cam_vfe_bus_irq_evt_payload   *evt_payload = evt_payload_priv;
+	struct cam_isp_hw_event_info          evt_info;
+	void                                 *ctx = NULL;
+	uint32_t                              evt_id = 0, comp_mask = 0;
+	uint32_t                         out_list[CAM_VFE_BUS_VER3_VFE_OUT_MAX];
 
-	if (evt_payload->evt_id == CAM_ISP_HW_EVENT_REG_UPDATE) {
-		rc = cam_vfe_bus_ver3_handle_rup_bottom_half(
-			handler_priv, evt_payload_priv);
-		return rc;
-	}
-	/* We only handle composite buf done */
-	if (rsrc_data->comp_grp) {
-		rc = rsrc_data->comp_grp->bottom_half_handler(
-			rsrc_data->comp_grp, evt_payload_priv);
-	}
-
+	rc = cam_vfe_bus_ver3_handle_comp_done_bottom_half(
+		rsrc_data->comp_grp, evt_payload_priv, &comp_mask);
 	CAM_DBG(CAM_ISP, "vfe_out %d rc %d", rsrc_data->out_type, rc);
 
+	ctx = rsrc_data->priv;
+	memset(out_list, 0, sizeof(out_list));
+
+	switch (rc) {
+	case CAM_VFE_IRQ_STATUS_SUCCESS:
+		evt_id = evt_payload->evt_id;
+
+		evt_info.res_type = vfe_out->res_type;
+		evt_info.hw_idx   = vfe_out->hw_intf->hw_idx;
+
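+		/* Fan out one buf done event per output in the composite group */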
+		rc = cam_vfe_bus_ver3_get_comp_vfe_out_res_id_list(
+			comp_mask, out_list, &num_out,
+			rsrc_data->common_data->is_lite);
+		for (i = 0; i < num_out; i++) {
+			evt_info.res_id = out_list[i];
+			if (rsrc_data->common_data->event_cb)
+				rsrc_data->common_data->event_cb(ctx, evt_id,
+					(void *)&evt_info);
+		}
+		break;
+	default:
+		break;
+	}
+
+	cam_vfe_bus_ver3_put_evt_payload(rsrc_data->common_data, &evt_payload);
+
 	return rc;
 }
 
@@ -2109,14 +2329,16 @@
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 	INIT_LIST_HEAD(&vfe_out->list);
 
-	rsrc_data->out_type    =
+	rsrc_data->source_group =
+		ver3_hw_info->vfe_out_hw_info[index].source_group;
+	rsrc_data->out_type     =
 		ver3_hw_info->vfe_out_hw_info[index].vfe_out_type;
-	rsrc_data->common_data = &ver3_bus_priv->common_data;
-	rsrc_data->max_width   =
+	rsrc_data->common_data  = &ver3_bus_priv->common_data;
+	rsrc_data->max_width    =
 		ver3_hw_info->vfe_out_hw_info[index].max_width;
-	rsrc_data->max_height  =
+	rsrc_data->max_height   =
 		ver3_hw_info->vfe_out_hw_info[index].max_height;
-	rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE;
+	rsrc_data->secure_mode  = CAM_SECURE_MODE_NON_SECURE;
 
 	vfe_out->start = cam_vfe_bus_ver3_start_vfe_out;
 	vfe_out->stop = cam_vfe_bus_ver3_stop_vfe_out;
@@ -2126,6 +2348,7 @@
 		cam_vfe_bus_ver3_handle_vfe_out_done_bottom_half;
 	vfe_out->process_cmd = cam_vfe_bus_ver3_process_cmd;
 	vfe_out->hw_intf = ver3_bus_priv->common_data.hw_intf;
+	vfe_out->irq_handle = 0;
 
 	return 0;
 }
@@ -2149,6 +2372,7 @@
 	vfe_out->top_half_handler = NULL;
 	vfe_out->bottom_half_handler = NULL;
 	vfe_out->hw_intf = NULL;
+	vfe_out->irq_handle = 0;
 
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
 	INIT_LIST_HEAD(&vfe_out->list);
@@ -2202,19 +2426,11 @@
 		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
 
 	evt_payload->core_index = bus_priv->common_data.core_index;
-	evt_payload->evt_id  = evt_id;
 
-	evt_payload->ctx = &bus_priv->common_data;
 	evt_payload->ccif_violation_status = cam_io_r_mb(
 		bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->ccif_violation_status);
 
-	evt_payload->overflow_status = cam_io_r_mb(
-		bus_priv->common_data.mem_base +
-		bus_priv->common_data.common_reg->overflow_status);
-	cam_io_w_mb(0x1, bus_priv->common_data.mem_base +
-		bus_priv->common_data.common_reg->overflow_status_clear);
-
 	evt_payload->image_size_violation_status = cam_io_r_mb(
 		bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->image_size_violation_status);
@@ -2224,18 +2440,19 @@
 	return rc;
 }
 
-static int cam_vfe_bus_ver3_err_irq_bottom_half(void *ctx_priv,
-	void *evt_payload_priv)
+static int cam_vfe_bus_ver3_err_irq_bottom_half(
+	void *handler_priv, void *evt_payload_priv)
 {
-	struct cam_vfe_bus_irq_evt_payload *evt_payload;
+	struct cam_vfe_bus_irq_evt_payload *evt_payload = evt_payload_priv;
+	struct cam_vfe_bus_ver3_priv *bus_priv = handler_priv;
 	struct cam_vfe_bus_ver3_common_data *common_data;
+	struct cam_isp_hw_event_info evt_info;
 	uint32_t val = 0, image_size_violation = 0, ccif_violation = 0;
 
-	if (!ctx_priv || !evt_payload_priv)
+	if (!handler_priv || !evt_payload_priv)
 		return -EINVAL;
 
-	evt_payload = evt_payload_priv;
-	common_data = evt_payload->ctx;
+	common_data = &bus_priv->common_data;
 
 	val = evt_payload->irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
 	image_size_violation = (val >> 31) & 0x1;
@@ -2245,6 +2462,50 @@
 	CAM_ERR(CAM_ISP, "image_size_violation %d ccif_violation %d",
 		image_size_violation, ccif_violation);
 
+	if (common_data->is_lite) {
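+		/* Lite targets only have RDI0-3 write clients */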
+		if (image_size_violation) {
+			val = evt_payload->image_size_violation_status;
+
+			if (val & 0x01)
+				CAM_INFO(CAM_ISP,
+					"RDI 0 image size violation");
+
+			if (val & 0x02)
+				CAM_INFO(CAM_ISP,
+					"RDI 1 image size violation");
+
+			if (val & 0x04)
+				CAM_INFO(CAM_ISP,
+					"RDI 2 image size violation");
+
+			if (val & 0x08)
+				CAM_INFO(CAM_ISP,
+					"RDI 3 image size violation");
+		}
+
+		if (ccif_violation) {
+			val = evt_payload->ccif_violation_status;
+
+			if (val & 0x01)
+				CAM_INFO(CAM_ISP,
+					"RDI 0 ccif violation");
+
+			if (val & 0x02)
+				CAM_INFO(CAM_ISP,
+					"RDI 1 ccif violation");
+
+			if (val & 0x04)
+				CAM_INFO(CAM_ISP,
+					"RDI 2 ccif violation");
+
+			if (val & 0x08)
+				CAM_INFO(CAM_ISP,
+					"RDI 3 ccif violation");
+		}
+
+		goto end;
+	}
+
 	if (image_size_violation) {
 		val = evt_payload->image_size_violation_status;
 
@@ -2282,9 +2543,6 @@
 			CAM_INFO(CAM_ISP,
 			"PIXEL RAW DUMP image size violation");
 
-		if (val & 0x0800)
-			CAM_INFO(CAM_ISP, "CAMIF PD image size violation");
-
 		if (val & 0x01000)
 			CAM_INFO(CAM_ISP, "STATS HDR BE image size violation");
 
@@ -2315,7 +2573,7 @@
 			CAM_INFO(CAM_ISP, "STATS BAF image size violation");
 
 		if (val & 0x0200000)
-			CAM_INFO(CAM_ISP, "PDAF image size violation");
+			CAM_INFO(CAM_ISP, "PD image size violation");
 
 		if (val & 0x0400000)
 			CAM_INFO(CAM_ISP, "LCR image size violation");
@@ -2367,9 +2625,6 @@
 		if (val & 0x0400)
 			CAM_INFO(CAM_ISP, "PIXEL RAW DUMP ccif violation");
 
-		if (val & 0x0800)
-			CAM_INFO(CAM_ISP, "CAMIF PD ccif violation");
-
 		if (val & 0x01000)
 			CAM_INFO(CAM_ISP, "STATS HDR BE ccif violation");
 
@@ -2398,7 +2653,7 @@
 			CAM_INFO(CAM_ISP, "STATS BAF ccif violation");
 
 		if (val & 0x0200000)
-			CAM_INFO(CAM_ISP, "PDAF ccif violation");
+			CAM_INFO(CAM_ISP, "PD ccif violation");
 
 		if (val & 0x0400000)
 			CAM_INFO(CAM_ISP, "LCR ccif violation");
@@ -2414,7 +2669,17 @@
 
 	}
 
+end:
 	cam_vfe_bus_ver3_put_evt_payload(common_data, &evt_payload);
+
+	evt_info.hw_idx = common_data->core_index;
+	evt_info.res_type = CAM_ISP_RESOURCE_VFE_OUT;
+	evt_info.res_id = CAM_VFE_BUS_VER3_VFE_OUT_MAX;
+	evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
+
+	if (common_data->event_cb)
+		common_data->event_cb(NULL, CAM_ISP_HW_EVENT_ERROR,
+			(void *)&evt_info);
 	return 0;
 }
 
@@ -2631,8 +2896,6 @@
 		else
 			loop_size = 1;
 
-
-
 		/* WM Image address */
 		for (k = 0; k < loop_size; k++) {
 			if (wm_data->en_ubwc)
@@ -2861,9 +3124,9 @@
 		CAM_ERR(CAM_ISP, "num_wm %d h_init 0x%x",
 			vfe_out_data->num_wm,
 			ubwc_generic_plane_cfg->h_init);
-		if ((!wm_data->is_dual) && ((wm_data->ubwc_meta_cfg !=
-			ubwc_generic_plane_cfg->meta_stride) ||
-			!wm_data->init_cfg_done)) {
+		if (wm_data->ubwc_meta_cfg !=
+			ubwc_generic_plane_cfg->meta_stride ||
+			!wm_data->init_cfg_done) {
 			wm_data->ubwc_meta_cfg =
 				ubwc_generic_plane_cfg->meta_stride;
 			wm_data->ubwc_updated = true;
@@ -2922,47 +3185,6 @@
 	return rc;
 }
 
-static uint32_t cam_vfe_bus_ver3_convert_bytes_to_pixels(uint32_t packer_fmt,
-	uint32_t width)
-{
-	int pixels = 0;
-
-	switch (packer_fmt) {
-	case PACKER_FMT_VER3_PLAIN_128:
-		pixels = width / 16;
-		break;
-	case PACKER_FMT_VER3_PLAIN_8:
-	case PACKER_FMT_VER3_PLAIN_8_ODD_EVEN:
-		pixels = width;
-		break;
-	case PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10:
-	case PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10_ODD_EVEN:
-		pixels = width * 8 / 10;
-		break;
-	case PACKER_FMT_VER3_PLAIN_16_10BPP:
-	case PACKER_FMT_VER3_PLAIN_16_12BPP:
-	case PACKER_FMT_VER3_PLAIN_16_14BPP:
-	case PACKER_FMT_VER3_PLAIN_16_16BPP:
-		pixels = width / 2;
-		break;
-	case PACKER_FMT_VER3_PLAIN_32:
-		pixels = width / 4;
-		break;
-	case PACKER_FMT_VER3_PLAIN_64:
-		pixels = width / 8;
-		break;
-	case PACKER_FMT_VER3_TP_10:
-		pixels = width * 3 / 4;
-		break;
-	case PACKER_FMT_VER3_MAX:
-	default:
-		CAM_ERR(CAM_ISP, "Invalid packer cfg 0x%x", packer_fmt);
-		break;
-	}
-
-	return pixels;
-}
-
 static int cam_vfe_bus_ver3_update_stripe_cfg(void *priv, void *cmd_args,
 	uint32_t arg_size)
 {
@@ -2996,8 +3218,7 @@
 		wm_data = vfe_out_data->wm_res[i]->res_priv;
 		stripe_config = (struct cam_isp_dual_stripe_config  *)
 			&stripe_args->dual_cfg->stripes[ports_plane_idx + i];
-		wm_data->width = cam_vfe_bus_ver3_convert_bytes_to_pixels(
-			wm_data->pack_fmt, stripe_config->width);
+		wm_data->width = stripe_config->width;
 		wm_data->offset = stripe_config->offset;
 		CAM_DBG(CAM_ISP, "id:%x WM:%d width:0x%x offset:%x",
 			stripe_args->res->res_id, wm_data->index,
@@ -3023,14 +3244,14 @@
 	void *init_hw_args, uint32_t arg_size)
 {
 	struct cam_vfe_bus_ver3_priv    *bus_priv = hw_priv;
-	uint32_t                         top_irq_reg_mask[2] = {0};
+	uint32_t                         top_irq_reg_mask[3] = {0};
 
 	if (!bus_priv) {
 		CAM_ERR(CAM_ISP, "Invalid args");
 		return -EINVAL;
 	}
 
-	top_irq_reg_mask[0] = (1 << 7);
+	top_irq_reg_mask[0] = (1 << bus_priv->top_irq_shift);
 
 	bus_priv->irq_handle = cam_irq_controller_subscribe_irq(
 		bus_priv->common_data.vfe_irq_controller,
@@ -3042,8 +3263,9 @@
 		NULL,
 		NULL);
 
-	if ((int)bus_priv->irq_handle <= 0) {
+	if (bus_priv->irq_handle < 1) {
 		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+		bus_priv->irq_handle = 0;
 		return -EFAULT;
 	}
 
@@ -3058,35 +3280,19 @@
 			bus_priv->tasklet_info,
 			&tasklet_bh_api);
 
-		if ((int)bus_priv->error_irq_handle <= 0) {
+		if (bus_priv->error_irq_handle < 1) {
 			CAM_ERR(CAM_ISP, "Failed to subscribe BUS Error IRQ");
+			bus_priv->error_irq_handle = 0;
 			return -EFAULT;
 		}
 	}
 
-	if (bus_priv->tasklet_info != NULL) {
-		bus_priv->rup_irq_handle = cam_irq_controller_subscribe_irq(
-			bus_priv->common_data.bus_irq_controller,
-			CAM_IRQ_PRIORITY_0,
-			rup_irq_mask,
-			bus_priv,
-			cam_vfe_bus_ver3_handle_rup_top_half,
-			cam_ife_mgr_do_tasklet_reg_update,
-			bus_priv->tasklet_info,
-			&tasklet_bh_api);
-
-		if (bus_priv->rup_irq_handle <= 0) {
-			CAM_ERR(CAM_ISP, "Failed to subscribe RUP IRQ");
-			return -EFAULT;
-		}
-	}
-
-	// no clock gating at bus input
+	/* no clock gating at bus input */
 	CAM_INFO(CAM_ISP, "Overriding clock gating at bus input");
 	cam_io_w_mb(0x3FFFFFF, bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->cgc_ovd);
 
-	// BUS_WR_TEST_BUS_CTRL
+	/* BUS_WR_TEST_BUS_CTRL */
 	cam_io_w_mb(0x0, bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->test_bus_ctrl);
 
@@ -3108,10 +3314,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.bus_irq_controller,
 			bus_priv->error_irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe error irq rc=%d", rc);
-
 		bus_priv->error_irq_handle = 0;
 	}
 
@@ -3119,10 +3321,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.vfe_irq_controller,
 			bus_priv->irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe irq rc=%d", rc);
-
 		bus_priv->irq_handle = 0;
 	}
 
@@ -3175,11 +3373,6 @@
 			rc = cam_irq_controller_unsubscribe_irq(
 				bus_priv->common_data.bus_irq_controller,
 				bus_priv->error_irq_handle);
-			if (rc)
-				CAM_ERR(CAM_ISP,
-					"Failed to unsubscribe error irq rc=%d",
-					rc);
-
 			bus_priv->error_irq_handle = 0;
 		}
 		break;
@@ -3236,6 +3429,7 @@
 
 	bus_priv->num_client                     = ver3_hw_info->num_client;
 	bus_priv->num_out                        = ver3_hw_info->num_out;
+	bus_priv->top_irq_shift                  = ver3_hw_info->top_irq_shift;
 	bus_priv->common_data.num_sec_out        = 0;
 	bus_priv->common_data.secure_mode        = CAM_SECURE_MODE_NON_SECURE;
 	bus_priv->common_data.core_index         = soc_info->index;
@@ -3244,6 +3438,8 @@
 	bus_priv->common_data.hw_intf            = hw_intf;
 	bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
 	bus_priv->common_data.common_reg         = &ver3_hw_info->common_reg;
+	bus_priv->common_data.comp_done_shift    =
+		ver3_hw_info->comp_done_shift;
 
 	if (strnstr(soc_info->compatible, "lite",
 		strlen(soc_info->compatible)) != NULL)
@@ -3251,6 +3447,9 @@
 	else
 		bus_priv->common_data.is_lite = false;
 
+	for (i = 0; i < CAM_VFE_BUS_VER3_SRC_GRP_MAX; i++)
+		bus_priv->common_data.rup_irq_handle[i] = 0;
+
 	mutex_init(&bus_priv->common_data.bus_mutex);
 
 	rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h
index 4711d75..c5b4ab6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h
@@ -158,6 +158,7 @@
 	enum cam_vfe_bus_ver3_vfe_out_type  vfe_out_type;
 	uint32_t                            max_width;
 	uint32_t                            max_height;
+	uint32_t                            source_group;
 };
 
 /*
@@ -166,9 +167,11 @@
  * @Brief:            HW register info for entire Bus
  *
  * @common_reg:       Common register details
+ * @num_client:       Total number of write clients
  * @bus_client_reg:   Bus client register info
- * @comp_reg_grp:     Composite group register info
  * @vfe_out_hw_info:  VFE output capability
+ * @comp_done_shift:  Bit shift for the composite done bits in the IRQ mask
+ * @top_irq_shift:    Bit shift for the BUS WR bit in the top-level IRQ mask
  */
 struct cam_vfe_bus_ver3_hw_info {
 	struct cam_vfe_bus_ver3_reg_offset_common common_reg;
@@ -178,6 +181,8 @@
 	uint32_t num_out;
 	struct cam_vfe_bus_ver3_vfe_out_hw_info
 		vfe_out_hw_info[CAM_VFE_BUS_VER3_VFE_OUT_MAX];
+	uint32_t comp_done_shift;
+	uint32_t top_irq_shift;
 };
 
 /*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
index 07d52e0..be4902b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
@@ -6,6 +6,7 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
index b79c6e6..da717a3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -12,6 +12,8 @@
 #include "cam_vfe_soc.h"
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
 #include "cam_vfe_camif_lite_ver2.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
@@ -24,8 +26,130 @@
 	struct cam_vfe_camif_lite_ver2_reg_data     *reg_data;
 	struct cam_hw_soc_info                      *soc_info;
 	enum cam_isp_hw_sync_mode                    sync_mode;
+
+	cam_hw_mgr_event_cb_func              event_cb;
+	void                                 *priv;
+	int                                   irq_err_handle;
+	int                                   irq_handle;
+	void                                 *vfe_irq_controller;
+	struct cam_vfe_top_irq_evt_payload
+		evt_payload[CAM_VFE_CAMIF_LITE_EVT_MAX];
+	struct list_head                      free_payload_list;
+	spinlock_t                            spin_lock;
 };
 
+static int cam_vfe_camif_lite_get_evt_payload(
+	struct cam_vfe_mux_camif_lite_data     *camif_lite_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&camif_lite_priv->spin_lock);
+	if (list_empty(&camif_lite_priv->free_payload_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&camif_lite_priv->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	rc = 0;
+done:
+	spin_unlock(&camif_lite_priv->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_camif_lite_put_evt_payload(
+	struct cam_vfe_mux_camif_lite_data     *camif_lite_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	unsigned long flags;
+
+	if (!camif_lite_priv) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&camif_lite_priv->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list,
+		&camif_lite_priv->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&camif_lite_priv->spin_lock, flags);
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_camif_lite_err_irq_top_half(
+	uint32_t                               evt_id,
+	struct cam_irq_th_payload             *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data    *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+	bool                                   error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
+
+	camif_lite_node = th_payload->handler_priv;
+	camif_lite_priv = camif_lite_node->res_priv;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+	if (th_payload->evt_status_arr[1] || (th_payload->evt_status_arr[0] &
+		camif_lite_priv->reg_data->lite_err_irq_mask0)) {
+		CAM_ERR(CAM_ISP,
+			"CAMIF LITE ERR VFE:%d IRQ STATUS_0=0x%x STATUS_1=0x%x",
+			camif_lite_node->hw_intf->hw_idx,
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		CAM_ERR(CAM_ISP, "Stopping further IRQ processing from VFE:%d",
+			camif_lite_node->hw_intf->hw_idx);
+		cam_irq_controller_disable_irq(
+			camif_lite_priv->vfe_irq_controller,
+			camif_lite_priv->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			camif_lite_priv->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ STATUS_0=0x%x STATUS_1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->irq_reg_val[i] = cam_io_r(camif_lite_priv->mem_base +
+		camif_lite_priv->common_reg->violation_status);
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = 0x%x",
+			evt_payload->irq_reg_val[i]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
 static int cam_vfe_camif_lite_get_reg_update(
 	struct cam_isp_resource_node          *camif_lite_res,
 	void                                  *cmd_args,
@@ -94,6 +218,8 @@
 	acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
 
 	camif_lite_data->sync_mode   = acquire_data->vfe_in.sync_mode;
+	camif_lite_data->event_cb    = acquire_data->event_cb;
+	camif_lite_data->priv        = acquire_data->priv;
 
 	CAM_DBG(CAM_ISP, "hw id:%d sync_mode=%d",
 		camif_lite_res->hw_intf->hw_idx,
@@ -106,6 +232,8 @@
 {
 	struct cam_vfe_mux_camif_lite_data   *rsrc_data;
 	uint32_t                              val = 0;
+	int                                   rc = 0;
+	uint32_t err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
 
 	if (!camif_lite_res) {
 		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -121,6 +249,11 @@
 	rsrc_data = (struct cam_vfe_mux_camif_lite_data *)
 		camif_lite_res->res_priv;
 
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->reg_data->lite_err_irq_mask0;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->reg_data->lite_err_irq_mask1;
+
 	/* vfe core config */
 	val = cam_io_r_mb(rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg);
@@ -151,14 +284,32 @@
 		camif_lite_res->hw_intf->hw_idx,
 		rsrc_data->reg_data->dual_pd_reg_update_cmd_data);
 
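+	/* Error IRQ is subscribed once and stays active until stop */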
+	if (!rsrc_data->irq_err_handle) {
+		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_1,
+			err_irq_mask,
+			camif_lite_res,
+			cam_vfe_camif_lite_err_irq_top_half,
+			camif_lite_res->bottom_half_handler,
+			camif_lite_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_err_handle = 0;
+		}
+	}
+
 	CAM_DBG(CAM_ISP, "Start Camif Lite IFE %d Done",
 		camif_lite_res->hw_intf->hw_idx);
-	return 0;
+	return rc;
 }
 
 static int cam_vfe_camif_lite_resource_stop(
 	struct cam_isp_resource_node             *camif_lite_res)
 {
+	struct cam_vfe_mux_camif_lite_data       *camif_lite_priv;
 	int                                       rc = 0;
 
 	if (!camif_lite_res) {
@@ -170,9 +321,25 @@
 		(camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE))
 		return 0;
 
+	camif_lite_priv = camif_lite_res->res_priv;
+
 	if (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
 		camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
+	if (camif_lite_priv->irq_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_lite_priv->vfe_irq_controller,
+			camif_lite_priv->irq_handle);
+		camif_lite_priv->irq_handle = 0;
+	}
+
+	if (camif_lite_priv->irq_err_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_lite_priv->vfe_irq_controller,
+			camif_lite_priv->irq_err_handle);
+		camif_lite_priv->irq_err_handle = 0;
+	}
+
 	return rc;
 }
 
@@ -204,19 +371,49 @@
 static int cam_vfe_camif_lite_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data    *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+
+	camif_lite_node = th_payload->handler_priv;
+	camif_lite_priv = camif_lite_node->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_camif_lite_handle_irq_bottom_half(
-	void                                    *handler_priv,
-	void                                    *evt_payload_priv)
+	void                                 *handler_priv,
+	void                                 *evt_payload_priv)
 {
-	int                                      ret = CAM_VFE_IRQ_STATUS_ERR;
-	struct cam_isp_resource_node            *camif_lite_node;
-	struct cam_vfe_mux_camif_lite_data      *camif_lite_priv;
-	struct cam_vfe_top_irq_evt_payload      *payload;
-	uint32_t                                 irq_status0;
-	uint32_t                                 irq_status1;
+	int                                   ret = CAM_VFE_IRQ_STATUS_MAX;
+	struct cam_isp_resource_node         *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data   *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload   *payload;
+	struct cam_isp_hw_event_info          evt_info;
+	uint32_t                              irq_status0;
+	uint32_t                              irq_status1;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP, "Invalid params");
@@ -225,55 +422,50 @@
 
 	camif_lite_node = handler_priv;
 	camif_lite_priv = camif_lite_node->res_priv;
-	payload         = evt_payload_priv;
-	irq_status0     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
-	irq_status1     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
+	payload = evt_payload_priv;
+	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
 
-	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
-	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
+	evt_info.hw_idx   = camif_lite_node->hw_intf->hw_idx;
+	evt_info.res_id   = camif_lite_node->res_id;
+	evt_info.res_type = camif_lite_node->res_type;
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status0 &
-			camif_lite_priv->reg_data->lite_sof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received SOF");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EPOCH:
-		if (irq_status0 &
-			camif_lite_priv->reg_data->lite_epoch0_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EPOCH");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_REG_UPDATE:
-		if (irq_status0 &
-			camif_lite_priv->reg_data->dual_pd_reg_upd_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EOF:
-		if (irq_status0 &
-			camif_lite_priv->reg_data->lite_eof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EOF\n");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_ERROR:
-		if (irq_status1 &
-			camif_lite_priv->reg_data->lite_error_irq_mask1) {
-			CAM_DBG(CAM_ISP, "Received ERROR\n");
-			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-		} else {
-			ret = CAM_ISP_HW_ERROR_NONE;
-		}
-		break;
-	default:
-		break;
+	CAM_DBG(CAM_ISP, "irq_status_0 = 0x%x irq_status_1 = 0x%x",
+		irq_status0, irq_status1);
+
+	if (irq_status0 & camif_lite_priv->reg_data->lite_sof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received SOF");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
+	if (irq_status0 & camif_lite_priv->reg_data->lite_epoch0_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_lite_priv->reg_data->dual_pd_reg_upd_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_lite_priv->reg_data->lite_eof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EOF\n");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if ((irq_status0 & camif_lite_priv->reg_data->lite_err_irq_mask0) ||
+		(irq_status1 & camif_lite_priv->reg_data->lite_err_irq_mask1)) {
+		CAM_DBG(CAM_ISP, "Received ERROR\n");
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+	}
+
+	cam_vfe_camif_lite_put_evt_payload(camif_lite_priv, &payload);
+
 	CAM_DBG(CAM_ISP, "returning status = %d", ret);
 	return ret;
 }
@@ -282,11 +474,13 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_lite_hw_info,
-	struct cam_isp_resource_node  *camif_lite_node)
+	struct cam_isp_resource_node  *camif_lite_node,
+	void                          *vfe_irq_controller)
 {
 	struct cam_vfe_mux_camif_lite_data       *camif_lite_priv = NULL;
 	struct cam_vfe_camif_lite_ver2_hw_info   *camif_lite_info =
 		camif_lite_hw_info;
+	int                                       i = 0;
 
 	camif_lite_priv = kzalloc(sizeof(*camif_lite_priv),
 		GFP_KERNEL);
@@ -295,13 +489,14 @@
 
 	camif_lite_node->res_priv = camif_lite_priv;
 
-	camif_lite_priv->mem_base         =
+	camif_lite_priv->mem_base           =
 		soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
-	camif_lite_priv->camif_lite_reg   = camif_lite_info->camif_lite_reg;
-	camif_lite_priv->common_reg       = camif_lite_info->common_reg;
-	camif_lite_priv->reg_data         = camif_lite_info->reg_data;
-	camif_lite_priv->hw_intf          = hw_intf;
-	camif_lite_priv->soc_info         = soc_info;
+	camif_lite_priv->camif_lite_reg     = camif_lite_info->camif_lite_reg;
+	camif_lite_priv->common_reg         = camif_lite_info->common_reg;
+	camif_lite_priv->reg_data           = camif_lite_info->reg_data;
+	camif_lite_priv->hw_intf            = hw_intf;
+	camif_lite_priv->soc_info           = soc_info;
+	camif_lite_priv->vfe_irq_controller = vfe_irq_controller;
 
 	camif_lite_node->init    = NULL;
 	camif_lite_node->deinit  = NULL;
@@ -313,14 +508,27 @@
 	camif_lite_node->bottom_half_handler =
 		cam_vfe_camif_lite_handle_irq_bottom_half;
 
+	spin_lock_init(&camif_lite_priv->spin_lock);
+	INIT_LIST_HEAD(&camif_lite_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list);
+		list_add_tail(&camif_lite_priv->evt_payload[i].list,
+			&camif_lite_priv->free_payload_list);
+	}
+
 	return 0;
 }
 
 int cam_vfe_camif_lite_ver2_deinit(
 	struct cam_isp_resource_node  *camif_lite_node)
 {
-	struct cam_vfe_mux_camif_data *camif_lite_priv =
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv =
 		camif_lite_node->res_priv;
+	int                                 i = 0;
+
+	INIT_LIST_HEAD(&camif_lite_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++)
+		INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list);
 
 	camif_lite_node->start = NULL;
 	camif_lite_node->stop  = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h
index 291e350..7813e55 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_CAMIF_LITE_VER2_H_
@@ -9,6 +9,8 @@
 #include "cam_isp_hw.h"
 #include "cam_vfe_top.h"
 
+#define CAM_VFE_CAMIF_LITE_EVT_MAX     256
+
 struct cam_vfe_camif_lite_ver2_reg {
 	uint32_t     camif_lite_cmd;
 	uint32_t     camif_lite_config;
@@ -25,8 +27,10 @@
 	uint32_t     lite_epoch0_irq_mask;
 	uint32_t     dual_pd_reg_upd_irq_mask;
 	uint32_t     lite_eof_irq_mask;
-	uint32_t     lite_error_irq_mask0;
-	uint32_t     lite_error_irq_mask1;
+	uint32_t     lite_err_irq_mask0;
+	uint32_t     lite_err_irq_mask1;
+	uint32_t     lite_subscribe_irq_mask0;
+	uint32_t     lite_subscribe_irq_mask1;
 	uint32_t     extern_reg_update_shift;
 	uint32_t     dual_pd_path_sel_shift;
 };
@@ -45,7 +49,8 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_lite_hw_info,
-	struct cam_isp_resource_node  *camif_lite_node);
+	struct cam_isp_resource_node  *camif_lite_node,
+	void                          *vfe_irq_controller);
 
 int cam_vfe_camif_lite_ver2_deinit(
 	struct cam_isp_resource_node  *camif_node);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
index 67683cb..7dc1f83 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
@@ -12,6 +12,8 @@
 #include "cam_vfe_soc.h"
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver3.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
 #include "cam_vfe_camif_lite_ver3.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
@@ -24,8 +26,131 @@
 	struct cam_vfe_camif_lite_ver3_reg_data     *reg_data;
 	struct cam_hw_soc_info                      *soc_info;
 	enum cam_isp_hw_sync_mode                    sync_mode;
+	struct cam_vfe_camif_common_cfg              cam_common_cfg;
+
+	cam_hw_mgr_event_cb_func                     event_cb;
+	void                                        *priv;
+	int                                          irq_err_handle;
+	int                                          irq_handle;
+	void                                        *vfe_irq_controller;
+	struct list_head                             free_payload_list;
+	spinlock_t                                   spin_lock;
+	struct cam_vfe_top_irq_evt_payload
+		evt_payload[CAM_VFE_CAMIF_LITE_EVT_MAX];
 };
 
+static int cam_vfe_camif_lite_get_evt_payload(
+	struct cam_vfe_mux_camif_lite_data     *camif_lite_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&camif_lite_priv->spin_lock);
+	if (list_empty(&camif_lite_priv->free_payload_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&camif_lite_priv->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	rc = 0;
+done:
+	spin_unlock(&camif_lite_priv->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_camif_lite_put_evt_payload(
+	struct cam_vfe_mux_camif_lite_data     *camif_lite_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	unsigned long flags;
+
+	if (!camif_lite_priv) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&camif_lite_priv->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list,
+		&camif_lite_priv->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&camif_lite_priv->spin_lock, flags);
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_camif_lite_err_irq_top_half(
+	uint32_t                               evt_id,
+	struct cam_irq_th_payload             *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data    *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+	bool                                   error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_2 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
+
+	camif_lite_node = th_payload->handler_priv;
+	camif_lite_priv = camif_lite_node->res_priv;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+	if (th_payload->evt_status_arr[2] || (th_payload->evt_status_arr[0] &
+		camif_lite_priv->reg_data->error_irq_mask0)) {
+		CAM_ERR(CAM_ISP,
+			"CAMIF Lite Err VFE:%d IRQ STATUS_0=0x%x STATUS_2=0x%x",
+			camif_lite_node->hw_intf->hw_idx,
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[2]);
+		CAM_ERR(CAM_ISP, "Stopping further IRQ processing from VFE:%d",
+			camif_lite_node->hw_intf->hw_idx);
+		cam_irq_controller_disable_irq(
+			camif_lite_priv->vfe_irq_controller,
+			camif_lite_priv->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			camif_lite_priv->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ STATUS_0=0x%x STATUS_2=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[2]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->irq_reg_val[i] = cam_io_r(camif_lite_priv->mem_base +
+		camif_lite_priv->common_reg->violation_status);
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = 0x%x",
+			evt_payload->irq_reg_val[i]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
 static int cam_vfe_camif_lite_get_reg_update(
 	struct cam_isp_resource_node          *camif_lite_res,
 	void                                  *cmd_args,
@@ -96,6 +221,8 @@
 	acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
 
 	camif_lite_data->sync_mode   = acquire_data->vfe_in.sync_mode;
+	camif_lite_data->event_cb    = acquire_data->event_cb;
+	camif_lite_data->priv        = acquire_data->priv;
 
 	CAM_DBG(CAM_ISP, "hw id:%d sync_mode=%d",
 		camif_lite_res->hw_intf->hw_idx,
@@ -108,6 +235,9 @@
 {
 	struct cam_vfe_mux_camif_lite_data   *rsrc_data;
 	uint32_t                              val = 0;
+	int                                   rc = 0;
+	uint32_t err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
 
 	if (!camif_lite_res) {
 		CAM_ERR(CAM_ISP, "Invalid input arguments");
@@ -125,26 +255,36 @@
 	rsrc_data = (struct cam_vfe_mux_camif_lite_data *)
 		camif_lite_res->res_priv;
 
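+	/*
+	 * IFE-Lite targets have no pixel-pipe core_cfg or EPOCH programming,
+	 * so skip straight to enabling the CAMIF module.
+	 */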
+	if (strnstr(rsrc_data->soc_info->compatible, "lite",
+		strlen(rsrc_data->soc_info->compatible)) != NULL)
+		goto skip_core_cfg;
+
 	/* vfe core config */
 	val = cam_io_r_mb(rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg_0);
 
-	if (camif_lite_res->res_id == CAM_ISP_HW_VFE_IN_PDLIB &&
-		camif_lite_res->res_id == CAM_ISP_HW_VFE_IN_LCR &&
+	if (camif_lite_res->res_id == CAM_ISP_HW_VFE_IN_LCR &&
 		rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
 		val |= (1 << rsrc_data->reg_data->extern_reg_update_shift);
 
+	if (camif_lite_res->res_id == CAM_ISP_HW_VFE_IN_PDLIB) {
+		val |= (1 << rsrc_data->reg_data->operating_mode_shift);
+		val |= (rsrc_data->cam_common_cfg.input_mux_sel_pdaf & 0x1) <<
+			CAM_SHIFT_TOP_CORE_CFG_MUXSEL_PDAF;
+	}
+
 	cam_io_w_mb(val, rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg_0);
 
 	CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d",
 		camif_lite_res->hw_intf->hw_idx, val);
 
-	/* epoch config with 20 line */
+	/* epoch config */
 	cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
 		rsrc_data->mem_base +
 		rsrc_data->camif_lite_reg->lite_epoch_irq);
 
+skip_core_cfg:
 	/* Enable Camif */
 	cam_io_w_mb(0x1,
 		rsrc_data->mem_base +
@@ -157,15 +297,57 @@
 		rsrc_data->mem_base +
 		rsrc_data->camif_lite_reg->reg_update_cmd);
 
-	CAM_DBG(CAM_ISP, "Start Camif Lite IFE %d Done",
+	memset(err_irq_mask, 0, sizeof(err_irq_mask));
+	memset(irq_mask, 0, sizeof(irq_mask));
+
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->reg_data->error_irq_mask0;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS2] =
+		rsrc_data->reg_data->error_irq_mask2;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->reg_data->subscribe_irq_mask1;
+
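+	/*
+	 * Subscribe once and cache the handle; resource_stop unsubscribes
+	 * and zeroes it, so repeated start/stop cycles cannot leak
+	 * subscriptions.
+	 */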
+	if (!rsrc_data->irq_handle) {
+		rsrc_data->irq_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			irq_mask,
+			camif_lite_res,
+			camif_lite_res->top_half_handler,
+			camif_lite_res->bottom_half_handler,
+			camif_lite_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_handle < 1) {
+			CAM_ERR(CAM_ISP, "IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_handle = 0;
+		}
+	}
+
+	if (!rsrc_data->irq_err_handle) {
+		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_1,
+			err_irq_mask,
+			camif_lite_res,
+			cam_vfe_camif_lite_err_irq_top_half,
+			camif_lite_res->bottom_half_handler,
+			camif_lite_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_err_handle = 0;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Start CAMIF Lite VFE:%d Done",
 		camif_lite_res->hw_intf->hw_idx);
-	return 0;
+	return rc;
 }
 
 static int cam_vfe_camif_lite_resource_stop(
 	struct cam_isp_resource_node             *camif_lite_res)
 {
 	struct cam_vfe_mux_camif_lite_data       *rsrc_data;
+	int                                       rc = 0;
 
 	if (!camif_lite_res) {
 		CAM_ERR(CAM_ISP, "Invalid input arguments");
@@ -182,13 +364,59 @@
 		(struct cam_vfe_mux_camif_lite_data *)camif_lite_res->res_priv;
 
 	/* Disable Camif */
-	cam_io_w_mb(0x0,
-		rsrc_data->mem_base +
+	cam_io_w_mb(0x0, rsrc_data->mem_base +
 		rsrc_data->camif_lite_reg->lite_module_config);
 
 	if (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
 		camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
+	if (rsrc_data->irq_handle > 0) {
+		cam_irq_controller_unsubscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			rsrc_data->irq_handle);
+		rsrc_data->irq_handle = 0;
+	}
+
+	if (rsrc_data->irq_err_handle > 0) {
+		cam_irq_controller_unsubscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			rsrc_data->irq_err_handle);
+		rsrc_data->irq_err_handle = 0;
+	}
+
+	return rc;
+}
+
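+/*
+ * Cache the userspace core configuration; for the PDLIB path the PDAF
+ * input mux selection is applied to core_cfg_0 at resource_start.
+ */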
+static int cam_vfe_camif_lite_ver3_core_config(
+	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
+{
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv;
+	struct cam_vfe_core_config_args *vfe_core_cfg =
+		(struct cam_vfe_core_config_args *)cmd_args;
+
+	camif_lite_priv =
+		(struct cam_vfe_mux_camif_lite_data *)rsrc_node->res_priv;
+	camif_lite_priv->cam_common_cfg.vid_ds16_r2pd =
+		vfe_core_cfg->core_config.vid_ds16_r2pd;
+	camif_lite_priv->cam_common_cfg.vid_ds4_r2pd =
+		vfe_core_cfg->core_config.vid_ds4_r2pd;
+	camif_lite_priv->cam_common_cfg.disp_ds16_r2pd =
+		vfe_core_cfg->core_config.disp_ds16_r2pd;
+	camif_lite_priv->cam_common_cfg.disp_ds4_r2pd =
+		vfe_core_cfg->core_config.disp_ds4_r2pd;
+	camif_lite_priv->cam_common_cfg.dsp_streaming_tap_point =
+		vfe_core_cfg->core_config.dsp_streaming_tap_point;
+	camif_lite_priv->cam_common_cfg.ihist_src_sel =
+		vfe_core_cfg->core_config.ihist_src_sel;
+	camif_lite_priv->cam_common_cfg.hdr_be_src_sel =
+		vfe_core_cfg->core_config.hdr_be_src_sel;
+	camif_lite_priv->cam_common_cfg.hdr_bhist_src_sel =
+		vfe_core_cfg->core_config.hdr_bhist_src_sel;
+	camif_lite_priv->cam_common_cfg.input_mux_sel_pdaf =
+		vfe_core_cfg->core_config.input_mux_sel_pdaf;
+	camif_lite_priv->cam_common_cfg.input_mux_sel_pp =
+		vfe_core_cfg->core_config.input_mux_sel_pp;
+
 	return 0;
 }
 
@@ -208,6 +436,9 @@
 		rc = cam_vfe_camif_lite_get_reg_update(rsrc_node, cmd_args,
 			arg_size);
 		break;
+	case CAM_ISP_HW_CMD_CORE_CONFIG:
+		rc = cam_vfe_camif_lite_ver3_core_config(rsrc_node, cmd_args);
+		break;
 	default:
 		CAM_ERR(CAM_ISP,
 			"unsupported process command:%d", cmd_type);
@@ -217,23 +448,166 @@
 	return rc;
 }
 
+static void cam_vfe_camif_lite_print_status(uint32_t val,
+	uint32_t violation_status, int ret, bool is_ife_lite)
+{
+	uint32_t violation_mask = 0x3F00;
+
+	if (is_ife_lite) {
+		if (ret == CAM_VFE_IRQ_STATUS_OVERFLOW) {
+			if (val & 0x100)
+				CAM_INFO(CAM_ISP, "RDI3 FRAME DROP");
+
+			if (val & 0x80)
+				CAM_INFO(CAM_ISP, "RDI2 FRAME DROP");
+
+			if (val & 0x40)
+				CAM_INFO(CAM_ISP, "RDI1 FRAME DROP");
+
+			if (val & 0x20)
+				CAM_INFO(CAM_ISP, "RDI0 FRAME DROP");
+
+			if (val & 0x8)
+				CAM_INFO(CAM_ISP, "RDI3 OVERFLOW");
+
+			if (val & 0x4)
+				CAM_INFO(CAM_ISP, "RDI2 OVERFLOW");
+
+			if (val & 0x2)
+				CAM_INFO(CAM_ISP, "RDI1 OVERFLOW");
+
+			if (val & 0x1)
+				CAM_INFO(CAM_ISP, "RDI0 OVERFLOW");
+		}
+
+		if (ret == CAM_VFE_IRQ_STATUS_VIOLATION) {
+			if (val & 0x800)
+				CAM_INFO(CAM_ISP, "RDI3 CAMIF VIOLATION");
+
+			if (val & 0x400)
+				CAM_INFO(CAM_ISP, "RDI2 CAMIF VIOLATION");
+
+			if (val & 0x200)
+				CAM_INFO(CAM_ISP, "RDI1 CAMIF VIOLATION");
+
+			if (val & 0x100)
+				CAM_INFO(CAM_ISP, "RDI0 CAMIF VIOLATION");
+		}
+	} else {
+		if (ret == CAM_VFE_IRQ_STATUS_OVERFLOW) {
+			if (val & 0x200000)
+				CAM_INFO(CAM_ISP, "RDI2 FRAME DROP");
+
+			if (val & 0x400000)
+				CAM_INFO(CAM_ISP, "RDI1 FRAME DROP");
+
+			if (val & 0x800000)
+				CAM_INFO(CAM_ISP, "RDI0 FRAME DROP");
+
+			if (val & 0x1000000)
+				CAM_INFO(CAM_ISP, "PD PIPE FRAME DROP");
+
+			if (val & 0x8000000)
+				CAM_INFO(CAM_ISP, "RDI2 OVERFLOW");
+
+			if (val & 0x10000000)
+				CAM_INFO(CAM_ISP, "RDI1 OVERFLOW");
+
+			if (val & 0x20000000)
+				CAM_INFO(CAM_ISP, "RDI0 OVERFLOW");
+
+			if (val & 0x40000000)
+				CAM_INFO(CAM_ISP, "PD PIPE OVERFLOW");
+		}
+
+		if (ret == CAM_VFE_IRQ_STATUS_VIOLATION) {
+			if (val & 0x02000)
+				CAM_INFO(CAM_ISP, "PD CAMIF VIOLATION");
+
+			if (val & 0x04000)
+				CAM_INFO(CAM_ISP, "PD VIOLATION");
+
+			if (val & 0x08000)
+				CAM_INFO(CAM_ISP, "LCR CAMIF VIOLATION");
+
+			if (val & 0x010000)
+				CAM_INFO(CAM_ISP, "LCR VIOLATION");
+
+			if (val & 0x020000)
+				CAM_INFO(CAM_ISP, "RDI0 CAMIF VIOLATION");
+
+			if (val & 0x040000)
+				CAM_INFO(CAM_ISP, "RDI1 CAMIF VIOLATION");
+
+			if (val & 0x080000)
+				CAM_INFO(CAM_ISP, "RDI2 CAMIF VIOLATION");
+		}
+
+		if (violation_mask & violation_status)
+			CAM_INFO(CAM_ISP, "LCR VIOLATION, module = %d",
+				violation_mask & violation_status);
+
+		violation_mask = 0x0F0000;
+		if (violation_mask & violation_status)
+			CAM_INFO(CAM_ISP, "PD Violation, module = %d",
+				violation_mask & violation_status);
+	}
+}
+
 static int cam_vfe_camif_lite_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data    *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+
+	camif_lite_node = th_payload->handler_priv;
+	camif_lite_priv = camif_lite_node->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[2]);
+
+	rc  = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0: 0x%x status_1: 0x%x status_2: 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1],
+			th_payload->evt_status_arr[2]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_camif_lite_handle_irq_bottom_half(
 	void                                    *handler_priv,
 	void                                    *evt_payload_priv)
 {
-	int                                      ret = CAM_VFE_IRQ_STATUS_ERR;
-	struct cam_isp_resource_node            *camif_lite_node;
-	struct cam_vfe_mux_camif_lite_data      *camif_lite_priv;
-	struct cam_vfe_top_irq_evt_payload      *payload;
-	uint32_t                                 irq_status0;
-	uint32_t                                 irq_status1;
-	uint32_t                                 irq_status2;
+	int ret = CAM_VFE_IRQ_STATUS_MAX;
+	struct cam_isp_resource_node *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload *payload;
+	struct cam_isp_hw_event_info evt_info;
+	uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX];
+	int i = 0;
+	bool is_ife_lite = true;
+	uint32_t val = 0;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP, "Invalid params");
@@ -243,52 +617,129 @@
 	camif_lite_node = handler_priv;
 	camif_lite_priv = camif_lite_node->res_priv;
 	payload         = evt_payload_priv;
-	irq_status0     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
-	irq_status1     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
-	irq_status2     = payload->irq_reg_val[CAM_IFE_IRQ_VIOLATION_STATUS];
 
-	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	for (i = 0; i < CAM_IFE_IRQ_REGISTERS_MAX; i++)
+		irq_status[i] = payload->irq_reg_val[i];
+
+	evt_info.hw_idx   = camif_lite_node->hw_intf->hw_idx;
+	evt_info.res_id   = camif_lite_node->res_id;
+	evt_info.res_type = camif_lite_node->res_type;
+
 	CAM_DBG(CAM_ISP,
-		"irq_status_0 = 0x%x, irq_status_0 = 0x%x, irq_status_0 = 0x%x",
-		irq_status0, irq_status1, irq_status2);
+		"irq_status_0 = 0x%x, irq_status_1 = 0x%x, irq_status_2 = 0x%x",
+		irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0],
+		irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1],
+		irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]);
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status1 &
-			camif_lite_priv->reg_data->sof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received SOF");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EPOCH:
-		if (irq_status1 &
-			camif_lite_priv->reg_data->epoch0_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EPOCH");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EOF:
-		if (irq_status1 &
-			camif_lite_priv->reg_data->eof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EOF\n");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_ERROR:
-		if ((irq_status0 &
-			camif_lite_priv->reg_data->error_irq_mask0) ||
-			(irq_status2 &
-			camif_lite_priv->reg_data->error_irq_mask2)) {
-			CAM_ERR(CAM_ISP, "Received ERROR\n");
-			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-		} else {
-			ret = CAM_ISP_HW_ERROR_NONE;
-		}
-		break;
-	default:
-		break;
+	if (strnstr(camif_lite_priv->soc_info->compatible, "lite",
+		strlen(camif_lite_priv->soc_info->compatible)) == NULL)
+		is_ife_lite = false;
+
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_lite_priv->reg_data->sof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received SOF");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
 	}
 
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_lite_priv->reg_data->epoch0_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
+	}
+
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_lite_priv->reg_data->eof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EOF\n");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
+	}
+
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
+		& camif_lite_priv->reg_data->error_irq_mask0) {
+		CAM_DBG(CAM_ISP, "Received VFE Overflow ERROR\n");
+
+		evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		val = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->bus_overflow_status);
+
+		if (is_ife_lite && val) {
+			if (val & 0x01)
+				CAM_INFO(CAM_ISP, "RDI0 bus overflow");
+
+			if (val & 0x02)
+				CAM_INFO(CAM_ISP, "RDI1 bus overflow");
+
+			if (val & 0x04)
+				CAM_INFO(CAM_ISP, "RDI2 bus overflow");
+
+			if (val & 0x08)
+				CAM_INFO(CAM_ISP, "RDI3 bus overflow");
+		}
+
+		if (!is_ife_lite && val) {
+			if (val & 0x0800)
+				CAM_INFO(CAM_ISP, "CAMIF PD bus overflow");
+
+			if (val & 0x0400000)
+				CAM_INFO(CAM_ISP, "LCR bus overflow");
+
+			if (val & 0x0800000)
+				CAM_INFO(CAM_ISP, "RDI0 bus overflow");
+
+			if (val & 0x01000000)
+				CAM_INFO(CAM_ISP, "RDI1 bus overflow");
+
+			if (val & 0x02000000)
+				CAM_INFO(CAM_ISP, "RDI2 bus overflow");
+		}
+
+		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_camif_lite_print_status(
+			irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0],
+			irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret,
+			is_ife_lite);
+	}
+
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]) {
+		CAM_DBG(CAM_ISP, "Received CAMIF Lite Violation ERROR\n");
+
+		evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_VIOLATION;
+		cam_vfe_camif_lite_print_status(
+			irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2],
+			irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret,
+			is_ife_lite);
+	}
+
+	cam_vfe_camif_lite_put_evt_payload(camif_lite_priv, &payload);
+
 	CAM_DBG(CAM_ISP, "returning status = %d", ret);
 	return ret;
 }
@@ -297,12 +748,13 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_lite_hw_info,
-	struct cam_isp_resource_node  *camif_lite_node)
+	struct cam_isp_resource_node  *camif_lite_node,
+	void                          *vfe_irq_controller)
 {
 	struct cam_vfe_mux_camif_lite_data       *camif_lite_priv = NULL;
 	struct cam_vfe_camif_lite_ver3_hw_info   *camif_lite_info =
 		camif_lite_hw_info;
+	int                                       i = 0;
 
 	CAM_DBG(CAM_ISP, "res id %d", camif_lite_node->res_id);
 
 	camif_lite_priv = kzalloc(sizeof(*camif_lite_priv),
@@ -319,6 +771,7 @@
 	camif_lite_priv->reg_data         = camif_lite_info->reg_data;
 	camif_lite_priv->hw_intf          = hw_intf;
 	camif_lite_priv->soc_info         = soc_info;
+	camif_lite_priv->vfe_irq_controller = vfe_irq_controller;
 
 	camif_lite_node->init    = NULL;
 	camif_lite_node->deinit  = NULL;
@@ -330,14 +783,27 @@
 	camif_lite_node->bottom_half_handler =
 		cam_vfe_camif_lite_handle_irq_bottom_half;
 
+	spin_lock_init(&camif_lite_priv->spin_lock);
+	INIT_LIST_HEAD(&camif_lite_priv->free_payload_list);
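+	/* Seed the free list with every payload in the pool. */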
+	for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list);
+		list_add_tail(&camif_lite_priv->evt_payload[i].list,
+			&camif_lite_priv->free_payload_list);
+	}
+
 	return 0;
 }
 
 int cam_vfe_camif_lite_ver3_deinit(
 	struct cam_isp_resource_node  *camif_lite_node)
 {
-	struct cam_vfe_mux_camif_data *camif_lite_priv =
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv =
 		camif_lite_node->res_priv;
+	int                                 i = 0;
+
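+	/* Reinitialize the pool lists so deinit leaves no dangling entries. */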
+	INIT_LIST_HEAD(&camif_lite_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++)
+		INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list);
 
 	camif_lite_node->start = NULL;
 	camif_lite_node->stop  = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
index 45aaa11..ad8e44e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
@@ -10,6 +10,7 @@
 #include "cam_vfe_top.h"
 
 #define CAM_VFE_RDI_VER2_MAX 4
+#define CAM_VFE_CAMIF_LITE_EVT_MAX 256
 
 struct cam_vfe_camif_lite_ver3_reg {
 	uint32_t     lite_hw_version;
@@ -27,6 +28,8 @@
 
 struct cam_vfe_camif_lite_ver3_reg_data {
 	uint32_t     extern_reg_update_shift;
+	uint32_t     operating_mode_shift;
+	uint32_t     input_mux_sel_shift;
 	uint32_t     reg_update_cmd_data;
 	uint32_t     epoch_line_cfg;
 	uint32_t     sof_irq_mask;
@@ -35,6 +38,7 @@
 	uint32_t     eof_irq_mask;
 	uint32_t     error_irq_mask0;
 	uint32_t     error_irq_mask2;
+	uint32_t     subscribe_irq_mask1;
 	uint32_t     enable_diagnostic_hw;
 };
 
@@ -52,7 +56,8 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_lite_hw_info,
-	struct cam_isp_resource_node  *camif_lite_node);
+	struct cam_isp_resource_node  *camif_lite_node,
+	void                          *vfe_irq_controller);
 
 int cam_vfe_camif_lite_ver3_deinit(
 	struct cam_isp_resource_node  *camif_node);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 149d45e..27ea3af 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -12,6 +12,8 @@
 #include "cam_vfe_soc.h"
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
 #include "cam_vfe_camif_ver2.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
@@ -27,6 +29,15 @@
 	struct cam_vfe_camif_reg_data               *reg_data;
 	struct cam_hw_soc_info                      *soc_info;
 
+	cam_hw_mgr_event_cb_func             event_cb;
+	void                                *priv;
+	int                                  irq_err_handle;
+	int                                  irq_handle;
+	void                                *vfe_irq_controller;
+	struct cam_vfe_top_irq_evt_payload evt_payload[CAM_VFE_CAMIF_EVT_MAX];
+	struct list_head                     free_payload_list;
+	spinlock_t                           spin_lock;
+
 	enum cam_isp_hw_sync_mode          sync_mode;
 	uint32_t                           dsp_mode;
 	uint32_t                           pix_pattern;
@@ -39,6 +50,114 @@
 	uint32_t                           camif_debug;
 };
 
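+/*
+ * Same fixed payload pool scheme as the CAMIF Lite muxes: entries are
+ * taken from a per-resource free list instead of being allocated in
+ * atomic context.
+ */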
+static int cam_vfe_camif_get_evt_payload(
+	struct cam_vfe_mux_camif_data            *camif_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&camif_priv->spin_lock);
+	if (list_empty(&camif_priv->free_payload_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&camif_priv->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+done:
+	spin_unlock(&camif_priv->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_camif_put_evt_payload(
+	struct cam_vfe_mux_camif_data            *camif_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	unsigned long flags;
+
+	if (!camif_priv) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&camif_priv->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list, &camif_priv->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&camif_priv->spin_lock, flags);
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_camif_err_irq_top_half(
+	uint32_t                               evt_id,
+	struct cam_irq_th_payload             *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_node;
+	struct cam_vfe_mux_camif_data         *camif_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+	bool                                   error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
+
+	camif_node = th_payload->handler_priv;
+	camif_priv = camif_node->res_priv;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+	if (th_payload->evt_status_arr[1] || (th_payload->evt_status_arr[0] &
+		camif_priv->reg_data->error_irq_mask0)) {
+		CAM_ERR(CAM_ISP,
+			"Camif Error: vfe:%d: IRQ STATUS_0=0x%x STATUS_1=0x%x",
+			camif_node->hw_intf->hw_idx,
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		CAM_ERR(CAM_ISP, "Stopping further IRQ processing from vfe=%d",
+			camif_node->hw_intf->hw_idx);
+		cam_irq_controller_disable_irq(camif_priv->vfe_irq_controller,
+			camif_priv->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			camif_priv->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_camif_get_evt_payload(camif_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ STATUS_0=0x%x STATUS_1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->irq_reg_val[i] = cam_io_r(camif_priv->mem_base +
+		camif_priv->common_reg->violation_status);
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = 0x%x",
+			evt_payload->irq_reg_val[i]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
 static int cam_vfe_camif_validate_pix_pattern(uint32_t pattern)
 {
 	int rc;
@@ -135,6 +254,8 @@
 	camif_data->last_pixel  = acquire_data->vfe_in.in_port->left_stop;
 	camif_data->first_line  = acquire_data->vfe_in.in_port->line_start;
 	camif_data->last_line   = acquire_data->vfe_in.in_port->line_stop;
+	camif_data->event_cb    = acquire_data->event_cb;
+	camif_data->priv        = acquire_data->priv;
 
 	CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d",
 		camif_res->hw_intf->hw_idx,
@@ -194,18 +315,20 @@
 	}
 
 	return rc;
-
 }
 
 static int cam_vfe_camif_resource_start(
-	struct cam_isp_resource_node        *camif_res)
+	struct cam_isp_resource_node   *camif_res)
 {
-	struct cam_vfe_mux_camif_data       *rsrc_data;
-	uint32_t                             val = 0;
-	uint32_t                             epoch0_irq_mask;
-	uint32_t                             epoch1_irq_mask;
-	uint32_t                             computed_epoch_line_cfg;
-	struct cam_vfe_soc_private          *soc_private;
+	struct cam_vfe_mux_camif_data  *rsrc_data;
+	uint32_t                        val = 0;
+	uint32_t                        epoch0_irq_mask;
+	uint32_t                        epoch1_irq_mask;
+	uint32_t                        computed_epoch_line_cfg;
+	int                             rc = 0;
+	uint32_t                        err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t                        irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
+	struct cam_vfe_soc_private     *soc_private;
 
 	if (!camif_res) {
 		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -219,6 +342,14 @@
 	}
 
 	rsrc_data = (struct cam_vfe_mux_camif_data  *)camif_res->res_priv;
+	memset(err_irq_mask, 0, sizeof(err_irq_mask));
+	memset(irq_mask, 0, sizeof(irq_mask));
+
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->reg_data->error_irq_mask0;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->reg_data->error_irq_mask1;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->reg_data->subscribe_irq_mask0;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->reg_data->subscribe_irq_mask1;
 
 	soc_private = rsrc_data->soc_info->soc_private;
 
@@ -302,6 +433,40 @@
 			rsrc_data->camif_reg->vfe_diag_config);
 	}
 
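+	/*
+	 * IRQ handles are cached so a repeated start does not
+	 * double-subscribe; resource_stop releases and zeroes them.
+	 */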
+	if (!rsrc_data->irq_handle) {
+		rsrc_data->irq_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			irq_mask,
+			camif_res,
+			camif_res->top_half_handler,
+			camif_res->bottom_half_handler,
+			camif_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_handle < 1) {
+			CAM_ERR(CAM_ISP, "IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_handle = 0;
+		}
+	}
+
+	if (!rsrc_data->irq_err_handle) {
+		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_1,
+			err_irq_mask,
+			camif_res,
+			cam_vfe_camif_err_irq_top_half,
+			camif_res->bottom_half_handler,
+			camif_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_err_handle = 0;
+		}
+	}
+
 	CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
-	return 0;
+	return rc;
 }
@@ -455,6 +620,19 @@
 			camif_priv->camif_reg->vfe_diag_config);
 	}
 
+	if (camif_priv->irq_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_priv->vfe_irq_controller, camif_priv->irq_handle);
+		camif_priv->irq_handle = 0;
+	}
+
+	if (camif_priv->irq_err_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_priv->vfe_irq_controller,
+			camif_priv->irq_err_handle);
+		camif_priv->irq_err_handle = 0;
+	}
+
 	return rc;
 }
 
@@ -514,16 +692,46 @@
 static int cam_vfe_camif_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_node;
+	struct cam_vfe_mux_camif_data         *camif_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+
+	camif_node = th_payload->handler_priv;
+	camif_priv = camif_node->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_camif_get_evt_payload(camif_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv,
 	void *evt_payload_priv)
 {
-	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
+	int                                   ret = CAM_VFE_IRQ_STATUS_MAX;
 	struct cam_isp_resource_node         *camif_node;
 	struct cam_vfe_mux_camif_data        *camif_priv;
-	struct cam_vfe_top_irq_evt_payload   *payload;
+	struct cam_vfe_top_irq_evt_payload   *payload;
+	struct cam_isp_hw_event_info          evt_info;
 	uint32_t                              irq_status0;
 	uint32_t                              irq_status1;
 	uint32_t                              val;
@@ -539,70 +747,104 @@
 	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
 
-	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
-	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
+	evt_info.hw_idx   = camif_node->hw_intf->hw_idx;
+	evt_info.res_id   = camif_node->res_id;
+	evt_info.res_type = camif_node->res_type;
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status0 & camif_priv->reg_data->sof_irq_mask) {
-			if ((camif_priv->enable_sof_irq_debug) &&
-				(camif_priv->irq_debug_cnt <=
-				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
-				CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+	CAM_DBG(CAM_ISP, "irq_status_0 = 0x%x irq_status_1 = 0x%x",
+		irq_status0, irq_status1);
 
-				camif_priv->irq_debug_cnt++;
-				if (camif_priv->irq_debug_cnt ==
-					CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
-					camif_priv->enable_sof_irq_debug =
-						false;
-					camif_priv->irq_debug_cnt = 0;
-				}
-			} else {
-				CAM_DBG(CAM_ISP, "Received SOF");
+	if (irq_status0 & camif_priv->reg_data->sof_irq_mask) {
+		if ((camif_priv->enable_sof_irq_debug) &&
+			(camif_priv->irq_debug_cnt <=
+			CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+
+			camif_priv->irq_debug_cnt++;
+			if (camif_priv->irq_debug_cnt ==
+				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
+				camif_priv->enable_sof_irq_debug =
+					false;
+				camif_priv->irq_debug_cnt = 0;
 			}
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EPOCH:
-		if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EPOCH");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_REG_UPDATE:
-		if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EOF:
-		if (irq_status0 & camif_priv->reg_data->eof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EOF\n");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_ERROR:
-		if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
-			CAM_DBG(CAM_ISP, "Received ERROR\n");
-			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-			cam_vfe_camif_reg_dump(camif_node->res_priv);
-		} else {
-			ret = CAM_ISP_HW_ERROR_NONE;
-		}
+		} else {
+			CAM_DBG(CAM_ISP, "Received SOF");
+		}
 
-		if (camif_priv->camif_debug &
-			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
-			val = cam_io_r(camif_priv->mem_base +
-				camif_priv->camif_reg->vfe_diag_sensor_status);
-			CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
-				camif_priv->mem_base, val);
-		}
-		break;
-	default:
-		break;
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
-	CAM_DBG(CAM_ISP, "returing status = %d", ret);
+	if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_priv->reg_data->eof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EOF\n");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_priv->reg_data->error_irq_mask0) {
+		CAM_DBG(CAM_ISP, "Received ERROR\n");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		CAM_INFO(CAM_ISP, "Violation status = %x",
+			payload->irq_reg_val[2]);
+
+		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_camif_reg_dump(camif_node->res_priv);
+	}
+
+	if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
+		CAM_DBG(CAM_ISP, "Received ERROR\n");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		CAM_INFO(CAM_ISP, "Violation status = %x",
+			payload->irq_reg_val[2]);
+
+		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_camif_reg_dump(camif_node->res_priv);
+	}
+
+	if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+		val = cam_io_r(camif_priv->mem_base +
+			camif_priv->camif_reg->vfe_diag_sensor_status);
+		CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
+			camif_priv->mem_base, val);
+	}
+
+	cam_vfe_camif_put_evt_payload(camif_priv, &payload);
+
+	CAM_DBG(CAM_ISP, "returning status = %d", ret);
 	return ret;
 }
 
@@ -610,10 +852,12 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_hw_info,
-	struct cam_isp_resource_node  *camif_node)
+	struct cam_isp_resource_node  *camif_node,
+	void                          *vfe_irq_controller)
 {
 	struct cam_vfe_mux_camif_data     *camif_priv = NULL;
 	struct cam_vfe_camif_ver2_hw_info *camif_info = camif_hw_info;
+	int                                i = 0;
 
 	camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_data),
 		GFP_KERNEL);
@@ -630,6 +874,7 @@
 	camif_priv->reg_data    = camif_info->reg_data;
 	camif_priv->hw_intf     = hw_intf;
 	camif_priv->soc_info    = soc_info;
+	camif_priv->vfe_irq_controller = vfe_irq_controller;
 
 	camif_node->init    = cam_vfe_camif_resource_init;
 	camif_node->deinit  = cam_vfe_camif_resource_deinit;
@@ -639,6 +884,14 @@
 	camif_node->top_half_handler = cam_vfe_camif_handle_irq_top_half;
 	camif_node->bottom_half_handler = cam_vfe_camif_handle_irq_bottom_half;
 
+	spin_lock_init(&camif_priv->spin_lock);
+	INIT_LIST_HEAD(&camif_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&camif_priv->evt_payload[i].list);
+		list_add_tail(&camif_priv->evt_payload[i].list,
+			&camif_priv->free_payload_list);
+	}
+
 	return 0;
 }
 
@@ -646,6 +899,11 @@
 	struct cam_isp_resource_node  *camif_node)
 {
 	struct cam_vfe_mux_camif_data *camif_priv = camif_node->res_priv;
+	int                            i = 0;
+
+	INIT_LIST_HEAD(&camif_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++)
+		INIT_LIST_HEAD(&camif_priv->evt_payload[i].list);
 
 	camif_node->start = NULL;
 	camif_node->stop  = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
index 2927f35..e1cbc94 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_CAMIF_VER2_H_
@@ -14,6 +14,8 @@
  */
 #define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS      BIT(0)
 
+#define CAM_VFE_CAMIF_EVT_MAX                      256
+
 struct cam_vfe_camif_ver2_reg {
 	uint32_t     camif_cmd;
 	uint32_t     camif_config;
@@ -63,6 +65,8 @@
 	uint32_t     eof_irq_mask;
 	uint32_t     error_irq_mask0;
 	uint32_t     error_irq_mask1;
+	uint32_t     subscribe_irq_mask0;
+	uint32_t     subscribe_irq_mask1;
 
 	uint32_t     enable_diagnostic_hw;
 };
@@ -81,7 +85,8 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_hw_info,
-	struct cam_isp_resource_node  *camif_node);
+	struct cam_isp_resource_node  *camif_node,
+	void                          *vfe_irq_controller);
 
 int cam_vfe_camif_ver2_deinit(
 	struct cam_isp_resource_node  *camif_node);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
index a14f4df..058482b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
@@ -12,13 +12,14 @@
 #include "cam_vfe_soc.h"
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver3.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
 #include "cam_vfe_camif_ver3.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
 #include "cam_cpas_api.h"
 
 #define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2
-#define CAM_VFE_CAMIF_VER3_CORE_CFG_0_DEFAULT 0x78002800
 
 struct cam_vfe_mux_camif_ver3_data {
 	void __iomem                                *mem_base;
@@ -27,6 +28,16 @@
 	struct cam_vfe_top_ver3_reg_offset_common   *common_reg;
 	struct cam_vfe_camif_ver3_reg_data          *reg_data;
 	struct cam_hw_soc_info                      *soc_info;
+	struct cam_vfe_camif_common_cfg             cam_common_cfg;
+
+	cam_hw_mgr_event_cb_func             event_cb;
+	void                                *priv;
+	int                                  irq_err_handle;
+	int                                  irq_handle;
+	void                                *vfe_irq_controller;
+	struct cam_vfe_top_irq_evt_payload   evt_payload[CAM_VFE_CAMIF_EVT_MAX];
+	struct list_head                     free_payload_list;
+	spinlock_t                           spin_lock;
 
 	enum cam_isp_hw_sync_mode          sync_mode;
 	uint32_t                           dsp_mode;
@@ -40,6 +51,114 @@
 	uint32_t                           camif_debug;
 };
 
+static int cam_vfe_camif_ver3_get_evt_payload(
+	struct cam_vfe_mux_camif_ver3_data     *camif_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&camif_priv->spin_lock);
+	if (list_empty(&camif_priv->free_payload_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&camif_priv->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+done:
+	spin_unlock(&camif_priv->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_camif_ver3_put_evt_payload(
+	struct cam_vfe_mux_camif_ver3_data     *camif_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	unsigned long flags;
+
+	if (!camif_priv) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&camif_priv->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list, &camif_priv->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&camif_priv->spin_lock, flags);
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_camif_ver3_err_irq_top_half(
+	uint32_t                               evt_id,
+	struct cam_irq_th_payload             *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_node;
+	struct cam_vfe_mux_camif_ver3_data    *camif_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+	bool                                   error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_2 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[2]);
+
+	camif_node = th_payload->handler_priv;
+	camif_priv = camif_node->res_priv;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+	if (th_payload->evt_status_arr[2] || (th_payload->evt_status_arr[0] &
+		camif_priv->reg_data->error_irq_mask0)) {
+		CAM_ERR(CAM_ISP,
+			"CAMIF Err VFE:%d: IRQ STATUS_0=0x%x STATUS_2=0x%x",
+			camif_node->hw_intf->hw_idx,
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[2]);
+		CAM_ERR(CAM_ISP, "Stopping further IRQ processing from VFE:%d",
+			camif_node->hw_intf->hw_idx);
+		cam_irq_controller_disable_irq(camif_priv->vfe_irq_controller,
+			camif_priv->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			camif_priv->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_camif_ver3_get_evt_payload(camif_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ STATUS_0=0x%x STATUS_2=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[2]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->irq_reg_val[i] = cam_io_r(camif_priv->mem_base +
+		camif_priv->common_reg->violation_status);
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = 0x%x",
+			evt_payload->irq_reg_val[i]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
 static int cam_vfe_camif_ver3_validate_pix_pattern(uint32_t pattern)
 {
 	int rc;
@@ -74,7 +193,7 @@
 	struct cam_vfe_mux_camif_ver3_data *rsrc_data = NULL;
 
 	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
-		CAM_ERR(CAM_ISP, "Invalid arg size: %d expected:%d",
+		CAM_ERR(CAM_ISP, "Invalid arg size: %d expected:%ld",
 			arg_size, sizeof(struct cam_isp_hw_get_cmd_update));
 		return -EINVAL;
 	}
@@ -140,6 +259,8 @@
 	camif_data->last_pixel  = acquire_data->vfe_in.in_port->left_stop;
 	camif_data->first_line  = acquire_data->vfe_in.in_port->line_start;
 	camif_data->last_line   = acquire_data->vfe_in.in_port->line_stop;
+	camif_data->event_cb    = acquire_data->event_cb;
+	camif_data->priv        = acquire_data->priv;
 
 	CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d",
 		camif_res->hw_intf->hw_idx,
@@ -227,6 +348,9 @@
 	uint32_t                             epoch0_line_cfg;
 	uint32_t                             epoch1_line_cfg;
 	uint32_t                             computed_epoch_line_cfg;
+	int                                  rc = 0;
+	uint32_t                        err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t                        irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
 	struct cam_vfe_soc_private          *soc_private;
 
 	if (!camif_res) {
@@ -240,7 +364,16 @@
 		return -EINVAL;
 	}
 
+	memset(err_irq_mask, 0, sizeof(err_irq_mask));
+	memset(irq_mask, 0, sizeof(irq_mask));
+
 	rsrc_data = (struct cam_vfe_mux_camif_ver3_data *)camif_res->res_priv;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->reg_data->error_irq_mask0;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS2] =
+		rsrc_data->reg_data->error_irq_mask2;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->reg_data->subscribe_irq_mask1;
 
 	soc_private = rsrc_data->soc_info->soc_private;
 
@@ -261,11 +394,6 @@
 	val = cam_io_r_mb(rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg_0);
 
-	/* Programming to default value must be removed once uapis have been
-	 * updated to receive this programming from userspace.
-	 */
-	val |= CAM_VFE_CAMIF_VER3_CORE_CFG_0_DEFAULT;
-
 	/* AF stitching by hw disabled by default
 	 * PP CAMIF currently operates only in offline mode
 	 */
@@ -286,6 +414,25 @@
 		(rsrc_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
 		val |= (1 << rsrc_data->reg_data->dual_ife_pix_en_shift);
 
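+	/*
+	 * Fold the cached userspace core configuration into core_cfg_0;
+	 * note the *_r2pd fields are programmed inverted.
+	 */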
+	val |= (~rsrc_data->cam_common_cfg.vid_ds16_r2pd & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_VID_DS16_R2PD;
+	val |= (~rsrc_data->cam_common_cfg.vid_ds4_r2pd & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_VID_DS4_R2PD;
+	val |= (~rsrc_data->cam_common_cfg.disp_ds16_r2pd & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_DISP_DS16_R2PD;
+	val |= (~rsrc_data->cam_common_cfg.disp_ds4_r2pd & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_DISP_DS4_R2PD;
+	val |= (rsrc_data->cam_common_cfg.dsp_streaming_tap_point & 0x3) <<
+		CAM_SHIFT_TOP_CORE_CFG_DSP_STREAMING;
+	val |= (rsrc_data->cam_common_cfg.ihist_src_sel & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_STATS_IHIST;
+	val |= (rsrc_data->cam_common_cfg.hdr_be_src_sel & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BE;
+	val |= (rsrc_data->cam_common_cfg.hdr_bhist_src_sel & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BHIST;
+	val |= (rsrc_data->cam_common_cfg.input_mux_sel_pp & 0x3) <<
+		CAM_SHIFT_TOP_CORE_CFG_INPUTMUX_PP;
+
 	cam_io_w_mb(val, rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg_0);
 
@@ -337,6 +484,41 @@
 			rsrc_data->common_reg->diag_config);
 	}
 
+	if (!rsrc_data->irq_handle) {
+		rsrc_data->irq_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			irq_mask,
+			camif_res,
+			camif_res->top_half_handler,
+			camif_res->bottom_half_handler,
+			camif_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_handle < 1) {
+			CAM_ERR(CAM_ISP, "IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_handle = 0;
+		}
+	}
+
+	if (!rsrc_data->irq_err_handle) {
+		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_1,
+			err_irq_mask,
+			camif_res,
+			cam_vfe_camif_ver3_err_irq_top_half,
+			camif_res->bottom_half_handler,
+			camif_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_err_handle = 0;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
-	return 0;
+	return rc;
 }
 
@@ -365,7 +547,10 @@
 			wm_idx, offset,
 			cam_io_r_mb(camif_priv->mem_base + offset),
 			offset + 4, cam_io_r_mb(camif_priv->mem_base +
-			offset + 4), offset + 8,
+			offset + 4));
+		CAM_INFO(CAM_ISP,
+			"BUS_WM%u offset 0x%x val 0x%x offset 0x%x val 0x%x",
+			wm_idx, offset + 8,
 			cam_io_r_mb(camif_priv->mem_base + offset + 8),
 			offset + 12, cam_io_r_mb(camif_priv->mem_base +
 			offset + 12));
@@ -489,9 +674,55 @@
 			camif_priv->common_reg->diag_config);
 	}
 
+	if (camif_priv->irq_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_priv->vfe_irq_controller, camif_priv->irq_handle);
+		camif_priv->irq_handle = 0;
+	}
+
+	if (camif_priv->irq_err_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_priv->vfe_irq_controller,
+			camif_priv->irq_err_handle);
+		camif_priv->irq_err_handle = 0;
+	}
+
 	return rc;
 }
 
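+/*
+ * Cache the userspace core configuration; resource_start folds these
+ * fields into the core_cfg_0 register write.
+ */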
+static int cam_vfe_camif_ver3_core_config(
+	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
+{
+	struct cam_vfe_mux_camif_ver3_data *camif_priv;
+	struct cam_vfe_core_config_args *vfe_core_cfg =
+		(struct cam_vfe_core_config_args *)cmd_args;
+
+	camif_priv =
+		(struct cam_vfe_mux_camif_ver3_data *)rsrc_node->res_priv;
+	camif_priv->cam_common_cfg.vid_ds16_r2pd =
+		vfe_core_cfg->core_config.vid_ds16_r2pd;
+	camif_priv->cam_common_cfg.vid_ds4_r2pd =
+		vfe_core_cfg->core_config.vid_ds4_r2pd;
+	camif_priv->cam_common_cfg.disp_ds16_r2pd =
+		vfe_core_cfg->core_config.disp_ds16_r2pd;
+	camif_priv->cam_common_cfg.disp_ds4_r2pd =
+		vfe_core_cfg->core_config.disp_ds4_r2pd;
+	camif_priv->cam_common_cfg.dsp_streaming_tap_point =
+		vfe_core_cfg->core_config.dsp_streaming_tap_point;
+	camif_priv->cam_common_cfg.ihist_src_sel =
+		vfe_core_cfg->core_config.ihist_src_sel;
+	camif_priv->cam_common_cfg.hdr_be_src_sel =
+		vfe_core_cfg->core_config.hdr_be_src_sel;
+	camif_priv->cam_common_cfg.hdr_bhist_src_sel =
+		vfe_core_cfg->core_config.hdr_bhist_src_sel;
+	camif_priv->cam_common_cfg.input_mux_sel_pdaf =
+		vfe_core_cfg->core_config.input_mux_sel_pdaf;
+	camif_priv->cam_common_cfg.input_mux_sel_pp =
+		vfe_core_cfg->core_config.input_mux_sel_pp;
+
+	return 0;
+}
+
 static int cam_vfe_camif_ver3_sof_irq_debug(
 	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
 {
@@ -534,6 +765,9 @@
 	case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG:
 		rc = cam_vfe_camif_ver3_sof_irq_debug(rsrc_node, cmd_args);
 		break;
+	case CAM_ISP_HW_CMD_CORE_CONFIG:
+		rc = cam_vfe_camif_ver3_core_config(rsrc_node, cmd_args);
+		break;
 	case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG:
 		camif_priv = (struct cam_vfe_mux_camif_ver3_data *)
 			rsrc_node->res_priv;
@@ -548,23 +782,311 @@
 	return rc;
 }
 
+static void cam_vfe_camif_ver3_print_status(uint32_t val,
+	uint32_t violation_status, int ret)
+{
+	uint32_t violation_mask = 0x3F;
+	uint32_t module_id;
+
+	if (ret == CAM_VFE_IRQ_STATUS_OVERFLOW) {
+		if (val & 0x0200)
+			CAM_INFO(CAM_ISP, "DSP OVERFLOW");
+
+		if (val & 0x2000000)
+			CAM_INFO(CAM_ISP, "PIXEL PIPE FRAME DROP");
+
+		if (val & 0x80000000)
+			CAM_INFO(CAM_ISP, "PIXEL PIPE OVERFLOW");
+	}
+
+	if (ret == CAM_VFE_IRQ_STATUS_VIOLATION) {
+		if (val & 0x080)
+			CAM_INFO(CAM_ISP, "DSP IFE PROTOCOL VIOLATION");
+
+		if (val & 0x0100)
+			CAM_INFO(CAM_ISP, "IFE DSP TX PROTOCOL VIOLATION");
+
+		if (val & 0x0200)
+			CAM_INFO(CAM_ISP, "DSP IFE RX PROTOCOL VIOLATION");
+
+		if (val & 0x0400)
+			CAM_INFO(CAM_ISP, "PP PREPROCESS VIOLATION");
+
+		if (val & 0x0800)
+			CAM_INFO(CAM_ISP, "PP CAMIF VIOLATION");
+
+		if (val & 0x01000)
+			CAM_INFO(CAM_ISP, "PP VIOLATION");
+
+		if (val & 0x0100000)
+			CAM_INFO(CAM_ISP,
+				"DSP_TX_VIOLATION: overflow on DSP interface TX path FIFO");
+
+		if (val & 0x0200000)
+			CAM_INFO(CAM_ISP,
+				"DSP_RX_VIOLATION: overflow on DSP interface RX path FIFO");
+
+		if (val & 0x10000000)
+			CAM_INFO(CAM_ISP, "DSP ERROR VIOLATION");
+
+		if (val & 0x20000000)
+			CAM_INFO(CAM_ISP,
+				"DIAG VIOLATION: HBI is less than the minimum required HBI");
+	}
+
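+	/*
+	 * The low six bits of the violation status carry the ID of the
+	 * violating module, decoded in the switch below.
+	 */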
+	if (violation_mask & violation_status) {
+		CAM_INFO(CAM_ISP, "PP VIOLATION, module = %d",
+			violation_mask & violation_status);
+		module_id = violation_mask & violation_status;
+		switch (module_id) {
+		case 0:
+			CAM_INFO(CAM_ISP, "Demux");
+			break;
+		case 1:
+			CAM_INFO(CAM_ISP, "CHROMA_UP");
+			break;
+		case 2:
+			CAM_INFO(CAM_ISP, "PEDESTAL");
+			break;
+		case 3:
+			CAM_INFO(CAM_ISP, "LINEARIZATION");
+			break;
+		case 4:
+			CAM_INFO(CAM_ISP, "BPC_PDPC");
+			break;
+		case 5:
+			CAM_INFO(CAM_ISP, "HDR_BINCORRECT");
+			break;
+		case 6:
+			CAM_INFO(CAM_ISP, "ABF");
+			break;
+		case 7:
+			CAM_INFO(CAM_ISP, "LSC");
+			break;
+		case 8:
+			CAM_INFO(CAM_ISP, "DEMOSAIC");
+			break;
+		case 9:
+			CAM_INFO(CAM_ISP, "COLOR_CORRECT");
+			break;
+		case 10:
+			CAM_INFO(CAM_ISP, "GTM");
+			break;
+		case 11:
+			CAM_INFO(CAM_ISP, "GLUT");
+			break;
+		case 12:
+			CAM_INFO(CAM_ISP, "COLOR_XFORM");
+			break;
+		case 13:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_PIXEL_RAW_OUT");
+			break;
+		case 14:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_MN_Y_FD_OUT");
+			break;
+		case 15:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_MN_C_FD_OUT");
+			break;
+		case 16:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_FD_OUT");
+			break;
+		case 17:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_FD_OUT");
+			break;
+		case 18:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_MN_Y_DISP_OUT");
+			break;
+		case 19:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_MN_C_DISP_OUT");
+			break;
+		case 20:
+			CAM_INFO(CAM_ISP,
+				"module: CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_DISP_OUT");
+			break;
+		case 21:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_DISP_OUT");
+			break;
+		case 22:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_4TO1_Y_DISP_DS4_OUT");
+			break;
+		case 23:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_4TO1_C_DISP_DS4_OUT");
+			break;
+		case 24:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS4_OUT");
+			break;
+		case 25:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS4_OUT");
+			break;
+		case 26:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_4TO1_Y_DISP_DS16_OUT");
+			break;
+		case 27:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_4TO1_C_DISP_DS16_OUT");
+			break;
+		case 28:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS16_OUT");
+			break;
+		case 29:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS16_OUT");
+			break;
+		case 30:
+			CAM_INFO(CAM_ISP, "DOWNSCALE_MN_Y_VID_OUT");
+			break;
+		case 31:
+			CAM_INFO(CAM_ISP, "DOWNSCALE_MN_C_VID_OUT");
+			break;
+		case 32:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_VID_OUT");
+			break;
+		case 33:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_VID_OUT");
+			break;
+		case 34:
+			CAM_INFO(CAM_ISP, "DSX_Y_VID_OUT");
+			break;
+		case 35:
+			CAM_INFO(CAM_ISP, "DSX_C_VID_OUT");
+			break;
+		case 36:
+			CAM_INFO(CAM_ISP, "CROP_RND_CLAMP_POST_DSX_Y_VID_OUT");
+			break;
+		case 37:
+			CAM_INFO(CAM_ISP, "CROP_RND_CLAMP_POST_DSX_C_VID_OUT");
+			break;
+		case 38:
+			CAM_INFO(CAM_ISP, "DOWNSCALE_4TO1_Y_VID_DS16_OUT");
+			break;
+		case 39:
+			CAM_INFO(CAM_ISP, "DOWNSCALE_4TO1_C_VID_DS16_OUT");
+			break;
+		case 40:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_VID_DS16_OUT");
+			break;
+		case 41:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_VID_DS16_OUT");
+			break;
+		case 42:
+			CAM_INFO(CAM_ISP, "BLS");
+			break;
+		case 43:
+			CAM_INFO(CAM_ISP, "STATS_TINTLESS_BG");
+			break;
+		case 44:
+			CAM_INFO(CAM_ISP, "STATS_HDR_BHIST");
+			break;
+		case 45:
+			CAM_INFO(CAM_ISP, "STATS_HDR_BE");
+			break;
+		case 46:
+			CAM_INFO(CAM_ISP, "STATS_AWB_BG");
+			break;
+		case 47:
+			CAM_INFO(CAM_ISP, "STATS_BHIST");
+			break;
+		case 48:
+			CAM_INFO(CAM_ISP, "STATS_BAF");
+			break;
+		case 49:
+			CAM_INFO(CAM_ISP, "STATS_RS");
+			break;
+		case 50:
+			CAM_INFO(CAM_ISP, "STATS_CS");
+			break;
+		case 51:
+			CAM_INFO(CAM_ISP, "STATS_IHIST");
+			break;
+		default:
+			CAM_ERR(CAM_ISP,
+				"Invalid Module ID:%d", module_id);
+			break;
+		}
+	}
+}
+
 static int cam_vfe_camif_ver3_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_node;
+	struct cam_vfe_mux_camif_ver3_data    *camif_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+
+	camif_node = th_payload->handler_priv;
+	camif_priv = camif_node->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
+
+	rc  = cam_vfe_camif_ver3_get_evt_payload(camif_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0: 0x%x status_1 : 0x%x status_2: 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1],
+			th_payload->evt_status_arr[2]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
 	void *evt_payload_priv)
 {
-	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
-	struct cam_isp_resource_node         *camif_node;
-	struct cam_vfe_mux_camif_ver3_data   *camif_priv;
-	struct cam_vfe_top_irq_evt_payload   *payload;
-	uint32_t                              irq_status0;
-	uint32_t                              irq_status1;
-	uint32_t                              irq_status2;
-	uint32_t                              val;
+	int ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node *camif_node;
+	struct cam_vfe_mux_camif_ver3_data *camif_priv;
+	struct cam_vfe_top_irq_evt_payload *payload;
+	struct cam_isp_hw_event_info evt_info;
+	uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t val;
+	int i = 0;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP,
@@ -576,68 +1098,170 @@
 	camif_node = handler_priv;
 	camif_priv = camif_node->res_priv;
 	payload = evt_payload_priv;
-	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
-	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
+	for (i = 0; i < CAM_IFE_IRQ_REGISTERS_MAX; i++)
+		irq_status[i] = payload->irq_reg_val[i];
 
-	CAM_DBG(CAM_ISP,
-		"evt_id:%d, irq_status0:0x%x, irq_status1:0x%x, irq_status2:0x%x",
-		payload->evt_id, irq_status0, irq_status1, irq_status2);
+	evt_info.hw_idx   = camif_node->hw_intf->hw_idx;
+	evt_info.res_id   = camif_node->res_id;
+	evt_info.res_type = camif_node->res_type;
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status1 & camif_priv->reg_data->sof_irq_mask) {
-			if ((camif_priv->enable_sof_irq_debug) &&
-				(camif_priv->irq_debug_cnt <=
-				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
-				CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_priv->reg_data->sof_irq_mask) {
+		if ((camif_priv->enable_sof_irq_debug) &&
+			(camif_priv->irq_debug_cnt <=
+			CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
 
-				camif_priv->irq_debug_cnt++;
-				if (camif_priv->irq_debug_cnt ==
-					CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
-					camif_priv->enable_sof_irq_debug =
-						false;
-					camif_priv->irq_debug_cnt = 0;
-				}
-			} else {
-				CAM_DBG(CAM_ISP, "Received SOF");
+			camif_priv->irq_debug_cnt++;
+			if (camif_priv->irq_debug_cnt ==
+				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
+				camif_priv->enable_sof_irq_debug =
+					false;
+				camif_priv->irq_debug_cnt = 0;
 			}
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EPOCH:
-		if (irq_status1 & camif_priv->reg_data->epoch0_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EPOCH");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EOF:
-		if (irq_status1 & camif_priv->reg_data->eof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EOF");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_ERROR:
-		if (irq_status2 & camif_priv->reg_data->error_irq_mask2) {
-			CAM_DBG(CAM_ISP, "Received ERROR");
-			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-			cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
-		} else {
-			ret = CAM_ISP_HW_ERROR_NONE;
-		}
+		} else
+			CAM_DBG(CAM_ISP, "Received SOF");
 
-		if (camif_priv->camif_debug &
-			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
-			val = cam_io_r(camif_priv->mem_base +
-				camif_priv->common_reg->diag_sensor_status_0);
-			CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
-				camif_priv->mem_base, val);
-		}
-		break;
-	default:
-		break;
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
-	CAM_DBG(CAM_ISP, "returing status = %d", ret);
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_priv->reg_data->epoch0_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_priv->reg_data->eof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EOF");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
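+	/* On overflow, decode bus_overflow_status to find the offending WM client */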
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
+		& camif_priv->reg_data->error_irq_mask0) {
+		CAM_ERR(CAM_ISP, "VFE Overflow");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		val = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->bus_overflow_status);
+
+		if (val) {
+
+			if (val & 0x01)
+				CAM_INFO(CAM_ISP, "VID Y 1:1 bus overflow");
+
+			if (val & 0x02)
+				CAM_INFO(CAM_ISP, "VID C 1:1 bus overflow");
+
+			if (val & 0x04)
+				CAM_INFO(CAM_ISP, "VID YC 4:1 bus overflow");
+
+			if (val & 0x08)
+				CAM_INFO(CAM_ISP, "VID YC 16:1 bus overflow");
+
+			if (val & 0x010)
+				CAM_INFO(CAM_ISP, "DISP Y 1:1 bus overflow");
+
+			if (val & 0x020)
+				CAM_INFO(CAM_ISP, "DISP C 1:1 bus overflow");
+
+			if (val & 0x040)
+				CAM_INFO(CAM_ISP, "DISP YC 4:1 bus overflow");
+
+			if (val & 0x080)
+				CAM_INFO(CAM_ISP, "DISP YC 16:1 bus overflow");
+
+			if (val & 0x0100)
+				CAM_INFO(CAM_ISP, "FD Y bus overflow");
+
+			if (val & 0x0200)
+				CAM_INFO(CAM_ISP, "FD C bus overflow");
+
+			if (val & 0x0400)
+				CAM_INFO(CAM_ISP,
+				"PIXEL RAW DUMP bus overflow");
+
+			if (val & 0x01000)
+				CAM_INFO(CAM_ISP, "STATS HDR BE bus overflow");
+
+			if (val & 0x02000)
+				CAM_INFO(CAM_ISP,
+				"STATS HDR BHIST bus overflow");
+
+			if (val & 0x04000)
+				CAM_INFO(CAM_ISP,
+				"STATS TINTLESS BG bus overflow");
+
+			if (val & 0x08000)
+				CAM_INFO(CAM_ISP, "STATS AWB BG bus overflow");
+
+			if (val & 0x010000)
+				CAM_INFO(CAM_ISP, "STATS BHIST bus overflow");
+
+			if (val & 0x020000)
+				CAM_INFO(CAM_ISP, "STATS RS bus overflow");
+
+			if (val & 0x040000)
+				CAM_INFO(CAM_ISP, "STATS CS bus overflow");
+
+			if (val & 0x080000)
+				CAM_INFO(CAM_ISP, "STATS IHIST bus overflow");
+
+			if (val & 0x0100000)
+				CAM_INFO(CAM_ISP, "STATS BAF bus overflow");
+
+			if (val & 0x0200000)
+				CAM_INFO(CAM_ISP, "PDAF bus overflow");
+		}
+
+		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_camif_ver3_print_status(
+			irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0],
+			irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret);
+		cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
+	}
+
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]) {
+		CAM_ERR(CAM_ISP, "VFE Violation");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_VIOLATION;
+		cam_vfe_camif_ver3_print_status(
+			irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2],
+			irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret);
+		cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
+	}
+
+	if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+		val = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->diag_sensor_status_0);
+		CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: base %pK val 0x%x",
+			camif_priv->mem_base, val);
+	}
+
+	cam_vfe_camif_ver3_put_evt_payload(camif_priv, &payload);
+
+	CAM_DBG(CAM_ISP, "returning status = %d", ret);
 	return ret;
 }
 
@@ -645,10 +1269,12 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_hw_info,
-	struct cam_isp_resource_node  *camif_node)
+	struct cam_isp_resource_node  *camif_node,
+	void                          *vfe_irq_controller)
 {
 	struct cam_vfe_mux_camif_ver3_data *camif_priv = NULL;
 	struct cam_vfe_camif_ver3_hw_info *camif_info = camif_hw_info;
+	int i = 0;
 
 	camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_ver3_data),
 		GFP_KERNEL);
@@ -663,6 +1289,7 @@
 	camif_priv->reg_data    = camif_info->reg_data;
 	camif_priv->hw_intf     = hw_intf;
 	camif_priv->soc_info    = soc_info;
+	camif_priv->vfe_irq_controller = vfe_irq_controller;
 
 	camif_node->init    = cam_vfe_camif_ver3_resource_init;
 	camif_node->deinit  = cam_vfe_camif_ver3_resource_deinit;
@@ -672,6 +1299,13 @@
 	camif_node->top_half_handler = cam_vfe_camif_ver3_handle_irq_top_half;
 	camif_node->bottom_half_handler =
 		cam_vfe_camif_ver3_handle_irq_bottom_half;
+	spin_lock_init(&camif_priv->spin_lock);
+	INIT_LIST_HEAD(&camif_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&camif_priv->evt_payload[i].list);
+		list_add_tail(&camif_priv->evt_payload[i].list,
+			&camif_priv->free_payload_list);
+	}
 
 	return 0;
 }
@@ -680,6 +1314,7 @@
 	struct cam_isp_resource_node  *camif_node)
 {
 	struct cam_vfe_mux_camif_ver3_data *camif_priv;
+	int i = 0;
 
 	if (!camif_node) {
 		CAM_ERR(CAM_ISP, "Error, camif_node is NULL %pK", camif_node);
@@ -688,6 +1323,12 @@
 
 	camif_priv = camif_node->res_priv;
 
+	INIT_LIST_HEAD(&camif_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++)
+		INIT_LIST_HEAD(&camif_priv->evt_payload[i].list);
+
 	camif_node->start = NULL;
 	camif_node->stop  = NULL;
 	camif_node->process_cmd = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
index 70daa13..f28a0a4 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
@@ -13,6 +13,7 @@
  * Debug values for camif module
  */
 #define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS      BIT(0)
+#define CAM_VFE_CAMIF_EVT_MAX                      256
 
 struct cam_vfe_camif_ver3_pp_clc_reg {
 	uint32_t     hw_version;
@@ -34,10 +35,11 @@
 
 struct cam_vfe_camif_ver3_reg_data {
 	uint32_t     pp_extern_reg_update_shift;
-	uint32_t     lcr_extern_reg_update_shift;
 	uint32_t     dual_pd_extern_reg_update_shift;
 	uint32_t     extern_reg_update_mask;
 	uint32_t     dual_ife_pix_en_shift;
+	uint32_t     operating_mode_shift;
+	uint32_t     input_mux_sel_shift;
 
 	uint32_t     pixel_pattern_shift;
 	uint32_t     pixel_pattern_mask;
@@ -52,10 +54,10 @@
 	uint32_t     sof_irq_mask;
 	uint32_t     epoch0_irq_mask;
 	uint32_t     epoch1_irq_mask;
-	uint32_t     reg_update_irq_mask;
 	uint32_t     eof_irq_mask;
 	uint32_t     error_irq_mask0;
 	uint32_t     error_irq_mask2;
+	uint32_t     subscribe_irq_mask1;
 
 	uint32_t     enable_diagnostic_hw;
 	uint32_t     pp_camif_cfg_en_shift;
@@ -76,7 +78,8 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_hw_info,
-	struct cam_isp_resource_node  *camif_node);
+	struct cam_isp_resource_node  *camif_node,
+	void                          *vfe_irq_controller);
 
 int cam_vfe_camif_ver3_deinit(
 	struct cam_isp_resource_node  *camif_node);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
index 3a4036b..941523a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
@@ -488,8 +488,9 @@
 {
 	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
 	struct cam_isp_resource_node         *fe_node;
-	struct cam_vfe_mux_fe_data        *fe_priv;
+	struct cam_vfe_mux_fe_data           *fe_priv;
 	struct cam_vfe_top_irq_evt_payload   *payload;
+	struct cam_isp_hw_event_info          evt_info;
 	uint32_t                              irq_status0;
 	uint32_t                              irq_status1;
 
@@ -504,59 +505,53 @@
 	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
 
-	CAM_DBG(CAM_ISP, "event ID:%d, irq_status_0 = 0x%x",
-			payload->evt_id, irq_status0);
+	evt_info.hw_idx = fe_node->hw_intf->hw_idx;
+	evt_info.res_id = fe_node->res_id;
+	evt_info.res_type = fe_node->res_type;
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status0 & fe_priv->reg_data->sof_irq_mask) {
-			if ((fe_priv->enable_sof_irq_debug) &&
-				(fe_priv->irq_debug_cnt <=
-				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
-				CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+	CAM_DBG(CAM_ISP, "irq_status_0 = 0x%x", irq_status0);
 
-				fe_priv->irq_debug_cnt++;
-				if (fe_priv->irq_debug_cnt ==
-					CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
-					fe_priv->enable_sof_irq_debug =
-						false;
-					fe_priv->irq_debug_cnt = 0;
-				}
-			} else {
-				CAM_DBG(CAM_ISP, "Received SOF");
+	if (irq_status0 & fe_priv->reg_data->sof_irq_mask) {
+		if ((fe_priv->enable_sof_irq_debug) &&
+			(fe_priv->irq_debug_cnt <=
+			CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+
+			fe_priv->irq_debug_cnt++;
+			if (fe_priv->irq_debug_cnt ==
+				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
+				fe_priv->enable_sof_irq_debug =
+					false;
+				fe_priv->irq_debug_cnt = 0;
 			}
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EPOCH:
-		if (irq_status0 & fe_priv->reg_data->epoch0_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EPOCH");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_REG_UPDATE:
-		if (irq_status0 & fe_priv->reg_data->reg_update_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EOF:
-		if (irq_status0 & fe_priv->reg_data->eof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EOF\n");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_ERROR:
-		if (irq_status1 & fe_priv->reg_data->error_irq_mask1) {
-			CAM_DBG(CAM_ISP, "Received ERROR\n");
-			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-			cam_vfe_fe_reg_dump(fe_node);
 		} else {
-			ret = CAM_ISP_HW_ERROR_NONE;
+			CAM_DBG(CAM_ISP, "Received SOF");
 		}
-		break;
-	default:
-		break;
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & fe_priv->reg_data->epoch0_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & fe_priv->reg_data->reg_update_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & fe_priv->reg_data->eof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EOF\n");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status1 & fe_priv->reg_data->error_irq_mask1) {
+		CAM_DBG(CAM_ISP, "Received ERROR\n");
+		ret = CAM_ISP_HW_ERROR_OVERFLOW;
+		evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_fe_reg_dump(fe_node);
+	} else {
+		ret = CAM_ISP_HW_ERROR_NONE;
 	}
 
 	CAM_DBG(CAM_ISP, "returing status = %d", ret);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index 49079a8..0b230ce 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -8,20 +8,138 @@
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_isp_hw.h"
 #include "cam_vfe_hw_intf.h"
+#include "cam_vfe_top_ver2.h"
 #include "cam_io_util.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
 
 struct cam_vfe_mux_rdi_data {
 	void __iomem                                *mem_base;
 	struct cam_hw_intf                          *hw_intf;
 	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
 	struct cam_vfe_rdi_ver2_reg                 *rdi_reg;
+	struct cam_vfe_rdi_common_reg_data          *rdi_common_reg_data;
 	struct cam_vfe_rdi_reg_data                 *reg_data;
 
+	cam_hw_mgr_event_cb_func              event_cb;
+	void                                 *priv;
+	int                                   irq_err_handle;
+	int                                   irq_handle;
+	void                                 *vfe_irq_controller;
+	struct cam_vfe_top_irq_evt_payload    evt_payload[CAM_VFE_RDI_EVT_MAX];
+	struct list_head                      free_payload_list;
+	spinlock_t                            spin_lock;
+
 	enum cam_isp_hw_sync_mode          sync_mode;
 };
 
+static int cam_vfe_rdi_get_evt_payload(
+	struct cam_vfe_mux_rdi_data              *rdi_priv,
+	struct cam_vfe_top_irq_evt_payload     **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&rdi_priv->spin_lock);
+	if (list_empty(&rdi_priv->free_payload_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&rdi_priv->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	rc = 0;
+done:
+	spin_unlock(&rdi_priv->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_rdi_put_evt_payload(
+	struct cam_vfe_mux_rdi_data              *rdi_priv,
+	struct cam_vfe_top_irq_evt_payload      **evt_payload)
+{
+	unsigned long flags;
+
+	if (!rdi_priv) {
+		CAM_ERR(CAM_ISP, "Invalid param rdi_priv NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&rdi_priv->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list, &rdi_priv->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&rdi_priv->spin_lock, flags);
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_rdi_err_irq_top_half(
+	uint32_t                               evt_id,
+	struct cam_irq_th_payload             *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *rdi_node;
+	struct cam_vfe_mux_rdi_data           *rdi_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+	bool                                   error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+	rdi_node = th_payload->handler_priv;
+	rdi_priv = rdi_node->res_priv;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+	if (th_payload->evt_status_arr[1]) {
+		CAM_ERR(CAM_ISP,
+			"RDI Error: vfe:%d: STATUS_1=0x%x",
+			rdi_node->hw_intf->hw_idx,
+			th_payload->evt_status_arr[1]);
+		CAM_ERR(CAM_ISP, "Stopping further IRQ processing from vfe=%d",
+			rdi_node->hw_intf->hw_idx);
+		cam_irq_controller_disable_irq(rdi_priv->vfe_irq_controller,
+			rdi_priv->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			rdi_priv->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_rdi_get_evt_payload(rdi_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "STATUS_1=0x%x",
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->irq_reg_val[i] = cam_io_r(rdi_priv->mem_base +
+			rdi_priv->common_reg->violation_status);
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = 0x%x",
+			evt_payload->irq_reg_val[i]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
 static int cam_vfe_rdi_get_reg_update(
 	struct cam_isp_resource_node  *rdi_res,
 	void *cmd_args, uint32_t arg_size)
@@ -80,6 +198,8 @@
 	rdi_data     = (struct cam_vfe_mux_rdi_data *)rdi_res->res_priv;
 	acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
 
+	rdi_data->event_cb    = acquire_data->event_cb;
+	rdi_data->priv        = acquire_data->priv;
 	rdi_data->sync_mode   = acquire_data->vfe_in.sync_mode;
 
 	return 0;
@@ -90,6 +210,8 @@
 {
 	struct cam_vfe_mux_rdi_data   *rsrc_data;
 	int                            rc = 0;
+	uint32_t                       err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t                       irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
 
 	if (!rdi_res) {
 		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -103,15 +225,61 @@
 	}
 
 	rsrc_data = (struct cam_vfe_mux_rdi_data  *)rdi_res->res_priv;
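+	/* Build error and subscribe IRQ masks from the RDI common register data */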
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->rdi_common_reg_data->error_irq_mask0;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->rdi_common_reg_data->error_irq_mask1;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->rdi_common_reg_data->subscribe_irq_mask0;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->rdi_common_reg_data->subscribe_irq_mask1;
+
 	rdi_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
 	/* Reg Update */
 	cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
 		rsrc_data->mem_base + rsrc_data->rdi_reg->reg_update_cmd);
 
+	if (!rsrc_data->irq_err_handle) {
+		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_1,
+			err_irq_mask,
+			rdi_res,
+			cam_vfe_rdi_err_irq_top_half,
+			rdi_res->bottom_half_handler,
+			rdi_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_err_handle = 0;
+		}
+	}
+
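+	/* SOF/RUP IRQs are subscribed only when RDI is the sole context on VFE */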
+	if (!rdi_res->rdi_only_ctx)
+		goto end;
+
+	if (!rsrc_data->irq_handle) {
+		rsrc_data->irq_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			irq_mask,
+			rdi_res,
+			rdi_res->top_half_handler,
+			rdi_res->bottom_half_handler,
+			rdi_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_handle < 1) {
+			CAM_ERR(CAM_ISP, "IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_handle = 0;
+		}
+	}
+
 	CAM_DBG(CAM_ISP, "Start RDI %d",
 		rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0);
-
+end:
 	return rc;
 }
 
@@ -119,7 +287,7 @@
 static int cam_vfe_rdi_resource_stop(
 	struct cam_isp_resource_node        *rdi_res)
 {
-	struct cam_vfe_mux_rdi_data           *rdi_priv;
+	struct cam_vfe_mux_rdi_data         *rdi_priv;
 	int rc = 0;
 
 	if (!rdi_res) {
@@ -136,6 +304,17 @@
 	if (rdi_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
 		rdi_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
+	if (rdi_priv->irq_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			rdi_priv->vfe_irq_controller, rdi_priv->irq_handle);
+		rdi_priv->irq_handle = 0;
+	}
+
+	if (rdi_priv->irq_err_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			rdi_priv->vfe_irq_controller, rdi_priv->irq_err_handle);
+		rdi_priv->irq_err_handle = 0;
+	}
 
 	return rc;
 }
@@ -167,7 +346,36 @@
 static int cam_vfe_rdi_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *rdi_node;
+	struct cam_vfe_mux_rdi_data           *rdi_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+
+	rdi_node = th_payload->handler_priv;
+	rdi_priv = rdi_node->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_rdi_get_evt_payload(rdi_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_rdi_handle_irq_bottom_half(void *handler_priv,
@@ -177,6 +385,7 @@
 	struct cam_isp_resource_node        *rdi_node;
 	struct cam_vfe_mux_rdi_data         *rdi_priv;
 	struct cam_vfe_top_irq_evt_payload  *payload;
+	struct cam_isp_hw_event_info         evt_info;
 	uint32_t                             irq_status0;
 
 	if (!handler_priv || !evt_payload_priv) {
@@ -187,28 +396,34 @@
 	rdi_node = handler_priv;
 	rdi_priv = rdi_node->res_priv;
 	payload = evt_payload_priv;
+
 	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 
-	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	evt_info.hw_idx   = rdi_node->hw_intf->hw_idx;
+	evt_info.res_id   = rdi_node->res_id;
+	evt_info.res_type = rdi_node->res_type;
+
 	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status0 & rdi_priv->reg_data->sof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received SOF");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_REG_UPDATE:
-		if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received REG UPDATE");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	default:
-		break;
+	if (irq_status0 & rdi_priv->reg_data->sof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received SOF");
+
+		if (rdi_priv->event_cb)
+			rdi_priv->event_cb(rdi_priv->priv,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	} else if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received REG UPDATE");
+
+		if (rdi_priv->event_cb)
+			rdi_priv->event_cb(rdi_priv->priv,
+				CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
+	cam_vfe_rdi_put_evt_payload(rdi_priv, &payload);
 	CAM_DBG(CAM_ISP, "returing status = %d", ret);
 	return ret;
 }
@@ -217,10 +432,12 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *rdi_hw_info,
-	struct cam_isp_resource_node  *rdi_node)
+	struct cam_isp_resource_node  *rdi_node,
+	void                          *vfe_irq_controller)
 {
 	struct cam_vfe_mux_rdi_data     *rdi_priv = NULL;
 	struct cam_vfe_rdi_ver2_hw_info *rdi_info = rdi_hw_info;
+	int                              i = 0;
 
 	rdi_priv = kzalloc(sizeof(struct cam_vfe_mux_rdi_data),
 			GFP_KERNEL);
@@ -235,6 +452,8 @@
 	rdi_priv->hw_intf    = hw_intf;
 	rdi_priv->common_reg = rdi_info->common_reg;
 	rdi_priv->rdi_reg    = rdi_info->rdi_reg;
+	rdi_priv->vfe_irq_controller  = vfe_irq_controller;
+	rdi_priv->rdi_common_reg_data = rdi_info->common_reg_data;
 
 	switch (rdi_node->res_id) {
 	case CAM_ISP_HW_VFE_IN_RDI0:
@@ -265,6 +484,14 @@
 	rdi_node->top_half_handler = cam_vfe_rdi_handle_irq_top_half;
 	rdi_node->bottom_half_handler = cam_vfe_rdi_handle_irq_bottom_half;
 
+	spin_lock_init(&rdi_priv->spin_lock);
+	INIT_LIST_HEAD(&rdi_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_RDI_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&rdi_priv->evt_payload[i].list);
+		list_add_tail(&rdi_priv->evt_payload[i].list,
+			&rdi_priv->free_payload_list);
+	}
+
 	return 0;
 err_init:
 	kfree(rdi_priv);
@@ -275,6 +502,11 @@
 	struct cam_isp_resource_node  *rdi_node)
 {
 	struct cam_vfe_mux_rdi_data *rdi_priv = rdi_node->res_priv;
+	int                          i = 0;
+
+	INIT_LIST_HEAD(&rdi_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_RDI_EVT_MAX; i++)
+		INIT_LIST_HEAD(&rdi_priv->evt_payload[i].list);
 
 	rdi_node->start = NULL;
 	rdi_node->stop  = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
index 797ed55..c570e84 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_RDI_H_
@@ -11,19 +11,30 @@
 
 #define CAM_VFE_RDI_VER2_MAX  4
 
+#define CAM_VFE_RDI_EVT_MAX   256
+
 struct cam_vfe_rdi_ver2_reg {
 	uint32_t     reg_update_cmd;
 };
 
+struct cam_vfe_rdi_common_reg_data {
+	uint32_t     subscribe_irq_mask0;
+	uint32_t     subscribe_irq_mask1;
+	uint32_t     error_irq_mask0;
+	uint32_t     error_irq_mask1;
+	uint32_t     error_irq_mask2;
+	uint32_t     rdi_frame_drop_mask;
+};
+
 struct cam_vfe_rdi_reg_data {
 	uint32_t     reg_update_cmd_data;
 	uint32_t     sof_irq_mask;
 	uint32_t     reg_update_irq_mask;
 };
-
 struct cam_vfe_rdi_ver2_hw_info {
 	struct cam_vfe_top_ver2_reg_offset_common  *common_reg;
 	struct cam_vfe_rdi_ver2_reg                *rdi_reg;
+	struct cam_vfe_rdi_common_reg_data         *common_reg_data;
 	struct cam_vfe_rdi_reg_data  *reg_data[CAM_VFE_RDI_VER2_MAX];
 };
 
@@ -35,7 +46,8 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *rdi_hw_info,
-	struct cam_isp_resource_node  *rdi_node);
+	struct cam_isp_resource_node  *rdi_node,
+	void                          *vfe_irq_controller);
 
 int cam_vfe_rdi_ver2_deinit(
 	struct cam_isp_resource_node  *rdi_node);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
index 287a10e..077f890 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
@@ -12,18 +12,19 @@
 	struct cam_hw_soc_info        *soc_info,
 	struct cam_hw_intf            *hw_intf,
 	void                          *top_hw_info,
-	struct cam_vfe_top            **vfe_top)
+	void                          *vfe_irq_controller,
+	struct cam_vfe_top           **vfe_top)
 {
 	int rc = -EINVAL;
 
 	switch (top_version) {
 	case CAM_VFE_TOP_VER_2_0:
 		rc = cam_vfe_top_ver2_init(soc_info, hw_intf, top_hw_info,
-			vfe_top);
+			vfe_irq_controller, vfe_top);
 		break;
 	case CAM_VFE_TOP_VER_3_0:
 		rc = cam_vfe_top_ver3_init(soc_info, hw_intf, top_hw_info,
-			vfe_top);
+			vfe_irq_controller, vfe_top);
 		break;
 	default:
 		CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index 535cf71..3787fa1 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -703,6 +703,7 @@
 	struct cam_hw_soc_info                 *soc_info,
 	struct cam_hw_intf                     *hw_intf,
 	void                                   *top_hw_info,
+	void                                   *vfe_irq_controller,
 	struct cam_vfe_top                    **vfe_top_ptr)
 {
 	int i, j, rc = 0;
@@ -760,7 +761,7 @@
 
 			rc = cam_vfe_camif_ver2_init(hw_intf, soc_info,
 				&ver2_hw_info->camif_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver2_hw_info->mux_type[i] ==
@@ -777,7 +778,7 @@
 
 			rc = cam_vfe_camif_lite_ver2_init(hw_intf, soc_info,
 				&ver2_hw_info->camif_lite_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 
 			if (rc)
 				goto deinit_resources;
@@ -796,7 +797,7 @@
 
 			rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info,
 				&ver2_hw_info->rdi_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver2_hw_info->mux_type[i] ==
@@ -845,11 +846,9 @@
 				&top_priv->mux_rsrc[i]))
 				CAM_ERR(CAM_ISP, "Camif lite deinit failed");
 		} else if (ver2_hw_info->mux_type[i] ==
-			CAM_ISP_HW_VFE_IN_RDI0) {
-			if (cam_vfe_rdi_ver2_init(hw_intf, soc_info,
-				&ver2_hw_info->rdi_hw_info,
-				&top_priv->mux_rsrc[i]))
-				CAM_ERR(CAM_ISP, "RDI deinit failed");
+			CAM_VFE_IN_RD_VER_1_0) {
+			if (cam_vfe_fe_ver1_deinit(&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "VFE FE deinit failed");
 		} else {
 			if (cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]))
 				CAM_ERR(CAM_ISP, "RDI Deinit failed");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
index aeaa73d..82e30b4 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_TOP_VER2_H_
@@ -56,7 +56,8 @@
 int cam_vfe_top_ver2_init(struct cam_hw_soc_info     *soc_info,
 	struct cam_hw_intf                           *hw_intf,
 	void                                         *top_hw_info,
-	struct cam_vfe_top                          **vfe_top);
+	void                                         *vfe_irq_controller,
+	struct cam_vfe_top                          **vfe_top_ptr);
 
 int cam_vfe_top_ver2_deinit(struct cam_vfe_top      **vfe_top);
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
index 6ff7848..955cbf0 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
@@ -13,8 +13,10 @@
 #include "cam_cpas_api.h"
 #include "cam_vfe_soc.h"
 
-#define CAM_VFE_HW_RESET_HW_AND_REG_VAL  0x00000003
-#define CAM_VFE_HW_RESET_HW_VAL          0x007F0000
+#define CAM_VFE_HW_RESET_HW_AND_REG_VAL       0x00000003
+#define CAM_VFE_HW_RESET_HW_VAL               0x007F0000
+#define CAM_VFE_LITE_HW_RESET_AND_REG_VAL     0x00000002
+#define CAM_VFE_LITE_HW_RESET_HW_VAL          0x0000003D
 #define CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES 3
 
 struct cam_vfe_top_ver3_common_data {
@@ -348,6 +350,19 @@
 	return rc;
 }
 
+static int cam_vfe_core_config_control(
+	struct cam_vfe_top_ver3_priv *top_priv,
+	 void *cmd_args, uint32_t arg_size)
+{
+	struct cam_vfe_core_config_args  *core_config = cmd_args;
+
+	if (core_config->node_res->process_cmd)
+		return core_config->node_res->process_cmd(core_config->node_res,
+			CAM_ISP_HW_CMD_CORE_CONFIG, cmd_args, arg_size);
+
+	return -EINVAL;
+}
+
 static int cam_vfe_top_ver3_bw_control(
 	struct cam_vfe_top_ver3_priv *top_priv,
 	 void *cmd_args, uint32_t arg_size)
@@ -436,20 +451,33 @@
 		return -EINVAL;
 	}
 
+	soc_info = top_priv->common_data.soc_info;
+	reg_common = top_priv->common_data.common_reg;
+
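+	/* VFE-lite cores take different reset mask values; pick by compatible */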
 	switch (*reset_reg_args) {
 	case CAM_VFE_HW_RESET_HW_AND_REG:
-		reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+		if (strnstr(soc_info->compatible, "lite",
+			strlen(soc_info->compatible)) == NULL)
+			reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+		else
+			reset_reg_val = CAM_VFE_LITE_HW_RESET_AND_REG_VAL;
 		break;
 	default:
-		reset_reg_val = CAM_VFE_HW_RESET_HW_VAL;
+		if (strnstr(soc_info->compatible, "lite",
+			strlen(soc_info->compatible)) == NULL)
+			reset_reg_val = CAM_VFE_HW_RESET_HW_VAL;
+		else
+			reset_reg_val = CAM_VFE_LITE_HW_RESET_HW_VAL;
 		break;
 	}
 	/* override due to hw limitation */
-	reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+	if (strnstr(soc_info->compatible, "lite",
+		strlen(soc_info->compatible)) == NULL)
+		reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+	else
+		reset_reg_val = CAM_VFE_LITE_HW_RESET_AND_REG_VAL;
 
-	CAM_DBG(CAM_ISP, "reset reg value: %x", reset_reg_val);
-	soc_info = top_priv->common_data.soc_info;
-	reg_common = top_priv->common_data.common_reg;
+	CAM_DBG(CAM_ISP, "reset reg value: 0x%x", reset_reg_val);
 
 	/* Mask All the IRQs except RESET */
 	if (strnstr(soc_info->compatible, "lite",
@@ -698,6 +726,9 @@
 	case CAM_ISP_HW_CMD_BW_CONTROL:
 		rc = cam_vfe_top_ver3_bw_control(top_priv, cmd_args, arg_size);
 		break;
+	case CAM_ISP_HW_CMD_CORE_CONFIG:
+		rc = cam_vfe_core_config_control(top_priv, cmd_args, arg_size);
+		break;
 	default:
 		rc = -EINVAL;
 		CAM_ERR(CAM_ISP, "Error, Invalid cmd:%d", cmd_type);
@@ -711,6 +742,7 @@
 	struct cam_hw_soc_info                 *soc_info,
 	struct cam_hw_intf                     *hw_intf,
 	void                                   *top_hw_info,
+	void                                   *vfe_irq_controller,
 	struct cam_vfe_top                    **vfe_top_ptr)
 {
 	int i, j, rc = 0;
@@ -768,7 +800,7 @@
 
 			rc = cam_vfe_camif_ver3_init(hw_intf, soc_info,
 				&ver3_hw_info->camif_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver3_hw_info->mux_type[i] ==
@@ -779,7 +811,7 @@
 
 			rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info,
 				&ver3_hw_info->pdlib_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver3_hw_info->mux_type[i] ==
@@ -801,7 +833,7 @@
 
 			rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info,
 				ver3_hw_info->rdi_hw_info[j++],
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver3_hw_info->mux_type[i] ==
@@ -812,7 +844,7 @@
 
 			rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info,
 				&ver3_hw_info->lcr_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
index 03630f0..1ae8e5d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
@@ -12,6 +12,17 @@
 
 #define CAM_VFE_TOP_VER3_MUX_MAX     6
 
+#define CAM_SHIFT_TOP_CORE_CFG_MUXSEL_PDAF       31
+#define CAM_SHIFT_TOP_CORE_CFG_VID_DS16_R2PD     30
+#define CAM_SHIFT_TOP_CORE_CFG_VID_DS4_R2PD      29
+#define CAM_SHIFT_TOP_CORE_CFG_DISP_DS16_R2PD    28
+#define CAM_SHIFT_TOP_CORE_CFG_DISP_DS4_R2PD     27
+#define CAM_SHIFT_TOP_CORE_CFG_DSP_STREAMING     25
+#define CAM_SHIFT_TOP_CORE_CFG_STATS_IHIST       10
+#define CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BE       9
+#define CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BHIST    8
+#define CAM_SHIFT_TOP_CORE_CFG_INPUTMUX_PP        5
+
 struct cam_vfe_top_ver3_reg_offset_common {
 	uint32_t hw_version;
 	uint32_t titan_version;
@@ -36,6 +47,20 @@
 	uint32_t diag_config;
 	uint32_t diag_sensor_status_0;
 	uint32_t diag_sensor_status_1;
+	uint32_t bus_overflow_status;
+};
+
+struct cam_vfe_camif_common_cfg {
+	uint32_t     vid_ds16_r2pd;
+	uint32_t     vid_ds4_r2pd;
+	uint32_t     disp_ds16_r2pd;
+	uint32_t     disp_ds4_r2pd;
+	uint32_t     dsp_streaming_tap_point;
+	uint32_t     ihist_src_sel;
+	uint32_t     hdr_be_src_sel;
+	uint32_t     hdr_bhist_src_sel;
+	uint32_t     input_mux_sel_pdaf;
+	uint32_t     input_mux_sel_pp;
 };
 
 struct cam_vfe_top_ver3_hw_info {
@@ -52,6 +77,7 @@
 int cam_vfe_top_ver3_init(struct cam_hw_soc_info     *soc_info,
 	struct cam_hw_intf                           *hw_intf,
 	void                                         *top_hw_info,
+	void                                         *vfe_irq_controller,
 	struct cam_vfe_top                          **vfe_top);
 
 int cam_vfe_top_ver3_deinit(struct cam_vfe_top      **vfe_top);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
index 622ca64..a5236e5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
@@ -34,7 +34,8 @@
 	struct cam_hw_soc_info        *soc_info,
 	struct cam_hw_intf            *hw_intf,
 	void                          *top_hw_info,
-	struct cam_vfe_top            **vfe_top);
+	void                          *vfe_irq_controller,
+	struct cam_vfe_top           **vfe_top);
 
 int cam_vfe_top_deinit(uint32_t        top_version,
 	struct cam_vfe_top           **vfe_top);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 981d7bba..864b37e 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -41,6 +41,7 @@
 	link->last_flush_id = 0;
 	link->initial_sync_req = -1;
 	link->in_msync_mode = false;
+	link->retry_cnt = 0;
 }
 
 void cam_req_mgr_handle_core_shutdown(void)
@@ -167,6 +168,46 @@
 }
 
 /**
+ * __cam_req_mgr_notify_error_on_link()
+ *
+ * @brief : Notify userspace on exceeding max retry
+ *          attempts to apply same req
+ * @link  : link on which the req could not be applied
+ *
+ */
+static int __cam_req_mgr_notify_error_on_link(
+	struct cam_req_mgr_core_link    *link)
+{
+	struct cam_req_mgr_core_session *session = NULL;
+	struct cam_req_mgr_message       msg;
+	int rc = 0;
+
+	session = (struct cam_req_mgr_core_session *)link->parent;
+
+	CAM_ERR(CAM_CRM,
+		"Notifying userspace to trigger recovery on link 0x%x for session %d",
+		link->link_hdl, session->session_hdl);
+
+	memset(&msg, 0, sizeof(msg));
+
+	msg.session_hdl = session->session_hdl;
+	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
+	msg.u.err_msg.request_id = 0;
+	msg.u.err_msg.link_hdl   = link->link_hdl;
+
+	rc = cam_req_mgr_notify_message(&msg,
+		V4L_EVENT_CAM_REQ_MGR_ERROR,
+		V4L_EVENT_CAM_REQ_MGR_EVENT);
+
+	if (rc)
+		CAM_ERR(CAM_CRM,
+			"Error in notifying recovery for session %d link 0x%x rc %d",
+			session->session_hdl, link->link_hdl, rc);
+
+	return rc;
+}
+
+/**
  * __cam_req_mgr_traverse()
  *
  * @brief    : Traverse through pd tables, it will internally cover all linked
@@ -1092,7 +1133,20 @@
 	if (rc < 0) {
 		/* Apply req failed retry at next sof */
 		slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+
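+		/* Notify userspace recovery once retries hit the cap */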
+		link->retry_cnt++;
+		if (link->retry_cnt == MAXIMUM_RETRY_ATTEMPTS) {
+			CAM_DBG(CAM_CRM,
+				"Max retry attempts reached on link[0x%x] for req [%lld]",
+				link->link_hdl,
+				in_q->slot[in_q->rd_idx].req_id);
+			__cam_req_mgr_notify_error_on_link(link);
+			link->retry_cnt = 0;
+		}
 	} else {
+		if (link->retry_cnt)
+			link->retry_cnt = 0;
+
 		link->trigger_mask |= trigger;
 
 		CAM_DBG(CAM_CRM, "Applied req[%lld] on link[%x] success",
@@ -1342,7 +1396,7 @@
 	memset(&msg, 0, sizeof(msg));
 
 	msg.session_hdl = session->session_hdl;
-	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_DEVICE;
+	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
 	msg.u.err_msg.request_id = 0;
 	msg.u.err_msg.link_hdl   = link->link_hdl;
 
@@ -1586,6 +1640,7 @@
 	link->req.in_q = NULL;
 	i = link - g_links;
 	CAM_DBG(CAM_CRM, "free link index %d", i);
+	cam_req_mgr_core_link_reset(link);
 	atomic_set(&g_links[i].is_used, 0);
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 8f07b3b..9a6acbc 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -29,6 +29,8 @@
 
 #define MAXIMUM_LINKS_PER_SESSION  4
 
+#define MAXIMUM_RETRY_ATTEMPTS 3
+
 /**
  * enum crm_workq_task_type
  * @codes: to identify which type of task is present
@@ -310,6 +312,8 @@
  * @in_msync_mode        : Flag to determine if a link is in master-slave mode
  * @initial_sync_req     : The initial req which is required to sync with the
  *                         other link
+ * @retry_cnt            : Counter that tracks number of attempts to apply
+ *                         the same req
  */
 struct cam_req_mgr_core_link {
 	int32_t                              link_hdl;
@@ -336,6 +340,7 @@
 	bool                                 initial_skip;
 	bool                                 in_msync_mode;
 	int64_t                              initial_sync_req;
+	uint32_t                             retry_cnt;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
index 66d5fad..6925b3a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
@@ -9,3 +9,4 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_ois/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_1_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_1_hwreg.h
index bbf2798..75e993b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_1_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_1_hwreg.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_CSIPHY_1_2_1_HWREG_H_
@@ -15,7 +15,7 @@
 	.csiphy_common_array_size = 4,
 	.csiphy_reset_array_size = 4,
 	.csiphy_2ph_config_array_size = 20,
-	.csiphy_3ph_config_array_size = 32,
+	.csiphy_3ph_config_array_size = 31,
 	.csiphy_2ph_clock_lane = 0x1,
 	.csiphy_2ph_combo_ck_ln = 0x10,
 };
@@ -308,7 +308,6 @@
 		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0164, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x01DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0984, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x09B0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
@@ -317,10 +316,10 @@
 		{0x0A90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0A98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A90, 0x17, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A98, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A8C, 0xBF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0368, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x036C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
@@ -342,8 +341,7 @@
 		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0364, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x03DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0AB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -351,10 +349,10 @@
 		{0x0B90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0B94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0B98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0B90, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0B94, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0B98, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0B8C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0568, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x056C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
@@ -376,8 +374,7 @@
 		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0564, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x05DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0B84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0BB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 };
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index f71f53d..3cef9c6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -1375,7 +1375,7 @@
 	vfree(e_ctrl->cal_data.map);
 	e_ctrl->cal_data.num_data = 0;
 	e_ctrl->cal_data.num_map = 0;
-	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
 release_buf:
 	if (cam_mem_put_cpu_buf(dev_config.packet_handle))
 		CAM_WARN(CAM_EEPROM, "Put cpu buffer failed : 0x%llx",
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index 10673d3..3f5790b 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -673,11 +673,6 @@
 		if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
 			mutex_lock(&iommu_cb_set.cb_info[i].lock);
 			if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
-				CAM_ERR(CAM_SMMU,
-					"Error: %s already got handle 0x%x",
-					name,
-					iommu_cb_set.cb_info[i].handle);
-
 				if (iommu_cb_set.cb_info[i].is_secure)
 					iommu_cb_set.cb_info[i].secure_count++;
 
@@ -686,6 +681,11 @@
 					*hdl = iommu_cb_set.cb_info[i].handle;
 					return 0;
 				}
+
+				CAM_ERR(CAM_SMMU,
+					"Error: %s already got handle 0x%x",
+					name, iommu_cb_set.cb_info[i].handle);
+
 				return -EINVAL;
 			}
 
@@ -1366,6 +1366,42 @@
 }
 EXPORT_SYMBOL(cam_smmu_dealloc_qdss);
 
+int cam_smmu_get_io_region_info(int32_t smmu_hdl,
+	dma_addr_t *iova, size_t *len)
+{
+	int32_t idx;
+
+	if (!iova || !len || (smmu_hdl == HANDLE_INIT)) {
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].io_support) {
+		CAM_ERR(CAM_SMMU,
+			"I/O memory not supported for this SMMU handle");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	*iova = iommu_cb_set.cb_info[idx].io_info.iova_start;
+	*len = iommu_cb_set.cb_info[idx].io_info.iova_len;
+
+	CAM_DBG(CAM_SMMU,
+		"I/O area for hdl = %x start addr = %pad len = %zu",
+		smmu_hdl, iova, *len);
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return 0;
+}
+
 int cam_smmu_get_region_info(int32_t smmu_hdl,
 	enum cam_smmu_region_id region_id,
 	struct cam_smmu_region_info *region_info)
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
index 2cc91c7..eab27bb 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_SMMU_API_H_
@@ -381,4 +381,16 @@
  */
 int cam_smmu_dealloc_qdss(int32_t smmu_hdl);
 
+/**
+ * @brief Get start addr & len of I/O region for a given cb
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ * @param iova: IOVA address of allocated I/O region
+ * @param len: Length of allocated I/O memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_io_region_info(int32_t smmu_hdl,
+	dma_addr_t *iova, size_t *len);
+
 #endif /* _CAM_SMMU_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index f039cda..eb4e1f9 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -1718,3 +1718,25 @@
 
 	return 0;
 }
+
+uint32_t cam_soc_util_get_vote_level(struct cam_hw_soc_info *soc_info,
+	uint64_t clock_rate)
+{
+	int i = 0;
+
+	if (!clock_rate)
+		return CAM_SVS_VOTE;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++) {
+		if (soc_info->clk_level_valid[i] &&
+			soc_info->clk_rate[i][soc_info->src_clk_idx] >=
+			clock_rate) {
+			CAM_DBG(CAM_UTIL,
+				"Clock rate %lld, selected clock level %d",
+				clock_rate, i);
+			return i;
+		}
+	}
+
+	return CAM_TURBO_VOTE;
+}
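
cam_soc_util_get_vote_level above walks the valid clock levels from lowest to highest and returns the first level whose source-clock rate can satisfy the request, defaulting to CAM_SVS_VOTE for a zero rate and falling back to CAM_TURBO_VOTE when no level is high enough. A standalone sketch of that selection policy, with illustrative level names and no driver structures:

	/* Pick the lowest level whose rate satisfies the request. */
	enum { LVL_SVS, LVL_SVS_L1, LVL_NOMINAL, LVL_TURBO, LVL_MAX };

	static int pick_vote_level(const uint64_t rates[LVL_MAX], uint64_t want)
	{
		int i;

		if (!want)
			return LVL_SVS;		/* default vote, as in the driver */

		for (i = 0; i < LVL_MAX; i++)
			if (rates[i] >= want)
				return i;	/* lowest sufficient level */

		return LVL_TURBO;		/* nothing satisfies: vote highest */
	}
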
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 76ce930..dc45059 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -36,8 +36,10 @@
 #define CAM_SOC_MAX_CLK             32
 
 /* DDR device types */
-#define DDR_TYPE_LPDDR4       6
-#define DDR_TYPE_LPDDR5       9
+#define DDR_TYPE_LPDDR4        6
+#define DDR_TYPE_LPDDR4X       7
+#define DDR_TYPE_LPDDR5        8
+#define DDR_TYPE_LPDDR5X       9
 
 /**
  * enum cam_vote_level - Enum for voting level
@@ -631,4 +633,7 @@
 int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
 	enum cam_vote_level clk_level);
 
+uint32_t cam_soc_util_get_vote_level(struct cam_hw_soc_info *soc_info,
+	uint64_t clock_rate);
+
 #endif /* _CAM_SOC_UTIL_H_ */
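
Note that the DDR renumbering above is not purely cosmetic: the code for LPDDR5 changes from 9 to 8, and 9 now identifies LPDDR5X, so any caller that compared against the old literal must move to these symbolic names. A small illustrative decode helper (kernel-style types assumed; the string names are for logging only and are not part of the patch):

	static const char *ddr_type_name(u32 type)
	{
		switch (type) {
		case DDR_TYPE_LPDDR4:	return "LPDDR4";
		case DDR_TYPE_LPDDR4X:	return "LPDDR4X";
		case DDR_TYPE_LPDDR5:	return "LPDDR5";
		case DDR_TYPE_LPDDR5X:	return "LPDDR5X";
		default:		return "unknown";
		}
	}
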
diff --git a/drivers/media/platform/msm/cvp/Makefile b/drivers/media/platform/msm/cvp/Makefile
index 2525565..0d80860 100644
--- a/drivers/media/platform/msm/cvp/Makefile
+++ b/drivers/media/platform/msm/cvp/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 ccflags-y += -I$(srctree)/drivers/media/platform/msm/cvp/
+ccflags-y += -I$(srctree)/drivers/media/platform/msm/synx/
 
 msm-cvp-objs := msm_v4l2_cvp.o \
 				msm_v4l2_private.o \
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index 39cb5b3..a1469fa 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -26,7 +26,6 @@
 #include <linux/soc/qcom/smem.h>
 #include <soc/qcom/subsystem_restart.h>
 #include <linux/dma-mapping.h>
-#include <linux/fastcvpd.h>
 #include "hfi_packetization.h"
 #include "msm_cvp_debug.h"
 #include "cvp_core_hfi.h"
@@ -38,6 +37,58 @@
 #define REG_ADDR_OFFSET_BITMASK	0x000FFFFF
 #define QDSS_IOVA_START 0x80001000
 
+const struct msm_cvp_hfi_defs cvp_hfi_defs[] = {
+	{
+		.size = HFI_DFS_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DFS_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DFS_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DFS_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DFS_FRAME,
+		.buf_offset = HFI_DFS_FRAME_BUFFERS_OFFSET,
+		.buf_num = HFI_DFS_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_DME_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DME_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DME_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DME_BASIC_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DME_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DME_FRAME,
+		.buf_offset = HFI_DME_FRAME_BUFFERS_OFFSET,
+		.buf_num = HFI_DME_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_PERSIST_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS,
+		.buf_offset = HFI_PERSIST_BUFFERS_OFFSET,
+		.buf_num = HFI_PERSIST_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_DS_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DS,
+		.buf_offset = HFI_DS_BUFFERS_OFFSET,
+		.buf_num = HFI_DS_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+};
+
 static struct hal_device_data hal_ctxt;
 
 #define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
@@ -73,7 +124,7 @@
 	.data_count = 0,
 };
 
-const int cvp_max_packets = 1000;
+const int cvp_max_packets = 32;
 
 static void venus_hfi_pm_handler(struct work_struct *work);
 static DECLARE_DELAYED_WORK(venus_hfi_pm_work, venus_hfi_pm_handler);
@@ -101,16 +152,11 @@
 static int __power_collapse(struct venus_hfi_device *device, bool force);
 static int venus_hfi_noc_error_info(void *dev);
 
-static void interrupt_init_vpu4(struct venus_hfi_device *device);
 static void interrupt_init_vpu5(struct venus_hfi_device *device);
 static void setup_dsp_uc_memmap_vpu5(struct venus_hfi_device *device);
 static void clock_config_on_enable_vpu5(struct venus_hfi_device *device);
 
-struct venus_hfi_vpu_ops cvp_vpu4_ops = {
-	.interrupt_init = interrupt_init_vpu4,
-	.setup_dsp_uc_memmap = NULL,
-	.clock_config_on_enable = NULL,
-};
+static int __set_ubwc_config(struct venus_hfi_device *device);
 
 struct venus_hfi_vpu_ops cvp_vpu5_ops = {
 	.interrupt_init = interrupt_init_vpu5,
@@ -146,6 +192,18 @@
 
 #define ROW_SIZE 32
 
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr)
+{
+	int i, pkt_num = ARRAY_SIZE(cvp_hfi_defs);
+
+	for (i = 0; i < pkt_num; i++)
+		if ((cvp_hfi_defs[i].size * sizeof(unsigned int) == hdr->size) &&
+			(cvp_hfi_defs[i].type == hdr->packet_type))
+			return i;
+
+	return -EINVAL;
+}
+
 static void __dump_packet(u8 *packet, enum cvp_msg_prio log_level)
 {
 	u32 c = 0, packet_size = *(u32 *)packet;
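
get_pkt_index is the gatekeeper for the cvp_hfi_defs table: a user-supplied header is accepted only if both its type and its size match a known entry, where the table sizes are in 32-bit words and hdr->size is in bytes, hence the sizeof(unsigned int) scaling. A hypothetical pre-enqueue check built on it; the wrapper name is illustrative:

	static int example_validate_user_pkt(struct cvp_hal_session_cmd_pkt *hdr)
	{
		int idx = get_pkt_index(hdr);

		if (idx < 0)
			return -EINVAL;	/* unknown type, or size mismatch */

		/* cvp_hfi_defs[idx].buf_offset and .buf_num now bound any
		 * later per-buffer indexing into the packet body.
		 */
		return 0;
	}
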
@@ -164,107 +222,6 @@
 	}
 }
 
-static void __sim_modify_cmd_packet(u8 *packet, struct venus_hfi_device *device)
-{
-	struct hfi_cmd_sys_session_init_packet *sys_init;
-	struct hal_session *session = NULL;
-	u8 i;
-	phys_addr_t fw_bias = 0;
-
-	if (!device || !packet) {
-		dprintk(CVP_ERR, "Invalid Param\n");
-		return;
-	} else if (!device->hal_data->firmware_base
-			|| is_iommu_present(device->res)) {
-		return;
-	}
-
-	fw_bias = device->hal_data->firmware_base;
-	sys_init = (struct hfi_cmd_sys_session_init_packet *)packet;
-
-	session = __get_session(device, sys_init->session_id);
-	if (!session) {
-		dprintk(CVP_DBG, "%s :Invalid session id: %x\n",
-				__func__, sys_init->session_id);
-		return;
-	}
-
-	switch (sys_init->packet_type) {
-	case HFI_CMD_SESSION_EMPTY_BUFFER:
-		if (session->is_decoder) {
-			struct hfi_cmd_session_empty_buffer_compressed_packet
-			*pkt = (struct
-			hfi_cmd_session_empty_buffer_compressed_packet
-			*) packet;
-			pkt->packet_buffer -= fw_bias;
-		} else {
-			struct
-			hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
-			*pkt = (struct
-			hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
-			*) packet;
-			pkt->packet_buffer -= fw_bias;
-		}
-		break;
-	case HFI_CMD_SESSION_FILL_BUFFER:
-	{
-		struct hfi_cmd_session_fill_buffer_packet *pkt =
-			(struct hfi_cmd_session_fill_buffer_packet *)packet;
-		pkt->packet_buffer -= fw_bias;
-		break;
-	}
-	case HFI_CMD_SESSION_SET_BUFFERS:
-	{
-		struct hfi_cmd_session_set_buffers_packet *pkt =
-			(struct hfi_cmd_session_set_buffers_packet *)packet;
-		if (pkt->buffer_type == HFI_BUFFER_OUTPUT ||
-			pkt->buffer_type == HFI_BUFFER_OUTPUT2) {
-			struct hfi_buffer_info *buff;
-
-			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
-			buff->buffer_addr -= fw_bias;
-			if (buff->extra_data_addr >= fw_bias)
-				buff->extra_data_addr -= fw_bias;
-		} else {
-			for (i = 0; i < pkt->num_buffers; i++)
-				pkt->rg_buffer_info[i] -= fw_bias;
-		}
-		break;
-	}
-	case HFI_CMD_SESSION_RELEASE_BUFFERS:
-	{
-		struct hfi_cmd_session_release_buffer_packet *pkt =
-			(struct hfi_cmd_session_release_buffer_packet *)packet;
-
-		if (pkt->buffer_type == HFI_BUFFER_OUTPUT ||
-			pkt->buffer_type == HFI_BUFFER_OUTPUT2) {
-			struct hfi_buffer_info *buff;
-
-			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
-			buff->buffer_addr -= fw_bias;
-			buff->extra_data_addr -= fw_bias;
-		} else {
-			for (i = 0; i < pkt->num_buffers; i++)
-				pkt->rg_buffer_info[i] -= fw_bias;
-		}
-		break;
-	}
-	case HFI_CMD_SESSION_REGISTER_BUFFERS:
-	{
-		struct hfi_cmd_session_register_buffers_packet *pkt =
-			(struct hfi_cmd_session_register_buffers_packet *)
-			packet;
-		struct hfi_buffer_mapping_type *buf =
-			(struct hfi_buffer_mapping_type *)pkt->buffer;
-		for (i = 0; i < pkt->num_buffers; i++)
-			buf[i].device_addr -= fw_bias;
-		break;
-	}
-	default:
-		break;
-	}
-}
-
 static int __dsp_send_hfi_queue(struct venus_hfi_device *device)
 {
 	int rc;
@@ -328,7 +285,7 @@
 	}
 
 	dprintk(CVP_DBG, "%s: suspend dsp\n", __func__);
-	rc = fastcvpd_video_suspend(flags);
+	rc = cvp_dsp_suspend(flags);
 	if (rc) {
 		dprintk(CVP_ERR, "%s: dsp suspend failed with error %d\n",
 			__func__, rc);
@@ -353,7 +310,7 @@
 	}
 
 	dprintk(CVP_DBG, "%s: resume dsp\n", __func__);
-	rc = fastcvpd_video_resume(flags);
+	rc = cvp_dsp_resume(flags);
 	if (rc) {
 		dprintk(CVP_ERR,
 			"%s: dsp resume failed with error %d\n",
@@ -379,7 +336,7 @@
 	}
 
 	dprintk(CVP_DBG, "%s: shutdown dsp\n", __func__);
-	rc = fastcvpd_video_shutdown(flags);
+	rc = cvp_dsp_shutdown(flags);
 	if (rc) {
 		dprintk(CVP_ERR,
 			"%s: dsp shutdown failed with error %d\n",
@@ -392,87 +349,16 @@
 	return rc;
 }
 
-static int __session_pause(struct venus_hfi_device *device,
-		struct hal_session *session)
-{
-	int rc = 0;
-
-	/* ignore if session paused already */
-	if (session->flags & SESSION_PAUSE)
-		return 0;
-
-	session->flags |= SESSION_PAUSE;
-	dprintk(CVP_DBG, "%s: cvp session %x paused\n", __func__,
-		hash32_ptr(session));
-
-	return rc;
-}
-
-static int __session_resume(struct venus_hfi_device *device,
-		struct hal_session *session)
-{
-	int rc = 0;
-
-	/* ignore if session already resumed */
-	if (!(session->flags & SESSION_PAUSE))
-		return 0;
-
-	session->flags &= ~SESSION_PAUSE;
-	dprintk(CVP_DBG, "%s: cvp session %x resumed\n", __func__,
-		hash32_ptr(session));
-
-	rc = __resume(device);
-	if (rc) {
-		dprintk(CVP_ERR, "%s: resume failed\n", __func__);
-		goto exit;
-	}
-
-	if (device->dsp_flags & DSP_SUSPEND) {
-		dprintk(CVP_ERR, "%s: dsp not resumed\n", __func__);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-exit:
-	return rc;
-}
-
 static int venus_hfi_session_pause(void *sess)
 {
-	int rc;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	device = session->device;
-
-	mutex_lock(&device->lock);
-	rc = __session_pause(device, session);
-	mutex_unlock(&device->lock);
-
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
+	return -EINVAL;
 }
 
 static int venus_hfi_session_resume(void *sess)
 {
-	int rc;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	device = session->device;
-
-	mutex_lock(&device->lock);
-	rc = __session_resume(device, session);
-	mutex_unlock(&device->lock);
-
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
+	return -EINVAL;
 }
 
 static int __acquire_regulator(struct regulator_info *rinfo,
@@ -632,71 +518,6 @@
 	return 0;
 }
 
-static void __hal_sim_modify_msg_packet(u8 *packet,
-					struct venus_hfi_device *device)
-{
-	struct hfi_msg_sys_session_init_done_packet *init_done;
-	struct hal_session *session = NULL;
-	phys_addr_t fw_bias = 0;
-
-	if (!device || !packet) {
-		dprintk(CVP_ERR, "Invalid Param\n");
-		return;
-	} else if (!device->hal_data->firmware_base
-			|| is_iommu_present(device->res)) {
-		return;
-	}
-
-	fw_bias = device->hal_data->firmware_base;
-	init_done = (struct hfi_msg_sys_session_init_done_packet *)packet;
-	session = __get_session(device, init_done->session_id);
-
-	if (!session) {
-		dprintk(CVP_DBG, "%s: Invalid session id: %x\n",
-				__func__, init_done->session_id);
-		return;
-	}
-
-	switch (init_done->packet_type) {
-	case HFI_MSG_SESSION_FILL_BUFFER_DONE:
-		if (session->is_decoder) {
-			struct
-			hfi_msg_session_fbd_uncompressed_plane0_packet
-			*pkt_uc = (struct
-			hfi_msg_session_fbd_uncompressed_plane0_packet
-			*) packet;
-			pkt_uc->packet_buffer += fw_bias;
-		} else {
-			struct
-			hfi_msg_session_fill_buffer_done_compressed_packet
-			*pkt = (struct
-			hfi_msg_session_fill_buffer_done_compressed_packet
-			*) packet;
-			pkt->packet_buffer += fw_bias;
-		}
-		break;
-	case HFI_MSG_SESSION_EMPTY_BUFFER_DONE:
-	{
-		struct hfi_msg_session_empty_buffer_done_packet *pkt =
-		(struct hfi_msg_session_empty_buffer_done_packet *)packet;
-		pkt->packet_buffer += fw_bias;
-		break;
-	}
-	case HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE:
-	{
-		struct
-		hfi_msg_session_get_sequence_header_done_packet
-		*pkt =
-		(struct hfi_msg_session_get_sequence_header_done_packet *)
-		packet;
-		pkt->sequence_header += fw_bias;
-		break;
-	}
-	default:
-		break;
-	}
-}
-
 static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
 		u32 *pb_tx_req_is_set)
 {
@@ -1302,7 +1123,7 @@
 			}
 
 			trace_msm_cvp_perf_clock_scale(cl->name, freq);
-			dprintk(CVP_PROF, "Scaling clock %s to %u\n",
+			dprintk(CVP_DBG, "Scaling clock %s to %u\n",
 					cl->name, freq);
 		}
 	}
@@ -1387,7 +1208,6 @@
 		goto err_q_null;
 	}
 
-	__sim_modify_cmd_packet((u8 *)pkt, device);
 	if (__resume(device)) {
 		dprintk(CVP_ERR, "%s: Power on failed\n", __func__);
 		goto err_q_write;
@@ -1456,7 +1276,6 @@
 	}
 
 	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
-		__hal_sim_modify_msg_packet((u8 *)pkt, device);
 		if (tx_req_is_set)
 			__write_register(device, CVP_CPU_CS_H2ASOFTINT, 1);
 		rc = 0;
@@ -2076,9 +1895,10 @@
 
 	__enable_subcaches(device);
 	__set_subcaches(device);
-
 	__dsp_send_hfi_queue(device);
 
+	__set_ubwc_config(device);
+
 	if (dev->res->pm_qos_latency_us) {
 #ifdef CONFIG_SMP
 		dev->qos.type = PM_QOS_REQ_AFFINE_IRQ;
@@ -2552,7 +2372,7 @@
 		goto err_create_pkt;
 	}
 
-	dprintk(CVP_INFO, "set buffers: %#x\n", buffer_info->buffer_type);
+	dprintk(CVP_DBG, "set buffers: %#x\n", buffer_info->buffer_type);
 	if (__iface_cmdq_write(session->device, &pkt))
 		rc = -ENOTEMPTY;
 
@@ -2602,129 +2422,29 @@
 	return rc;
 }
 
-static int venus_hfi_session_register_buffer(void *sess,
-		struct cvp_register_buffer *buffer)
-{
-	int rc = 0;
-	u8 packet[CVP_IFACEQ_VAR_LARGE_PKT_SIZE];
-	struct hfi_cmd_session_register_buffers_packet *pkt;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device || !buffer) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	device = session->device;
-
-	mutex_lock(&device->lock);
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto exit;
-	}
-	pkt = (struct hfi_cmd_session_register_buffers_packet *)packet;
-	rc = call_hfi_pkt_op(device, session_register_buffer, pkt,
-			session, buffer);
-	if (rc) {
-		dprintk(CVP_ERR, "%s: failed to create packet\n", __func__);
-		goto exit;
-	}
-	if (__iface_cmdq_write(session->device, pkt))
-		rc = -ENOTEMPTY;
-exit:
-	mutex_unlock(&device->lock);
-
-	return rc;
-}
-
-static int venus_hfi_session_unregister_buffer(void *sess,
-		struct cvp_unregister_buffer *buffer)
-{
-	int rc = 0;
-	u8 packet[CVP_IFACEQ_VAR_LARGE_PKT_SIZE];
-	struct hfi_cmd_session_unregister_buffers_packet *pkt;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device || !buffer) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	device = session->device;
-
-	mutex_lock(&device->lock);
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto exit;
-	}
-	pkt = (struct hfi_cmd_session_unregister_buffers_packet *)packet;
-	rc = call_hfi_pkt_op(device, session_unregister_buffer, pkt,
-			session, buffer);
-	if (rc) {
-		dprintk(CVP_ERR, "%s: failed to create packet\n", __func__);
-		goto exit;
-	}
-	if (__iface_cmdq_write(session->device, pkt))
-		rc = -ENOTEMPTY;
-exit:
-	mutex_unlock(&device->lock);
-
-	return rc;
-}
-
 static int venus_hfi_session_start(void *session)
 {
 	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
-		return -EINVAL;
+	return -EINVAL;
 }
 
 static int venus_hfi_session_continue(void *session)
 {
-	struct hal_session *sess;
-	struct venus_hfi_device *device;
-	int rc = 0;
-
-	if (!session) {
-		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
-		return -EINVAL;
-	}
-
-	sess = session;
-	device = sess->device;
-
-	mutex_lock(&device->lock);
-	rc = __send_session_cmd(sess, HFI_CMD_SESSION_CONTINUE);
-	mutex_unlock(&device->lock);
-
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
+	return -EINVAL;
 }
 
 static int venus_hfi_session_stop(void *session)
 {
-	struct hal_session *sess;
-	struct venus_hfi_device *device;
-	int rc = 0;
-
-	if (!session) {
-		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
-		return -EINVAL;
-	}
-
-	sess = session;
-	device = sess->device;
-
-	mutex_lock(&device->lock);
-	rc = __send_session_cmd(sess, HFI_CMD_SESSION_STOP);
-	mutex_unlock(&device->lock);
-
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
+	return -EINVAL;
 }
 
-static int venus_hfi_session_cvp_dfs_config(void *sess,
-		struct msm_cvp_dfsconfig *dfs_config)
+static int venus_hfi_session_send(void *sess,
+		struct cvp_kmd_hfi_packet *in_pkt)
 {
 	int rc = 0;
-	struct hfi_cmd_session_cvp_dfs_config pkt;
+	struct cvp_kmd_hfi_packet pkt;
 	struct hal_session *session = sess;
 	struct venus_hfi_device *device;
 
@@ -2738,70 +2458,26 @@
 
 	if (!__is_session_valid(device, session, __func__)) {
 		rc = -EINVAL;
-		goto err_create_pkt;
+		goto err_send_pkt;
 	}
-	rc = call_hfi_pkt_op(device, session_cvp_dfs_config,
-			&pkt, session, dfs_config);
+	rc = call_hfi_pkt_op(device, session_send,
+			&pkt, session, in_pkt);
 	if (rc) {
 		dprintk(CVP_ERR,
-				"Session get buf req: failed to create pkt\n");
-		goto err_create_pkt;
+				"failed to create pkt\n");
+		goto err_send_pkt;
 	}
 
-	dprintk(CVP_DBG, "%s: calling __iface_cmdq_write\n", __func__);
 	if (__iface_cmdq_write(session->device, &pkt))
 		rc = -ENOTEMPTY;
-	dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
-err_create_pkt:
+
+err_send_pkt:
 	mutex_unlock(&device->lock);
 	return rc;
 }
 
-static int venus_hfi_session_cvp_dfs_frame(void *sess,
-				struct msm_cvp_dfsframe *dfs_frame)
-{
-	int rc = 0;
-	struct hfi_cmd_session_cvp_dfs_frame pkt;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "invalid session");
-		return -ENODEV;
-	}
-
-	device = session->device;
-	mutex_lock(&device->lock);
-
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto err_create_pkt;
-	}
-	rc = call_hfi_pkt_op(device, session_cvp_dfs_frame,
-			&pkt, session, dfs_frame);
-	if (rc) {
-		dprintk(CVP_ERR,
-				"Session get buf req: failed to create pkt\n");
-		goto err_create_pkt;
-	}
-
-	dprintk(CVP_DBG, "%s: calling __iface_cmdq_write\n", __func__);
-	if (__iface_cmdq_write(session->device, &pkt))
-		rc = -ENOTEMPTY;
-	dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
-err_create_pkt:
-	mutex_unlock(&device->lock);
-	return rc;
-}
-
-static int venus_hfi_session_cvp_send_cmd(void *sess,
-	struct cvp_frame_data *input_frame)
-{
-	return 0;
-}
-
 static int venus_hfi_session_get_buf_req(void *sess)
 {
 	struct hfi_cmd_session_get_property_packet pkt;
@@ -2838,35 +2514,8 @@
 
 static int venus_hfi_session_flush(void *sess, enum hal_flush flush_mode)
 {
-	struct hfi_cmd_session_flush_packet pkt;
-	int rc = 0;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "invalid session");
-		return -ENODEV;
-	}
-
-	device = session->device;
-	mutex_lock(&device->lock);
-
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto err_create_pkt;
-	}
-	rc = call_hfi_pkt_op(device, session_flush,
-			&pkt, session, flush_mode);
-	if (rc) {
-		dprintk(CVP_ERR, "Session flush: failed to create pkt\n");
-		goto err_create_pkt;
-	}
-
-	if (__iface_cmdq_write(session->device, &pkt))
-		rc = -ENOTEMPTY;
-err_create_pkt:
-	mutex_unlock(&device->lock);
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
+	return -EINVAL;
 }
 
 static int __check_core_registered(struct hal_device_data core,
@@ -3205,6 +2854,81 @@
 	return NULL;
 }
 
+#define _INVALID_MSG_ "Unrecognized MSG (%#x) session (%pK), discarding\n"
+#define _INVALID_STATE_ "Ignore responses from %d to %d invalid state\n"
+#define _DEVFREQ_FAIL_ "Failed to add devfreq device bus %s governor %s: %d\n"
+
+static void process_system_msg(struct msm_cvp_cb_info *info,
+		struct venus_hfi_device *device,
+		void *raw_packet)
+{
+	struct cvp_hal_sys_init_done sys_init_done = {0};
+
+	switch (info->response_type) {
+	case HAL_SYS_ERROR:
+		__process_sys_error(device);
+		break;
+	case HAL_SYS_RELEASE_RESOURCE_DONE:
+		dprintk(CVP_DBG, "Received SYS_RELEASE_RESOURCE\n");
+		break;
+	case HAL_SYS_INIT_DONE:
+		dprintk(CVP_DBG, "Received SYS_INIT_DONE\n");
+
+		sys_init_done.capabilities =
+			device->sys_init_capabilities;
+		cvp_hfi_process_sys_init_done_prop_read(
+			(struct hfi_msg_sys_init_done_packet *)
+				raw_packet, &sys_init_done);
+		info->response.cmd.data.sys_init_done = sys_init_done;
+		break;
+	default:
+		break;
+	}
+}
+
+static void **get_session_id(struct msm_cvp_cb_info *info)
+{
+	void **session_id = NULL;
+
+	/* For session-related packets, validate session */
+	switch (info->response_type) {
+	case HAL_SESSION_INIT_DONE:
+	case HAL_SESSION_END_DONE:
+	case HAL_SESSION_ABORT_DONE:
+	case HAL_SESSION_STOP_DONE:
+	case HAL_SESSION_FLUSH_DONE:
+	case HAL_SESSION_SET_BUFFER_DONE:
+	case HAL_SESSION_SUSPEND_DONE:
+	case HAL_SESSION_RESUME_DONE:
+	case HAL_SESSION_SET_PROP_DONE:
+	case HAL_SESSION_GET_PROP_DONE:
+	case HAL_SESSION_RELEASE_BUFFER_DONE:
+	case HAL_SESSION_REGISTER_BUFFER_DONE:
+	case HAL_SESSION_UNREGISTER_BUFFER_DONE:
+	case HAL_SESSION_DFS_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE:
+	case HAL_SESSION_DFS_FRAME_CMD_DONE:
+	case HAL_SESSION_DME_FRAME_CMD_DONE:
+	case HAL_SESSION_PERSIST_CMD_DONE:
+	case HAL_SESSION_PROPERTY_INFO:
+		session_id = &info->response.cmd.session_id;
+		break;
+	case HAL_SESSION_ERROR:
+		session_id = &info->response.data.session_id;
+		break;
+	case HAL_SESSION_EVENT_CHANGE:
+		session_id = &info->response.event.session_id;
+		break;
+	case HAL_RESPONSE_UNUSED:
+	default:
+		session_id = NULL;
+		break;
+	}
+	return session_id;
+}
+
 static int __response_handler(struct venus_hfi_device *device)
 {
 	struct msm_cvp_cb_info *packets;
@@ -3249,7 +2973,6 @@
 	while (!__iface_msgq_read(device, raw_packet)) {
 		void **session_id = NULL;
 		struct msm_cvp_cb_info *info = &packets[packet_count++];
-		struct cvp_hal_sys_init_done sys_init_done = {0};
 		int rc = 0;
 
 		rc = cvp_hfi_process_msg_packet(device->device_id,
@@ -3259,62 +2982,15 @@
 					"Corrupt/unknown packet found, discarding\n");
 			--packet_count;
 			continue;
+		} else if (info->response_type == HAL_NO_RESP) {
+			--packet_count;
+			continue;
 		}
 
 		/* Process the packet types that we're interested in */
-		switch (info->response_type) {
-		case HAL_SYS_ERROR:
-			__process_sys_error(device);
-			break;
-		case HAL_SYS_RELEASE_RESOURCE_DONE:
-			dprintk(CVP_DBG, "Received SYS_RELEASE_RESOURCE\n");
-			break;
-		case HAL_SYS_INIT_DONE:
-			dprintk(CVP_DBG, "Received SYS_INIT_DONE\n");
+		process_system_msg(info, device, raw_packet);
 
-			sys_init_done.capabilities =
-				device->sys_init_capabilities;
-			cvp_hfi_process_sys_init_done_prop_read(
-				(struct hfi_msg_sys_init_done_packet *)
-					raw_packet, &sys_init_done);
-			info->response.cmd.data.sys_init_done = sys_init_done;
-			break;
-		default:
-			break;
-		}
-
-		/* For session-related packets, validate session */
-		switch (info->response_type) {
-		case HAL_SESSION_INIT_DONE:
-		case HAL_SESSION_END_DONE:
-		case HAL_SESSION_ABORT_DONE:
-		case HAL_SESSION_STOP_DONE:
-		case HAL_SESSION_FLUSH_DONE:
-		case HAL_SESSION_SET_BUFFER_DONE:
-		case HAL_SESSION_SUSPEND_DONE:
-		case HAL_SESSION_RESUME_DONE:
-		case HAL_SESSION_SET_PROP_DONE:
-		case HAL_SESSION_GET_PROP_DONE:
-		case HAL_SESSION_RELEASE_BUFFER_DONE:
-		case HAL_SESSION_REGISTER_BUFFER_DONE:
-		case HAL_SESSION_UNREGISTER_BUFFER_DONE:
-		case HAL_SESSION_DFS_CONFIG_CMD_DONE:
-		case HAL_SESSION_DFS_FRAME_CMD_DONE:
-		case HAL_SESSION_PROPERTY_INFO:
-			session_id = &info->response.cmd.session_id;
-			break;
-		case HAL_SESSION_ERROR:
-			session_id = &info->response.data.session_id;
-			break;
-		case HAL_SESSION_EVENT_CHANGE:
-			session_id = &info->response.event.session_id;
-			break;
-		case HAL_RESPONSE_UNUSED:
-		default:
-			session_id = NULL;
-			break;
-		}
-
+		session_id = get_session_id(info);
 		/*
 		 * hfi_process_msg_packet provides a session_id that's a hashed
 		 * value of struct hal_session, we need to coerce the hashed
@@ -3333,8 +3009,7 @@
 			session = __get_session(device,
 					(u32)(uintptr_t)*session_id);
 			if (!session) {
-				dprintk(CVP_ERR,
-						"Received a packet (%#x) for an unrecognized session (%pK), discarding\n",
+				dprintk(CVP_ERR, _INVALID_MSG_,
 						info->response_type,
 						*session_id);
 				--packet_count;
@@ -3347,7 +3022,7 @@
 		if (packet_count >= cvp_max_packets &&
 				__get_q_size(device, CVP_IFACEQ_MSGQ_IDX)) {
 			dprintk(CVP_WARN,
-					"Too many packets in message queue to handle at once, deferring read\n");
+				"Too many packets in message queue!\n");
 			break;
 		}
 
@@ -3420,8 +3095,7 @@
 
 		if (!__core_in_valid_state(device)) {
 			dprintk(CVP_ERR,
-				"Ignore responses from %d to %d as device is in invalid state",
-				(i + 1), num_responses);
+				_INVALID_STATE_, (i + 1), num_responses);
 			break;
 		}
 		dprintk(CVP_DBG, "Processing response %d of %d, type %d\n",
@@ -3732,9 +3406,8 @@
 				&bus->devfreq_prof, bus->governor, NULL);
 		if (IS_ERR_OR_NULL(bus->devfreq)) {
 			rc = PTR_ERR(bus->devfreq) ?: -EBADHANDLE;
-			dprintk(CVP_ERR,
-					"Failed to add devfreq device for bus %s and governor %s: %d\n",
-					bus->name, bus->governor, rc);
+			dprintk(CVP_ERR, _DEVFREQ_FAIL_,
+				bus->name, bus->governor, rc);
 			bus->devfreq = NULL;
 			goto err_add_dev;
 		}
@@ -3971,14 +3644,12 @@
 	venus_hfi_for_each_regulator(device, rinfo) {
 		rc = regulator_enable(rinfo->regulator);
 		if (rc) {
-			dprintk(CVP_ERR,
-					"Failed to enable %s: %d\n",
+			dprintk(CVP_ERR, "Failed to enable %s: %d\n",
 					rinfo->name, rc);
 			goto err_reg_enable_failed;
 		}
 
-		dprintk(CVP_DBG, "Enabled regulator %s\n",
-				rinfo->name);
+		dprintk(CVP_DBG, "Enabled regulator %s\n", rinfo->name);
 		c++;
 	}
 
@@ -3997,8 +3668,12 @@
 
 	dprintk(CVP_DBG, "Disabling regulators\n");
 
-	venus_hfi_for_each_regulator_reverse(device, rinfo)
+	venus_hfi_for_each_regulator_reverse(device, rinfo) {
 		__disable_regulator(rinfo, device);
+		if (rinfo->has_hw_power_collapse)
+			regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_NORMAL);
+	}
 
 	return 0;
 }
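
The added loop body returns any regulator that supports hardware power collapse to NORMAL mode once it has been disabled, so the next enable starts from a known mode rather than a stale low-power setting. A condensed sketch of that order of operations (the helper name is illustrative; regulator_set_mode and regulator_disable are standard regulator framework calls):

	static void example_disable_and_reset_mode(struct regulator_info *rinfo)
	{
		/* disable first, then clear any low-power mode left behind */
		regulator_disable(rinfo->regulator);
		if (rinfo->has_hw_power_collapse)
			regulator_set_mode(rinfo->regulator,
					REGULATOR_MODE_NORMAL);
	}
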
@@ -4033,10 +3708,6 @@
 		CVP_WRAPPER_INTR_MASK, mask_val);
 }
 
-static void interrupt_init_vpu4(struct venus_hfi_device *device)
-{
-}
-
 static void setup_dsp_uc_memmap_vpu5(struct venus_hfi_device *device)
 {
 	/* initialize DSP QTBL & UCREGION with CPU queues */
@@ -4053,6 +3724,35 @@
 		__write_register(device, CVP_WRAPPER_CPU_CLOCK_CONFIG, 0);
 }
 
+static int __set_ubwc_config(struct venus_hfi_device *device)
+{
+	u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+
+	struct hfi_cmd_sys_set_property_packet *pkt =
+		(struct hfi_cmd_sys_set_property_packet *) &packet;
+
+	if (!device->res->ubwc_config)
+		return 0;
+
+	rc = call_hfi_pkt_op(device, sys_ubwc_config, pkt,
+		device->res->ubwc_config);
+	if (rc) {
+		dprintk(CVP_WARN,
+			"Failed to create UBWC config packet\n");
+		rc = -ENOTEMPTY;
+		goto fail_to_set_ubwc_config;
+	}
+
+	if (__iface_cmdq_write(device, pkt)) {
+		rc = -ENOTEMPTY;
+		goto fail_to_set_ubwc_config;
+	}
+
+fail_to_set_ubwc_config:
+	return rc;
+}
+
 static int __venus_power_on(struct venus_hfi_device *device)
 {
 	int rc = 0;
@@ -4085,7 +3785,7 @@
 	rc = __scale_clocks(device);
 	if (rc) {
 		dprintk(CVP_WARN,
-				"Failed to scale clocks, performance might be affected\n");
+			"Failed to scale clocks, perf may regress\n");
 		rc = 0;
 	}
 
@@ -4401,85 +4101,9 @@
 	return rc;
 }
 
-static void __noc_error_info(struct venus_hfi_device *device, u32 core_num)
-{
-	u32 vcodec_core_video_noc_base_offs, val;
-
-	if (!device) {
-		dprintk(CVP_ERR, "%s: null device\n", __func__);
-		return;
-	}
-	if (!core_num) {
-		vcodec_core_video_noc_base_offs =
-			VCODEC_CORE0_VIDEO_NOC_BASE_OFFS;
-	} else if (core_num == 1) {
-		vcodec_core_video_noc_base_offs =
-			VCODEC_CORE1_VIDEO_NOC_BASE_OFFS;
-	} else {
-		dprintk(CVP_ERR, "%s: invalid core_num %u\n",
-			__func__, core_num);
-		return;
-	}
-
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_SWID_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_SWID_LOW:     %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_SWID_HIGH_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_SWID_HIGH:    %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_MAINCTL_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_MAINCTL_LOW:  %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG0_LOW:  %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_HIGH_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG0_HIGH: %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG1_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG1_LOW:  %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG1_HIGH_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG1_HIGH: %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG2_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG2_LOW:  %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG2_HIGH_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG2_HIGH: %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG3_LOW:  %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_HIGH_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG3_HIGH: %#x\n", core_num, val);
-}
-
 static int venus_hfi_noc_error_info(void *dev)
 {
-	struct venus_hfi_device *device;
-	const u32 core0 = 0, core1 = 1;
-
-	if (!dev) {
-		dprintk(CVP_ERR, "%s: null device\n", __func__);
-		return -EINVAL;
-	}
-	device = dev;
-
-	mutex_lock(&device->lock);
-	dprintk(CVP_ERR, "%s: non error information\n", __func__);
-
-	if (__read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
-		__noc_error_info(device, core0);
-
-	if (__read_register(device, VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
-		__noc_error_info(device, core1);
-
-	mutex_unlock(&device->lock);
-
+	dprintk(CVP_ERR, "%s not supported yet!\n", __func__);
 	return 0;
 }
 
@@ -4506,10 +4130,7 @@
 
 void __init_cvp_ops(struct venus_hfi_device *device)
 {
-	if (device->res->vpu_ver == VPU_VERSION_4)
-		device->vpu_ops = &cvp_vpu4_ops;
-	else
-		device->vpu_ops = &cvp_vpu5_ops;
+	device->vpu_ops = &cvp_vpu5_ops;
 }
 
 static struct venus_hfi_device *__add_device(u32 device_id,
@@ -4582,6 +4203,8 @@
 	return hdevice;
 
 err_cleanup:
+	if (hdevice->venus_pm_workq)
+		destroy_workqueue(hdevice->venus_pm_workq);
 	if (hdevice->cvp_workq)
 		destroy_workqueue(hdevice->cvp_workq);
 	kfree(hdevice->response_pkt);
@@ -4647,15 +4270,11 @@
 	hdev->session_clean = venus_hfi_session_clean;
 	hdev->session_set_buffers = venus_hfi_session_set_buffers;
 	hdev->session_release_buffers = venus_hfi_session_release_buffers;
-	hdev->session_register_buffer = venus_hfi_session_register_buffer;
-	hdev->session_unregister_buffer = venus_hfi_session_unregister_buffer;
 	hdev->session_start = venus_hfi_session_start;
 	hdev->session_continue = venus_hfi_session_continue;
 	hdev->session_stop = venus_hfi_session_stop;
 	hdev->session_get_buf_req = venus_hfi_session_get_buf_req;
-	hdev->session_cvp_dfs_config = venus_hfi_session_cvp_dfs_config;
-	hdev->session_cvp_dfs_frame = venus_hfi_session_cvp_dfs_frame;
-	hdev->session_cvp_send_cmd = venus_hfi_session_cvp_send_cmd;
+	hdev->session_send = venus_hfi_session_send;
 	hdev->session_flush = venus_hfi_session_flush;
 	hdev->session_set_property = venus_hfi_session_set_property;
 	hdev->session_get_property = venus_hfi_session_get_property;
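
With the per-opcode entry points removed, venus_hfi_session_send is the single command path, and per-opcode differences now live entirely in the cvp_hfi_defs table. A hedged sketch of how a caller can pair a command with its completion through the table; queueing and wait plumbing are driver-specific and omitted:

	static int example_send_and_maybe_wait(struct cvp_hal_session_cmd_pkt *hdr)
	{
		int idx = get_pkt_index(hdr);

		if (idx < 0)
			return -EINVAL;

		/* ... write the packet to the command queue here ... */

		if (cvp_hfi_defs[idx].resp == HAL_NO_RESP)
			return 0;	/* fire-and-forget command */

		/* ... otherwise block for cvp_hfi_defs[idx].resp ... */
		return 0;
	}
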
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.h b/drivers/media/platform/msm/cvp/cvp_hfi.h
index e19532d..b2aa67a 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.h
@@ -80,26 +80,6 @@
 #define HFI_INDEX_EXTRADATA_OUTPUT_CROP		0x0700000F
 #define HFI_INDEX_EXTRADATA_ASPECT_RATIO	0x7F100003
 
-struct hfi_buffer_alloc_mode {
-	u32 buffer_type;
-	u32 buffer_mode;
-};
-
-
-struct hfi_index_extradata_config {
-	int enable;
-	u32 index_extra_data_id;
-};
-
-struct hfi_extradata_header {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 type;
-	u32 data_size;
-	u8 rg_data[1];
-};
-
 #define HFI_INTERLACE_FRAME_PROGRESSIVE					0x01
 #define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST	0x02
 #define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST	0x04
@@ -222,60 +202,6 @@
 #define HFI_PROPERTY_CONFIG_VPE_OX_START				\
 	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000)
 
-struct hfi_batch_info {
-	u32 input_batch_count;
-	u32 output_batch_count;
-};
-
-struct hfi_buffer_count_actual {
-	u32 buffer_type;
-	u32 buffer_count_actual;
-	u32 buffer_count_min_host;
-};
-
-struct hfi_buffer_size_minimum {
-	u32 buffer_type;
-	u32 buffer_size;
-};
-
-struct hfi_buffer_requirements {
-	u32 buffer_type;
-	u32 buffer_size;
-	u32 buffer_region_size;
-	u32 buffer_count_min;
-	u32 buffer_count_min_host;
-	u32 buffer_count_actual;
-	u32 contiguous;
-	u32 buffer_alignment;
-};
-
-struct hfi_data_payload {
-	u32 size;
-	u8 rg_data[1];
-};
-
-struct hfi_enable_picture {
-	u32 picture_type;
-};
-
-struct hfi_mb_error_map {
-	u32 error_map_size;
-	u8 rg_error_map[1];
-};
-
-struct hfi_metadata_pass_through {
-	int enable;
-	u32 size;
-};
-
-struct hfi_multi_view_select {
-	u32 view_index;
-};
-
-struct hfi_hybrid_hierp {
-	u32 layers;
-};
-
 #define HFI_PRIORITY_LOW		10
 #define HFI_PRIOIRTY_MEDIUM		20
 #define HFI_PRIORITY_HIGH		30
@@ -292,13 +218,6 @@
 #define HFI_RATE_CONTROL_MBR_VFR	(HFI_OX_BASE + 0x7)
 #define HFI_RATE_CONTROL_CQ		(HFI_OX_BASE + 0x8)
 
-
-struct hfi_uncompressed_plane_actual_constraints_info {
-	u32 buffer_type;
-	u32 num_planes;
-	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
-};
-
 #define HFI_CMD_SYS_OX_START		\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_CMD_START_OFFSET + 0x0000)
 #define HFI_CMD_SYS_SESSION_ABORT	(HFI_CMD_SYS_OX_START + 0x001)
@@ -326,10 +245,6 @@
 #define HFI_CMD_SESSION_CVP_START	\
 	(HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET +	\
 	HFI_CMD_START_OFFSET + 0x1000)
-#define HFI_CMD_SESSION_REGISTER_BUFFERS	\
-	(HFI_CMD_SESSION_CVP_START + 0x0A0)
-#define HFI_CMD_SESSION_UNREGISTER_BUFFERS	\
-	(HFI_CMD_SESSION_CVP_START + 0x0A1)
 
 /* =============BASIC OPERATIONS=================*/
 #define  HFI_CMD_SESSION_CVP_SET_BUFFERS\
@@ -384,6 +299,15 @@
 #define HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG\
 	(HFI_CMD_SESSION_CVP_START + 0x017)
 
+#define  HFI_CMD_SESSION_CVP_DME_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x039)
+#define  HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x03B)
+#define  HFI_CMD_SESSION_CVP_DME_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x03A)
+
+#define  HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x04D)
 
 #define HFI_MSG_SYS_OX_START			\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x0000)
@@ -409,10 +333,6 @@
 #define HFI_MSG_SESSION_CVP_START	\
 	(HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET +	\
 	HFI_MSG_START_OFFSET + 0x1000)
-#define HFI_MSG_SESSION_REGISTER_BUFFERS_DONE	\
-	(HFI_MSG_SESSION_CVP_START + 0x0A0)
-#define HFI_MSG_SESSION_UNREGISTER_BUFFERS_DONE	\
-	(HFI_MSG_SESSION_CVP_START + 0x0A1)
 
 /* =================BASIC OPERATIONS=================*/
 #define HFI_MSG_SESSION_CVP_SET_BUFFERS\
@@ -436,7 +356,12 @@
 #define HFI_MSG_SESSION_CVP_FTEXT\
 	(HFI_MSG_SESSION_CVP_START + 0x00A)
 
-#define HFI_MSG_SESSION_CVP_OPERATION_CONFIG (HFI_MSG_SESSION_CVP_START + 0x010)
+#define HFI_MSG_SESSION_CVP_DME\
+	(HFI_MSG_SESSION_CVP_START + 0x023)
+#define HFI_MSG_SESSION_CVP_OPERATION_CONFIG (HFI_MSG_SESSION_CVP_START + 0x030)
+
+#define  HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x034)
 
 #define CVP_IFACEQ_MAX_PKT_SIZE       1024
 #define CVP_IFACEQ_MED_PKT_SIZE       768
@@ -445,37 +370,12 @@
 #define CVP_IFACEQ_VAR_LARGE_PKT_SIZE 512
 #define CVP_IFACEQ_VAR_HUGE_PKT_SIZE  (1024*12)
 
-
-struct hfi_cmd_sys_session_abort_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
 struct hfi_cmd_sys_ping_packet {
 	u32 size;
 	u32 packet_type;
 	u32 client_data;
 };
 
-struct hfi_cmd_session_load_resources_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
-struct hfi_cmd_session_start_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
-struct hfi_cmd_session_stop_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
 struct hfi_cmd_session_empty_buffer_compressed_packet {
 	u32 size;
 	u32 packet_type;
@@ -513,24 +413,6 @@
 	u32 rgData[1];
 };
 
-struct hfi_cmd_session_empty_buffer_uncompressed_plane1_packet {
-	u32 flags;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 offset;
-	u32 packet_buffer2;
-	u32 rgData[1];
-};
-
-struct hfi_cmd_session_empty_buffer_uncompressed_plane2_packet {
-	u32 flags;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 offset;
-	u32 packet_buffer3;
-	u32 rgData[1];
-};
-
 struct hfi_cmd_session_fill_buffer_packet {
 	u32 size;
 	u32 packet_type;
@@ -552,18 +434,6 @@
 	u32 flush_type;
 };
 
-struct hfi_cmd_session_suspend_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
-struct hfi_cmd_session_resume_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
 struct hfi_cmd_session_get_property_packet {
 	u32 size;
 	u32 packet_type;
@@ -577,38 +447,6 @@
 	u32 buf_size[HFI_MAX_PLANES];
 };
 
-struct hfi_cmd_session_cvp_dfs_config {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 srcbuffer_format;
-	struct HFI_CVP_COLOR_PLANE_INFO left_plane_info;
-	struct HFI_CVP_COLOR_PLANE_INFO right_plane_info;
-	u32 width;
-	u32 height;
-	u32 occlusionmask_enable;
-	u32 occlusioncost;
-	u32 occlusionbound;
-	u32 occlusionshift;
-	u32 maxdisparity;
-	u32 disparityoffset;
-	u32 medianfilter_enable;
-	u32 occlusionfilling_enable;
-	u32 occlusionmaskdump;
-	struct hfi_cvp_client_data  clientdata;
-};
-
-struct hfi_cmd_session_cvp_dfs_frame {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 left_buffer_index;
-	u32 right_buffer_index;
-	u32 disparitymap_buffer_idx;
-	u32 occlusionmask_buffer_idx;
-	struct hfi_cvp_client_data  clientdata;
-};
-
 struct hfi_cmd_session_release_buffer_packet {
 	u32 size;
 	u32 packet_type;
@@ -647,295 +485,6 @@
 	u32 rg_property_data[1];
 };
 
-struct hfi_msg_session_load_resources_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_start_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_stop_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_suspend_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_resume_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_flush_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-	u32 flush_type;
-};
-
-struct hfi_ubwc_cr_stats_info_type {
-	u32 cr_stats_info0;
-	u32 cr_stats_info1;
-	u32 cr_stats_info2;
-	u32 cr_stats_info3;
-	u32 cr_stats_info4;
-	u32 cr_stats_info5;
-	u32 cr_stats_info6;
-};
-
-struct hfi_frame_cr_stats_type {
-	u32 frame_index;
-	struct hfi_ubwc_cr_stats_info_type ubwc_stats_info;
-	u32 complexity_number;
-};
-
-struct hfi_msg_session_empty_buffer_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-	u32 offset;
-	u32 filled_len;
-	u32 input_tag;
-	u32 packet_buffer;
-	u32 extra_data_buffer;
-	u32 flags;
-	struct hfi_frame_cr_stats_type ubwc_cr_stats;
-	u32 rgData[0];
-};
-
-struct hfi_msg_session_fill_buffer_done_compressed_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 time_stamp_hi;
-	u32 time_stamp_lo;
-	u32 error_type;
-	u32 flags;
-	u32 mark_target;
-	u32 mark_data;
-	u32 stats;
-	u32 offset;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 input_tag;
-	u32 output_tag;
-	u32 picture_type;
-	u32 packet_buffer;
-	u32 extra_data_buffer;
-	u32 rgData[0];
-};
-
-struct hfi_msg_session_fbd_uncompressed_plane0_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 stream_id;
-	u32 view_id;
-	u32 error_type;
-	u32 time_stamp_hi;
-	u32 time_stamp_lo;
-	u32 flags;
-	u32 mark_target;
-	u32 mark_data;
-	u32 stats;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 offset;
-	u32 frame_width;
-	u32 frame_height;
-	u32 start_x_coord;
-	u32 start_y_coord;
-	u32 input_tag;
-	u32 input_tag2;
-	u32 output_tag;
-	u32 picture_type;
-	u32 packet_buffer;
-	u32 extra_data_buffer;
-	u32 rgData[0];
-};
-
-struct hfi_msg_session_fill_buffer_done_uncompressed_plane1_packet {
-	u32 flags;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 offset;
-	u32 packet_buffer2;
-	u32 rgData[0];
-};
-
-struct hfi_msg_session_fill_buffer_done_uncompressed_plane2_packet {
-	u32 flags;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 offset;
-	u32 packet_buffer3;
-	u32 rgData[0];
-};
-
-struct hfi_msg_session_property_info_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 num_properties;
-	u32 rg_property_data[1];
-};
-
-struct hfi_msg_session_release_resources_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_release_buffers_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-	u32 num_buffers;
-	u32 rg_buffer_info[1];
-};
-
-struct hfi_msg_session_register_buffers_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 client_data;
-	u32 error_type;
-};
-
-struct hfi_msg_session_unregister_buffers_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 client_data;
-	u32 error_type;
-};
-
-struct hfi_extradata_mb_quantization_payload {
-	u8 rg_mb_qp[1];
-};
-
-struct hfi_extradata_timestamp_payload {
-	u32 time_stamp_low;
-	u32 time_stamp_high;
-};
-
-
-struct hfi_extradata_s3d_frame_packing_payload {
-	u32 fpa_id;
-	int cancel_flag;
-	u32 fpa_type;
-	int quin_cunx_flag;
-	u32 content_interprtation_type;
-	int spatial_flipping_flag;
-	int frame0_flipped_flag;
-	int field_views_flag;
-	int current_frame_isFrame0_flag;
-	int frame0_self_contained_flag;
-	int frame1_self_contained_flag;
-	u32 frame0_graid_pos_x;
-	u32 frame0_graid_pos_y;
-	u32 frame1_graid_pos_x;
-	u32 frame1_graid_pos_y;
-	u32 fpa_reserved_byte;
-	u32 fpa_repetition_period;
-	int fpa_extension_flag;
-};
-
-struct hfi_extradata_interlace_video_payload {
-	u32 format;
-};
-
-struct hfi_conceal_color_type {
-	u32 value_8bit;
-	u32 value_10bit;
-};
-
-struct hfi_extradata_num_concealed_mb_payload {
-	u32 num_mb_concealed;
-};
-
-struct hfi_extradata_sliceinfo {
-	u32 offset_in_stream;
-	u32 slice_length;
-};
-
-struct hfi_extradata_multislice_info_payload {
-	u32 num_slices;
-	struct hfi_extradata_sliceinfo rg_slice_info[1];
-};
-
-struct hfi_index_extradata_input_crop_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 left;
-	u32 top;
-	u32 width;
-	u32 height;
-};
-
-struct hfi_index_extradata_output_crop_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 left;
-	u32 top;
-	u32 display_width;
-	u32 display_height;
-	u32 width;
-	u32 height;
-};
-
-struct hfi_index_extradata_digital_zoom_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	int width;
-	int height;
-};
-
-struct hfi_index_extradata_aspect_ratio_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 aspect_width;
-	u32 aspect_height;
-};
-
-struct hfi_extradata_frame_type_payload {
-	u32 frame_rate;
-};
-
-struct hfi_extradata_recovery_point_sei_payload {
-	u32 flag;
-};
-
-struct hfi_cmd_session_continue_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
 enum session_flags {
 	SESSION_PAUSE = BIT(1),
 };
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_api.h b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
index 1e3ef88..7801764 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_api.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
@@ -13,6 +13,7 @@
 #include <linux/hash.h>
 #include "msm_cvp_core.h"
 #include "msm_cvp_resources.h"
+#include "cvp_hfi_helper.h"
 
 #define CONTAINS(__a, __sz, __t) (\
 	(__t >= __a) && \
@@ -56,6 +57,26 @@
 /* 16 encoder and 16 decoder sessions */
 #define CVP_MAX_SESSIONS               32
 
+#define HFI_DFS_CONFIG_CMD_SIZE	38
+#define HFI_DFS_FRAME_CMD_SIZE	16
+#define HFI_DFS_FRAME_BUFFERS_OFFSET 8
+#define HFI_DFS_BUF_NUM 4
+
+#define HFI_DME_CONFIG_CMD_SIZE	194
+#define HFI_DME_BASIC_CONFIG_CMD_SIZE	51
+#define HFI_DME_FRAME_CMD_SIZE	28
+#define HFI_DME_FRAME_BUFFERS_OFFSET 12
+#define HFI_DME_BUF_NUM 8
+
+#define HFI_PERSIST_CMD_SIZE	11
+#define HFI_PERSIST_BUFFERS_OFFSET 7
+#define HFI_PERSIST_BUF_NUM     2
+
+#define HFI_DS_CMD_SIZE	54
+#define HFI_DS_BUFFERS_OFFSET	48
+#define HFI_DS_BUF_NUM	3
+
 enum cvp_status {
 	CVP_ERR_NONE = 0x0,
 	CVP_ERR_FAIL = 0x80000000,
@@ -904,24 +925,6 @@
 	void *resource_handle;
 };
 
-struct cvp_register_buffer {
-	enum hal_buffer type;
-	u32 index;
-	u32 size;
-	u32 device_addr;
-	u32 response_required;
-	u32 client_data;
-};
-
-struct cvp_unregister_buffer {
-	enum hal_buffer type;
-	u32 index;
-	u32 size;
-	u32 device_addr;
-	u32 response_required;
-	u32 client_data;
-};
-
 struct cvp_buffer_addr_info {
 	enum hal_buffer buffer_type;
 	u32 buffer_size;
@@ -1100,7 +1103,7 @@
 #define IS_HAL_SESSION_CMD(cmd) ((cmd) >= HAL_SESSION_EVENT_CHANGE && \
 		(cmd) <= HAL_SESSION_ERROR)
 enum hal_command_response {
-	/* SYSTEM COMMANDS_DONE*/
+	HAL_NO_RESP,
 	HAL_SYS_INIT_DONE,
 	HAL_SYS_SET_RESOURCE_DONE,
 	HAL_SYS_RELEASE_RESOURCE_DONE,
@@ -1118,7 +1121,6 @@
 	HAL_SESSION_ABORT_DONE,
 	HAL_SESSION_STOP_DONE,
 	HAL_SESSION_CVP_OPERATION_CONFIG,
-	HAL_SESSION_CVP_DFS,
 	HAL_SESSION_FLUSH_DONE,
 	HAL_SESSION_SUSPEND_DONE,
 	HAL_SESSION_RESUME_DONE,
@@ -1130,6 +1132,10 @@
 	HAL_SESSION_RELEASE_RESOURCE_DONE,
 	HAL_SESSION_DFS_CONFIG_CMD_DONE,
 	HAL_SESSION_DFS_FRAME_CMD_DONE,
+	HAL_SESSION_DME_CONFIG_CMD_DONE,
+	HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE,
+	HAL_SESSION_DME_FRAME_CMD_DONE,
+	HAL_SESSION_PERSIST_CMD_DONE,
 	HAL_SESSION_PROPERTY_INFO,
 	HAL_SESSION_ERROR,
 	HAL_RESPONSE_UNUSED = 0x10000000,
@@ -1268,6 +1274,7 @@
 	enum cvp_status status;
 	u32 size;
 	union {
+		struct hfi_msg_session_hdr msg_hdr;
 		struct cvp_resource_hdr resource_hdr;
 		struct cvp_buffer_addr_info buffer_addr_info;
 		struct cvp_frame_plane_config frame_plane_config;
@@ -1278,8 +1285,6 @@
 		struct cvp_hal_sys_init_done sys_init_done;
 		struct cvp_hal_session_init_done session_init_done;
 		struct hal_buffer_info buffer_info;
-		struct cvp_register_buffer regbuf;
-		struct cvp_unregister_buffer unregbuf;
 		union hal_get_property property;
 		enum hal_flush flush_type;
 	} data;
@@ -1428,6 +1433,20 @@
 #define call_hfi_op(q, op, args...)			\
 	(((q) && (q)->op) ? ((q)->op(args)) : 0)
 
+/* Buffer descriptor and HFI command table entry types */
+struct buf_desc {
+	u32 fd;
+	u32 size;
+};
+
+struct msm_cvp_hfi_defs {
+	unsigned int size;
+	unsigned int type;
+	unsigned int buf_offset;
+	unsigned int buf_num;
+	enum hal_command_response resp;
+};
+
 struct hfi_device {
 	void *hfi_device_data;
 
@@ -1445,23 +1464,13 @@
 				struct cvp_buffer_addr_info *buffer_info);
 	int (*session_release_buffers)(void *sess,
 				struct cvp_buffer_addr_info *buffer_info);
-	int (*session_register_buffer)(void *sess,
-				struct cvp_register_buffer *buffer);
-	int (*session_unregister_buffer)(void *sess,
-				struct cvp_unregister_buffer *buffer);
 	int (*session_load_res)(void *sess);
 	int (*session_release_res)(void *sess);
 	int (*session_start)(void *sess);
 	int (*session_continue)(void *sess);
 	int (*session_stop)(void *sess);
-	int (*session_cvp_operation_config)(void *sess,
-		struct cvp_frame_data *input_frame);
-	int (*session_cvp_dfs_config)(void *sess,
-		struct msm_cvp_dfsconfig *dfs_config);
-	int (*session_cvp_dfs_frame)(void *sess,
-		struct msm_cvp_dfsframe *dfs_frame);
-	int (*session_cvp_send_cmd)(void *sess,
-		struct cvp_frame_data *input_frame);
+	int (*session_send)(void *sess,
+		struct cvp_kmd_hfi_packet *in_pkt);
 	int (*session_get_buf_req)(void *sess);
 	int (*session_flush)(void *sess, enum hal_flush flush_mode);
 	int (*session_set_property)(void *sess, enum hal_property ptype,
@@ -1490,9 +1499,8 @@
 		hfi_cmd_response_callback callback);
 void cvp_hfi_deinitialize(enum msm_cvp_hfi_type hfi_type,
 			struct hfi_device *hdev);
-u32 cvp_get_hfi_domain(enum hal_domain hal_domain);
-u32 cvp_get_hfi_codec(enum hal_video_codec hal_codec);
-enum hal_domain cvp_get_hal_domain(u32 hfi_domain);
-enum hal_video_codec cvp_get_hal_codec(u32 hfi_codec);
+
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr);
+extern const struct msm_cvp_hfi_defs cvp_hfi_defs[];
 
 #endif /*__CVP_HFI_API_H__ */
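
The *_CMD_SIZE values above are expressed in 32-bit words, not bytes, and each buffer entry occupies two words (an fd and a size, matching struct buf_desc), so the constants are internally consistent: buf_offset + 2 * buf_num equals the command size for every frame-style entry (DFS frame: 8 + 2*4 = 16; DME frame: 12 + 2*8 = 28; persist: 7 + 2*2 = 11; DS: 48 + 2*3 = 54). An illustrative consistency check one could derive from that relation:

	/* Sanity sketch: a command's buffer list must end exactly at the
	 * end of the packet, with two words (fd, size) per buffer.
	 */
	static bool defs_entry_is_consistent(const struct msm_cvp_hfi_defs *d)
	{
		if (!d->buf_num)
			return true;	/* config commands carry no buffer list */
		return d->buf_offset + 2 * d->buf_num == d->size;
	}
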
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
index f43be67..4707ec1 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
@@ -196,6 +196,8 @@
 	(HFI_PROPERTY_SYS_COMMON_START + 0x006)
 #define  HFI_PROPERTY_SYS_CONFIG_COVERAGE    \
 	(HFI_PROPERTY_SYS_COMMON_START + 0x007)
+#define  HFI_PROPERTY_SYS_UBWC_CONFIG    \
+	(HFI_PROPERTY_SYS_COMMON_START + 0x008)
 
 #define HFI_PROPERTY_PARAM_COMMON_START	\
 	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
@@ -966,6 +968,25 @@
 	u32 rg_buffer_addr[1];
 };
 
+struct hfi_cmd_sys_set_ubwc_config_packet_type {
+	u32 size;
+	u32 packet_type;
+	struct {
+		u32 max_channel_override : 1;
+		u32 mal_length_override : 1;
+		u32 hb_override : 1;
+		u32 bank_swzl_level_override : 1;
+		u32 bank_spreading_override : 1;
+		u32 reserved : 27;
+	} override_bit_info;
+	u32 max_channels;
+	u32 mal_length;
+	u32 highest_bank_bit;
+	u32 bank_swzl_level;
+	u32 bank_spreading;
+	u32 reserved[2];
+};
+
 struct hfi_cmd_session_set_property_packet {
 	u32 size;
 	u32 packet_type;
@@ -999,7 +1020,7 @@
 	u32 buffer_idx;
 };
 
-struct hfi_msg_session_cvp_release_buffers_done_packet {
+struct hfi_msg_session_hdr {
 	u32 size;
 	u32 packet_type;
 	u32 session_id;
@@ -1036,26 +1057,6 @@
 	u32 size;
 };
 
-struct hfi_cmd_session_register_buffers_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 client_data;
-	u32 response_req;
-	u32 num_buffers;
-	struct hfi_buffer_mapping_type buffer[1];
-};
-
-struct hfi_cmd_session_unregister_buffers_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 client_data;
-	u32 response_req;
-	u32 num_buffers;
-	struct hfi_buffer_mapping_type buffer[1];
-};
-
 struct hfi_cmd_session_sync_process_packet {
 	u32 size;
 	u32 packet_type;
@@ -1084,24 +1085,65 @@
 	u32 size;
 	u32 packet_type;
 	u32 session_id;
+	struct hfi_cvp_client_data  client_data;
 	u32 op_conf_id;
 	u32 error_type;
-	struct hfi_cvp_client_data  client_data;
 };
 
 struct hfi_msg_session_cvp_dfs_packet_type {
 	u32 size;
 	u32 packet_type;
 	u32 session_id;
+	struct hfi_cvp_client_data  client_data;
 	u32 error_type;
-	u32 left_view_buffer_index;
-	u32 right_view_buffer_index;
 	u32 width;
 	u32 height;
 	u32 occlusionmask_enable;
-	u32 disparitymap_buffer_idx;
-	u32 occlusionmask_buffer_idx;
+	u32 left_view_buf_addr;
+	u32 left_view_buf_size;
+	u32 right_view_buf_addr;
+	u32 right_view_buf_size;
+	u32 disparity_map_buf_addr;
+	u32 disparity_map_buf_size;
+	u32 occ_mask_buf_addr;
+	u32 occ_mask_buf_size;
+};
+
+struct hfi_msg_session_cvp_dme_packet_type {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
 	struct hfi_cvp_client_data  client_data;
+	u32 error_type;
+	u32 skip_mv_calc;
+	u32 src_buf_addr;
+	u32 src_buf_size;
+	u32 src_frame_ctx_buf_addr;
+	u32 src_frame_ctx_buf_size;
+	u32 ref_buf_addr;
+	u32 ref_buf_size;
+	u32 ref_frame_ctxt_buf_addr;
+	u32 ref_frame_ctxt_buf_size;
+	u32 video_spatial_temp_status_buf_addr;
+	u32 video_spatial_temp_status_buf_size;
+	u32 full_res_height;
+	u32 full_res_width;
+	u32 proc_frame_width;
+	u32 proc_frame_height;
+	u32 transform_confidence;
+};
+
+struct hfi_msg_session_cvp_persist_packet_type {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	struct hfi_cvp_client_data  client_data;
+	u32 error_type;
+	u32 cvp_op;
+	u32 persist1_buffer_fd;
+	u32 persist1_size;
+	u32 persist2_fd;
+	u32 persist2_size;
 };
 
 struct hfi_msg_release_buffer_ref_event_packet {
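
The new UBWC packet above carries explicit override bits so firmware can tell which fields the host actually set. A hedged sketch of filling it; the values are placeholders, and the packet_type wiring (command versus property id) is done by the packetization layer added later in this patch:

	static void example_fill_ubwc_pkt(
		struct hfi_cmd_sys_set_ubwc_config_packet_type *pkt)
	{
		memset(pkt, 0, sizeof(*pkt));
		pkt->size = sizeof(*pkt);
		/* flag each field we override; the rest keep firmware defaults */
		pkt->override_bit_info.max_channel_override = 1;
		pkt->max_channels = 4;		/* placeholder value */
		pkt->override_bit_info.hb_override = 1;
		pkt->highest_bank_bit = 16;	/* placeholder value */
	}
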
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.c b/drivers/media/platform/msm/cvp/hfi_packetization.c
index e675f8a..d8b7cc3 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.c
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.c
@@ -15,222 +15,6 @@
  * sensible index.
  */
 
-static int entropy_mode[] = {
-	[ilog2(HAL_H264_ENTROPY_CAVLC)] = HFI_H264_ENTROPY_CAVLC,
-	[ilog2(HAL_H264_ENTROPY_CABAC)] = HFI_H264_ENTROPY_CABAC,
-};
-
-static int statistics_mode[] = {
-	[ilog2(HAL_STATISTICS_MODE_DEFAULT)] = HFI_STATISTICS_MODE_DEFAULT,
-	[ilog2(HAL_STATISTICS_MODE_1)] = HFI_STATISTICS_MODE_1,
-	[ilog2(HAL_STATISTICS_MODE_2)] = HFI_STATISTICS_MODE_2,
-	[ilog2(HAL_STATISTICS_MODE_3)] = HFI_STATISTICS_MODE_3,
-};
-
-static int color_format[] = {
-	[ilog2(HAL_COLOR_FORMAT_MONOCHROME)] = HFI_COLOR_FORMAT_MONOCHROME,
-	[ilog2(HAL_COLOR_FORMAT_NV12)] = HFI_COLOR_FORMAT_NV12,
-	[ilog2(HAL_COLOR_FORMAT_NV21)] = HFI_COLOR_FORMAT_NV21,
-	[ilog2(HAL_COLOR_FORMAT_NV12_4x4TILE)] = HFI_COLOR_FORMAT_NV12_4x4TILE,
-	[ilog2(HAL_COLOR_FORMAT_NV21_4x4TILE)] = HFI_COLOR_FORMAT_NV21_4x4TILE,
-	[ilog2(HAL_COLOR_FORMAT_YUYV)] = HFI_COLOR_FORMAT_YUYV,
-	[ilog2(HAL_COLOR_FORMAT_YVYU)] = HFI_COLOR_FORMAT_YVYU,
-	[ilog2(HAL_COLOR_FORMAT_UYVY)] = HFI_COLOR_FORMAT_UYVY,
-	[ilog2(HAL_COLOR_FORMAT_VYUY)] = HFI_COLOR_FORMAT_VYUY,
-	[ilog2(HAL_COLOR_FORMAT_RGB565)] = HFI_COLOR_FORMAT_RGB565,
-	[ilog2(HAL_COLOR_FORMAT_BGR565)] = HFI_COLOR_FORMAT_BGR565,
-	[ilog2(HAL_COLOR_FORMAT_RGB888)] = HFI_COLOR_FORMAT_RGB888,
-	[ilog2(HAL_COLOR_FORMAT_BGR888)] = HFI_COLOR_FORMAT_BGR888,
-	/* UBWC Color formats*/
-	[ilog2(HAL_COLOR_FORMAT_NV12_UBWC)] =  HFI_COLOR_FORMAT_NV12_UBWC,
-	[ilog2(HAL_COLOR_FORMAT_NV12_TP10_UBWC)] =
-			HFI_COLOR_FORMAT_YUV420_TP10_UBWC,
-	/*P010 10bit format*/
-	[ilog2(HAL_COLOR_FORMAT_P010)] =  HFI_COLOR_FORMAT_P010,
-	[ilog2(HAL_COLOR_FORMAT_NV12_512)] = HFI_COLOR_FORMAT_NV12,
-};
-
-static int nal_type[] = {
-	[ilog2(HAL_NAL_FORMAT_STARTCODES)] = HFI_NAL_FORMAT_STARTCODES,
-	[ilog2(HAL_NAL_FORMAT_ONE_NAL_PER_BUFFER)] =
-		HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER,
-	[ilog2(HAL_NAL_FORMAT_ONE_BYTE_LENGTH)] =
-		HFI_NAL_FORMAT_ONE_BYTE_LENGTH,
-	[ilog2(HAL_NAL_FORMAT_TWO_BYTE_LENGTH)] =
-		HFI_NAL_FORMAT_TWO_BYTE_LENGTH,
-	[ilog2(HAL_NAL_FORMAT_FOUR_BYTE_LENGTH)] =
-		HFI_NAL_FORMAT_FOUR_BYTE_LENGTH,
-};
-
-static inline int hal_to_hfi_type(int property, int hal_type)
-{
-	if (hal_type <= 0 || roundup_pow_of_two(hal_type) != hal_type) {
-		/*
-		 * Not a power of 2, it's not going
-		 * to be in any of the tables anyway
-		 */
-		return -EINVAL;
-	}
-
-	if (hal_type)
-		hal_type = ilog2(hal_type);
-
-	switch (property) {
-	case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
-		return (hal_type >= ARRAY_SIZE(entropy_mode)) ?
-			-ENOTSUPP : entropy_mode[hal_type];
-	case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
-		return (hal_type >= ARRAY_SIZE(color_format)) ?
-			-ENOTSUPP : color_format[hal_type];
-	case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
-		return (hal_type >= ARRAY_SIZE(nal_type)) ?
-			-ENOTSUPP : nal_type[hal_type];
-	case HAL_PARAM_VENC_MBI_STATISTICS_MODE:
-		return (hal_type >= ARRAY_SIZE(statistics_mode)) ?
-			-ENOTSUPP : statistics_mode[hal_type];
-	default:
-		return -ENOTSUPP;
-	}
-}
-
-enum hal_domain cvp_get_hal_domain(u32 hfi_domain)
-{
-	enum hal_domain hal_domain = 0;
-
-	switch (hfi_domain) {
-	case HFI_VIDEO_DOMAIN_VPE:
-		hal_domain = HAL_VIDEO_DOMAIN_VPE;
-		break;
-	case HFI_VIDEO_DOMAIN_ENCODER:
-		hal_domain = HAL_VIDEO_DOMAIN_ENCODER;
-		break;
-	case HFI_VIDEO_DOMAIN_DECODER:
-		hal_domain = HAL_VIDEO_DOMAIN_DECODER;
-		break;
-	case HFI_VIDEO_DOMAIN_CVP:
-		hal_domain = HAL_VIDEO_DOMAIN_CVP;
-		break;
-	default:
-		dprintk(CVP_ERR, "%s: invalid domain %x\n",
-			__func__, hfi_domain);
-		hal_domain = 0;
-		break;
-	}
-	return hal_domain;
-}
-
-enum hal_video_codec cvp_get_hal_codec(u32 hfi_codec)
-{
-	enum hal_video_codec hal_codec = 0;
-
-	switch (hfi_codec) {
-	case HFI_VIDEO_CODEC_H264:
-		hal_codec = HAL_VIDEO_CODEC_H264;
-		break;
-	case HFI_VIDEO_CODEC_MPEG1:
-		hal_codec = HAL_VIDEO_CODEC_MPEG1;
-		break;
-	case HFI_VIDEO_CODEC_MPEG2:
-		hal_codec = HAL_VIDEO_CODEC_MPEG2;
-		break;
-	case HFI_VIDEO_CODEC_VP8:
-		hal_codec = HAL_VIDEO_CODEC_VP8;
-		break;
-	case HFI_VIDEO_CODEC_HEVC:
-		hal_codec = HAL_VIDEO_CODEC_HEVC;
-		break;
-	case HFI_VIDEO_CODEC_VP9:
-		hal_codec = HAL_VIDEO_CODEC_VP9;
-		break;
-	case HFI_VIDEO_CODEC_TME:
-		hal_codec = HAL_VIDEO_CODEC_TME;
-		break;
-	case HFI_VIDEO_CODEC_CVP:
-		hal_codec = HAL_VIDEO_CODEC_CVP;
-		break;
-	default:
-		dprintk(CVP_INFO, "%s: invalid codec 0x%x\n",
-			__func__, hfi_codec);
-		hal_codec = 0;
-		break;
-	}
-	return hal_codec;
-}
-
-u32 cvp_get_hfi_domain(enum hal_domain hal_domain)
-{
-	u32 hfi_domain;
-
-	switch (hal_domain) {
-	case HAL_VIDEO_DOMAIN_VPE:
-		hfi_domain = HFI_VIDEO_DOMAIN_VPE;
-		break;
-	case HAL_VIDEO_DOMAIN_ENCODER:
-		hfi_domain = HFI_VIDEO_DOMAIN_ENCODER;
-		break;
-	case HAL_VIDEO_DOMAIN_DECODER:
-		hfi_domain = HFI_VIDEO_DOMAIN_DECODER;
-		break;
-	case HAL_VIDEO_DOMAIN_CVP:
-		hfi_domain = HFI_VIDEO_DOMAIN_CVP;
-		break;
-	default:
-		dprintk(CVP_ERR, "%s: invalid domain 0x%x\n",
-			__func__, hal_domain);
-		hfi_domain = 0;
-		break;
-	}
-	return hfi_domain;
-}
-
-u32 cvp_get_hfi_codec(enum hal_video_codec hal_codec)
-{
-	u32 hfi_codec = 0;
-
-	switch (hal_codec) {
-	case HAL_VIDEO_CODEC_H264:
-		hfi_codec = HFI_VIDEO_CODEC_H264;
-		break;
-	case HAL_VIDEO_CODEC_MPEG1:
-		hfi_codec = HFI_VIDEO_CODEC_MPEG1;
-		break;
-	case HAL_VIDEO_CODEC_MPEG2:
-		hfi_codec = HFI_VIDEO_CODEC_MPEG2;
-		break;
-	case HAL_VIDEO_CODEC_VP8:
-		hfi_codec = HFI_VIDEO_CODEC_VP8;
-		break;
-	case HAL_VIDEO_CODEC_HEVC:
-		hfi_codec = HFI_VIDEO_CODEC_HEVC;
-		break;
-	case HAL_VIDEO_CODEC_VP9:
-		hfi_codec = HFI_VIDEO_CODEC_VP9;
-		break;
-	case HAL_VIDEO_CODEC_TME:
-		hfi_codec = HFI_VIDEO_CODEC_TME;
-		break;
-	case HAL_VIDEO_CODEC_CVP:
-		hfi_codec = HFI_VIDEO_CODEC_CVP;
-		break;
-	default:
-		dprintk(CVP_INFO, "%s: invalid codec 0x%x\n",
-			__func__, hal_codec);
-		hfi_codec = 0;
-		break;
-	}
-	return hfi_codec;
-}
-
-static void create_pkt_enable(void *pkt, u32 type, bool enable)
-{
-	u32 *pkt_header = pkt;
-	u32 *pkt_type = &pkt_header[0];
-	struct hfi_enable *hfi_enable = (struct hfi_enable *)&pkt_header[1];
-
-	*pkt_type = type;
-	hfi_enable->enable = enable;
-}
-
 int cvp_create_pkt_cmd_sys_init(struct hfi_cmd_sys_init_packet *pkt,
 			   u32 arch_type)
 {
@@ -426,6 +210,50 @@
 	return rc;
 }
 
+static int create_pkt_cmd_sys_ubwc_config(
+		struct hfi_cmd_sys_set_property_packet *pkt,
+		struct msm_cvp_ubwc_config_data *ubwc_config)
+{
+	int rc = 0;
+	struct hfi_cmd_sys_set_ubwc_config_packet_type *hfi;
+
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
+		sizeof(struct hfi_cmd_sys_set_ubwc_config_packet_type) +
+		sizeof(u32);
+
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_UBWC_CONFIG;
+	hfi = (struct hfi_cmd_sys_set_ubwc_config_packet_type *)
+		&pkt->rg_property_data[1];
+
+	hfi->max_channels = ubwc_config->max_channels;
+	hfi->override_bit_info.max_channel_override =
+		ubwc_config->override_bit_info.max_channel_override;
+
+	hfi->mal_length = ubwc_config->mal_length;
+	hfi->override_bit_info.mal_length_override =
+		ubwc_config->override_bit_info.mal_length_override;
+
+	hfi->highest_bank_bit = ubwc_config->highest_bank_bit;
+	hfi->override_bit_info.hb_override =
+		ubwc_config->override_bit_info.hb_override;
+
+	hfi->bank_swzl_level = ubwc_config->bank_swzl_level;
+	hfi->override_bit_info.bank_swzl_level_override =
+		ubwc_config->override_bit_info.bank_swzl_level_override;
+
+	hfi->bank_spreading = ubwc_config->bank_spreading;
+	hfi->override_bit_info.bank_spreading_override =
+		ubwc_config->override_bit_info.bank_spreading_override;
+	hfi->size = sizeof(struct hfi_cmd_sys_set_ubwc_config_packet_type);
+
+	return rc;
+}
+
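
create_pkt_cmd_sys_ubwc_config() packs one system property: the header, a
single property id (HFI_PROPERTY_SYS_UBWC_CONFIG), and the UBWC payload whose
override_bit_info flags tell firmware which fields to honor. A caller-side
usage sketch, assuming a hypothetical transport helper __write_queue() and a
stack buffer sized generously for the payload:

static int send_ubwc_config(struct hfi_packetization_ops *ops,
		struct msm_cvp_ubwc_config_data *cfg)
{
	u8 raw[256];	/* assumed ample for header + UBWC payload */
	struct hfi_cmd_sys_set_property_packet *pkt = (void *)raw;
	int rc;

	rc = ops->sys_ubwc_config(pkt, cfg);
	if (rc)
		return rc;

	/* pkt->size now covers header, property id and payload */
	return __write_queue(pkt);	/* hypothetical queue write */
}
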
 int cvp_create_pkt_cmd_session_cmd(struct cvp_hal_session_cmd_pkt *pkt,
 			int pkt_type, struct hal_session *session)
 {
@@ -508,135 +336,6 @@
 	return buffer;
 }
 
-static int get_hfi_extradata_index(enum hal_extradata_id index)
-{
-	int ret = 0;
-
-	switch (index) {
-	case HAL_EXTRADATA_INTERLACE_VIDEO:
-		ret = HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_TIMESTAMP:
-		ret = HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_S3D_FRAME_PACKING:
-		ret = HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_FRAME_RATE:
-		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_PANSCAN_WINDOW:
-		ret = HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_RECOVERY_POINT_SEI:
-		ret = HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_NUM_CONCEALED_MB:
-		ret = HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB;
-		break;
-	case HAL_EXTRADATA_ASPECT_RATIO:
-	case HAL_EXTRADATA_OUTPUT_CROP:
-		ret = HFI_PROPERTY_PARAM_INDEX_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_MPEG2_SEQDISP:
-		ret = HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_STREAM_USERDATA:
-		ret = HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_FRAME_QP:
-		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_LTR_INFO:
-		ret = HFI_PROPERTY_PARAM_VENC_LTR_INFO;
-		break;
-	case HAL_EXTRADATA_ROI_QP:
-		ret = HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI:
-		ret =
-		HFI_PROPERTY_PARAM_VDEC_MASTERING_DISPLAY_COLOUR_SEI_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
-		ret = HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_VUI_DISPLAY_INFO:
-		ret = HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_VPX_COLORSPACE:
-		ret = HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_UBWC_CR_STATS_INFO:
-		ret = HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_HDR10PLUS_METADATA:
-		ret = HFI_PROPERTY_PARAM_VENC_HDR10PLUS_METADATA_EXTRADATA;
-		break;
-	default:
-		dprintk(CVP_WARN, "Extradata index not found: %d\n", index);
-		break;
-	}
-	return ret;
-}
-
-static int get_hfi_extradata_id(enum hal_extradata_id index)
-{
-	int ret = 0;
-
-	switch (index) {
-	case HAL_EXTRADATA_ASPECT_RATIO:
-		ret = MSM_CVP_EXTRADATA_ASPECT_RATIO;
-		break;
-	case HAL_EXTRADATA_OUTPUT_CROP:
-		ret = MSM_CVP_EXTRADATA_OUTPUT_CROP;
-		break;
-	default:
-		ret = get_hfi_extradata_index(index);
-		break;
-	}
-	return ret;
-}
-
-static u32 get_hfi_ltr_mode(enum ltr_mode ltr_mode_type)
-{
-	u32 ltrmode;
-
-	switch (ltr_mode_type) {
-	case HAL_LTR_MODE_DISABLE:
-		ltrmode = HFI_LTR_MODE_DISABLE;
-		break;
-	case HAL_LTR_MODE_MANUAL:
-		ltrmode = HFI_LTR_MODE_MANUAL;
-		break;
-	default:
-		dprintk(CVP_ERR, "Invalid ltr mode: %#x\n",
-			ltr_mode_type);
-		ltrmode = HFI_LTR_MODE_DISABLE;
-		break;
-	}
-	return ltrmode;
-}
-
-static u32 get_hfi_work_mode(enum hal_work_mode work_mode)
-{
-	u32 hfi_work_mode;
-
-	switch (work_mode) {
-	case CVP_WORK_MODE_1:
-		hfi_work_mode = HFI_WORKMODE_1;
-		break;
-	case CVP_WORK_MODE_2:
-		hfi_work_mode = HFI_WORKMODE_2;
-		break;
-	default:
-		dprintk(CVP_ERR, "Invalid work mode: %#x\n",
-			work_mode);
-		hfi_work_mode = HFI_WORKMODE_2;
-		break;
-	}
-	return hfi_work_mode;
-}
-
 int cvp_create_pkt_cmd_session_set_buffers(
 		struct hfi_cmd_session_cvp_set_buffers_packet *pkt,
 		struct hal_session *session,
@@ -666,186 +365,24 @@
 
 	pkt->packet_type = HFI_CMD_SESSION_CVP_RELEASE_BUFFERS;
 	pkt->session_id = hash32_ptr(session);
-	pkt->buffer_type = 0xdeadbeef;
-	pkt->num_buffers = 0;
-	pkt->size = sizeof(struct hfi_cmd_session_cvp_release_buffers_packet);
+	pkt->num_buffers = buffer_info->num_buffers;
+
+	if (buffer_info->buffer_type == HAL_BUFFER_OUTPUT ||
+		buffer_info->buffer_type == HAL_BUFFER_OUTPUT2) {
+		dprintk(CVP_ERR, "%s: deprecated buffer_type\n", __func__);
+		return -EINVAL;
+	}
+
+	pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
+		((buffer_info->num_buffers - 1) * sizeof(u32));
+
+	pkt->buffer_type = get_hfi_buffer(buffer_info->buffer_type);
+	if (!pkt->buffer_type)
+		return -EINVAL;
 
 	return 0;
 }
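
The size arithmetic above follows the usual HFI idiom for variable-length
packets: the struct ends in a one-element array, so the first buffer id is
already counted in sizeof() and only the remaining (num_buffers - 1) ids add
to the size. An illustrative sketch with an invented two-field packet:

struct demo_buffers_pkt {
	u32 size;
	u32 num_buffers;
	u32 buffer_id[1];	/* first id included in sizeof() */
};

static u32 demo_buffers_pkt_bytes(u32 num_buffers)
{
	return sizeof(struct demo_buffers_pkt) +
		(num_buffers - 1) * sizeof(u32);
}
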
 
-int cvp_create_pkt_cmd_session_register_buffer(
-		struct hfi_cmd_session_register_buffers_packet *pkt,
-		struct hal_session *session,
-		struct cvp_register_buffer *buffer)
-{
-	int rc = 0, i;
-	struct hfi_buffer_mapping_type *buf;
-
-	if (!pkt || !session) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	pkt->packet_type = HFI_CMD_SESSION_REGISTER_BUFFERS;
-	pkt->session_id = hash32_ptr(session);
-	pkt->client_data = buffer->client_data;
-	pkt->response_req = buffer->response_required;
-	pkt->num_buffers = 1;
-	pkt->size = sizeof(struct hfi_cmd_session_register_buffers_packet) -
-			sizeof(u32) + (pkt->num_buffers *
-			sizeof(struct hfi_buffer_mapping_type));
-
-	buf = (struct hfi_buffer_mapping_type *)pkt->buffer;
-	for (i = 0; i < pkt->num_buffers; i++) {
-		buf->index = buffer->index;
-		buf->device_addr = buffer->device_addr;
-		buf->size = buffer->size;
-		buf++;
-	}
-
-	return rc;
-}
-
-int cvp_create_pkt_cmd_session_unregister_buffer(
-		struct hfi_cmd_session_unregister_buffers_packet *pkt,
-		struct hal_session *session,
-		struct cvp_unregister_buffer *buffer)
-{
-	int rc = 0, i;
-	struct hfi_buffer_mapping_type *buf;
-
-	if (!pkt || !session) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	pkt->packet_type = HFI_CMD_SESSION_UNREGISTER_BUFFERS;
-	pkt->session_id = hash32_ptr(session);
-	pkt->client_data = buffer->client_data;
-	pkt->response_req = buffer->response_required;
-	pkt->num_buffers = 1;
-	pkt->size = sizeof(struct hfi_cmd_session_unregister_buffers_packet) -
-			sizeof(u32) + (pkt->num_buffers *
-			sizeof(struct hfi_buffer_mapping_type));
-
-	buf = (struct hfi_buffer_mapping_type *)pkt->buffer;
-	for (i = 0; i < pkt->num_buffers; i++) {
-		buf->index = buffer->index;
-		buf->device_addr = buffer->device_addr;
-		buf->size = buffer->size;
-		buf++;
-	}
-
-	return rc;
-}
-
-int cvp_create_pkt_cmd_session_etb_decoder(
-	struct hfi_cmd_session_empty_buffer_compressed_packet *pkt,
-	struct hal_session *session, struct cvp_frame_data *input_frame)
-{
-	int rc = 0;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	pkt->size =
-		sizeof(struct hfi_cmd_session_empty_buffer_compressed_packet);
-	pkt->packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
-	pkt->session_id = hash32_ptr(session);
-	pkt->time_stamp_hi = upper_32_bits(input_frame->timestamp);
-	pkt->time_stamp_lo = lower_32_bits(input_frame->timestamp);
-	pkt->flags = input_frame->flags;
-	pkt->mark_target = input_frame->mark_target;
-	pkt->mark_data = input_frame->mark_data;
-	pkt->offset = input_frame->offset;
-	pkt->alloc_len = input_frame->alloc_len;
-	pkt->filled_len = input_frame->filled_len;
-	pkt->input_tag = input_frame->clnt_data;
-	pkt->packet_buffer = (u32)input_frame->device_addr;
-
-	trace_msm_v4l2_cvp_buffer_event_start("ETB",
-		input_frame->device_addr, input_frame->timestamp,
-		input_frame->alloc_len, input_frame->filled_len,
-		input_frame->offset);
-
-	if (!pkt->packet_buffer)
-		rc = -EINVAL;
-	return rc;
-}
-
-int cvp_create_pkt_cmd_session_etb_encoder(
-	struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet *pkt,
-	struct hal_session *session, struct cvp_frame_data *input_frame)
-{
-	int rc = 0;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	pkt->size = sizeof(struct
-		hfi_cmd_session_empty_buffer_uncompressed_plane0_packet);
-	pkt->packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
-	pkt->session_id = hash32_ptr(session);
-	pkt->view_id = 0;
-	pkt->time_stamp_hi = upper_32_bits(input_frame->timestamp);
-	pkt->time_stamp_lo = lower_32_bits(input_frame->timestamp);
-	pkt->flags = input_frame->flags;
-	pkt->mark_target = input_frame->mark_target;
-	pkt->mark_data = input_frame->mark_data;
-	pkt->offset = input_frame->offset;
-	pkt->alloc_len = input_frame->alloc_len;
-	pkt->filled_len = input_frame->filled_len;
-	pkt->input_tag = input_frame->clnt_data;
-	pkt->packet_buffer = (u32)input_frame->device_addr;
-	pkt->extra_data_buffer = (u32)input_frame->extradata_addr;
-
-	trace_msm_v4l2_cvp_buffer_event_start("ETB",
-		input_frame->device_addr, input_frame->timestamp,
-		input_frame->alloc_len, input_frame->filled_len,
-		input_frame->offset);
-
-	if (!pkt->packet_buffer)
-		rc = -EINVAL;
-	return rc;
-}
-
-int cvp_create_pkt_cmd_session_ftb(
-		struct hfi_cmd_session_fill_buffer_packet *pkt,
-		struct hal_session *session,
-		struct cvp_frame_data *output_frame)
-{
-	int rc = 0;
-
-	if (!pkt || !session || !output_frame)
-		return -EINVAL;
-
-	pkt->size = sizeof(struct hfi_cmd_session_fill_buffer_packet);
-	pkt->packet_type = HFI_CMD_SESSION_FILL_BUFFER;
-	pkt->session_id = hash32_ptr(session);
-
-	if (output_frame->buffer_type == HAL_BUFFER_OUTPUT)
-		pkt->stream_id = 0;
-	else if (output_frame->buffer_type == HAL_BUFFER_OUTPUT2)
-		pkt->stream_id = 1;
-
-	if (!output_frame->device_addr)
-		return -EINVAL;
-
-	pkt->packet_buffer = (u32)output_frame->device_addr;
-	pkt->extra_data_buffer = (u32)output_frame->extradata_addr;
-	pkt->alloc_len = output_frame->alloc_len;
-	pkt->filled_len = output_frame->filled_len;
-	pkt->offset = output_frame->offset;
-	pkt->rgData[0] = output_frame->extradata_size;
-
-	trace_msm_v4l2_cvp_buffer_event_start("FTB",
-		output_frame->device_addr, output_frame->timestamp,
-		output_frame->alloc_len, output_frame->filled_len,
-		output_frame->offset);
-
-	return rc;
-}
-
 int cvp_create_pkt_cmd_session_get_buf_req(
 		struct hfi_cmd_session_get_property_packet *pkt,
 		struct hal_session *session)
@@ -864,161 +401,40 @@
 	return rc;
 }
 
-int cvp_create_pkt_cmd_session_cvp_dfs_config(
-		struct hfi_cmd_session_cvp_dfs_config *pkt,
+int cvp_create_pkt_cmd_session_send(
+		struct cvp_kmd_hfi_packet *out_pkt,
 		struct hal_session *session,
-		struct msm_cvp_dfsconfig *dfs_config)
+		struct cvp_kmd_hfi_packet *in_pkt)
 {
-	int rc = 0, i = 0;
+	int def_idx;
+	struct cvp_hal_session_cmd_pkt *ptr =
+		(struct cvp_hal_session_cmd_pkt *)in_pkt;
 
-	if (!pkt || !session)
+	if (!out_pkt || !in_pkt || !session)
 		return -EINVAL;
 
-	pkt->size = sizeof(struct hfi_cmd_session_cvp_dfs_config);
-	pkt->packet_type = HFI_CMD_SESSION_CVP_DFS_CONFIG;
-	pkt->session_id = hash32_ptr(session);
-	pkt->srcbuffer_format = dfs_config->srcbuffer_format;
-	for (i = 0; i < HFI_MAX_PLANES; i++) {
-		pkt->left_plane_info.stride[i] =
-			dfs_config->left_plane_info.stride[i];
-		pkt->left_plane_info.buf_size[i] =
-			dfs_config->left_plane_info.buf_size[i];
-		pkt->right_plane_info.stride[i] =
-			dfs_config->right_plane_info.stride[i];
-		pkt->right_plane_info.buf_size[i] =
-			dfs_config->right_plane_info.buf_size[i];
-	}
-	pkt->width = dfs_config->width;
-	pkt->height = dfs_config->height;
-	pkt->occlusionmask_enable = dfs_config->occlusionmask_enable;
-	pkt->occlusioncost = dfs_config->occlusioncost;
-	pkt->occlusionbound = dfs_config->occlusionbound;
-	pkt->occlusionshift = dfs_config->occlusionshift;
-	pkt->maxdisparity = dfs_config->maxdisparity;
-	pkt->disparityoffset = dfs_config->disparityoffset;
-	pkt->medianfilter_enable = dfs_config->medianfilter_enable;
-	pkt->occlusionfilling_enable = dfs_config->occlusionfilling_enable;
-	pkt->occlusionmaskdump = dfs_config->occlusionmaskdump;
-	pkt->clientdata.transactionid =
-		dfs_config->clientdata.transactionid;
-	pkt->clientdata.client_data1  =
-		dfs_config->clientdata.client_data1;
-	pkt->clientdata.client_data2  =
-		dfs_config->clientdata.client_data2;
+	def_idx = get_pkt_index(ptr);
+	if (def_idx < 0)
+		goto error_hfi_packet;
 
-	dprintk(CVP_DBG,
-		"%s: size=%d packet_type=%d session_id=%d height=%d", __func__,
-		pkt->size, pkt->packet_type, pkt->session_id, pkt->height);
-	dprintk(CVP_DBG,
-		"occlusionmask_enable=%d occlusioncost=%d occlusionbound=%d",
-		pkt->occlusionmask_enable, pkt->occlusioncost,
-		pkt->occlusionbound);
-	dprintk(CVP_DBG,
-		"occlusionshift=%d maxdisparity=%d disparityoffset=%d",
-		pkt->occlusionshift, pkt->maxdisparity,
-		pkt->disparityoffset);
-	dprintk(CVP_DBG,
-		"medianfilter_enable=%d occlusionfilling_enable=%d occlusionmaskdump=%d",
-		pkt->medianfilter_enable, pkt->occlusionfilling_enable,
-		pkt->occlusionmaskdump);
-	dprintk(CVP_DBG,
-		"left_plane_info.ActualStride[HFI_COLOR_PLANE_PICDATA]:%u",
-		pkt->left_plane_info.stride[HFI_COLOR_PLANE_PICDATA]
-		);
-	dprintk(CVP_DBG,
-		"LeftViewColPlaneInfo.ActualStride[HFI_COLOR_PLANE_METADATA]:%u",
-		pkt->left_plane_info.stride[HFI_COLOR_PLANE_METADATA]
-		);
-	dprintk(CVP_DBG,
-		"LeftViewColPlaneInfo.ActualBufSize[HFI_COLOR_PLANE_PICDATA]:%u,",
-		pkt->left_plane_info.buf_size[HFI_COLOR_PLANE_PICDATA]
-		);
-	dprintk(CVP_DBG,
-		"LeftViewColPlaneInfo.ActualBufSize[HFI_COLOR_PLANE_METADATA]%u",
-		pkt->left_plane_info.buf_size[HFI_COLOR_PLANE_METADATA]
-		);
-	dprintk(CVP_DBG,
-		"RightViewColPlaneInfo:stride[HFI_COLOR_PLANE_PICDATA]:%u",
-		pkt->right_plane_info.stride[HFI_COLOR_PLANE_PICDATA]
-		);
-	dprintk(CVP_DBG,
-		"RightViewColPlaneInfo.stride[HFI_COLOR_PLANE_METADATA]:%u",
-		pkt->right_plane_info.stride[HFI_COLOR_PLANE_METADATA]
-		);
-	dprintk(CVP_DBG,
-		"RightViewColPlaneInfo.ActualBufSize[HFI_COLOR_PLANE_PICDATA] :%u",
-		pkt->right_plane_info.buf_size[HFI_COLOR_PLANE_PICDATA]
-		);
-	dprintk(CVP_DBG,
-	"RightViewColPlaneInfo.ActualBufSize[HFI_COLOR_PLANE_METADATA] %u",
-	pkt->right_plane_info.buf_size[HFI_COLOR_PLANE_METADATA]
-	);
+	if (cvp_hfi_defs[def_idx].type != ptr->packet_type)
+		goto error_hfi_packet;
 
-	return rc;
-}
+	if ((cvp_hfi_defs[def_idx].size * sizeof(unsigned int)) != ptr->size)
+		goto error_hfi_packet;
 
+	if (ptr->session_id != hash32_ptr(session))
+		goto error_hfi_packet;
 
-int cvp_create_pkt_cmd_session_cvp_dfs_frame(
-		struct hfi_cmd_session_cvp_dfs_frame *pkt,
-		struct hal_session *session,
-		struct msm_cvp_dfsframe *dfs_frame)
-{
-	int rc = 0;
+	memcpy(out_pkt, in_pkt, ptr->size);
 
-	if (!pkt || !session)
-		return -EINVAL;
+	return 0;
 
-	pkt->size = sizeof(struct hfi_cmd_session_cvp_dfs_frame);
-	pkt->packet_type = HFI_CMD_SESSION_CVP_DFS_FRAME;
-	pkt->session_id = hash32_ptr(session);
-	pkt->left_buffer_index = dfs_frame->left_buffer_index;
-	pkt->right_buffer_index = dfs_frame->right_buffer_index;
-	pkt->disparitymap_buffer_idx = dfs_frame->disparitymap_buffer_idx;
-	pkt->occlusionmask_buffer_idx = dfs_frame->occlusionmask_buffer_idx;
-	pkt->clientdata.transactionid = dfs_frame->clientdata.transactionid;
-	pkt->clientdata.client_data1 = dfs_frame->clientdata.client_data1;
+error_hfi_packet:
+	dprintk(CVP_ERR, "%s incorrect packet: size=%d type=%d sessionid=%d\n",
+		__func__, ptr->size, ptr->packet_type, ptr->session_id);
 
-	dprintk(CVP_DBG,
-		"%s: size=%d, packet_type=%d session_id=%d left_buffer_index=%d",
-		__func__, pkt->size, pkt->packet_type, pkt->session_id,
-		pkt->left_buffer_index);
-	dprintk(CVP_DBG,
-		"right_buffer_index=%d disparitymap_buffer_idx=%d",
-		pkt->right_buffer_index, pkt->disparitymap_buffer_idx);
-	dprintk(CVP_DBG,
-		"occlusionmask_buffer_idx=%d ",
-			pkt->occlusionmask_buffer_idx);
-
-	return rc;
-}
-
-
-int cvp_create_pkt_cmd_session_flush(struct hfi_cmd_session_flush_packet *pkt,
-			struct hal_session *session, enum hal_flush flush_mode)
-{
-	int rc = 0;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	pkt->size = sizeof(struct hfi_cmd_session_flush_packet);
-	pkt->packet_type = HFI_CMD_SESSION_FLUSH;
-	pkt->session_id = hash32_ptr(session);
-	switch (flush_mode) {
-	case HAL_FLUSH_INPUT:
-		pkt->flush_type = HFI_FLUSH_INPUT;
-		break;
-	case HAL_FLUSH_OUTPUT:
-		pkt->flush_type = HFI_FLUSH_OUTPUT;
-		break;
-	case HAL_FLUSH_ALL:
-		pkt->flush_type = HFI_FLUSH_ALL;
-		break;
-	default:
-		dprintk(CVP_ERR, "Invalid flush mode: %#x\n", flush_mode);
-		return -EINVAL;
-	}
-	return rc;
+	return -EINVAL;
 }
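
cvp_create_pkt_cmd_session_send() replaces the per-command packetizers with a
single table-driven check: the user packet is accepted only if its type is
known, its size matches the definition (stored in 32-bit words, hence the
multiply by sizeof(unsigned int)), and its session id matches the kernel's
hash of the session pointer. A sketch of what such a definitions table could
look like; the entry values here are invented:

struct demo_hfi_def {
	unsigned int type;	/* HFI_CMD_SESSION_CVP_* packet id */
	unsigned int size;	/* expected packet size in u32 words */
};

static const struct demo_hfi_def demo_defs[] = {
	{ .type = 0x21000, .size = 10 },	/* placeholder entry */
};

static int demo_get_pkt_index(unsigned int type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_defs); i++)
		if (demo_defs[i].type == type)
			return i;
	return -EINVAL;	/* unknown packet type */
}
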
 
 int cvp_create_pkt_cmd_session_get_property(
@@ -1036,1076 +452,10 @@
 		struct hal_session *session,
 		enum hal_property ptype, void *pdata)
 {
-	int rc = 0;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	pkt->size = sizeof(struct hfi_cmd_session_set_property_packet);
-	pkt->packet_type = HFI_CMD_SESSION_SET_PROPERTY;
-	pkt->session_id = hash32_ptr(session);
-	pkt->num_properties = 1;
-
-	dprintk(CVP_DBG, "Setting HAL Property = 0x%x\n", ptype);
-
-	switch (ptype) {
-	case HAL_CONFIG_FRAME_RATE:
-	{
-		u32 buffer_type;
-		struct hfi_frame_rate *hfi;
-		struct hal_frame_rate *prop = (struct hal_frame_rate *) pdata;
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_FRAME_RATE;
-		hfi = (struct hfi_frame_rate *) &pkt->rg_property_data[1];
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		hfi->frame_rate = prop->frame_rate;
-		pkt->size += sizeof(struct hfi_frame_rate);
-		break;
-	}
-	case HAL_CONFIG_OPERATING_RATE:
-	{
-		struct hfi_operating_rate *hfi;
-		struct hal_operating_rate *prop =
-			(struct hal_operating_rate *) pdata;
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_OPERATING_RATE;
-		hfi = (struct hfi_operating_rate *) &pkt->rg_property_data[1];
-		hfi->operating_rate = prop->operating_rate;
-		pkt->size += sizeof(struct hfi_operating_rate);
-		break;
-	}
-	case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
-	{
-		u32 buffer_type;
-		struct hfi_uncompressed_format_select *hfi;
-		struct hal_uncompressed_format_select *prop =
-			(struct hal_uncompressed_format_select *) pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
-
-		hfi = (struct hfi_uncompressed_format_select *)
-					&pkt->rg_property_data[1];
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-		hfi->format = hal_to_hfi_type(
-				HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
-				prop->format);
-		pkt->size += sizeof(struct hfi_uncompressed_format_select);
-		break;
-	}
-	case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO:
-	{
-		struct hfi_uncompressed_plane_actual_constraints_info *hfi;
-		struct hal_uncompressed_plane_actual_constraints_info *prop =
-		(struct hal_uncompressed_plane_actual_constraints_info *) pdata;
-		u32 buffer_type;
-		u32 num_plane = prop->num_planes;
-		u32 hfi_pkt_size =
-			2 * sizeof(u32)
-			+ num_plane
-			* sizeof(struct hal_uncompressed_plane_constraints);
-
-		pkt->rg_property_data[0] =
-		HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO;
-
-		hfi = (struct hfi_uncompressed_plane_actual_constraints_info *)
-					&pkt->rg_property_data[1];
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		hfi->num_planes = prop->num_planes;
-		memcpy(hfi->rg_plane_format, prop->rg_plane_format,
-			hfi->num_planes
-			*sizeof(struct hal_uncompressed_plane_constraints));
-		pkt->size += hfi_pkt_size;
-		break;
-	}
-	case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO:
-		break;
-	case HAL_PARAM_FRAME_SIZE:
-	{
-		struct hfi_frame_size *hfi;
-		struct hal_frame_size *prop = (struct hal_frame_size *) pdata;
-		u32 buffer_type;
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_FRAME_SIZE;
-		hfi = (struct hfi_frame_size *) &pkt->rg_property_data[1];
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		hfi->height = prop->height;
-		hfi->width = prop->width;
-		pkt->size += sizeof(struct hfi_frame_size);
-		break;
-	}
-	case HAL_CONFIG_REALTIME:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_CONFIG_REALTIME,
-			(((struct hal_enable *) pdata)->enable));
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_BUFFER_COUNT_ACTUAL:
-	{
-		struct hfi_buffer_count_actual *hfi;
-		struct hal_buffer_count_actual *prop =
-			(struct hal_buffer_count_actual *) pdata;
-		u32 buffer_type;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
-		hfi = (struct hfi_buffer_count_actual *)
-			&pkt->rg_property_data[1];
-		hfi->buffer_count_actual = prop->buffer_count_actual;
-		hfi->buffer_count_min_host = prop->buffer_count_min_host;
-
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		pkt->size += sizeof(struct hfi_buffer_count_actual);
-
-		break;
-	}
-	case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
-	{
-		struct hfi_nal_stream_format_select *hfi;
-		struct hal_nal_stream_format_select *prop =
-			(struct hal_nal_stream_format_select *)pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT;
-		hfi = (struct hfi_nal_stream_format_select *)
-			&pkt->rg_property_data[1];
-		dprintk(CVP_DBG, "data is :%d\n",
-				prop->nal_stream_format_select);
-		hfi->nal_stream_format_select = hal_to_hfi_type(
-				HAL_PARAM_NAL_STREAM_FORMAT_SELECT,
-				prop->nal_stream_format_select);
-		pkt->size += sizeof(struct hfi_nal_stream_format_select);
-		break;
-	}
-	case HAL_PARAM_VDEC_OUTPUT_ORDER:
-	{
-		int *data = (int *) pdata;
-
-		pkt->rg_property_data[0] =
-				HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER;
-		switch (*data) {
-		case HAL_OUTPUT_ORDER_DECODE:
-			pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DECODE;
-			break;
-		case HAL_OUTPUT_ORDER_DISPLAY:
-			pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DISPLAY;
-			break;
-		default:
-			dprintk(CVP_ERR, "invalid output order: %#x\n",
-						  *data);
-			break;
-		}
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VDEC_PICTURE_TYPE_DECODE:
-	{
-		struct hfi_enable_picture *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE;
-		hfi = (struct hfi_enable_picture *) &pkt->rg_property_data[1];
-		hfi->picture_type =
-			((struct hfi_enable_picture *)pdata)->picture_type;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VDEC_MULTI_STREAM:
-	{
-		struct hfi_multi_stream *hfi;
-		struct hal_multi_stream *prop =
-			(struct hal_multi_stream *) pdata;
-		u32 buffer_type;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
-		hfi = (struct hfi_multi_stream *) &pkt->rg_property_data[1];
-
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-		hfi->enable = prop->enable;
-		pkt->size += sizeof(struct hfi_multi_stream);
-		break;
-	}
-	case HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VDEC_SYNC_FRAME_DECODE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_SECURE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			  HFI_PROPERTY_PARAM_SECURE_SESSION,
-			  ((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_CONFIG_VENC_REQUEST_IFRAME:
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
-		break;
-	case HAL_CONFIG_HEIC_FRAME_QUALITY:
-	{
-		struct hfi_heic_frame_quality *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY;
-		hfi =
-		(struct hfi_heic_frame_quality *) &pkt->rg_property_data[1];
-		hfi->frame_quality =
-			((struct hal_heic_frame_quality *)pdata)->frame_quality;
-		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_heic_frame_quality);
-		break;
-	}
-	case HAL_CONFIG_HEIC_GRID_ENABLE:
-	{
-		struct hfi_heic_grid_enable *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_HEIC_GRID_ENABLE;
-		hfi = (struct hfi_heic_grid_enable *) &pkt->rg_property_data[1];
-		hfi->grid_enable =
-			((struct hal_heic_grid_enable *)pdata)->grid_enable;
-		pkt->size += sizeof(u32) + sizeof(struct hfi_heic_grid_enable);
-		break;
-	}
-	case HAL_CONFIG_VENC_TARGET_BITRATE:
-	{
-		struct hfi_bitrate *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
-		hfi = (struct hfi_bitrate *) &pkt->rg_property_data[1];
-		hfi->bit_rate = ((struct hal_bitrate *)pdata)->bit_rate;
-		hfi->layer_id = ((struct hal_bitrate *)pdata)->layer_id;
-		pkt->size += sizeof(struct hfi_bitrate);
-		break;
-	}
-	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
-	{
-		struct hfi_profile_level *hfi;
-		struct hal_profile_level *prop =
-			(struct hal_profile_level *) pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
-		hfi = (struct hfi_profile_level *)
-			&pkt->rg_property_data[1];
-
-		/* There is an assumption here that HAL level is same as
-		 * HFI level
-		 */
-		hfi->level = prop->level;
-		hfi->profile = prop->profile;
-		if (hfi->profile <= 0) {
-			hfi->profile = HFI_H264_PROFILE_HIGH;
-			dprintk(CVP_WARN,
-					"Profile %d not supported, falling back to high\n",
-					prop->profile);
-		}
-
-		pkt->size += sizeof(struct hfi_profile_level);
-		break;
-	}
-	case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
-	{
-		struct hfi_h264_entropy_control *hfi;
-		struct hal_h264_entropy_control *prop =
-			(struct hal_h264_entropy_control *) pdata;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL;
-		hfi = (struct hfi_h264_entropy_control *)
-			&pkt->rg_property_data[1];
-		hfi->entropy_mode = hal_to_hfi_type(
-		   HAL_PARAM_VENC_H264_ENTROPY_CONTROL,
-		   prop->entropy_mode);
-
-		hfi->cabac_model = HFI_H264_CABAC_MODEL_0;
-		pkt->size += sizeof(struct hfi_h264_entropy_control);
-		break;
-	}
-	case HAL_PARAM_VENC_RATE_CONTROL:
-	{
-		u32 *rc;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
-		rc = (u32 *)pdata;
-		switch ((enum hal_rate_control) *rc) {
-		case HAL_RATE_CONTROL_OFF:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_OFF;
-			break;
-		case HAL_RATE_CONTROL_CBR:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_CFR;
-			break;
-		case HAL_RATE_CONTROL_VBR:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_CFR;
-			break;
-		case HAL_RATE_CONTROL_MBR:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_MBR_CFR;
-			break;
-		case HAL_RATE_CONTROL_CBR_VFR:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_VFR;
-			break;
-		case HAL_RATE_CONTROL_MBR_VFR:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_MBR_VFR;
-			break;
-		case HAL_RATE_CONTROL_CQ:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CQ;
-			break;
-		default:
-			dprintk(CVP_ERR,
-					"Invalid Rate control setting: %pK\n",
-					pdata);
-			break;
-		}
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
-	{
-		struct hfi_h264_db_control *hfi;
-		struct hal_h264_db_control *prop =
-			(struct hal_h264_db_control *) pdata;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL;
-		hfi = (struct hfi_h264_db_control *) &pkt->rg_property_data[1];
-		switch (prop->mode) {
-		case HAL_H264_DB_MODE_DISABLE:
-			hfi->mode = HFI_H264_DB_MODE_DISABLE;
-			break;
-		case HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY:
-			hfi->mode = HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
-			break;
-		case HAL_H264_DB_MODE_ALL_BOUNDARY:
-			hfi->mode = HFI_H264_DB_MODE_ALL_BOUNDARY;
-			break;
-		default:
-			dprintk(CVP_ERR, "Invalid deblocking mode: %#x\n",
-						  prop->mode);
-			break;
-		}
-		hfi->slice_alpha_offset = prop->slice_alpha_offset;
-		hfi->slice_beta_offset = prop->slice_beta_offset;
-		pkt->size += sizeof(struct hfi_h264_db_control);
-		break;
-	}
-	case HAL_CONFIG_VENC_FRAME_QP:
-	{
-		struct hfi_quantization *hfi;
-		struct hal_quantization *hal_quant =
-			(struct hal_quantization *) pdata;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_FRAME_QP;
-		hfi = (struct hfi_quantization *) &pkt->rg_property_data[1];
-		hfi->qp_packed = hal_quant->qpi | hal_quant->qpp << 8 |
-			hal_quant->qpb << 16;
-		hfi->layer_id = hal_quant->layer_id;
-		hfi->enable = hal_quant->enable;
-		pkt->size += sizeof(struct hfi_quantization);
-		break;
-	}
-	case HAL_PARAM_VENC_SESSION_QP_RANGE:
-	{
-		struct hfi_quantization_range *hfi;
-		struct hal_quantization_range *hal_range =
-			(struct hal_quantization_range *) pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
-		hfi = (struct hfi_quantization_range *)
-				&pkt->rg_property_data[1];
-
-		/*
-		 * When creating the packet, pack the qp value as
-		 * 0xbbppii, where ii = qp range for I-frames,
-		 * pp = qp range for P-frames, etc.
-		 */
-		hfi->min_qp.qp_packed = hal_range->qpi_min |
-			hal_range->qpp_min << 8 |
-			hal_range->qpb_min << 16;
-		hfi->max_qp.qp_packed = hal_range->qpi_max |
-			hal_range->qpp_max << 8 |
-			hal_range->qpb_max << 16;
-		hfi->max_qp.layer_id = hal_range->layer_id;
-		hfi->min_qp.layer_id = hal_range->layer_id;
-
-		pkt->size += sizeof(struct hfi_quantization_range);
-		break;
-	}
-	case HAL_CONFIG_VENC_INTRA_PERIOD:
-	{
-		struct hfi_intra_period *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD;
-		hfi = (struct hfi_intra_period *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_intra_period *) pdata,
-				sizeof(struct hfi_intra_period));
-		pkt->size += sizeof(struct hfi_intra_period);
-
-		if (hfi->bframes) {
-			struct hfi_enable *hfi_enable;
-			u32 *prop_type;
-
-			prop_type = (u32 *)((u8 *)&pkt->rg_property_data[0] +
-				sizeof(u32) + sizeof(struct hfi_intra_period));
-			*prop_type =  HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B;
-			hfi_enable = (struct hfi_enable *)(prop_type + 1);
-			hfi_enable->enable = true;
-			pkt->num_properties = 2;
-			pkt->size += sizeof(struct hfi_enable) + sizeof(u32);
-		}
-		break;
-	}
-	case HAL_CONFIG_VENC_IDR_PERIOD:
-	{
-		struct hfi_idr_period *hfi;
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
-		hfi = (struct hfi_idr_period *) &pkt->rg_property_data[1];
-		hfi->idr_period = ((struct hfi_idr_period *) pdata)->idr_period;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VENC_ADAPTIVE_B:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VDEC_CONCEAL_COLOR:
-	{
-		struct hfi_conceal_color *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR;
-		hfi = (struct hfi_conceal_color *) &pkt->rg_property_data[1];
-		if (hfi) {
-			hfi->conceal_color_8bit =
-				((struct hfi_conceal_color *) pdata)->
-				conceal_color_8bit;
-			hfi->conceal_color_10bit =
-				((struct hfi_conceal_color *) pdata)->
-				conceal_color_10bit;
-		}
-		pkt->size += sizeof(struct hfi_conceal_color);
-		break;
-	}
-	case HAL_PARAM_VPE_ROTATION:
-	{
-		struct hfi_vpe_rotation_type *hfi;
-		struct hal_vpe_rotation *prop =
-			(struct hal_vpe_rotation *) pdata;
-		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_VPE_ROTATION;
-		hfi = (struct hfi_vpe_rotation_type *)&pkt->rg_property_data[1];
-		switch (prop->rotate) {
-		case 0:
-			hfi->rotation = HFI_ROTATE_NONE;
-			break;
-		case 90:
-			hfi->rotation = HFI_ROTATE_90;
-			break;
-		case 180:
-			hfi->rotation = HFI_ROTATE_180;
-			break;
-		case 270:
-			hfi->rotation = HFI_ROTATE_270;
-			break;
-		default:
-			dprintk(CVP_ERR, "Invalid rotation setting: %#x\n",
-				prop->rotate);
-			rc = -EINVAL;
-			break;
-		}
-		switch (prop->flip) {
-		case HAL_FLIP_NONE:
-			hfi->flip = HFI_FLIP_NONE;
-			break;
-		case HAL_FLIP_HORIZONTAL:
-			hfi->flip = HFI_FLIP_HORIZONTAL;
-			break;
-		case HAL_FLIP_VERTICAL:
-			hfi->flip = HFI_FLIP_VERTICAL;
-			break;
-		case HAL_FLIP_BOTH:
-			hfi->flip = HFI_FLIP_HORIZONTAL | HFI_FLIP_VERTICAL;
-			break;
-		default:
-			dprintk(CVP_ERR, "Invalid flip setting: %#x\n",
-				prop->flip);
-			rc = -EINVAL;
-			break;
-		}
-		pkt->size += sizeof(struct hfi_vpe_rotation_type);
-		break;
-	}
-	case HAL_PARAM_VENC_INTRA_REFRESH:
-	{
-		struct hfi_intra_refresh *hfi;
-		struct hal_intra_refresh *prop =
-			(struct hal_intra_refresh *) pdata;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
-		hfi = (struct hfi_intra_refresh *) &pkt->rg_property_data[1];
-		hfi->mbs = 0;
-		switch (prop->mode) {
-		case HAL_INTRA_REFRESH_NONE:
-			hfi->mode = HFI_INTRA_REFRESH_NONE;
-			break;
-		case HAL_INTRA_REFRESH_CYCLIC:
-			hfi->mode = HFI_INTRA_REFRESH_CYCLIC;
-			hfi->mbs = prop->ir_mbs;
-			break;
-		case HAL_INTRA_REFRESH_RANDOM:
-			hfi->mode = HFI_INTRA_REFRESH_RANDOM;
-			hfi->mbs = prop->ir_mbs;
-			break;
-		default:
-			dprintk(CVP_ERR,
-					"Invalid intra refresh setting: %#x\n",
-					prop->mode);
-			break;
-		}
-		pkt->size += sizeof(struct hfi_intra_refresh);
-		break;
-	}
-	case HAL_PARAM_VENC_MULTI_SLICE_CONTROL:
-	{
-		struct hfi_multi_slice_control *hfi;
-		struct hal_multi_slice_control *prop =
-			(struct hal_multi_slice_control *) pdata;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL;
-		hfi = (struct hfi_multi_slice_control *)
-			&pkt->rg_property_data[1];
-		switch (prop->multi_slice) {
-		case HAL_MULTI_SLICE_OFF:
-			hfi->multi_slice = HFI_MULTI_SLICE_OFF;
-			break;
-		case HAL_MULTI_SLICE_BY_MB_COUNT:
-			hfi->multi_slice = HFI_MULTI_SLICE_BY_MB_COUNT;
-			break;
-		case HAL_MULTI_SLICE_BY_BYTE_COUNT:
-			hfi->multi_slice = HFI_MULTI_SLICE_BY_BYTE_COUNT;
-			break;
-		default:
-			dprintk(CVP_ERR, "Invalid slice settings: %#x\n",
-				prop->multi_slice);
-			break;
-		}
-		hfi->slice_size = prop->slice_size;
-		pkt->size += sizeof(struct
-					hfi_multi_slice_control);
-		break;
-	}
-	case HAL_PARAM_INDEX_EXTRADATA:
-	{
-		struct hfi_index_extradata_config *hfi;
-		struct hal_extradata_enable *extra = pdata;
-		int id = 0;
-
-		pkt->rg_property_data[0] =
-			get_hfi_extradata_index(extra->index);
-		hfi = (struct hfi_index_extradata_config *)
-			&pkt->rg_property_data[1];
-		hfi->enable = extra->enable;
-		id = get_hfi_extradata_id(extra->index);
-		if (id)
-			hfi->index_extra_data_id = id;
-		else {
-			dprintk(CVP_WARN,
-				"Failed to find extradata id: %d\n",
-				id);
-			rc = -EINVAL;
-		}
-		pkt->size += sizeof(struct hfi_index_extradata_config);
-		break;
-	}
-	case HAL_PARAM_VENC_SLICE_DELIVERY_MODE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_VUI_TIMING_INFO:
-	{
-		struct hfi_vui_timing_info *hfi;
-		struct hal_vui_timing_info *timing_info = pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_VUI_TIMING_INFO;
-
-		hfi = (struct hfi_vui_timing_info *)&pkt->rg_property_data[1];
-		hfi->enable = timing_info->enable;
-		hfi->fixed_frame_rate = timing_info->fixed_frame_rate;
-		hfi->time_scale = timing_info->time_scale;
-
-		pkt->size += sizeof(struct hfi_vui_timing_info);
-		break;
-	}
-	case HAL_PARAM_VENC_GENERATE_AUDNAL:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_PARAM_VENC_GENERATE_AUDNAL,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_LTRMODE:
-	{
-		struct hfi_ltr_mode *hfi;
-		struct hal_ltr_mode *hal = pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_LTRMODE;
-		hfi = (struct hfi_ltr_mode *) &pkt->rg_property_data[1];
-		hfi->ltr_mode = get_hfi_ltr_mode(hal->mode);
-		hfi->ltr_count = hal->count;
-		hfi->trust_mode = hal->trust_mode;
-		pkt->size += sizeof(struct hfi_ltr_mode);
-		break;
-	}
-	case HAL_CONFIG_VENC_USELTRFRAME:
-	{
-		struct hfi_ltr_use *hfi;
-		struct hal_ltr_use *hal = pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_USELTRFRAME;
-		hfi = (struct hfi_ltr_use *) &pkt->rg_property_data[1];
-		hfi->frames = hal->frames;
-		hfi->ref_ltr = hal->ref_ltr;
-		hfi->use_constrnt = hal->use_constraint;
-		pkt->size += sizeof(struct hfi_ltr_use);
-		break;
-	}
-	case HAL_CONFIG_VENC_MARKLTRFRAME:
-	{
-		struct hfi_ltr_mark *hfi;
-		struct hal_ltr_mark *hal = pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME;
-		hfi = (struct hfi_ltr_mark *) &pkt->rg_property_data[1];
-		hfi->mark_frame = hal->mark_frame;
-		pkt->size += sizeof(struct hfi_ltr_mark);
-		break;
-	}
-	case HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER;
-		pkt->rg_property_data[1] = *(u32 *)pdata;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_CONFIG_VENC_HIER_P_NUM_FRAMES:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER;
-		pkt->rg_property_data[1] = *(u32 *)pdata;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VENC_DISABLE_RC_TIMESTAMP:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VPE_COLOR_SPACE_CONVERSION:
-	{
-		struct hfi_vpe_color_space_conversion *hfi = NULL;
-		struct hal_vpe_color_space_conversion *hal = pdata;
-
-		pkt->rg_property_data[0] =
-				HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION;
-		hfi = (struct hfi_vpe_color_space_conversion *)
-			&pkt->rg_property_data[1];
-
-		hfi->input_color_primaries = hal->input_color_primaries;
-		if (hal->custom_matrix_enabled)
-			/* Bit Mask to enable all custom values */
-			hfi->custom_matrix_enabled = 0x7;
-		else
-			hfi->custom_matrix_enabled = 0x0;
-		memcpy(hfi->csc_matrix, hal->csc_matrix,
-				sizeof(hfi->csc_matrix));
-		memcpy(hfi->csc_bias, hal->csc_bias, sizeof(hfi->csc_bias));
-		memcpy(hfi->csc_limit, hal->csc_limit, sizeof(hfi->csc_limit));
-		pkt->size += sizeof(struct hfi_vpe_color_space_conversion);
-		break;
-	}
-	case HAL_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_CONFIG_VENC_PERF_MODE:
-	{
-		u32 hfi_perf_mode = 0;
-		enum hal_perf_mode hal_perf_mode = *(enum hal_perf_mode *)pdata;
-
-		switch (hal_perf_mode) {
-		case HAL_PERF_MODE_POWER_SAVE:
-			hfi_perf_mode = HFI_VENC_PERFMODE_POWER_SAVE;
-			break;
-		case HAL_PERF_MODE_POWER_MAX_QUALITY:
-			hfi_perf_mode = HFI_VENC_PERFMODE_MAX_QUALITY;
-			break;
-		default:
-			return -ENOTSUPP;
-		}
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_PERF_MODE;
-		pkt->rg_property_data[1] = hfi_perf_mode;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VENC_HIER_P_HYBRID_MODE:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE;
-		pkt->rg_property_data[1] =
-			((struct hfi_hybrid_hierp *)pdata)->layers ?: 0xFF;
-		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_hybrid_hierp);
-		break;
-	}
-	case HAL_PARAM_VENC_MBI_STATISTICS_MODE:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_MBI_DUMPING;
-		pkt->rg_property_data[1] = hal_to_hfi_type(
-			HAL_PARAM_VENC_MBI_STATISTICS_MODE,
-				*(u32 *)pdata);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_CONFIG_VENC_BASELAYER_PRIORITYID:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID;
-		pkt->rg_property_data[1] = *(u32 *)pdata;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO:
-	{
-		struct hfi_aspect_ratio *hfi = NULL;
-		struct hal_aspect_ratio *hal = pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO;
-		hfi = (struct hfi_aspect_ratio *)
-			&pkt->rg_property_data[1];
-		memcpy(hfi, hal,
-			sizeof(struct hfi_aspect_ratio));
-		pkt->size += sizeof(struct hfi_aspect_ratio);
-		break;
-	}
-	case HAL_PARAM_VENC_BITRATE_TYPE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_H264_TRANSFORM_8x8:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_H264_8X8_TRANSFORM,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_VIDEO_SIGNAL_INFO:
-	{
-		struct hal_video_signal_info *hal = pdata;
-		struct hfi_video_signal_metadata *signal_info =
-			(struct hfi_video_signal_metadata *)
-			&pkt->rg_property_data[1];
-
-		signal_info->enable = true;
-		signal_info->video_format = MSM_CVP_NTSC;
-		signal_info->video_full_range = hal->full_range;
-		signal_info->color_description = MSM_CVP_COLOR_DESC_PRESENT;
-		signal_info->color_primaries = hal->color_space;
-		signal_info->transfer_characteristics = hal->transfer_chars;
-		signal_info->matrix_coeffs = hal->matrix_coeffs;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO;
-		pkt->size += sizeof(*signal_info);
-		break;
-	}
-	case HAL_PARAM_VENC_IFRAMESIZE_TYPE:
-	{
-		enum hal_iframesize_type hal =
-			*(enum hal_iframesize_type *)pdata;
-		struct hfi_iframe_size *hfi = (struct hfi_iframe_size *)
-			&pkt->rg_property_data[1];
-
-		switch (hal) {
-		case HAL_IFRAMESIZE_TYPE_DEFAULT:
-			hfi->type = HFI_IFRAME_SIZE_DEFAULT;
-			break;
-		case HAL_IFRAMESIZE_TYPE_MEDIUM:
-			hfi->type = HFI_IFRAME_SIZE_MEDIUM;
-			break;
-		case HAL_IFRAMESIZE_TYPE_HUGE:
-			hfi->type = HFI_IFRAME_SIZE_HIGH;
-			break;
-		case HAL_IFRAMESIZE_TYPE_UNLIMITED:
-			hfi->type = HFI_IFRAME_SIZE_UNLIMITED;
-			break;
-		default:
-			return -ENOTSUPP;
-		}
-		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_VENC_IFRAMESIZE;
-		pkt->size += sizeof(struct hfi_iframe_size);
-		break;
-	}
-	case HAL_PARAM_BUFFER_SIZE_MINIMUM:
-	{
-		struct hfi_buffer_size_minimum *hfi;
-		struct hal_buffer_size_minimum *prop =
-			(struct hal_buffer_size_minimum *) pdata;
-		u32 buffer_type;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM;
-
-		hfi = (struct hfi_buffer_size_minimum *)
-			&pkt->rg_property_data[1];
-		hfi->buffer_size = prop->buffer_size;
-
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		pkt->size += sizeof(struct hfi_buffer_size_minimum);
-		break;
-	}
-	case HAL_PARAM_SYNC_BASED_INTERRUPT:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_SYNC_BASED_INTERRUPT,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_LOW_LATENCY:
-	{
-		struct hfi_enable *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE;
-		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
-		hfi->enable = ((struct hal_enable *) pdata)->enable;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_CONFIG_VENC_BLUR_RESOLUTION:
-	{
-		struct hfi_frame_size *hfi;
-		struct hal_frame_size *prop = (struct hal_frame_size *) pdata;
-		u32 buffer_type;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE;
-		hfi = (struct hfi_frame_size *) &pkt->rg_property_data[1];
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		hfi->height = prop->height;
-		hfi->width = prop->width;
-		pkt->size += sizeof(struct hfi_frame_size);
-		break;
-	}
-	case HAL_PARAM_VIDEO_CORES_USAGE:
-	{
-		struct hal_videocores_usage_info *hal = pdata;
-		struct hfi_videocores_usage_type *core_info =
-			(struct hfi_videocores_usage_type *)
-			&pkt->rg_property_data[1];
-
-		core_info->video_core_enable_mask = hal->video_core_enable_mask;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
-		pkt->size += sizeof(*core_info);
-		break;
-	}
-	case HAL_PARAM_VIDEO_WORK_MODE:
-	{
-		struct hal_video_work_mode *hal = pdata;
-		struct hfi_video_work_mode *work_mode =
-			(struct hfi_video_work_mode *)
-			&pkt->rg_property_data[1];
-
-		work_mode->video_work_mode = get_hfi_work_mode(
-						hal->video_work_mode);
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_WORK_MODE;
-		pkt->size += sizeof(*work_mode);
-		break;
-	}
-	case HAL_PARAM_VIDEO_WORK_ROUTE:
-	{
-		struct hal_video_work_route *hal = pdata;
-		struct hfi_video_work_route *prop =
-			(struct hfi_video_work_route *)
-			&pkt->rg_property_data[1];
-		prop->video_work_route =
-			hal->video_work_route;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_WORK_ROUTE;
-		pkt->size += sizeof(*prop);
-		break;
-	}
-	case HAL_PARAM_VENC_HDR10_PQ_SEI:
-	{
-		struct hfi_hdr10_pq_sei *hfi;
-		struct hal_hdr10_pq_sei *prop =
-			(struct hal_hdr10_pq_sei *) pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI;
-		hfi = (struct hfi_hdr10_pq_sei *)
-			&pkt->rg_property_data[1];
-
-		memcpy(hfi, prop, sizeof(*hfi));
-		pkt->size += sizeof(struct hfi_hdr10_pq_sei);
-		break;
-	}
-	case HAL_CONFIG_VENC_VBV_HRD_BUF_SIZE:
-	{
-		struct hfi_vbv_hdr_buf_size *hfi;
-		struct hal_vbv_hdr_buf_size *prop =
-			(struct hal_vbv_hdr_buf_size *) pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_VBV_HRD_BUF_SIZE;
-		hfi = (struct hfi_vbv_hdr_buf_size *)
-			&pkt->rg_property_data[1];
-
-		hfi->vbv_hdr_buf_size = prop->vbv_hdr_buf_size;
-		pkt->size += sizeof(struct hfi_vbv_hdr_buf_size);
-		break;
-	}
-	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
-	case HAL_CONFIG_BUFFER_REQUIREMENTS:
-	case HAL_CONFIG_PRIORITY:
-	case HAL_CONFIG_BATCH_INFO:
-	case HAL_PARAM_METADATA_PASS_THROUGH:
-	case HAL_SYS_IDLE_INDICATOR:
-	case HAL_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
-	case HAL_PARAM_INTERLACE_FORMAT_SUPPORTED:
-	case HAL_PARAM_CHROMA_SITE:
-	case HAL_PARAM_PROPERTIES_SUPPORTED:
-	case HAL_PARAM_PROFILE_LEVEL_SUPPORTED:
-	case HAL_PARAM_CAPABILITY_SUPPORTED:
-	case HAL_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
-	case HAL_PARAM_MULTI_VIEW_FORMAT:
-	case HAL_PARAM_MAX_SEQUENCE_HEADER_SIZE:
-	case HAL_PARAM_CODEC_SUPPORTED:
-	case HAL_PARAM_VDEC_MULTI_VIEW_SELECT:
-	case HAL_PARAM_VDEC_MB_QUANTIZATION:
-	case HAL_PARAM_VDEC_NUM_CONCEALED_MB:
-	case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING:
-	case HAL_CONFIG_BUFFER_COUNT_ACTUAL:
-	case HAL_CONFIG_VDEC_MULTI_STREAM:
-	case HAL_PARAM_VENC_MULTI_SLICE_INFO:
-	case HAL_CONFIG_VENC_TIMESTAMP_SCALE:
-	default:
-		dprintk(CVP_ERR, "DEFAULT: Calling %#x\n", ptype);
-		rc = -ENOTSUPP;
-		break;
-	}
-	return rc;
+	/* No session set-properties are currently supported */
+	dprintk(CVP_ERR, "%s property %#x not supported\n", __func__,
+			ptype);
+	return -EINVAL;
 }
 
 static int get_hfi_ssr_type(enum hal_ssr_trigger_type type)
@@ -2156,22 +506,6 @@
 	return 0;
 }
 
-int cvp_create_pkt_cmd_session_sync_process(
-		struct hfi_cmd_session_sync_process_packet *pkt,
-		struct hal_session *session)
-{
-	if (!pkt || !session)
-		return -EINVAL;
-
-	*pkt = (struct hfi_cmd_session_sync_process_packet) {0};
-	pkt->size = sizeof(*pkt);
-	pkt->packet_type = HFI_CMD_SESSION_SYNC;
-	pkt->session_id = hash32_ptr(session);
-	pkt->sync_id = 0;
-
-	return 0;
-}
-
 static struct hfi_packetization_ops hfi_default = {
 	.sys_init = cvp_create_pkt_cmd_sys_init,
 	.sys_pc_prep = cvp_create_pkt_cmd_sys_pc_prep,
@@ -2182,6 +516,7 @@
 	.sys_release_resource = cvp_create_pkt_cmd_sys_release_resource,
 	.sys_ping = cvp_create_pkt_cmd_sys_ping,
 	.sys_image_version = cvp_create_pkt_cmd_sys_image_version,
+	.sys_ubwc_config = create_pkt_cmd_sys_ubwc_config,
 	.ssr_cmd = cvp_create_pkt_ssr_cmd,
 	.session_init = cvp_create_pkt_cmd_sys_session_init,
 	.session_cmd = cvp_create_pkt_cmd_session_cmd,
@@ -2189,21 +524,10 @@
 		cvp_create_pkt_cmd_session_set_buffers,
 	.session_release_buffers =
 		cvp_create_pkt_cmd_session_release_buffers,
-	.session_register_buffer =
-		cvp_create_pkt_cmd_session_register_buffer,
-	.session_unregister_buffer =
-		cvp_create_pkt_cmd_session_unregister_buffer,
-	.session_etb_decoder = cvp_create_pkt_cmd_session_etb_decoder,
-	.session_etb_encoder = cvp_create_pkt_cmd_session_etb_encoder,
-	.session_ftb = cvp_create_pkt_cmd_session_ftb,
 	.session_get_buf_req = cvp_create_pkt_cmd_session_get_buf_req,
-	.session_flush = cvp_create_pkt_cmd_session_flush,
 	.session_get_property = cvp_create_pkt_cmd_session_get_property,
 	.session_set_property = cvp_create_pkt_cmd_session_set_property,
-	.session_cvp_dfs_config =
-		cvp_create_pkt_cmd_session_cvp_dfs_config,
-	.session_cvp_dfs_frame =
-		cvp_create_pkt_cmd_session_cvp_dfs_frame,
+	.session_send = cvp_create_pkt_cmd_session_send,
 };
 
 struct hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
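
With the DFS-specific hooks gone, callers reach the generic path through the
ops table returned by cvp_hfi_get_pkt_ops_handle(). A caller-side dispatch
sketch; the argument to the handle getter is elided since its type is not
shown in this hunk:

static int demo_session_send(struct hfi_packetization_ops *ops,
		struct hal_session *sess,
		struct cvp_kmd_hfi_packet *in_pkt)
{
	struct cvp_kmd_hfi_packet out_pkt;

	if (!ops || !ops->session_send)
		return -EINVAL;

	/* validates type/size/session id, then copies into out_pkt */
	return ops->session_send(&out_pkt, sess, in_pkt);
}
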
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.h b/drivers/media/platform/msm/cvp/hfi_packetization.h
index c876ac1..47af610 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.h
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.h
@@ -36,6 +36,8 @@
 		struct cvp_resource_hdr *resource_hdr);
 	int (*sys_ping)(struct hfi_cmd_sys_ping_packet *pkt);
 	int (*sys_image_version)(struct hfi_cmd_sys_get_property_packet *pkt);
+	int (*sys_ubwc_config)(struct hfi_cmd_sys_set_property_packet *pkt,
+		struct msm_cvp_ubwc_config_data *ubwc_config);
 	int (*ssr_cmd)(enum hal_ssr_trigger_type type,
 		struct hfi_cmd_sys_test_ssr_packet *pkt);
 	int (*session_init)(
@@ -51,14 +53,6 @@
 		struct hfi_cmd_session_cvp_release_buffers_packet *pkt,
 		struct hal_session *session,
 		struct cvp_buffer_addr_info *buffer_info);
-	int (*session_register_buffer)(
-		struct hfi_cmd_session_register_buffers_packet *pkt,
-		struct hal_session *session,
-		struct cvp_register_buffer *buffer);
-	int (*session_unregister_buffer)(
-		struct hfi_cmd_session_unregister_buffers_packet *pkt,
-		struct hal_session *session,
-		struct cvp_unregister_buffer *buffer);
 	int (*session_etb_decoder)(
 		struct hfi_cmd_session_empty_buffer_compressed_packet *pkt,
 		struct hal_session *session,
@@ -85,14 +79,10 @@
 	int (*session_sync_process)(
 		struct hfi_cmd_session_sync_process_packet *pkt,
 		struct hal_session *session);
-	int (*session_cvp_dfs_config)(
-			struct hfi_cmd_session_cvp_dfs_config *pkt,
+	int (*session_send)(
+			struct cvp_kmd_hfi_packet *out_pkt,
 			struct hal_session *session,
-			struct msm_cvp_dfsconfig *dfs_config);
-	int (*session_cvp_dfs_frame)(
-			struct hfi_cmd_session_cvp_dfs_frame *pkt,
-			struct hal_session *session,
-			struct msm_cvp_dfsframe *dfs_frame);
+			struct cvp_kmd_hfi_packet *in_pkt);
 };
 
 struct hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c
index 041d763..a6874bb 100644
--- a/drivers/media/platform/msm/cvp/hfi_response_handler.c
+++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c
@@ -14,6 +14,9 @@
 #include "cvp_hfi_io.h"
 #include "msm_cvp_debug.h"
 #include "cvp_hfi.h"
+#include "msm_cvp_common.h"
+
+extern struct msm_cvp_drv *cvp_driver;
 
 static enum cvp_status hfi_map_err_status(u32 hfi_err)
 {
@@ -80,201 +83,6 @@
 	return cvp_err;
 }
 
-static int get_hal_pixel_depth(u32 hfi_bit_depth)
-{
-	switch (hfi_bit_depth) {
-	case HFI_BITDEPTH_8: return MSM_CVP_BIT_DEPTH_8;
-	case HFI_BITDEPTH_9:
-	case HFI_BITDEPTH_10: return MSM_CVP_BIT_DEPTH_10;
-	}
-	dprintk(CVP_ERR, "Unsupported bit depth: %d\n", hfi_bit_depth);
-	return MSM_CVP_BIT_DEPTH_UNSUPPORTED;
-}
-
-static int hfi_process_sess_evt_seq_changed(u32 device_id,
-		struct hfi_msg_event_notify_packet *pkt,
-		struct msm_cvp_cb_info *info)
-{
-	struct msm_cvp_cb_event event_notify = {0};
-	int num_properties_changed;
-	struct hfi_frame_size *frame_sz;
-	struct hfi_profile_level *profile_level;
-	struct hfi_bit_depth *pixel_depth;
-	struct hfi_pic_struct *pic_struct;
-	struct hfi_buffer_requirements *buf_req;
-	struct hfi_index_extradata_input_crop_payload *crop_info;
-	u32 entropy_mode = 0;
-	u8 *data_ptr;
-	int prop_id;
-	int luma_bit_depth, chroma_bit_depth;
-	struct hfi_colour_space *colour_info;
-
-	if (sizeof(struct hfi_msg_event_notify_packet) > pkt->size) {
-		dprintk(CVP_ERR,
-				"hal_process_session_init_done: bad_pkt_size\n");
-		return -E2BIG;
-	}
-
-	event_notify.device_id = device_id;
-	event_notify.session_id = (void *)(uintptr_t)pkt->session_id;
-	event_notify.status = CVP_ERR_NONE;
-	num_properties_changed = pkt->event_data2;
-	switch (pkt->event_data1) {
-	case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES:
-		event_notify.hal_event_type =
-			HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES;
-		break;
-	case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES:
-		event_notify.hal_event_type =
-			HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES;
-		break;
-	default:
-		break;
-	}
-
-	if (num_properties_changed) {
-		data_ptr = (u8 *) &pkt->rg_ext_event_data[0];
-		do {
-			prop_id = (int) *((u32 *)data_ptr);
-			switch (prop_id) {
-			case HFI_PROPERTY_PARAM_FRAME_SIZE:
-				data_ptr = data_ptr + sizeof(u32);
-				frame_sz =
-					(struct hfi_frame_size *) data_ptr;
-				event_notify.width = frame_sz->width;
-				event_notify.height = frame_sz->height;
-				dprintk(CVP_DBG, "height: %d width: %d\n",
-					frame_sz->height, frame_sz->width);
-				data_ptr +=
-					sizeof(struct hfi_frame_size);
-				break;
-			case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
-				data_ptr = data_ptr + sizeof(u32);
-				profile_level =
-					(struct hfi_profile_level *) data_ptr;
-				event_notify.profile = profile_level->profile;
-				event_notify.level = profile_level->level;
-				dprintk(CVP_DBG, "profile: %d level: %d\n",
-					profile_level->profile,
-					profile_level->level);
-				data_ptr +=
-					sizeof(struct hfi_profile_level);
-				break;
-			case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
-				data_ptr = data_ptr + sizeof(u32);
-				pixel_depth = (struct hfi_bit_depth *) data_ptr;
-				/*
-				 * Luma and chroma can have different bitdepths.
-				 * Driver should rely on luma and chroma
-				 * bitdepth for determining output bitdepth
-				 * type.
-				 *
-				 * pixel_depth->bitdepth will include luma
-				 * bitdepth info in bits 0..15 and chroma
-				 * bitdept in bits 16..31.
-				 */
-				luma_bit_depth = get_hal_pixel_depth(
-					pixel_depth->bit_depth &
-					GENMASK(15, 0));
-				chroma_bit_depth = get_hal_pixel_depth(
-					(pixel_depth->bit_depth &
-					GENMASK(31, 16)) >> 16);
-				if (luma_bit_depth == MSM_CVP_BIT_DEPTH_10 ||
-					chroma_bit_depth ==
-						MSM_CVP_BIT_DEPTH_10)
-					event_notify.bit_depth =
-						MSM_CVP_BIT_DEPTH_10;
-				else
-					event_notify.bit_depth = luma_bit_depth;
-				dprintk(CVP_DBG,
-					"bitdepth(%d), luma_bit_depth(%d), chroma_bit_depth(%d)\n",
-					event_notify.bit_depth, luma_bit_depth,
-					chroma_bit_depth);
-				data_ptr += sizeof(struct hfi_bit_depth);
-				break;
-			case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
-				data_ptr = data_ptr + sizeof(u32);
-				pic_struct = (struct hfi_pic_struct *) data_ptr;
-				event_notify.pic_struct =
-					pic_struct->progressive_only;
-				dprintk(CVP_DBG,
-					"Progressive only flag: %d\n",
-						pic_struct->progressive_only);
-				data_ptr +=
-					sizeof(struct hfi_pic_struct);
-				break;
-			case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
-				data_ptr = data_ptr + sizeof(u32);
-				colour_info =
-					(struct hfi_colour_space *) data_ptr;
-				event_notify.colour_space =
-					colour_info->colour_space;
-				dprintk(CVP_DBG,
-					"Colour space value is: %d\n",
-						colour_info->colour_space);
-				data_ptr +=
-					sizeof(struct hfi_colour_space);
-				break;
-			case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
-				data_ptr = data_ptr + sizeof(u32);
-				entropy_mode = *(u32 *)data_ptr;
-				event_notify.entropy_mode = entropy_mode;
-				dprintk(CVP_DBG,
-					"Entropy Mode: 0x%x\n", entropy_mode);
-				data_ptr +=
-					sizeof(u32);
-				break;
-			case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
-				data_ptr = data_ptr + sizeof(u32);
-				buf_req =
-					(struct hfi_buffer_requirements *)
-						data_ptr;
-				event_notify.capture_buf_count =
-					buf_req->buffer_count_min;
-				dprintk(CVP_DBG,
-					"Capture Count : 0x%x\n",
-						event_notify.capture_buf_count);
-				data_ptr +=
-					sizeof(struct hfi_buffer_requirements);
-				break;
-			case HFI_INDEX_EXTRADATA_INPUT_CROP:
-				data_ptr = data_ptr + sizeof(u32);
-				crop_info = (struct
-				hfi_index_extradata_input_crop_payload *)
-						data_ptr;
-				event_notify.crop_data.left = crop_info->left;
-				event_notify.crop_data.top = crop_info->top;
-				event_notify.crop_data.width = crop_info->width;
-				event_notify.crop_data.height =
-					crop_info->height;
-				dprintk(CVP_DBG,
-					"CROP info : Left = %d Top = %d\n",
-						crop_info->left,
-						crop_info->top);
-				dprintk(CVP_DBG,
-					"CROP info : Width = %d Height = %d\n",
-						crop_info->width,
-						crop_info->height);
-				data_ptr +=
-					sizeof(struct
-					hfi_index_extradata_input_crop_payload);
-				break;
-			default:
-				dprintk(CVP_ERR,
-					"%s cmd: %#x not supported\n",
-					__func__, prop_id);
-				break;
-			}
-			num_properties_changed--;
-		} while (num_properties_changed > 0);
-	}
-
-	info->response_type = HAL_SESSION_EVENT_CHANGE;
-	info->response.event = event_notify;
-
-	return 0;
-}
-
 static int hfi_process_evt_release_buffer_ref(u32 device_id,
 		struct hfi_msg_event_notify_packet *pkt,
 		struct msm_cvp_cb_info *info)
@@ -374,9 +182,9 @@
 		return hfi_process_session_error(device_id, pkt, info);
 
 	case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
-		dprintk(CVP_INFO, "HFI_EVENT_SESSION_SEQUENCE_CHANGED[%#x]\n",
+		dprintk(CVP_WARN, "HFI_EVENT_SESSION_SEQUENCE_CHANGED [%#x]\n",
 			pkt->session_id);
-		return hfi_process_sess_evt_seq_changed(device_id, pkt, info);
+		return 0;
 
 	case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
 		dprintk(CVP_INFO, "HFI_EVENT_RELEASE_BUFFER_REFERENCE[%#x]\n",
@@ -743,61 +551,6 @@
 
 	return 0;
 }
-
-static int hfi_process_session_register_buffer_done(u32 device_id,
-		struct hfi_msg_session_register_buffers_done_packet *pkt,
-		struct msm_cvp_cb_info *info)
-{
-	struct msm_cvp_cb_cmd_done cmd_done = {0};
-
-	if (!pkt || pkt->size <
-		sizeof(struct hfi_msg_session_register_buffers_done_packet)) {
-		dprintk(CVP_ERR, "%s: bad packet/packet size %d\n",
-			__func__, pkt ? pkt->size : 0);
-		return -E2BIG;
-	}
-	dprintk(CVP_DBG, "RECEIVED: SESSION_REGISTER_BUFFERS_DONE[%#x]\n",
-			pkt->session_id);
-
-	cmd_done.device_id = device_id;
-	cmd_done.size = sizeof(struct msm_cvp_cb_cmd_done);
-	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
-	cmd_done.status = hfi_map_err_status(pkt->error_type);
-	cmd_done.data.regbuf.client_data = pkt->client_data;
-
-	info->response_type = HAL_SESSION_REGISTER_BUFFER_DONE;
-	info->response.cmd = cmd_done;
-
-	return 0;
-}
-
-static int hfi_process_session_unregister_buffer_done(u32 device_id,
-		struct hfi_msg_session_unregister_buffers_done_packet *pkt,
-		struct msm_cvp_cb_info *info)
-{
-	struct msm_cvp_cb_cmd_done cmd_done = {0};
-
-	if (!pkt || pkt->size <
-		sizeof(struct hfi_msg_session_unregister_buffers_done_packet)) {
-		dprintk(CVP_ERR, "%s: bad packet/packet size %d\n",
-			__func__, pkt ? pkt->size : 0);
-		return -E2BIG;
-	}
-	dprintk(CVP_DBG, "RECEIVED: SESSION_UNREGISTER_BUFFERS_DONE[%#x]\n",
-			pkt->session_id);
-
-	cmd_done.device_id = device_id;
-	cmd_done.size = sizeof(struct msm_cvp_cb_cmd_done);
-	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
-	cmd_done.status = hfi_map_err_status(pkt->error_type);
-	cmd_done.data.unregbuf.client_data = pkt->client_data;
-
-	info->response_type = HAL_SESSION_UNREGISTER_BUFFER_DONE;
-	info->response.cmd = cmd_done;
-
-	return 0;
-}
-
 static int hfi_process_session_end_done(u32 device_id,
 		struct hfi_msg_sys_session_end_done_packet *pkt,
 		struct msm_cvp_cb_info *info)
@@ -878,12 +631,12 @@
 
 
 static int hfi_process_session_rel_buf_done(u32 device_id,
-		struct hfi_msg_session_cvp_release_buffers_done_packet *pkt,
+		struct hfi_msg_session_hdr *pkt,
 		struct msm_cvp_cb_info *info)
 {
 	struct msm_cvp_cb_cmd_done cmd_done = {0};
 	unsigned int pkt_size =
-		sizeof(struct hfi_msg_session_cvp_release_buffers_done_packet);
+		sizeof(struct hfi_msg_session_hdr);
 
 	if (!pkt || pkt->size < pkt_size) {
 		dprintk(CVP_ERR, "bad packet/packet size %d\n",
@@ -924,9 +677,25 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	dprintk(CVP_DBG, "%s: device_id=%d cmd_done.status=%d\n",
-		__func__, device_id, cmd_done.status);
+	dprintk(CVP_DBG,
+		"%s: device_id=%d status=%d, sessionid=%x config=%x\n",
+		__func__, device_id, cmd_done.status,
+		cmd_done.session_id, pkt->op_conf_id);
-	info->response_type = HAL_SESSION_DFS_CONFIG_CMD_DONE;
+	switch (pkt->op_conf_id) {
+	case HFI_CMD_SESSION_CVP_DFS_CONFIG:
+		info->response_type = HAL_SESSION_DFS_CONFIG_CMD_DONE;
+		break;
+	case HFI_CMD_SESSION_CVP_DME_CONFIG:
+		info->response_type = HAL_SESSION_DME_CONFIG_CMD_DONE;
+		break;
+	case HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG:
+		info->response_type = HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE;
+		break;
+	default:
+		dprintk(CVP_ERR, "%s Invalid op config id\n", __func__);
+		return -EINVAL;
+	}
+
 	info->response.cmd = cmd_done;
 	return 0;
 }
@@ -951,14 +720,150 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	dprintk(CVP_DBG, "%s: device_id=%d cmd_done.status=%d\n",
-		__func__, device_id, cmd_done.status);
+	dprintk(CVP_DBG,
+		"%s: device_id=%d cmd_done.status=%d sessionid=%x\n",
+		__func__, device_id, cmd_done.status, cmd_done.session_id);
 	info->response_type = HAL_SESSION_DFS_FRAME_CMD_DONE;
 	info->response.cmd = cmd_done;
 
 	return 0;
 }
 
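+/*
+ * Find the instance whose session hash matches session_id.  Walks
+ * core->instances under core->lock; returns NULL when nothing matches.
+ */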
+static struct msm_cvp_inst *cvp_get_inst_from_id(struct msm_cvp_core *core,
+	void *session_id)
+{
+	struct msm_cvp_inst *inst = NULL;
+	bool match = false;
+
+	if (!core || !session_id)
+		return NULL;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		if (hash32_ptr(inst->session) == (unsigned int)session_id) {
+			match = true;
+			break;
+		}
+	}
+
+	inst = match ? inst : NULL;
+	mutex_unlock(&core->lock);
+
+	return inst;
+}
+
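+/*
+ * Queue an HFI session message on the owning instance and wake any
+ * reader blocked in msm_cvp_session_receive_hfi().  The per-session
+ * queue is bounded by MAX_NUM_MSGS_PER_SESSION to cap memory use.
+ */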
+static int hfi_process_session_cvp_msg(u32 device_id,
+	struct hfi_msg_session_hdr *pkt,
+	struct msm_cvp_cb_info *info)
+{
+	struct session_msg *sess_msg;
+	struct msm_cvp_inst *inst = NULL;
+	struct msm_cvp_core *core;
+	void *session_id;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int)) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size %d\n", __func__, pkt->size);
+		return -E2BIG;
+	}
+	session_id = (void *)(uintptr_t)pkt->session_id;
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	inst = cvp_get_inst_from_id(core, session_id);
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
+
+	sess_msg = kmem_cache_alloc(inst->session_queue.msg_cache, GFP_KERNEL);
+	if (sess_msg == NULL) {
+		dprintk(CVP_ERR, "%s: ran out of msg cache memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(&sess_msg->pkt, pkt, sizeof(struct hfi_msg_session_hdr));
+
+	spin_lock(&inst->session_queue.lock);
+	if (inst->session_queue.msg_count >= MAX_NUM_MSGS_PER_SESSION) {
+		dprintk(CVP_ERR, "Reached session queue size limit\n");
+		goto error_handle_msg;
+	}
+	list_add_tail(&sess_msg->node, &inst->session_queue.msgs);
+	inst->session_queue.msg_count++;
+	spin_unlock(&inst->session_queue.lock);
+
+	wake_up_all(&inst->session_queue.wq);
+
+	info->response_type = HAL_NO_RESP;
+
+	return 0;
+
+error_handle_msg:
+	spin_unlock(&inst->session_queue.lock);
+	kmem_cache_free(inst->session_queue.msg_cache, sess_msg);
+	return -ENOMEM;
+}
+
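+/* Complete a DME frame: map the firmware error and signal the waiter. */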
+static int hfi_process_session_cvp_dme(u32 device_id,
+	struct hfi_msg_session_cvp_dme_packet_type *pkt,
+	struct msm_cvp_cb_info *info)
+{
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size > sizeof(*pkt)) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size %d\n", __func__, pkt->size);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	dprintk(CVP_DBG,
+		"%s: device_id=%d cmd_done.status=%d sessionid=%x\n",
+		__func__, device_id, cmd_done.status, cmd_done.session_id);
+	info->response_type = HAL_SESSION_DME_FRAME_CMD_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_cvp_persist(u32 device_id,
+	struct hfi_msg_session_cvp_persist_packet_type *pkt,
+	struct msm_cvp_cb_info *info)
+{
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size < sizeof(*pkt)) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size %d\n",
+				__func__, pkt->size);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	dprintk(CVP_DBG,
+		"%s: device_id=%d cmd_done.status=%d sessionid=%x\n",
+		__func__, device_id, cmd_done.status, cmd_done.session_id);
+	info->response_type = HAL_SESSION_PERSIST_CMD_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
 static void hfi_process_sys_get_prop_image_version(
 		struct hfi_msg_sys_property_info_packet *pkt)
 {
@@ -1013,7 +918,7 @@
 				"%s: bad_pkt_size\n", __func__);
 		return -E2BIG;
 	} else if (!pkt->num_properties) {
-		dprintk(CVP_ERR,
+		dprintk(CVP_WARN,
 				"%s: no_properties\n", __func__);
 		return -EINVAL;
 	}
@@ -1048,7 +953,7 @@
 		return -EINVAL;
 	}
 
-	dprintk(CVP_DBG, "Parse response %#x\n", msg_hdr->packet);
+	dprintk(CVP_DBG, "Received HFI MSG with type %#x\n", msg_hdr->packet);
 	switch (msg_hdr->packet) {
 	case HFI_MSG_EVENT_NOTIFY:
 		pkt_func = (pkt_func_def)hfi_process_event_notify;
@@ -1065,34 +970,31 @@
 	case HFI_MSG_SYS_SESSION_END_DONE:
 		pkt_func = (pkt_func_def)hfi_process_session_end_done;
 		break;
-	case HFI_MSG_SESSION_REGISTER_BUFFERS_DONE:
-		pkt_func = (pkt_func_def)
-			hfi_process_session_register_buffer_done;
-		break;
 	case HFI_MSG_SESSION_CVP_SET_BUFFERS:
 		pkt_func = (pkt_func_def) hfi_process_session_set_buf_done;
 		break;
 	case HFI_MSG_SESSION_CVP_RELEASE_BUFFERS:
 		pkt_func = (pkt_func_def)hfi_process_session_rel_buf_done;
 		break;
-	case HFI_MSG_SESSION_UNREGISTER_BUFFERS_DONE:
-		pkt_func = (pkt_func_def)
-			hfi_process_session_unregister_buffer_done;
-		break;
 	case HFI_MSG_SYS_SESSION_ABORT_DONE:
 		pkt_func = (pkt_func_def)hfi_process_session_abort_done;
 		break;
 	case HFI_MSG_SESSION_CVP_OPERATION_CONFIG:
-		dprintk(CVP_DBG,
-			"Received HFI_MSG_SESSION_CVP_OPERATION_CONFIG from firmware");
 		pkt_func =
 			(pkt_func_def)hfi_process_session_cvp_operation_config;
 		break;
 	case HFI_MSG_SESSION_CVP_DFS:
-		dprintk(CVP_DBG,
-			"Received HFI_MSG_SESSION_CVP_DFS from firmware");
 		pkt_func = (pkt_func_def)hfi_process_session_cvp_dfs;
 		break;
+	case HFI_MSG_SESSION_CVP_DME:
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_dme;
+		break;
+	case HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS:
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_persist;
+		break;
+	case HFI_MSG_SESSION_CVP_DS:
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_msg;
+		break;
 	default:
 		dprintk(CVP_DBG, "Unable to parse message: %#x\n",
 				msg_hdr->packet);
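Note: hfi_process_msg_packet() above resolves each firmware message id to a handler and invokes it through the common pkt_func_def signature. A standalone sketch of that dispatch shape; the message id and handler are placeholders, not real HFI values.

/* dispatch.c - sketch of the response-dispatch shape (hypothetical ids) */
#include <stdio.h>

struct msg_hdr { unsigned int packet, size; };

typedef int (*pkt_func)(unsigned int device_id, void *pkt, void *info);

static int handle_dme(unsigned int device_id, void *pkt, void *info)
{
	(void)device_id; (void)pkt; (void)info;
	printf("DME response handled\n");
	return 0;
}

#define MSG_SESSION_DME 0x2200u	/* placeholder message id */

static int dispatch(unsigned int device_id, struct msg_hdr *hdr)
{
	pkt_func fn;

	switch (hdr->packet) {
	case MSG_SESSION_DME:
		fn = handle_dme;
		break;
	default:
		printf("unable to parse message: %#x\n", hdr->packet);
		return -1;
	}
	return fn(device_id, hdr, NULL);
}

int main(void)
{
	struct msg_hdr hdr = { MSG_SESSION_DME, sizeof(hdr) };

	return dispatch(0, &hdr);
}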
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index eb54cad..a6afeb6 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -4,6 +4,7 @@
  */
 
 #include "msm_cvp.h"
+#include <synx_api.h>
 
 #define MSM_CVP_NOMINAL_CYCLES		(444 * 1000 * 1000)
 #define MSM_CVP_UHD60E_VPSS_CYCLES	(111 * 1000 * 1000)
@@ -13,8 +14,17 @@
 #define MAX_CVP_ISE_CYCLES		(MSM_CVP_NOMINAL_CYCLES - \
 		MSM_CVP_UHD60E_ISE_CYCLES)
 
+struct msm_cvp_fence_thread_data {
+	struct msm_cvp_inst *inst;
+	unsigned int device_id;
+	struct cvp_kmd_hfi_fence_packet in_fence_pkt;
+	unsigned int arg_type;
+};
+
+static struct msm_cvp_fence_thread_data fence_thread_data;
+
 static void print_client_buffer(u32 tag, const char *str,
-		struct msm_cvp_inst *inst, struct msm_cvp_buffer *cbuf)
+		struct msm_cvp_inst *inst, struct cvp_kmd_buffer *cbuf)
 {
 	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
 		return;
@@ -42,13 +52,13 @@
 {
 	enum hal_buffer buftype = HAL_BUFFER_NONE;
 
-	if (type == MSM_CVP_BUFTYPE_INPUT)
+	if (type == CVP_KMD_BUFTYPE_INPUT)
 		buftype = HAL_BUFFER_INPUT;
-	else if (type == MSM_CVP_BUFTYPE_OUTPUT)
+	else if (type == CVP_KMD_BUFTYPE_OUTPUT)
 		buftype = HAL_BUFFER_OUTPUT;
-	else if (type == MSM_CVP_BUFTYPE_INTERNAL_1)
+	else if (type == CVP_KMD_BUFTYPE_INTERNAL_1)
 		buftype = HAL_BUFFER_INTERNAL_SCRATCH_1;
-	else if (type == MSM_CVP_BUFTYPE_INTERNAL_2)
+	else if (type == CVP_KMD_BUFTYPE_INTERNAL_2)
 		buftype = HAL_BUFFER_INTERNAL_SCRATCH_1;
 	else
 		dprintk(CVP_ERR, "%s: unknown buffer type %#x\n",
@@ -57,171 +67,6 @@
 	return buftype;
 }
 
-void cvp_handle_session_register_buffer_done(enum hal_command_response cmd,
-		void *resp)
-{
-	struct msm_cvp_cb_cmd_done *response = resp;
-	struct msm_cvp_inst *inst;
-	struct msm_cvp_internal_buffer *cbuf;
-	struct v4l2_event event = {0};
-	u32 *data;
-	bool found;
-
-	if (!response) {
-		dprintk(CVP_ERR, "%s: invalid response\n", __func__);
-		return;
-	}
-	inst = cvp_get_inst(get_cvp_core(response->device_id),
-			response->session_id);
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid session %pK\n", __func__,
-			response->session_id);
-		return;
-	}
-
-	mutex_lock(&inst->cvpbufs.lock);
-	found = false;
-	list_for_each_entry(cbuf, &inst->cvpbufs.list, list) {
-		if (response->data.regbuf.client_data ==
-				cbuf->smem.device_addr) {
-			found = true;
-			break;
-		}
-	}
-	mutex_unlock(&inst->cvpbufs.lock);
-	if (!found) {
-		dprintk(CVP_ERR, "%s: client_data %x not found\n",
-			__func__, response->data.regbuf.client_data);
-		goto exit;
-	}
-	print_cvp_internal_buffer(CVP_DBG, "register_done", inst, cbuf);
-
-	event.type = V4L2_EVENT_MSM_CVP_REGISTER_BUFFER_DONE;
-	data = (u32 *)event.u.data;
-	data[0] = cbuf->buf.index;
-	data[1] = cbuf->buf.type;
-	data[2] = cbuf->buf.fd;
-	data[3] = cbuf->buf.offset;
-	v4l2_event_queue_fh(&inst->event_handler, &event);
-
-exit:
-	cvp_put_inst(inst);
-}
-
-void cvp_handle_session_unregister_buffer_done(enum hal_command_response cmd,
-		void *resp)
-{
-	int rc;
-	struct msm_cvp_cb_cmd_done *response = resp;
-	struct msm_cvp_inst *inst;
-	struct msm_cvp_internal_buffer *cbuf, *dummy;
-	struct v4l2_event event = {0};
-	u32 *data;
-	bool found;
-
-	if (!response) {
-		dprintk(CVP_ERR, "%s: invalid response\n", __func__);
-		return;
-	}
-	inst = cvp_get_inst(get_cvp_core(response->device_id),
-			response->session_id);
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid session %pK\n", __func__,
-			response->session_id);
-		return;
-	}
-
-	mutex_lock(&inst->cvpbufs.lock);
-	found = false;
-	list_for_each_entry_safe(cbuf, dummy, &inst->cvpbufs.list, list) {
-		if (response->data.unregbuf.client_data ==
-				cbuf->smem.device_addr) {
-			found = true;
-			break;
-		}
-	}
-	mutex_unlock(&inst->cvpbufs.lock);
-	if (!found) {
-		dprintk(CVP_ERR, "%s: client_data %x not found\n",
-			__func__, response->data.unregbuf.client_data);
-		goto exit;
-	}
-	print_cvp_internal_buffer(CVP_DBG, "unregister_done", inst, cbuf);
-
-	rc = msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
-	if (rc) {
-		print_cvp_internal_buffer(CVP_ERR, "unmap fail", inst, cbuf);
-		goto exit;
-	}
-
-	event.type = V4L2_EVENT_MSM_CVP_UNREGISTER_BUFFER_DONE;
-	data = (u32 *)event.u.data;
-	data[0] = cbuf->buf.index;
-	data[1] = cbuf->buf.type;
-	data[2] = cbuf->buf.fd;
-	data[3] = cbuf->buf.offset;
-	v4l2_event_queue_fh(&inst->event_handler, &event);
-
-	mutex_lock(&inst->cvpbufs.lock);
-	list_del(&cbuf->list);
-	mutex_unlock(&inst->cvpbufs.lock);
-	kfree(cbuf);
-	cbuf = NULL;
-exit:
-	cvp_put_inst(inst);
-}
-
-static void print_cvp_cycles(struct msm_cvp_inst *inst)
-{
-	struct msm_cvp_core *core;
-	struct msm_cvp_inst *temp;
-
-	if (!inst || !inst->core)
-		return;
-	core = inst->core;
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(temp, &core->instances, list) {
-		if (temp->session_type == MSM_CVP_CORE) {
-			dprintk(CVP_ERR, "session %#x, vpss %d ise %d\n",
-				hash32_ptr(temp->session),
-				temp->clk_data.vpss_cycles,
-				temp->clk_data.ise_cycles);
-		}
-	}
-	mutex_unlock(&core->lock);
-}
-
-static bool msm_cvp_clock_aggregation(struct msm_cvp_inst *inst,
-		u32 vpss_cycles, u32 ise_cycles)
-{
-	struct msm_cvp_core *core;
-	struct msm_cvp_inst *temp;
-	u32 total_vpss_cycles = 0;
-	u32 total_ise_cycles = 0;
-
-	if (!inst || !inst->core) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return false;
-	}
-	core = inst->core;
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(temp, &core->instances, list) {
-		if (temp->session_type == MSM_CVP_CORE) {
-			total_vpss_cycles += inst->clk_data.vpss_cycles;
-			total_ise_cycles += inst->clk_data.ise_cycles;
-		}
-	}
-	mutex_unlock(&core->lock);
-
-	if ((total_vpss_cycles > MAX_CVP_VPSS_CYCLES) ||
-		(total_ise_cycles > MAX_CVP_ISE_CYCLES))
-		return false;
-
-	return true;
-}
-
 static int msm_cvp_scale_clocks_and_bus(struct msm_cvp_inst *inst)
 {
 	int rc = 0;
@@ -252,7 +97,7 @@
 }
 
 static int msm_cvp_get_session_info(struct msm_cvp_inst *inst,
-		struct msm_cvp_session_info *session)
+		struct cvp_kmd_session_info *session)
 {
 	int rc = 0;
 
@@ -267,238 +112,55 @@
 	return rc;
 }
 
-static int msm_cvp_session_cvp_dfs_config(
+static int msm_cvp_session_get_iova_addr(
 	struct msm_cvp_inst *inst,
-	struct msm_cvp_dfsconfig *dfs_config)
+	struct msm_cvp_internal_buffer *cbuf,
+	unsigned int search_fd, unsigned int search_size,
+	unsigned int *iova,
+	unsigned int *iova_size)
 {
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_dfsconfig vdfs_config;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
-
-	if (!inst || !inst->core || !dfs_config) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-	memcpy(&vdfs_config, dfs_config, sizeof(struct msm_cvp_dfsconfig));
-
-	rc = call_hfi_op(hdev, session_cvp_dfs_config,
-			(void *)inst->session, &vdfs_config);
-	if (!rc) {
-		rc = wait_for_sess_signal_receipt(inst,
-			HAL_SESSION_DFS_CONFIG_CMD_DONE);
-		if (rc)
-			dprintk(CVP_ERR,
-				"%s: wait for signal failed, rc %d\n",
-				__func__, rc);
-	} else {
-		dprintk(CVP_ERR,
-			"%s: Failed in call_hfi_op for session_cvp_dfs_config\n",
-			__func__);
-	}
-	return rc;
-}
-
-static int msm_cvp_session_cvp_dfs_frame(
-	struct msm_cvp_inst *inst,
-	struct msm_cvp_dfsframe *dfs_frame)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_dfsframe vdfs_frame;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
-
-	if (!inst || !inst->core || !dfs_frame) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-	memcpy(&vdfs_frame, dfs_frame, sizeof(vdfs_frame));
-
-	rc = call_hfi_op(hdev, session_cvp_dfs_frame,
-			(void *)inst->session, &vdfs_frame);
-
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: Failed in call_hfi_op for session_cvp_dfs_frame\n",
-			__func__);
-	}
-
-	return rc;
-}
-
-static int msm_cvp_session_cvp_dfs_frame_response(
-	struct msm_cvp_inst *inst,
-	struct msm_cvp_dfsframe *dfs_frame)
-{
-	int rc = 0;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
-
-	if (!inst || !inst->core || !dfs_frame) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	rc = wait_for_sess_signal_receipt(inst,
-			HAL_SESSION_DFS_FRAME_CMD_DONE);
-	if (rc)
-		dprintk(CVP_ERR,
-			"%s: wait for signal failed, rc %d\n",
-			__func__, rc);
-
-	return rc;
-}
-
-
-static int msm_cvp_send_cmd(struct msm_cvp_inst *inst,
-		struct msm_cvp_send_cmd *send_cmd)
-{
-	int rc = 0;
-	bool found;
-	struct hfi_device *hdev;
-	struct msm_cvp_internal_send_cmd  *csend_cmd;
-	//struct cvp_register_buffer vbuf;
-	struct cvp_frame_data input_frame;
-
-	dprintk(CVP_DBG, "%s:: Enter 1", __func__);
-	if (!inst || !inst->core || !send_cmd) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-	//print_client_buffer(CVP_DBG, "register", inst, send_cmd);
+	bool found = false;
 
 	mutex_lock(&inst->cvpbufs.lock);
-	found = false;
-	list_for_each_entry(csend_cmd, &inst->cvpbufs.list, list) {
-		if (csend_cmd->send_cmd.cmd_address_fd ==
-				send_cmd->cmd_address_fd &&
-			csend_cmd->send_cmd.cmd_size == send_cmd->cmd_size) {
+	list_for_each_entry(cbuf, &inst->cvpbufs.list, list) {
+		if (cbuf->buf.fd == search_fd) {
 			found = true;
 			break;
 		}
 	}
 	mutex_unlock(&inst->cvpbufs.lock);
-	if (found)
+	if (!found)
 		return -EINVAL;
 
-	csend_cmd = kzalloc(
-		sizeof(struct msm_cvp_internal_send_cmd), GFP_KERNEL);
-	if (!csend_cmd) {
-		dprintk(CVP_ERR, "%s: csend_cmd alloc failed\n", __func__);
-		return -ENOMEM;
-	}
-	mutex_lock(&inst->cvpbufs.lock);
-	list_add_tail(&csend_cmd->list, &inst->cvpbufs.list);
-	mutex_unlock(&inst->cvpbufs.lock);
-
-	memset(&input_frame, 0, sizeof(struct cvp_frame_data));
-
-	rc = call_hfi_op(hdev, session_cvp_send_cmd,
-			(void *)inst->session, &input_frame);
-	if (rc)
-		goto exit;
-
-	return rc;
-
-exit:
-	if (csend_cmd->smem.device_addr)
-		msm_cvp_smem_unmap_dma_buf(inst, &csend_cmd->smem);
-	mutex_lock(&inst->cvpbufs.lock);
-	list_del(&csend_cmd->list);
-	mutex_unlock(&inst->cvpbufs.lock);
-	kfree(csend_cmd);
-	csend_cmd = NULL;
-
-	return rc;
-}
-static int msm_cvp_request_power(struct msm_cvp_inst *inst,
-		struct msm_cvp_request_power *power)
-{
-	int rc = 0;
-
-	if (!inst || !power) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+	if (search_size != cbuf->buf.size) {
+		dprintk(CVP_ERR,
+			"%s: invalid size %d received for fd = %d\n",
+			__func__, search_size, search_fd);
 		return -EINVAL;
 	}
+	*iova = cbuf->smem.device_addr;
-
-	dprintk(CVP_DBG,
-		"%s: clock_cycles_a %d, clock_cycles_b %d, ddr_bw %d sys_cache_bw %d\n",
-		__func__, power->clock_cycles_a, power->clock_cycles_b,
-		power->ddr_bw, power->sys_cache_bw);
-
-	rc = msm_cvp_clock_aggregation(inst, power->clock_cycles_a,
-			power->clock_cycles_b);
-	if (!rc) {
-		dprintk(CVP_ERR,
-			"%s: session %#x rejected, cycles: vpss %d, ise %d\n",
-			__func__, hash32_ptr(inst->session),
-			power->clock_cycles_a, power->clock_cycles_b);
-		print_cvp_cycles(inst);
-		msm_cvp_comm_kill_session(inst);
-		return -EOVERFLOW;
-	}
-
-	inst->clk_data.min_freq = max(power->clock_cycles_a,
-		power->clock_cycles_b);
-	/* convert client provided bps into kbps as expected by driver */
-	inst->clk_data.ddr_bw = power->ddr_bw / 1000;
-	inst->clk_data.sys_cache_bw = power->sys_cache_bw / 1000;
-	rc = msm_cvp_scale_clocks_and_bus(inst);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: failed to scale clocks and bus for inst %pK (%#x)\n",
-			__func__, inst, hash32_ptr(inst->session));
-		goto exit;
-	}
-
-	if (!inst->clk_data.min_freq && !inst->clk_data.ddr_bw &&
-		!inst->clk_data.sys_cache_bw) {
-		rc = msm_cvp_session_pause(inst);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"%s: failed to pause inst %pK (%#x)\n",
-				__func__, inst, hash32_ptr(inst->session));
-			goto exit;
-		}
-	} else {
-		rc = msm_cvp_session_resume(inst);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"%s: failed to resume inst %pK (%#x)\n",
-				__func__, inst, hash32_ptr(inst->session));
-			goto exit;
-		}
-	}
-
-exit:
-	return rc;
+	*iova_size = cbuf->buf.size;
+	return 0;
 }
 
-static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
-		struct msm_cvp_buffer *buf)
+static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
+	struct cvp_kmd_buffer *buf)
 {
 	int rc = 0;
 	bool found;
-	struct hfi_device *hdev;
 	struct msm_cvp_internal_buffer *cbuf;
-	struct cvp_register_buffer vbuf;
+	struct hal_session *session;
 
 	if (!inst || !inst->core || !buf) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
-	hdev = inst->core->device;
-	print_client_buffer(CVP_DBG, "register", inst, buf);
 
+	session = (struct hal_session *)inst->session;
 	mutex_lock(&inst->cvpbufs.lock);
 	found = false;
 	list_for_each_entry(cbuf, &inst->cvpbufs.list, list) {
-		if (cbuf->buf.index == buf->index &&
-			cbuf->buf.fd == buf->fd &&
+		if (cbuf->buf.fd == buf->fd &&
 			cbuf->buf.offset == buf->offset) {
 			found = true;
 			break;
@@ -519,30 +181,27 @@
 	list_add_tail(&cbuf->list, &inst->cvpbufs.list);
 	mutex_unlock(&inst->cvpbufs.lock);
 
-	memcpy(&cbuf->buf, buf, sizeof(struct msm_cvp_buffer));
+	memcpy(&cbuf->buf, buf, sizeof(struct cvp_kmd_buffer));
 	cbuf->smem.buffer_type = get_hal_buftype(__func__, buf->type);
 	cbuf->smem.fd = buf->fd;
 	cbuf->smem.offset = buf->offset;
 	cbuf->smem.size = buf->size;
+	cbuf->smem.flags = buf->flags;
 	rc = msm_cvp_smem_map_dma_buf(inst, &cbuf->smem);
 	if (rc) {
 		print_client_buffer(CVP_ERR, "map failed", inst, buf);
 		goto exit;
 	}
 
-	memset(&vbuf, 0, sizeof(struct cvp_register_buffer));
-	vbuf.index = buf->index;
-	vbuf.type = get_hal_buftype(__func__, buf->type);
-	vbuf.size = buf->size;
-	vbuf.device_addr = cbuf->smem.device_addr;
-	vbuf.client_data = cbuf->smem.device_addr;
-	vbuf.response_required = true;
-	rc = call_hfi_op(hdev, session_register_buffer,
-			(void *)inst->session, &vbuf);
-	if (rc) {
-		print_cvp_internal_buffer(CVP_ERR,
-			"register failed", inst, cbuf);
-		goto exit;
+	if (buf->index) {
+		rc = cvp_dsp_register_buffer((uint32_t)cbuf->smem.device_addr,
+			buf->index, buf->size, hash32_ptr(session));
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: failed dsp registration for fd=%d rc=%d\n",
+				__func__, buf->fd, rc);
+			goto exit;
+		}
 	}
 	return rc;
 
@@ -558,27 +217,452 @@
 	return rc;
 }
 
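+/*
+ * Dequeue the oldest session message, if any, under sq->lock.  Returns
+ * true when a message was popped or the session is being torn down
+ * (*msg is then NULL), so it doubles as a wait_event_timeout()
+ * predicate.
+ */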
+static bool _cvp_msg_pending(struct msm_cvp_inst *inst,
+			struct cvp_session_queue *sq,
+			struct session_msg **msg)
+{
+	struct session_msg *mptr = NULL;
+	bool result = false;
+
+	spin_lock(&sq->lock);
+	if (!kref_read(&inst->kref)) {
+		/* The session is being deleted */
+		spin_unlock(&sq->lock);
+		*msg = NULL;
+		return true;
+	}
+	result = list_empty(&sq->msgs);
+	if (!result) {
+		mptr = list_first_entry(&sq->msgs, struct session_msg, node);
+		list_del_init(&mptr->node);
+		sq->msg_count--;
+	}
+	spin_unlock(&sq->lock);
+	*msg = mptr;
+	return !result;
+}
+
+
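+/*
+ * Block (up to CVP_MAX_WAIT_TIME ms) for the next message on this
+ * session and copy it into the caller's packet.  A NULL message from
+ * a woken waiter means the session is being deleted.
+ */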
+static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
+			struct cvp_kmd_hfi_packet *out_pkt)
+{
+	unsigned long wait_time;
+	struct session_msg *msg = NULL;
+	struct cvp_session_queue *sq;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s invalid session\n", __func__);
+		return -EINVAL;
+	}
+
+	sq = &inst->session_queue;
+
+	wait_time = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
+
+	if (wait_event_timeout(sq->wq,
+		_cvp_msg_pending(inst, sq, &msg), wait_time) == 0) {
+		dprintk(CVP_ERR, "session queue wait timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if (msg == NULL) {
+		dprintk(CVP_ERR, "%s: session is deleted, no msg\n", __func__);
+		return -EINVAL;
+	}
+
+	memcpy(out_pkt, &msg->pkt, sizeof(struct hfi_msg_session_hdr));
+	kmem_cache_free(inst->session_queue.msg_cache, msg);
+
+	return 0;
+}
+
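+/*
+ * Generic command path: look the packet up in cvp_hfi_defs, patch any
+ * embedded dma-buf fds to device iovas, hand the packet to the HFI
+ * layer, and optionally wait for the matching response signal.
+ */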
+static int msm_cvp_session_process_hfi(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *in_pkt)
+{
+	int i, pkt_idx, rc = 0;
+	struct hfi_device *hdev;
+	struct msm_cvp_internal_buffer *cbuf = NULL;
+	struct buf_desc *buf_ptr;
+	unsigned int offset, buf_num;
+
+	if (!inst || !inst->core || !in_pkt) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
+	if (pkt_idx < 0) {
+		dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
+				in_pkt->pkt_data[0],
+				in_pkt->pkt_data[1]);
+		return pkt_idx;
+	}
+	offset = cvp_hfi_defs[pkt_idx].buf_offset;
+	buf_num = cvp_hfi_defs[pkt_idx].buf_num;
+
+	if (offset != 0 && buf_num != 0) {
+		buf_ptr = (struct buf_desc *)&in_pkt->pkt_data[offset];
+
+		for (i = 0; i < buf_num; i++) {
+			if (!buf_ptr[i].fd)
+				continue;
+
+			rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+						buf_ptr[i].fd,
+						buf_ptr[i].size,
+						&buf_ptr[i].fd,
+						&buf_ptr[i].size);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s: buf %d unregistered. rc=%d\n",
+					__func__, i, rc);
+				return rc;
+			}
+		}
+	}
+	rc = call_hfi_op(hdev, session_send,
+			(void *)inst->session, in_pkt);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: Failed in call_hfi_op %d, %x\n",
+			__func__, in_pkt->pkt_data[0], in_pkt->pkt_data[1]);
+		return rc;
+	}
+
+	if (cvp_hfi_defs[pkt_idx].resp != HAL_NO_RESP) {
+		rc = wait_for_sess_signal_receipt(inst,
+			cvp_hfi_defs[pkt_idx].resp);
+		if (rc)
+			dprintk(CVP_ERR,
+				"%s: wait for signal failed, rc %d %d, %x %d\n",
+				__func__, rc,
+				in_pkt->pkt_data[0],
+				in_pkt->pkt_data[1],
+				cvp_hfi_defs[pkt_idx].resp);
+	}
+
+	return rc;
+}
+
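+/*
+ * Fence worker: waits on the input synx fences, submits the HFI
+ * packet, then signals the output fence once the firmware response
+ * arrives.  Runs in its own kthread so the ioctl can return early.
+ */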
+static int msm_cvp_thread_fence_run(void *data)
+{
+	int i, pkt_idx, rc = 0;
+	unsigned long timeout_ms = 1000;
+	int synx_obj;
+	struct hfi_device *hdev;
+	struct msm_cvp_fence_thread_data *fence_thread_data;
+	struct cvp_kmd_hfi_fence_packet *in_fence_pkt;
+	struct cvp_kmd_hfi_packet *in_pkt;
+	struct msm_cvp_inst *inst;
+	int *fence;
+	struct msm_cvp_internal_buffer *cbuf = NULL;
+	struct buf_desc *buf_ptr;
+	unsigned int offset, buf_num;
+
+	if (!data) {
+		dprintk(CVP_ERR, "%s Wrong input data %pK\n", __func__, data);
+		do_exit(-EINVAL);
+	}
+
+	fence_thread_data = data;
+	inst = cvp_get_inst(get_cvp_core(fence_thread_data->device_id),
+				(void *)fence_thread_data->inst);
+	if (!inst) {
+		dprintk(CVP_ERR, "%s Wrong inst %pK\n", __func__, inst);
+		do_exit(-EINVAL);
+	}
+	in_fence_pkt = (struct cvp_kmd_hfi_fence_packet *)
+					&fence_thread_data->in_fence_pkt;
+	in_pkt = (struct cvp_kmd_hfi_packet *)(in_fence_pkt);
+	fence = (int *)(in_fence_pkt->fence_data);
+	hdev = inst->core->device;
+
+	pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
+	if (pkt_idx < 0) {
+		dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
+			in_pkt->pkt_data[0],
+			in_pkt->pkt_data[1]);
+		do_exit(pkt_idx);
+	}
+
+	offset = cvp_hfi_defs[pkt_idx].buf_offset;
+	buf_num = cvp_hfi_defs[pkt_idx].buf_num;
+
+	if (offset != 0 && buf_num != 0) {
+		buf_ptr = (struct buf_desc *)&in_pkt->pkt_data[offset];
+
+		for (i = 0; i < buf_num; i++) {
+			if (!buf_ptr[i].fd)
+				continue;
+
+			rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+				buf_ptr[i].fd,
+				buf_ptr[i].size,
+				&buf_ptr[i].fd,
+				&buf_ptr[i].size);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s: buf %d unregistered. rc=%d\n",
+					__func__, i, rc);
+				do_exit(rc);
+			}
+		}
+	}
+
+	//wait on synx before signaling HFI
+	switch (fence_thread_data->arg_type) {
+	case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
+	{
+		for (i = 0; i < HFI_DME_BUF_NUM-1; i++) {
+			if (fence[(i<<1)]) {
+				rc = synx_import(fence[(i<<1)],
+					fence[((i<<1)+1)], &synx_obj);
+				if (rc) {
+					dprintk(CVP_ERR,
+						"%s: synx_import failed\n",
+						__func__);
+					do_exit(rc);
+				}
+				rc = synx_wait(synx_obj, timeout_ms);
+				if (rc) {
+					dprintk(CVP_ERR,
+						"%s: synx_wait failed\n",
+						__func__);
+					do_exit(rc);
+				}
+				rc = synx_release(synx_obj);
+				if (rc) {
+					dprintk(CVP_ERR,
+						"%s: synx_release failed\n",
+						__func__);
+					do_exit(rc);
+				}
+			}
+		}
+
+		rc = call_hfi_op(hdev, session_send,
+				(void *)inst->session, in_pkt);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: Failed in call_hfi_op %d, %x\n",
+				__func__, in_pkt->pkt_data[0],
+				in_pkt->pkt_data[1]);
+			do_exit(rc);
+		}
+
+		rc = wait_for_sess_signal_receipt(inst,
+				HAL_SESSION_DME_FRAME_CMD_DONE);
+		if (rc)	{
+			dprintk(CVP_ERR, "%s: wait for signal failed, rc %d\n",
+			__func__, rc);
+			do_exit(rc);
+		}
+		rc = synx_import(fence[((HFI_DME_BUF_NUM-1)<<1)],
+				fence[((HFI_DME_BUF_NUM-1)<<1)+1],
+				&synx_obj);
+		if (rc) {
+			dprintk(CVP_ERR, "%s: synx_import failed\n", __func__);
+			do_exit(rc);
+		}
+		rc = synx_signal(synx_obj, SYNX_STATE_SIGNALED_SUCCESS);
+		if (rc) {
+			dprintk(CVP_ERR, "%s: synx_signal failed\n", __func__);
+			do_exit(rc);
+		}
+		if (synx_get_status(synx_obj) != SYNX_STATE_SIGNALED_SUCCESS) {
+			dprintk(CVP_ERR, "%s: synx_get_status failed\n",
+					__func__);
+			do_exit(-EINVAL);
+		}
+		rc = synx_release(synx_obj);
+		if (rc) {
+			dprintk(CVP_ERR, "%s: synx_release failed\n", __func__);
+			do_exit(rc);
+		}
+		break;
+	}
+	default:
+		dprintk(CVP_ERR, "%s: unknown hfi cmd type 0x%x\n",
+			__func__, fence_thread_data->arg_type);
+		rc = -EINVAL;
+		do_exit(rc);
+		break;
+	}
+
+	do_exit(0);
+}
+
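+/*
+ * NOTE: fence_thread_data is a single static instance, so concurrent
+ * fence submissions can overwrite each other's arguments before the
+ * worker copies them; a per-request allocation would be safer.
+ */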
+static int msm_cvp_session_process_hfifence(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_arg *arg)
+{
+	static int thread_num;
+	struct task_struct *thread;
+	int rc = 0;
+	char thread_fence_name[32];
+
+	dprintk(CVP_DBG, "%s: Enter inst = %pK\n", __func__, inst);
+	if (!inst || !inst->core || !arg) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	thread_num = thread_num + 1;
+	fence_thread_data.inst = inst;
+	fence_thread_data.device_id = (unsigned int)inst->core->id;
+	memcpy(&fence_thread_data.in_fence_pkt, &arg->data.hfi_fence_pkt,
+				sizeof(struct cvp_kmd_hfi_fence_packet));
+	fence_thread_data.arg_type = arg->type;
+	snprintf(thread_fence_name, sizeof(thread_fence_name),
+				"thread_fence_%d", thread_num);
+	thread = kthread_run(msm_cvp_thread_fence_run,
+			&fence_thread_data, thread_fence_name);
+	if (IS_ERR(thread)) {
+		dprintk(CVP_ERR, "%s: failed to start fence thread\n",
+			__func__);
+		rc = PTR_ERR(thread);
+	}
+
+	return rc;
+}
+
+static int msm_cvp_session_cvp_dfs_frame_response(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *dfs_frame)
+{
+	int rc = 0;
+
+	dprintk(CVP_DBG, "%s: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !dfs_frame) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	rc = wait_for_sess_signal_receipt(inst,
+			HAL_SESSION_DFS_FRAME_CMD_DONE);
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: wait for signal failed, rc %d\n",
+			__func__, rc);
+	return rc;
+}
+
+static int msm_cvp_session_cvp_dme_frame_response(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *dme_frame)
+{
+	int rc = 0;
+
+	dprintk(CVP_DBG, "%s: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !dme_frame) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	rc = wait_for_sess_signal_receipt(inst,
+			HAL_SESSION_DME_FRAME_CMD_DONE);
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: wait for signal failed, rc %d\n",
+			__func__, rc);
+	return rc;
+}
+
+static int msm_cvp_session_cvp_persist_response(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *pbuf_cmd)
+{
+	int rc = 0;
+
+	dprintk(CVP_DBG, "%s: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !pbuf_cmd) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	rc = wait_for_sess_signal_receipt(inst,
+			HAL_SESSION_PERSIST_CMD_DONE);
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: wait for signal failed, rc %d\n",
+			__func__, rc);
+	return rc;
+}
+
+
+
+static int msm_cvp_send_cmd(struct msm_cvp_inst *inst,
+		struct cvp_kmd_send_cmd *send_cmd)
+{
+	dprintk(CVP_ERR, "%s: UMD sent a deprecated cmd\n", __func__);
+
+	return 0;
+}
+
+static int msm_cvp_request_power(struct msm_cvp_inst *inst,
+		struct cvp_kmd_request_power *power)
+{
+	int rc = 0;
+
+	if (!inst || !power) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_DBG,
+		"%s: clock_cycles_a %d, clock_cycles_b %d, ddr_bw %d sys_cache_bw %d\n",
+		__func__, power->clock_cycles_a, power->clock_cycles_b,
+		power->ddr_bw, power->sys_cache_bw);
+
+	return rc;
+}
+
+static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
+		struct cvp_kmd_buffer *buf)
+{
+	struct hal_session *session;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	session = (struct hal_session *)inst->session;
+	if (!session) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
+	print_client_buffer(CVP_DBG, "register", inst, buf);
+
+	return msm_cvp_map_buf(inst, buf);
+}
+
 static int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
-		struct msm_cvp_buffer *buf)
+		struct cvp_kmd_buffer *buf)
 {
 	int rc = 0;
 	bool found;
 	struct hfi_device *hdev;
 	struct msm_cvp_internal_buffer *cbuf;
-	struct cvp_unregister_buffer vbuf;
+	struct hal_session *session;
 
 	if (!inst || !inst->core || !buf) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
+
+	session = (struct hal_session *)inst->session;
+	if (!session) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
 	hdev = inst->core->device;
 	print_client_buffer(CVP_DBG, "unregister", inst, buf);
 
 	mutex_lock(&inst->cvpbufs.lock);
 	found = false;
 	list_for_each_entry(cbuf, &inst->cvpbufs.list, list) {
-		if (cbuf->buf.index == buf->index &&
-			cbuf->buf.fd == buf->fd &&
+		if (cbuf->buf.fd == buf->fd &&
 			cbuf->buf.offset == buf->offset) {
 			found = true;
 			break;
@@ -590,23 +674,26 @@
 		return -EINVAL;
 	}
 
-	memset(&vbuf, 0, sizeof(struct cvp_unregister_buffer));
-	vbuf.index = cbuf->buf.index;
-	vbuf.type = get_hal_buftype(__func__, cbuf->buf.type);
-	vbuf.size = cbuf->buf.size;
-	vbuf.device_addr = cbuf->smem.device_addr;
-	vbuf.client_data = cbuf->smem.device_addr;
-	vbuf.response_required = true;
-	rc = call_hfi_op(hdev, session_unregister_buffer,
-			(void *)inst->session, &vbuf);
-	if (rc)
-		print_cvp_internal_buffer(CVP_ERR,
-			"unregister failed", inst, cbuf);
+	if (buf->index) {
+		rc = cvp_dsp_deregister_buffer((uint32_t)cbuf->smem.device_addr,
+			buf->index, buf->size, hash32_ptr(session));
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: failed dsp deregistration for fd = %d rc=%d\n",
+				__func__, buf->fd, rc);
+		}
+	}
+
+	if (cbuf->smem.device_addr)
+		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
+
+	mutex_lock(&inst->cvpbufs.lock);
+	list_del(&cbuf->list);
+	mutex_unlock(&inst->cvpbufs.lock);
+	kfree(cbuf);
 
 	return rc;
 }
 
-int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct msm_cvp_arg *arg)
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
 {
 	int rc = 0;
 
@@ -614,73 +701,96 @@
 		dprintk(CVP_ERR, "%s: invalid args\n", __func__);
 		return -EINVAL;
 	}
-	dprintk(CVP_DBG, "%s:: arg->type = %d", __func__, arg->type);
+	dprintk(CVP_DBG, "%s: arg->type = %#x\n", __func__, arg->type);
 
 	switch (arg->type) {
-	case MSM_CVP_GET_SESSION_INFO:
+	case CVP_KMD_GET_SESSION_INFO:
 	{
-		struct msm_cvp_session_info *session =
-			(struct msm_cvp_session_info *)&arg->data.session;
+		struct cvp_kmd_session_info *session =
+			(struct cvp_kmd_session_info *)&arg->data.session;
 
 		rc = msm_cvp_get_session_info(inst, session);
 		break;
 	}
-	case MSM_CVP_REQUEST_POWER:
+	case CVP_KMD_REQUEST_POWER:
 	{
-		struct msm_cvp_request_power *power =
-			(struct msm_cvp_request_power *)&arg->data.req_power;
+		struct cvp_kmd_request_power *power =
+			(struct cvp_kmd_request_power *)&arg->data.req_power;
 
 		rc = msm_cvp_request_power(inst, power);
 		break;
 	}
-	case MSM_CVP_REGISTER_BUFFER:
+	case CVP_KMD_REGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *buf =
-			(struct msm_cvp_buffer *)&arg->data.regbuf;
+		struct cvp_kmd_buffer *buf =
+			(struct cvp_kmd_buffer *)&arg->data.regbuf;
 
 		rc = msm_cvp_register_buffer(inst, buf);
 		break;
 	}
-	case MSM_CVP_UNREGISTER_BUFFER:
+	case CVP_KMD_UNREGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *buf =
-			(struct msm_cvp_buffer *)&arg->data.unregbuf;
+		struct cvp_kmd_buffer *buf =
+			(struct cvp_kmd_buffer *)&arg->data.unregbuf;
 
 		rc = msm_cvp_unregister_buffer(inst, buf);
 		break;
 	}
-	case MSM_CVP_HFI_SEND_CMD:
+	case CVP_KMD_HFI_SEND_CMD:
 	{
-		//struct msm_cvp_buffer *buf =
-		//(struct msm_cvp_buffer *)&arg->data.unregbuf;
-		struct msm_cvp_send_cmd *send_cmd =
-			(struct msm_cvp_send_cmd *)&arg->data.send_cmd;
+		struct cvp_kmd_send_cmd *send_cmd =
+			(struct cvp_kmd_send_cmd *)&arg->data.send_cmd;
 
 		rc = msm_cvp_send_cmd(inst, send_cmd);
 		break;
 	}
-	case MSM_CVP_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_RECEIVE_MSG_PKT:
 	{
-		struct msm_cvp_dfsconfig *dfsconfig =
-			(struct msm_cvp_dfsconfig *)&arg->data.dfsconfig;
-
-		rc = msm_cvp_session_cvp_dfs_config(inst, dfsconfig);
+		struct cvp_kmd_hfi_packet *out_pkt =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
+
+		rc = msm_cvp_session_receive_hfi(inst, out_pkt);
 		break;
 	}
-	case MSM_CVP_HFI_DFS_FRAME_CMD:
+	case CVP_KMD_SEND_CMD_PKT:
+	case CVP_KMD_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_HFI_DFS_FRAME_CMD:
+	case CVP_KMD_HFI_DME_CONFIG_CMD:
+	case CVP_KMD_HFI_DME_FRAME_CMD:
+	case CVP_KMD_HFI_PERSIST_CMD:
 	{
-		struct msm_cvp_dfsframe *dfsframe =
-			(struct msm_cvp_dfsframe *)&arg->data.dfsframe;
+		struct cvp_kmd_hfi_packet *in_pkt =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
 
-		rc = msm_cvp_session_cvp_dfs_frame(inst, dfsframe);
+		rc = msm_cvp_session_process_hfi(inst, in_pkt);
 		break;
 	}
-	case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE:
 	{
-		struct msm_cvp_dfsframe *dfsframe =
-			(struct msm_cvp_dfsframe *)&arg->data.dfsframe;
+		struct cvp_kmd_hfi_packet *dfs_frame =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
 
-		rc = msm_cvp_session_cvp_dfs_frame_response(inst, dfsframe);
+		rc = msm_cvp_session_cvp_dfs_frame_response(inst, dfs_frame);
+		break;
+	}
+	case CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE:
+	{
+		struct cvp_kmd_hfi_packet *dme_frame =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
+
+		rc = msm_cvp_session_cvp_dme_frame_response(inst, dme_frame);
+		break;
+	}
+	case CVP_KMD_HFI_PERSIST_CMD_RESPONSE:
+	{
+		struct cvp_kmd_hfi_packet *pbuf_cmd =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
+
+		rc = msm_cvp_session_cvp_persist_response(inst, pbuf_cmd);
+		break;
+	}
+	case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
+	{
+		rc = msm_cvp_session_process_hfifence(inst, arg);
 		break;
 	}
 	default:
@@ -693,65 +803,6 @@
 	return rc;
 }
 
-static struct msm_cvp_ctrl msm_cvp_ctrls[] = {
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE,
-		.name = "Secure mode",
-		.type = V4L2_CTRL_TYPE_BUTTON,
-		.minimum = 0,
-		.maximum = 1,
-		.default_value = 0,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-};
-
-int msm_cvp_control_init(struct msm_cvp_inst *inst,
-		const struct v4l2_ctrl_ops *ctrl_ops)
-{
-	return msm_cvp_comm_ctrl_init(inst, msm_cvp_ctrls,
-		ARRAY_SIZE(msm_cvp_ctrls), ctrl_ops);
-}
-
-int msm_cvp_session_pause(struct msm_cvp_inst *inst)
-{
-	int rc;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	rc = call_hfi_op(hdev, session_pause, (void *)inst->session);
-	if (rc)
-		dprintk(CVP_ERR, "%s: failed to pause inst %pK (%#x)\n",
-			__func__, inst, hash32_ptr(inst->session));
-
-	return rc;
-}
-
-int msm_cvp_session_resume(struct msm_cvp_inst *inst)
-{
-	int rc;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	rc = call_hfi_op(hdev, session_resume, (void *)inst->session);
-	if (rc)
-		dprintk(CVP_ERR, "%s: failed to resume inst %pK (%#x)\n",
-			__func__, inst, hash32_ptr(inst->session));
-
-	return rc;
-}
-
 int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
 {
 	int rc = 0;
@@ -770,7 +821,7 @@
 
 	mutex_lock(&inst->cvpbufs.lock);
 	list_for_each_entry_safe(cbuf, temp, &inst->cvpbufs.list, list) {
-		print_cvp_internal_buffer(CVP_ERR, "unregistered", inst, cbuf);
+		print_cvp_internal_buffer(CVP_DBG, "unregistered", inst, cbuf);
 		rc = msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
 		if (rc)
 			dprintk(CVP_ERR, "%s: unmap failed\n", __func__);
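Note: msm_cvp_handle_syscall() above routes every CVP_KMD_* command through one tagged-union argument, so a single fixed-size copy from user space serves all commands. A minimal sketch of the same shape, using a hypothetical two-member union instead of the driver's full cvp_kmd_arg.

/* kmd_arg.c - sketch of a tagged-union command argument (hypothetical layout) */
#include <stdio.h>

enum kmd_type { KMD_GET_SESSION_INFO = 1, KMD_REQUEST_POWER = 2 };

struct kmd_arg {
	enum kmd_type type;
	union {
		struct { unsigned int session_id; } session;
		struct { unsigned int clock_cycles_a, ddr_bw; } req_power;
	} data;
};

static int handle_arg(struct kmd_arg *arg)
{
	switch (arg->type) {
	case KMD_GET_SESSION_INFO:
		printf("session id %u\n", arg->data.session.session_id);
		return 0;
	case KMD_REQUEST_POWER:
		printf("cycles %u ddr_bw %u\n",
		       arg->data.req_power.clock_cycles_a,
		       arg->data.req_power.ddr_bw);
		return 0;
	default:
		return -1;	/* unknown command */
	}
}

int main(void)
{
	struct kmd_arg arg = { .type = KMD_GET_SESSION_INFO,
			       .data.session.session_id = 42 };

	return handle_arg(&arg);
}

Keeping the union inside one struct is what lets the dispatch switch stay flat: one copy of a fixed-size argument covers every command.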
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.h b/drivers/media/platform/msm/cvp/msm_cvp.h
index 56098ee..8b95b8a 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp.h
@@ -10,16 +10,8 @@
 #include "msm_cvp_common.h"
 #include "msm_cvp_clocks.h"
 #include "msm_cvp_debug.h"
-
-void cvp_handle_session_register_buffer_done(enum hal_command_response cmd,
-		void *resp);
-void cvp_handle_session_unregister_buffer_done(enum hal_command_response cmd,
-		void *resp);
-int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct msm_cvp_arg *arg);
+#include "msm_cvp_dsp.h"
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg);
 int msm_cvp_session_init(struct msm_cvp_inst *inst);
 int msm_cvp_session_deinit(struct msm_cvp_inst *inst);
-int msm_cvp_session_pause(struct msm_cvp_inst *inst);
-int msm_cvp_session_resume(struct msm_cvp_inst *inst);
-int msm_cvp_control_init(struct msm_cvp_inst *inst,
-		const struct v4l2_ctrl_ops *ctrl_ops);
 #endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_clocks.c b/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
index aba906b..f22576b 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
@@ -14,18 +14,9 @@
 #define MSM_CVP_MIN_UBWC_COMPRESSION_RATIO (1 << 16)
 #define MSM_CVP_MAX_UBWC_COMPRESSION_RATIO (5 << 16)
 
-static unsigned long msm_cvp_calc_freq_ar50(struct msm_cvp_inst *inst,
-	u32 filled_len);
-static int msm_cvp_decide_work_mode_ar50(struct msm_cvp_inst *inst);
 static unsigned long msm_cvp_calc_freq(struct msm_cvp_inst *inst,
 	u32 filled_len);
 
-struct msm_cvp_core_ops cvp_core_ops_vpu4 = {
-	.calc_freq = msm_cvp_calc_freq_ar50,
-	.decide_work_route = NULL,
-	.decide_work_mode = msm_cvp_decide_work_mode_ar50,
-};
-
 struct msm_cvp_core_ops cvp_core_ops_vpu5 = {
 	.calc_freq = msm_cvp_calc_freq,
 	.decide_work_route = msm_cvp_decide_work_route,
@@ -75,23 +66,6 @@
 	return compression_ratio;
 }
 
-int msm_cvp_get_mbs_per_frame(struct msm_cvp_inst *inst)
-{
-	int height, width;
-
-	if (!inst->in_reconfig) {
-		height = max(inst->prop.height[CAPTURE_PORT],
-			inst->prop.height[OUTPUT_PORT]);
-		width = max(inst->prop.width[CAPTURE_PORT],
-			inst->prop.width[OUTPUT_PORT]);
-	} else {
-		height = inst->reconfig_height;
-		width = inst->reconfig_width;
-	}
-
-	return NUM_MBS_PER_FRAME(height, width);
-}
-
 static int msm_cvp_get_fps(struct msm_cvp_inst *inst)
 {
 	int fps;
@@ -105,94 +79,6 @@
 	return fps;
 }
 
-void cvp_update_recon_stats(struct msm_cvp_inst *inst,
-	struct recon_stats_type *recon_stats)
-{
-	struct recon_buf *binfo;
-	u32 CR = 0, CF = 0;
-	u32 frame_size;
-
-	CR = get_ubwc_compression_ratio(recon_stats->ubwc_stats_info);
-
-	frame_size = (msm_cvp_get_mbs_per_frame(inst) / (32 * 8) * 3) / 2;
-
-	if (frame_size)
-		CF = recon_stats->complexity_number / frame_size;
-	else
-		CF = MSM_CVP_MAX_UBWC_COMPLEXITY_FACTOR;
-
-	mutex_lock(&inst->reconbufs.lock);
-	list_for_each_entry(binfo, &inst->reconbufs.list, list) {
-		if (binfo->buffer_index ==
-				recon_stats->buffer_index) {
-			binfo->CR = CR;
-			binfo->CF = CF;
-		}
-	}
-	mutex_unlock(&inst->reconbufs.lock);
-}
-
-static int fill_dynamic_stats(struct msm_cvp_inst *inst,
-	struct cvp_bus_vote_data *vote_data)
-{
-	struct recon_buf *binfo, *nextb;
-	struct cvp_input_cr_data *temp, *next;
-	u32 min_cf = MSM_CVP_MAX_UBWC_COMPLEXITY_FACTOR, max_cf = 0;
-	u32 min_input_cr = MSM_CVP_MAX_UBWC_COMPRESSION_RATIO,
-		max_input_cr = 0;
-	u32 min_cr = MSM_CVP_MAX_UBWC_COMPRESSION_RATIO, max_cr = 0;
-
-	mutex_lock(&inst->reconbufs.lock);
-	list_for_each_entry_safe(binfo, nextb, &inst->reconbufs.list, list) {
-		if (binfo->CR) {
-			min_cr = min(min_cr, binfo->CR);
-			max_cr = max(max_cr, binfo->CR);
-		}
-		if (binfo->CF) {
-			min_cf = min(min_cf, binfo->CF);
-			max_cf = max(max_cf, binfo->CF);
-		}
-	}
-	mutex_unlock(&inst->reconbufs.lock);
-
-	mutex_lock(&inst->input_crs.lock);
-	list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) {
-		min_input_cr = min(min_input_cr, temp->input_cr);
-		max_input_cr = max(max_input_cr, temp->input_cr);
-	}
-	mutex_unlock(&inst->input_crs.lock);
-
-	/* Sanitize CF values from HW . */
-	max_cf = min_t(u32, max_cf, MSM_CVP_MAX_UBWC_COMPLEXITY_FACTOR);
-	min_cf = max_t(u32, min_cf, MSM_CVP_MIN_UBWC_COMPLEXITY_FACTOR);
-	max_cr = min_t(u32, max_cr, MSM_CVP_MAX_UBWC_COMPRESSION_RATIO);
-	min_cr = max_t(u32, min_cr, MSM_CVP_MIN_UBWC_COMPRESSION_RATIO);
-	max_input_cr = min_t(u32,
-		max_input_cr, MSM_CVP_MAX_UBWC_COMPRESSION_RATIO);
-	min_input_cr = max_t(u32,
-		min_input_cr, MSM_CVP_MIN_UBWC_COMPRESSION_RATIO);
-
-	vote_data->compression_ratio = min_cr;
-	vote_data->complexity_factor = max_cf;
-	vote_data->input_cr = min_input_cr;
-	vote_data->use_dpb_read = false;
-
-	/* Check if driver can vote for lower bus BW */
-	if (inst->clk_data.load < inst->clk_data.load_norm) {
-		vote_data->compression_ratio = max_cr;
-		vote_data->complexity_factor = min_cf;
-		vote_data->input_cr = max_input_cr;
-		vote_data->use_dpb_read = true;
-	}
-
-	dprintk(CVP_PROF,
-		"Input CR = %d Recon CR = %d Complexity Factor = %d\n",
-			vote_data->input_cr, vote_data->compression_ratio,
-			vote_data->complexity_factor);
-
-	return 0;
-}
-
 int msm_cvp_comm_vote_bus(struct msm_cvp_core *core)
 {
 	int rc = 0, vote_data_count = 0, i = 0;
@@ -207,6 +93,7 @@
 	}
 
-	if (!core->resources.bus_devfreq_on)
+	if (!core->resources.bus_devfreq_on) {
+		dprintk(CVP_WARN, "%s: bus devfreq is not enabled for CVP\n",
+			__func__);
 		return 0;
+	}
 
 	hdev = core->device;
@@ -283,8 +170,6 @@
 		vote_data[i].lcu_size = (codec == V4L2_PIX_FMT_HEVC ||
 				codec == V4L2_PIX_FMT_VP9) ? 32 : 16;
 		vote_data[i].b_frames_enabled = false;
-			//msm_cvp_comm_g_ctrl_for_id(inst,
-				//V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES) != 0;
 
 		vote_data[i].fps = msm_cvp_get_fps(inst);
 		if (inst->session_type == MSM_CVP_ENCODER) {
@@ -306,21 +191,14 @@
 
 		if (msm_cvp_comm_get_stream_output_mode(inst) ==
 				HAL_VIDEO_DECODER_PRIMARY) {
-			vote_data[i].color_formats[0] =
-				msm_cvp_comm_get_hal_uncompressed(
-				inst->clk_data.opb_fourcc);
+			vote_data[i].color_formats[0] = HAL_UNUSED_COLOR;
 			vote_data[i].num_formats = 1;
 		} else {
-			vote_data[i].color_formats[0] =
-				msm_cvp_comm_get_hal_uncompressed(
-				inst->clk_data.dpb_fourcc);
-			vote_data[i].color_formats[1] =
-				msm_cvp_comm_get_hal_uncompressed(
-				inst->clk_data.opb_fourcc);
+			vote_data[i].color_formats[0] = HAL_UNUSED_COLOR;
+			vote_data[i].color_formats[1] = HAL_UNUSED_COLOR;
 			vote_data[i].num_formats = 2;
 		}
 		vote_data[i].work_mode = inst->clk_data.work_mode;
-		fill_dynamic_stats(inst, &vote_data[i]);
 
 		if (core->resources.sys_cache_res_set)
 			vote_data[i].use_sys_cache = true;
@@ -455,21 +333,6 @@
 	mutex_unlock(&inst->freqs.lock);
 }
 
-void msm_cvp_clear_freq_entry(struct msm_cvp_inst *inst,
-	u32 device_addr)
-{
-	struct cvp_freq_data *temp, *next;
-
-	mutex_lock(&inst->freqs.lock);
-	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
-		if (temp->device_addr == device_addr)
-			temp->freq = 0;
-	}
-	mutex_unlock(&inst->freqs.lock);
-
-	inst->clk_data.buffer_counter++;
-}
-
 static unsigned long msm_cvp_max_freq(struct msm_cvp_core *core)
 {
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
@@ -494,122 +357,6 @@
 	mutex_unlock(&inst->freqs.lock);
 }
 
-void msm_cvp_comm_free_input_cr_table(struct msm_cvp_inst *inst)
-{
-	struct cvp_input_cr_data *temp, *next;
-
-	mutex_lock(&inst->input_crs.lock);
-	list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) {
-		list_del(&temp->list);
-		kfree(temp);
-	}
-	INIT_LIST_HEAD(&inst->input_crs.list);
-	mutex_unlock(&inst->input_crs.lock);
-}
-
-void msm_cvp_comm_update_input_cr(struct msm_cvp_inst *inst,
-	u32 index, u32 cr)
-{
-	struct cvp_input_cr_data *temp, *next;
-	bool found = false;
-
-	mutex_lock(&inst->input_crs.lock);
-	list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) {
-		if (temp->index == index) {
-			temp->input_cr = cr;
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		temp = kzalloc(sizeof(*temp), GFP_KERNEL);
-		if (!temp)  {
-			dprintk(CVP_WARN, "%s: malloc failure.\n", __func__);
-			goto exit;
-		}
-		temp->index = index;
-		temp->input_cr = cr;
-		list_add_tail(&temp->list, &inst->input_crs.list);
-	}
-exit:
-	mutex_unlock(&inst->input_crs.lock);
-}
-
-static unsigned long msm_cvp_calc_freq_ar50(struct msm_cvp_inst *inst,
-	u32 filled_len)
-{
-	unsigned long freq = 0;
-	unsigned long vpp_cycles = 0, vsp_cycles = 0;
-	u32 vpp_cycles_per_mb;
-	u32 mbs_per_second;
-	struct msm_cvp_core *core = NULL;
-	int i = 0;
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	u64 rate = 0, fps;
-	struct clock_data *dcvs = NULL;
-
-	core = inst->core;
-	dcvs = &inst->clk_data;
-
-	mbs_per_second = msm_cvp_comm_get_inst_load_per_core(inst,
-		LOAD_CALC_NO_QUIRKS);
-
-	fps = msm_cvp_get_fps(inst);
-
-	/*
-	 * Calculate vpp, vsp cycles separately for encoder and decoder.
-	 * Even though, most part is common now, in future it may change
-	 * between them.
-	 */
-
-	if (inst->session_type == MSM_CVP_ENCODER) {
-		vpp_cycles_per_mb = inst->flags & CVP_LOW_POWER ?
-			inst->clk_data.entry->low_power_cycles :
-			inst->clk_data.entry->vpp_cycles;
-
-		vpp_cycles = mbs_per_second * vpp_cycles_per_mb;
-
-		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
-
-		/* 10 / 7 is overhead factor */
-		vsp_cycles += (inst->clk_data.bitrate * 10) / 7;
-	} else if (inst->session_type == MSM_CVP_DECODER) {
-		vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles;
-
-		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
-		/* 10 / 7 is overhead factor */
-		vsp_cycles += ((fps * filled_len * 8) * 10) / 7;
-
-	} else {
-		dprintk(CVP_ERR, "Unknown session type = %s\n", __func__);
-		return msm_cvp_max_freq(inst->core);
-	}
-
-	freq = max(vpp_cycles, vsp_cycles);
-
-	dprintk(CVP_DBG, "Update DCVS Load\n");
-	allowed_clks_tbl = core->resources.allowed_clks_tbl;
-	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
-		rate = allowed_clks_tbl[i].clock_rate;
-		if (rate >= freq)
-			break;
-	}
-
-	dcvs->load_norm = rate;
-	dcvs->load_low = i < (core->resources.allowed_clks_tbl_size - 1) ?
-		allowed_clks_tbl[i+1].clock_rate : dcvs->load_norm;
-	dcvs->load_high = i > 0 ? allowed_clks_tbl[i-1].clock_rate :
-		dcvs->load_norm;
-
-	msm_dcvs_print_dcvs_stats(dcvs);
-
-	dprintk(CVP_PROF, "%s Inst %pK : Filled Len = %d Freq = %lu\n",
-		__func__, inst, filled_len, freq);
-
-	return freq;
-}
-
 static unsigned long msm_cvp_calc_freq(struct msm_cvp_inst *inst,
 	u32 filled_len)
 {
@@ -717,7 +464,7 @@
 	 * keep checking from lowest to highest rate until
 	 * table rate >= requested rate
 	 */
-	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
+	for (i = 0; i < core->resources.allowed_clks_tbl_size; i++) {
 		rate = allowed_clks_tbl[i].clock_rate;
 		if (rate >= freq_core_max)
 			break;
@@ -744,69 +491,6 @@
 	return rc;
 }
 
-int msm_cvp_validate_operating_rate(struct msm_cvp_inst *inst,
-	u32 operating_rate)
-{
-	struct msm_cvp_inst *temp;
-	struct msm_cvp_core *core;
-	unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
-	unsigned long mbs_per_second;
-	int rc = 0;
-	u32 curr_operating_rate = 0;
-
-	if (!inst || !inst->core) {
-		dprintk(CVP_ERR, "%s Invalid args\n", __func__);
-		return -EINVAL;
-	}
-	core = inst->core;
-	curr_operating_rate = inst->clk_data.operating_rate >> 16;
-
-	mutex_lock(&core->lock);
-	max_freq = msm_cvp_max_freq(core);
-	list_for_each_entry(temp, &core->instances, list) {
-		if (temp == inst ||
-				temp->state < MSM_CVP_START_DONE ||
-				temp->state >= MSM_CVP_RELEASE_RESOURCES_DONE)
-			continue;
-
-		freq += temp->clk_data.min_freq;
-	}
-
-	freq_left = max_freq - freq;
-
-	mbs_per_second = msm_cvp_comm_get_inst_load_per_core(inst,
-		LOAD_CALC_NO_QUIRKS);
-
-	cycles = inst->clk_data.entry->vpp_cycles;
-	if (inst->session_type == MSM_CVP_ENCODER)
-		cycles = inst->flags & CVP_LOW_POWER ?
-			inst->clk_data.entry->low_power_cycles :
-			cycles;
-
-	load = cycles * mbs_per_second;
-
-	ops_left = load ? (freq_left / load) : 0;
-
-	operating_rate = operating_rate >> 16;
-
-	if ((curr_operating_rate * (1 + ops_left)) >= operating_rate ||
-			msm_cvp_clock_voting ||
-			inst->clk_data.buffer_counter < DCVS_FTB_WINDOW) {
-		dprintk(CVP_DBG,
-			"Requestd operating rate is valid %u\n",
-			operating_rate);
-		rc = 0;
-	} else {
-		dprintk(CVP_DBG,
-			"Current load is high for requested settings. Cannot set operating rate to %u\n",
-			operating_rate);
-		rc = -EINVAL;
-	}
-	mutex_unlock(&core->lock);
-
-	return rc;
-}
-
 int msm_cvp_comm_scale_clocks(struct msm_cvp_inst *inst)
 {
 	struct msm_video_buffer *temp, *next;
@@ -821,8 +505,10 @@
 		return -EINVAL;
 	}
 
-	if (!inst->core->resources.bus_devfreq_on)
+	if (!inst->core->resources.bus_devfreq_on) {
+		dprintk(CVP_WARN, "%s is not enabled for CVP!\n", __func__);
 		return 0;
+	}
 
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
@@ -911,9 +597,6 @@
 
 int msm_cvp_comm_init_clocks_and_bus_data(struct msm_cvp_inst *inst)
 {
-	int rc = 0, j = 0;
-	int fourcc, count;
-
 	if (!inst || !inst->core) {
 		dprintk(CVP_ERR, "%s Invalid args: Inst = %pK\n",
 				__func__, inst);
@@ -925,115 +608,7 @@
 		return 0;
 	}
 
-	count = inst->core->resources.codec_data_count;
-	fourcc = inst->session_type == MSM_CVP_DECODER ?
-		inst->fmts[OUTPUT_PORT].fourcc :
-		inst->fmts[CAPTURE_PORT].fourcc;
-
-	for (j = 0; j < count; j++) {
-		if (inst->core->resources.codec_data[j].session_type ==
-				inst->session_type &&
-				inst->core->resources.codec_data[j].fourcc ==
-				fourcc) {
-			inst->clk_data.entry =
-				&inst->core->resources.codec_data[j];
-			break;
-		}
-	}
-
-	if (!inst->clk_data.entry) {
-		dprintk(CVP_ERR, "%s No match found\n", __func__);
-		rc = -EINVAL;
-	}
-
-	return rc;
-}
-
-void msm_cvp_clock_data_reset(struct msm_cvp_inst *inst)
-{
-	struct msm_cvp_core *core;
-	int i = 0, rc = 0;
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	u64 total_freq = 0, rate = 0, load;
-	int cycles;
-	struct clock_data *dcvs;
-	struct hal_buffer_requirements *buf_req;
-
-	dprintk(CVP_DBG, "Init DCVS Load\n");
-
-	if (!inst || !inst->core) {
-		dprintk(CVP_ERR, "%s Invalid args: Inst = %pK\n",
-			__func__, inst);
-		return;
-	}
-
-	core = inst->core;
-	dcvs = &inst->clk_data;
-	load = msm_cvp_comm_get_inst_load_per_core(inst, LOAD_CALC_NO_QUIRKS);
-	cycles = inst->clk_data.entry->vpp_cycles;
-	allowed_clks_tbl = core->resources.allowed_clks_tbl;
-	if (inst->session_type == MSM_CVP_ENCODER) {
-		cycles = inst->flags & CVP_LOW_POWER ?
-			inst->clk_data.entry->low_power_cycles :
-			cycles;
-
-		dcvs->buffer_type = HAL_BUFFER_INPUT;
-		dcvs->min_threshold =
-			msm_cvp_get_extra_buff_count(inst, HAL_BUFFER_INPUT);
-		buf_req = get_cvp_buff_req_buffer(inst, HAL_BUFFER_INPUT);
-		if (buf_req)
-			dcvs->max_threshold =
-				buf_req->buffer_count_actual -
-				buf_req->buffer_count_min_host + 2;
-		else
-			dprintk(CVP_ERR,
-				"%s: No bufer req for buffer type %x\n",
-				__func__, HAL_BUFFER_INPUT);
-
-	} else if (inst->session_type == MSM_CVP_DECODER) {
-		dcvs->buffer_type = msm_cvp_comm_get_hal_output_buffer(inst);
-		buf_req = get_cvp_buff_req_buffer(inst, dcvs->buffer_type);
-		if (buf_req)
-			dcvs->max_threshold =
-				buf_req->buffer_count_actual -
-				buf_req->buffer_count_min_host + 2;
-		else
-			dprintk(CVP_ERR,
-				"%s: No bufer req for buffer type %x\n",
-				__func__, dcvs->buffer_type);
-
-		dcvs->min_threshold =
-			msm_cvp_get_extra_buff_count(inst, dcvs->buffer_type);
-	} else {
-		dprintk(CVP_ERR, "%s: invalid session type %#x\n",
-			__func__, inst->session_type);
-		return;
-	}
-
-	total_freq = cycles * load;
-
-	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
-		rate = allowed_clks_tbl[i].clock_rate;
-		if (rate >= total_freq)
-			break;
-	}
-
-	dcvs->load = dcvs->load_norm = rate;
-
-	dcvs->load_low = i < (core->resources.allowed_clks_tbl_size - 1) ?
-		allowed_clks_tbl[i+1].clock_rate : dcvs->load_norm;
-	dcvs->load_high = i > 0 ? allowed_clks_tbl[i-1].clock_rate :
-		dcvs->load_norm;
-
-	inst->clk_data.buffer_counter = 0;
-
-	msm_dcvs_print_dcvs_stats(dcvs);
-
-	rc = msm_cvp_comm_scale_clocks_and_bus(inst);
-
-	if (rc)
-		dprintk(CVP_ERR, "%s Failed to scale Clocks and Bus\n",
-			__func__);
+	return 0;
 }
 
 static bool is_output_buffer(struct msm_cvp_inst *inst,
@@ -1093,340 +668,14 @@
 	return -EINVAL;
 }
 
-static int msm_cvp_decide_work_mode_ar50(struct msm_cvp_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct hal_video_work_mode pdata;
-	struct hal_enable latency;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR,
-			"%s Invalid args: Inst = %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-	if (inst->clk_data.low_latency_mode) {
-		pdata.video_work_mode = CVP_WORK_MODE_1;
-		goto decision_done;
-	}
-
-	if (inst->session_type == MSM_CVP_DECODER) {
-		pdata.video_work_mode = CVP_WORK_MODE_2;
-		switch (inst->fmts[OUTPUT_PORT].fourcc) {
-		case V4L2_PIX_FMT_MPEG2:
-			pdata.video_work_mode = CVP_WORK_MODE_1;
-			break;
-		case V4L2_PIX_FMT_H264:
-		case V4L2_PIX_FMT_HEVC:
-			if (inst->prop.height[OUTPUT_PORT] *
-				inst->prop.width[OUTPUT_PORT] <=
-					1280 * 720)
-				pdata.video_work_mode = CVP_WORK_MODE_1;
-			break;
-		}
-	} else if (inst->session_type == MSM_CVP_ENCODER)
-		pdata.video_work_mode = CVP_WORK_MODE_1;
-	else {
-		return -EINVAL;
-	}
-
-decision_done:
-
-	inst->clk_data.work_mode = pdata.video_work_mode;
-	rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session, HAL_PARAM_VIDEO_WORK_MODE,
-			(void *)&pdata);
-	if (rc)
-		dprintk(CVP_WARN,
-				" Failed to configure Work Mode %pK\n", inst);
-
-	/* For WORK_MODE_1, set Low Latency mode by default to HW. */
-
-	if (inst->session_type == MSM_CVP_ENCODER &&
-			inst->clk_data.work_mode == CVP_WORK_MODE_1) {
-		latency.enable = true;
-		rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session, HAL_PARAM_VENC_LOW_LATENCY,
-			(void *)&latency);
-	}
-
-	rc = msm_cvp_comm_scale_clocks_and_bus(inst);
-
-	return rc;
-}
-
 int msm_cvp_decide_work_mode(struct msm_cvp_inst *inst)
 {
 	return -EINVAL;
 }
 
-static inline int msm_cvp_power_save_mode_enable(struct msm_cvp_inst *inst,
-	bool enable)
-{
-	u32 rc = 0, mbs_per_frame;
-	u32 prop_id = 0;
-	void *pdata = NULL;
-	struct hfi_device *hdev = NULL;
-	enum hal_perf_mode venc_mode;
-	u32 rc_mode = 0;
-
-	hdev = inst->core->device;
-	if (inst->session_type != MSM_CVP_ENCODER) {
-		dprintk(CVP_DBG,
-			"%s : Not an encoder session. Nothing to do\n",
-				__func__);
-		return 0;
-	}
-	mbs_per_frame = msm_cvp_get_mbs_per_frame(inst);
-	if (mbs_per_frame > inst->core->resources.max_hq_mbs_per_frame ||
-		msm_cvp_get_fps(inst) > inst->core->resources.max_hq_fps) {
-		enable = true;
-	}
-	/* Power saving always disabled for CQ RC mode. */
-	rc_mode = msm_cvp_comm_g_ctrl_for_id(inst,
-		V4L2_CID_MPEG_VIDEO_BITRATE_MODE);
-	if (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
-		enable = false;
-
-	prop_id = HAL_CONFIG_VENC_PERF_MODE;
-	venc_mode = enable ? HAL_PERF_MODE_POWER_SAVE :
-		HAL_PERF_MODE_POWER_MAX_QUALITY;
-	pdata = &venc_mode;
-	rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session, prop_id, pdata);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: Failed to set power save mode for inst: %pK\n",
-			__func__, inst);
-		goto fail_power_mode_set;
-	}
-	inst->flags = enable ?
-		inst->flags | CVP_LOW_POWER :
-		inst->flags & ~CVP_LOW_POWER;
-
-	dprintk(CVP_PROF,
-		"Power Save Mode for inst: %pK Enable = %d\n", inst, enable);
-fail_power_mode_set:
-	return rc;
-}
-
-static int msm_cvp_move_core_to_power_save_mode(struct msm_cvp_core *core,
-	u32 core_id)
-{
-	struct msm_cvp_inst *inst = NULL;
-
-	dprintk(CVP_PROF, "Core %d : Moving all inst to LP mode\n", core_id);
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-		if (inst->clk_data.core_id == core_id &&
-			inst->session_type == MSM_CVP_ENCODER)
-			msm_cvp_power_save_mode_enable(inst, true);
-	}
-	mutex_unlock(&core->lock);
-
-	return 0;
-}
-
-static u32 get_core_load(struct msm_cvp_core *core,
-	u32 core_id, bool lp_mode, bool real_time)
-{
-	struct msm_cvp_inst *inst = NULL;
-	u32 current_inst_mbs_per_sec = 0, load = 0;
-	bool real_time_mode = false;
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-		u32 cycles, lp_cycles;
-
-		real_time_mode = inst->flags & CVP_REALTIME ? true : false;
-		if (!(inst->clk_data.core_id & core_id))
-			continue;
-		if (real_time_mode != real_time)
-			continue;
-		if (inst->session_type == MSM_CVP_DECODER) {
-			cycles = lp_cycles = inst->clk_data.entry->vpp_cycles;
-		} else if (inst->session_type == MSM_CVP_ENCODER) {
-			lp_mode |= inst->flags & CVP_LOW_POWER;
-			cycles = lp_mode ?
-				inst->clk_data.entry->low_power_cycles :
-				inst->clk_data.entry->vpp_cycles;
-		} else {
-			continue;
-		}
-		current_inst_mbs_per_sec =
-			msm_cvp_comm_get_inst_load_per_core(inst,
-			LOAD_CALC_NO_QUIRKS);
-		load += current_inst_mbs_per_sec * cycles /
-			inst->clk_data.work_route;
-	}
-	mutex_unlock(&core->lock);
-
-	return load;
-}
-
-int msm_cvp_decide_core_and_power_mode(
-	struct msm_cvp_inst *inst)
-{
-	int rc = 0, hier_mode = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_core *core;
-	unsigned long max_freq, lp_cycles = 0;
-	struct hal_videocores_usage_info core_info;
-	u32 core0_load = 0, core1_load = 0, core0_lp_load = 0,
-		core1_lp_load = 0;
-	u32 current_inst_load = 0, current_inst_lp_load = 0,
-		min_load = 0, min_lp_load = 0;
-	u32 min_core_id, min_lp_core_id;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR,
-			"%s Invalid args: Inst = %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	core = inst->core;
-	hdev = core->device;
-	max_freq = msm_cvp_max_freq(inst->core);
-	inst->clk_data.core_id = 0;
-
-	core0_load = get_core_load(core, CVP_CORE_ID_1, false, true);
-	core1_load = get_core_load(core, CVP_CORE_ID_2, false, true);
-	core0_lp_load = get_core_load(core, CVP_CORE_ID_1, true, true);
-	core1_lp_load = get_core_load(core, CVP_CORE_ID_2, true, true);
-
-	min_load = min(core0_load, core1_load);
-	min_core_id = core0_load < core1_load ?
-		CVP_CORE_ID_1 : CVP_CORE_ID_2;
-	min_lp_load = min(core0_lp_load, core1_lp_load);
-	min_lp_core_id = core0_lp_load < core1_lp_load ?
-		CVP_CORE_ID_1 : CVP_CORE_ID_2;
-
-	lp_cycles = inst->session_type == MSM_CVP_ENCODER ?
-			inst->clk_data.entry->low_power_cycles :
-			inst->clk_data.entry->vpp_cycles;
-	/*
-	 * Incase there is only 1 core enabled, mark it as the core
-	 * with min load. This ensures that this core is selected and
-	 * video session is set to run on the enabled core.
-	 */
-	if (inst->capability.max_video_cores.max <= CVP_CORE_ID_1) {
-		min_core_id = min_lp_core_id = CVP_CORE_ID_1;
-		min_load = core0_load;
-		min_lp_load = core0_lp_load;
-	}
-
-	current_inst_load =
-		(msm_cvp_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS) *
-		inst->clk_data.entry->vpp_cycles)/inst->clk_data.work_route;
-
-	current_inst_lp_load = (msm_cvp_comm_get_inst_load(inst,
-		LOAD_CALC_NO_QUIRKS) * lp_cycles)/inst->clk_data.work_route;
-
-	dprintk(CVP_DBG, "Core 0 RT Load = %d Core 1 RT Load = %d\n",
-		 core0_load, core1_load);
-	dprintk(CVP_DBG, "Core 0 RT LP Load = %d\n",
-		core0_lp_load);
-	dprintk(CVP_DBG, "Core 1 RT LP Load = %d\n",
-		core1_lp_load);
-	dprintk(CVP_DBG, "Max Load = %lu\n", max_freq);
-	dprintk(CVP_DBG, "Current Load = %d Current LP Load = %d\n",
-		current_inst_load, current_inst_lp_load);
-
-	/* Hier mode can be normal HP or Hybrid HP. */
-
-	hier_mode = 0; // msm_cvp_comm_g_ctrl_for_id(inst,
-		// V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS);
-	hier_mode |= 0; //msm_cvp_comm_g_ctrl_for_id(inst,
-		//V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE);
-
-	if (current_inst_load + min_load < max_freq) {
-		inst->clk_data.core_id = min_core_id;
-		dprintk(CVP_DBG,
-			"Selected normally : Core ID = %d\n",
-				inst->clk_data.core_id);
-		msm_cvp_power_save_mode_enable(inst, false);
-	} else if (current_inst_lp_load + min_load < max_freq) {
-		/* Move current instance to LP and return */
-		inst->clk_data.core_id = min_core_id;
-		dprintk(CVP_DBG,
-			"Selected by moving current to LP : Core ID = %d\n",
-				inst->clk_data.core_id);
-		msm_cvp_power_save_mode_enable(inst, true);
-
-	} else if (current_inst_lp_load + min_lp_load < max_freq) {
-		/* Move all instances to LP mode and return */
-		inst->clk_data.core_id = min_lp_core_id;
-		dprintk(CVP_DBG,
-			"Moved all inst's to LP: Core ID = %d\n",
-				inst->clk_data.core_id);
-		msm_cvp_move_core_to_power_save_mode(core, min_lp_core_id);
-	} else {
-		rc = -EINVAL;
-		dprintk(CVP_ERR,
-			"Sorry ... Core Can't support this load\n");
-		return rc;
-	}
-
-	core_info.video_core_enable_mask = inst->clk_data.core_id;
-	dprintk(CVP_DBG,
-		"Core Enable Mask %d\n", core_info.video_core_enable_mask);
-
-	rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session,
-			HAL_PARAM_VIDEO_CORES_USAGE, &core_info);
-	if (rc)
-		dprintk(CVP_WARN,
-				" Failed to configure CORE ID %pK\n", inst);
-
-	rc = msm_cvp_comm_scale_clocks_and_bus(inst);
-
-	msm_cvp_print_core_status(core, CVP_CORE_ID_1);
-	msm_cvp_print_core_status(core, CVP_CORE_ID_2);
-
-	return rc;
-}
-
 void msm_cvp_init_core_clk_ops(struct msm_cvp_core *core)
 {
 	if (!core)
 		return;
-
-	if (core->platform_data->vpu_ver == VPU_VERSION_4)
-		core->core_ops = &cvp_core_ops_vpu4;
-	else
-		core->core_ops = &cvp_core_ops_vpu5;
-}
-
-void msm_cvp_print_core_status(struct msm_cvp_core *core, u32 core_id)
-{
-	struct msm_cvp_inst *inst = NULL;
-
-	dprintk(CVP_PROF, "Instances running on core %u", core_id);
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-
-		if ((inst->clk_data.core_id != core_id) &&
-			(inst->clk_data.core_id != CVP_CORE_ID_3))
-			continue;
-
-		dprintk(CVP_PROF,
-			"inst %pK (%4ux%4u) to (%4ux%4u) %3u %s %s %s %s %lu\n",
-			inst,
-			inst->prop.width[OUTPUT_PORT],
-			inst->prop.height[OUTPUT_PORT],
-			inst->prop.width[CAPTURE_PORT],
-			inst->prop.height[CAPTURE_PORT],
-			inst->prop.fps,
-			inst->session_type == MSM_CVP_ENCODER ? "ENC" : "DEC",
-			inst->clk_data.work_mode == CVP_WORK_MODE_1 ?
-				"WORK_MODE_1" : "WORK_MODE_2",
-			inst->flags & CVP_LOW_POWER ? "LP" : "HQ",
-			inst->flags & CVP_REALTIME ? "RealTime" : "NonRTime",
-			inst->clk_data.min_freq);
-	}
-	mutex_unlock(&core->lock);
+	core->core_ops = &cvp_core_ops_vpu5;
 }
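
The ascending rewrite of the allowed-clocks walk above (the hunk carrying
the "keep checking from lowest to highest rate" comment) assumes
core->resources.allowed_clks_tbl is sorted low to high; the scan then stops
at the smallest table rate that satisfies the request and falls back to the
highest entry when none does. A minimal standalone sketch of that lookup
under the sorted-ascending assumption (names and values here are
hypothetical, not part of the patch):

struct clk_rate_entry { unsigned long clock_rate; };

/* Ascending scan: the first entry able to satisfy 'want' wins;
 * if no entry is large enough, the table maximum is returned.
 */
static unsigned long pick_clock_rate(const struct clk_rate_entry *tbl,
				     int size, unsigned long want)
{
	unsigned long rate = 0;
	int i;

	for (i = 0; i < size; i++) {
		rate = tbl[i].clock_rate;
		if (rate >= want)
			break;
	}
	return rate;
}

With a table of {200000000, 400000000, 800000000} and want = 300000000,
this picks 400000000; any request above the table maximum pins to the
highest entry.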
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_clocks.h b/drivers/media/platform/msm/cvp/msm_cvp_clocks.h
index a6f6e5f..9f20946 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_clocks.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_clocks.h
@@ -14,28 +14,15 @@
 /* extra o/p buffers in case of decoder dcvs */
 #define DCVS_DEC_EXTRA_OUTPUT_BUFFERS 4
 
-void msm_cvp_clock_data_reset(struct msm_cvp_inst *inst);
-int msm_cvp_validate_operating_rate(struct msm_cvp_inst *inst,
-	u32 operating_rate);
 int msm_cvp_get_extra_buff_count(struct msm_cvp_inst *inst,
 	enum hal_buffer buffer_type);
 int msm_cvp_set_clocks(struct msm_cvp_core *core);
 int msm_cvp_comm_vote_bus(struct msm_cvp_core *core);
 int msm_cvp_dcvs_try_enable(struct msm_cvp_inst *inst);
-int msm_cvp_get_mbs_per_frame(struct msm_cvp_inst *inst);
 int msm_cvp_comm_scale_clocks_and_bus(struct msm_cvp_inst *inst);
 int msm_cvp_comm_init_clocks_and_bus_data(struct msm_cvp_inst *inst);
 void msm_cvp_comm_free_freq_table(struct msm_cvp_inst *inst);
 int msm_cvp_decide_work_route(struct msm_cvp_inst *inst);
 int msm_cvp_decide_work_mode(struct msm_cvp_inst *inst);
-int msm_cvp_decide_core_and_power_mode(struct msm_cvp_inst *inst);
-void msm_cvp_print_core_status(struct msm_cvp_core *core, u32 core_id);
-void msm_cvp_clear_freq_entry(struct msm_cvp_inst *inst,
-	u32 device_addr);
-void msm_cvp_comm_free_input_cr_table(struct msm_cvp_inst *inst);
-void msm_cvp_comm_update_input_cr(struct msm_cvp_inst *inst, u32 index,
-	u32 cr);
-void cvp_update_recon_stats(struct msm_cvp_inst *inst,
-	struct recon_stats_type *recon_stats);
 void msm_cvp_init_core_clk_ops(struct msm_cvp_core *core);
 #endif
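
Several of the helpers dropped from this header walked driver lists under
the owning mutex; msm_cvp_comm_free_input_cr_table (removed above) is the
delete-in-loop form of that walk, which needs the _safe iterator so the
current node can be unlinked and freed without breaking traversal. A
condensed sketch of the idiom, with hypothetical entry and field names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct cr_entry {
	struct list_head list;
	u32 index;
	u32 input_cr;
};

/* Free every node. list_for_each_entry_safe() caches the successor
 * before the body runs, so kfree() of the current node is safe.
 */
static void free_cr_list(struct list_head *head, struct mutex *lock)
{
	struct cr_entry *cur, *next;

	mutex_lock(lock);
	list_for_each_entry_safe(cur, next, head, list) {
		list_del(&cur->list);
		kfree(cur);
	}
	mutex_unlock(lock);
}

The plain list_for_each_entry() would dereference the node it just freed
when advancing; the _safe variant exists precisely for delete-in-loop
traversals like this one.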
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c
index 2e97c3e..dff1615 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c
@@ -20,236 +20,7 @@
 	(__p >= __d)\
 )
 
-#define V4L2_EVENT_SEQ_CHANGED_SUFFICIENT \
-		V4L2_EVENT_MSM_CVP_PORT_SETTINGS_CHANGED_SUFFICIENT
-#define V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT \
-		V4L2_EVENT_MSM_CVP_PORT_SETTINGS_CHANGED_INSUFFICIENT
-#define V4L2_EVENT_RELEASE_BUFFER_REFERENCE \
-		V4L2_EVENT_MSM_CVP_RELEASE_BUFFER_REFERENCE
-#define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
-
-const char *const mpeg_video_cvp_extradata[] = {
-	"Extradata none",
-	"Extradata MB Quantization",
-	"Extradata Interlace Video",
-	"Reserved",
-	"Reserved",
-	"Extradata timestamp",
-	"Extradata S3D Frame Packing",
-	"Extradata Frame Rate",
-	"Extradata Panscan Window",
-	"Extradata Recovery point SEI",
-	"Extradata Multislice info",
-	"Extradata number of concealed MB",
-	"Extradata metadata filler",
-	"Extradata input crop",
-	"Extradata digital zoom",
-	"Extradata aspect ratio",
-	"Extradata mpeg2 seqdisp",
-	"Extradata stream userdata",
-	"Extradata frame QP",
-	"Extradata frame bits info",
-	"Extradata LTR",
-	"Extradata macroblock metadata",
-	"Extradata VQZip SEI",
-	"Extradata HDR10+ Metadata",
-	"Extradata ROI QP",
-	"Extradata output crop",
-	"Extradata display colour SEI",
-	"Extradata light level SEI",
-	"Extradata PQ Info",
-	"Extradata display VUI",
-	"Extradata vpx color space",
-	"Extradata UBWC CR stats info",
-};
-
 static void handle_session_error(enum hal_command_response cmd, void *data);
-static void msm_cvp_print_running_insts(struct msm_cvp_core *core);
-
-int msm_cvp_comm_g_ctrl_for_id(struct msm_cvp_inst *inst, int id)
-{
-	int rc = 0;
-	struct v4l2_control ctrl = {
-		.id = id,
-	};
-
-	rc = msm_comm_g_ctrl(inst, &ctrl);
-	return rc ? rc : ctrl.value;
-}
-
-static struct v4l2_ctrl **get_super_cluster(struct msm_cvp_inst *inst,
-				int num_ctrls)
-{
-	int c = 0;
-	struct v4l2_ctrl **cluster = kmalloc(sizeof(struct v4l2_ctrl *) *
-			num_ctrls, GFP_KERNEL);
-
-	if (!cluster || !inst) {
-		kfree(cluster);
-		return NULL;
-	}
-
-	for (c = 0; c < num_ctrls; c++)
-		cluster[c] =  inst->ctrls[c];
-
-	return cluster;
-}
-
-int msm_cvp_comm_hal_to_v4l2(int id, int value)
-{
-	dprintk(CVP_WARN, "Unknown control (%x, %d)\n", id, value);
-	return -EINVAL;
-}
-
-int msm_cvp_comm_get_v4l2_profile(int fourcc, int profile)
-{
-	dprintk(CVP_DBG, "%s : Begin\n", __func__);
-	return -EINVAL;
-}
-
-int msm_cvp_comm_get_v4l2_level(int fourcc, int level)
-{
-	switch (fourcc) {
-	default:
-		dprintk(CVP_WARN, "Unknown codec id %x\n", fourcc);
-		return 0;
-	}
-}
-
-int msm_cvp_comm_ctrl_init(struct msm_cvp_inst *inst,
-		struct msm_cvp_ctrl *drv_ctrls, u32 num_ctrls,
-		const struct v4l2_ctrl_ops *ctrl_ops)
-{
-	int idx = 0;
-	struct v4l2_ctrl_config ctrl_cfg = {0};
-	int ret_val = 0;
-
-	if (!inst || !drv_ctrls || !ctrl_ops || !num_ctrls) {
-		dprintk(CVP_ERR, "%s - invalid input\n", __func__);
-		return -EINVAL;
-	}
-
-	inst->ctrls = kcalloc(num_ctrls, sizeof(struct v4l2_ctrl *),
-				GFP_KERNEL);
-	if (!inst->ctrls) {
-		dprintk(CVP_ERR, "%s - failed to allocate ctrl\n", __func__);
-		return -ENOMEM;
-	}
-
-	ret_val = v4l2_ctrl_handler_init(&inst->ctrl_handler, num_ctrls);
-
-	if (ret_val) {
-		dprintk(CVP_ERR, "CTRL ERR: Control handler init failed, %d\n",
-				inst->ctrl_handler.error);
-		return ret_val;
-	}
-
-	for (; idx < num_ctrls; idx++) {
-		struct v4l2_ctrl *ctrl = NULL;
-
-		if (IS_PRIV_CTRL(drv_ctrls[idx].id)) {
-			/*add private control*/
-			ctrl_cfg.def = drv_ctrls[idx].default_value;
-			ctrl_cfg.flags = 0;
-			ctrl_cfg.id = drv_ctrls[idx].id;
-			ctrl_cfg.max = drv_ctrls[idx].maximum;
-			ctrl_cfg.min = drv_ctrls[idx].minimum;
-			ctrl_cfg.menu_skip_mask =
-				drv_ctrls[idx].menu_skip_mask;
-			ctrl_cfg.name = drv_ctrls[idx].name;
-			ctrl_cfg.ops = ctrl_ops;
-			ctrl_cfg.step = drv_ctrls[idx].step;
-			ctrl_cfg.type = drv_ctrls[idx].type;
-			ctrl_cfg.qmenu = drv_ctrls[idx].qmenu;
-
-			ctrl = v4l2_ctrl_new_custom(&inst->ctrl_handler,
-					&ctrl_cfg, NULL);
-		} else {
-			if (drv_ctrls[idx].type == V4L2_CTRL_TYPE_MENU) {
-				ctrl = v4l2_ctrl_new_std_menu(
-					&inst->ctrl_handler,
-					ctrl_ops,
-					drv_ctrls[idx].id,
-					drv_ctrls[idx].maximum,
-					drv_ctrls[idx].menu_skip_mask,
-					drv_ctrls[idx].default_value);
-			} else {
-				ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler,
-					ctrl_ops,
-					drv_ctrls[idx].id,
-					drv_ctrls[idx].minimum,
-					drv_ctrls[idx].maximum,
-					drv_ctrls[idx].step,
-					drv_ctrls[idx].default_value);
-			}
-		}
-
-		if (!ctrl) {
-			dprintk(CVP_ERR, "%s - invalid ctrl %s\n", __func__,
-				 drv_ctrls[idx].name);
-			return -EINVAL;
-		}
-
-		ret_val = inst->ctrl_handler.error;
-		if (ret_val) {
-			dprintk(CVP_ERR,
-				"Error adding ctrl (%s) to ctrl handle, %d\n",
-				drv_ctrls[idx].name, inst->ctrl_handler.error);
-			return ret_val;
-		}
-
-		ctrl->flags |= drv_ctrls[idx].flags;
-		inst->ctrls[idx] = ctrl;
-	}
-
-	/* Construct a super cluster of all controls */
-	inst->cluster = get_super_cluster(inst, num_ctrls);
-	if (!inst->cluster) {
-		dprintk(CVP_WARN,
-			"Failed to setup super cluster\n");
-		return -EINVAL;
-	}
-
-	v4l2_ctrl_cluster(num_ctrls, inst->cluster);
-
-	return ret_val;
-}
-
-int msm_cvp_comm_ctrl_deinit(struct msm_cvp_inst *inst)
-{
-	if (!inst) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	kfree(inst->ctrls);
-	kfree(inst->cluster);
-	v4l2_ctrl_handler_free(&inst->ctrl_handler);
-
-	return 0;
-}
-
-int msm_cvp_comm_set_stream_output_mode(struct msm_cvp_inst *inst,
-		enum multi_stream mode)
-{
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	if (!is_decode_session(inst)) {
-		dprintk(CVP_DBG, "%s: not a decode session %x\n",
-			__func__, hash32_ptr(inst->session));
-		return -EINVAL;
-	}
-
-	if (mode == HAL_VIDEO_DECODER_SECONDARY)
-		inst->stream_output_mode = HAL_VIDEO_DECODER_SECONDARY;
-	else
-		inst->stream_output_mode = HAL_VIDEO_DECODER_PRIMARY;
-
-	return 0;
-}
 
 enum multi_stream msm_cvp_comm_get_stream_output_mode(struct msm_cvp_inst *inst)
 {
@@ -268,79 +39,10 @@
 		return HAL_VIDEO_DECODER_PRIMARY;
 }
 
-static int msm_cvp_comm_get_mbs_per_sec(struct msm_cvp_inst *inst)
-{
-	int output_port_mbs, capture_port_mbs;
-	int fps;
-
-	output_port_mbs = inst->in_reconfig ?
-			NUM_MBS_PER_FRAME(inst->reconfig_width,
-				inst->reconfig_height) :
-			NUM_MBS_PER_FRAME(inst->prop.width[OUTPUT_PORT],
-				inst->prop.height[OUTPUT_PORT]);
-
-	capture_port_mbs = NUM_MBS_PER_FRAME(inst->prop.width[CAPTURE_PORT],
-		inst->prop.height[CAPTURE_PORT]);
-
-	if ((inst->clk_data.operating_rate >> 16) > inst->prop.fps)
-		fps = (inst->clk_data.operating_rate >> 16) ?
-			inst->clk_data.operating_rate >> 16 : 1;
-	else
-		fps = inst->prop.fps;
-
-	return max(output_port_mbs, capture_port_mbs) * fps;
-}
-
 int msm_cvp_comm_get_inst_load(struct msm_cvp_inst *inst,
 		enum load_calc_quirks quirks)
 {
-	int load = 0;
-
-	mutex_lock(&inst->lock);
-
-	if (!(inst->state >= MSM_CVP_OPEN_DONE &&
-		inst->state < MSM_CVP_STOP_DONE))
-		goto exit;
-
-	load = msm_cvp_comm_get_mbs_per_sec(inst);
-
-	if (is_thumbnail_session(inst)) {
-		if (quirks & LOAD_CALC_IGNORE_THUMBNAIL_LOAD)
-			load = 0;
-	}
-
-	if (is_turbo_session(inst)) {
-		if (!(quirks & LOAD_CALC_IGNORE_TURBO_LOAD))
-			load = inst->core->resources.max_load;
-	}
-
-	/*  Clock and Load calculations for REALTIME/NON-REALTIME
-	 *                        OPERATING RATE SET/NO OPERATING RATE SET
-	 *
-	 *                 | OPERATING RATE SET   | OPERATING RATE NOT SET |
-	 * ----------------|--------------------- |------------------------|
-	 * REALTIME        | load = res * op_rate |  load = res * fps      |
-	 *                 | clk  = res * op_rate |  clk  = res * fps      |
-	 * ----------------|----------------------|------------------------|
-	 * NON-REALTIME    | load = res * 1 fps   |  load = res * 1 fps    |
-	 *                 | clk  = res * op_rate |  clk  = res * fps      |
-	 * ----------------|----------------------|------------------------|
-	 */
-
-	if (!is_realtime_session(inst) &&
-		(quirks & LOAD_CALC_IGNORE_NON_REALTIME_LOAD)) {
-		if (!inst->prop.fps) {
-			dprintk(CVP_INFO, "instance:%pK fps = 0\n", inst);
-			load = 0;
-		} else {
-			load =
-			msm_cvp_comm_get_mbs_per_sec(inst)/inst->prop.fps;
-		}
-	}
-
-exit:
-	mutex_unlock(&inst->lock);
-	return load;
+	return 0;
 }
 
 int msm_cvp_comm_get_inst_load_per_core(struct msm_cvp_inst *inst,
@@ -354,29 +56,6 @@
 	return load;
 }
 
-int msm_cvp_comm_get_load(struct msm_cvp_core *core,
-	enum session_type type, enum load_calc_quirks quirks)
-{
-	struct msm_cvp_inst *inst = NULL;
-	int num_mbs_per_sec = 0;
-
-	if (!core) {
-		dprintk(CVP_ERR, "Invalid args: %pK\n", core);
-		return -EINVAL;
-	}
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-		if (inst->session_type != type)
-			continue;
-
-		num_mbs_per_sec += msm_cvp_comm_get_inst_load(inst, quirks);
-	}
-	mutex_unlock(&core->lock);
-
-	return num_mbs_per_sec;
-}
-
 enum hal_domain get_cvp_hal_domain(int session_type)
 {
 	enum hal_domain domain;
@@ -411,11 +90,6 @@
 	return codec;
 }
 
-enum hal_uncompressed_format msm_cvp_comm_get_hal_uncompressed(int fourcc)
-{
-	return HAL_UNUSED_COLOR;
-}
-
 struct msm_cvp_core *get_cvp_core(int core_id)
 {
 	struct msm_cvp_core *core;
@@ -439,26 +113,6 @@
 	return NULL;
 }
 
-struct msm_cvp_format_constraint *msm_cvp_comm_get_pixel_fmt_constraints(
-	struct msm_cvp_format_constraint fmt[], int size, int fourcc)
-{
-	int i;
-
-	if (!fmt) {
-		dprintk(CVP_ERR, "Invalid inputs, fmt = %pK\n", fmt);
-		return NULL;
-	}
-	for (i = 0; i < size; i++) {
-		if (fmt[i].fourcc == fourcc)
-			break;
-	}
-	if (i == size) {
-		dprintk(CVP_INFO, "Format constraint not found.\n");
-		return NULL;
-	}
-	return &fmt[i];
-}
-
 struct buf_queue *msm_cvp_comm_get_vb2q(
 		struct msm_cvp_inst *inst, enum v4l2_buf_type type)
 {
@@ -585,7 +239,7 @@
 	return inst;
 }
 
-static void cvp_handle_session_dfs_cmd_done(enum hal_command_response cmd,
+static void cvp_handle_session_cmd_done(enum hal_command_response cmd,
 	void *data)
 {
 	struct msm_cvp_cb_cmd_done *response = data;
@@ -614,6 +268,7 @@
 	} else
 		dprintk(CVP_ERR,
 			"%s: Invalid inst cmd response: %d\n", __func__, cmd);
+	cvp_put_inst(inst);
 }
 
 static void handle_session_set_buf_done(enum hal_command_response cmd,
@@ -644,6 +299,7 @@
 		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
 	else
 		dprintk(CVP_ERR, "set_buf_done: invalid cmd: %d\n", cmd);
+	cvp_put_inst(inst);
 
 }
 
@@ -677,7 +333,7 @@
 	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
 		buf = list_entry(ptr, struct internal_buf, list);
 		if (address == buf->smem.device_addr) {
-			dprintk(CVP_DBG, "releasing persist: %x\n",
+			dprintk(CVP_DBG, "releasing persist: %#x\n",
 					buf->smem.device_addr);
 			buf_found = true;
 		}
@@ -685,7 +341,8 @@
 	mutex_unlock(&inst->persistbufs.lock);
 
 	if (!buf_found)
-		dprintk(CVP_ERR, "invalid buffer received from firmware");
+		dprintk(CVP_WARN, "invalid buffer %#x from firmware\n",
+				address);
 	if (IS_HAL_SESSION_CMD(cmd))
 		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
 	else
@@ -818,54 +475,10 @@
 	handle_session_error(cmd, (void *)&response);
 }
 
-static void print_cap(const char *type,
-		struct hal_capability_supported *cap)
-{
-	dprintk(CVP_DBG,
-		"%-24s: %-8d %-8d %-8d\n",
-		type, cap->min, cap->max, cap->step_size);
-}
-
-//static int msm_cvp_comm_update_ctrl(struct msm_cvp_inst *inst,
-//	u32 id, struct hal_capability_supported *capability)
-//{
-//	struct v4l2_ctrl *ctrl = NULL;
-//	int rc = 0;
-//
-//	ctrl = v4l2_ctrl_find(&inst->ctrl_handler, id);
-//	if (ctrl) {
-//		v4l2_ctrl_modify_range(ctrl, capability->min,
-//				capability->max, ctrl->step,
-//				ctrl->default_value);
-//		dprintk(CVP_DBG,
-//			"%s: Updated Range = %lld --> %lld Def value = %lld\n",
-//			ctrl->name, ctrl->minimum, ctrl->maximum,
-//			ctrl->default_value);
-//	} else {
-//		dprintk(CVP_ERR,
-//			"Failed to find Conrol %d\n", id);
-//		rc = -EINVAL;
-//	}
-//
-//	return rc;
-//	}
-
-static void msm_cvp_comm_update_ctrl_limits(struct msm_cvp_inst *inst)
-{
-	//msm_cvp_comm_update_ctrl(inst,
-	//	V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE,
-	//	&inst->capability.frame_rate);
-}
-
 static void handle_session_init_done(enum hal_command_response cmd, void *data)
 {
 	struct msm_cvp_cb_cmd_done *response = data;
 	struct msm_cvp_inst *inst = NULL;
-	struct msm_cvp_capability *capability = NULL;
-	struct hfi_device *hdev;
-	struct msm_cvp_core *core;
-	struct hal_profile_level *profile_level;
-	u32 i, codec;
 
 	if (!response) {
 		dprintk(CVP_ERR,
@@ -903,294 +516,13 @@
 		return;
 	}
 
-	core = inst->core;
-	hdev = inst->core->device;
-	codec = inst->session_type == MSM_CVP_DECODER ?
-			inst->fmts[OUTPUT_PORT].fourcc :
-			inst->fmts[CAPTURE_PORT].fourcc;
-
-	/* check if capabilities are available for this session */
-	for (i = 0; i < CVP_MAX_SESSIONS; i++) {
-		if (core->capabilities[i].codec ==
-				get_cvp_hal_codec(codec) &&
-			core->capabilities[i].domain ==
-				get_cvp_hal_domain(inst->session_type)) {
-			capability = &core->capabilities[i];
-			break;
-		}
-	}
-
-	if (capability) {
-		dprintk(CVP_DBG,
-			"%s: capabilities for codec 0x%x, domain %#x\n",
-			__func__, capability->codec, capability->domain);
-		memcpy(&inst->capability, capability,
-			sizeof(struct msm_cvp_capability));
-	} else {
-		dprintk(CVP_ERR,
-			"Watch out : Some property may fail inst %pK\n", inst);
-		dprintk(CVP_ERR,
-			"Caps N/A for codec 0x%x, domain %#x\n",
-			inst->capability.codec, inst->capability.domain);
-	}
-	inst->capability.pixelprocess_capabilities =
-		call_hfi_op(hdev, get_core_capabilities, hdev->hfi_device_data);
-
-	dprintk(CVP_DBG,
-		"Capability type : min      max      step size\n");
-	print_cap("width", &inst->capability.width);
-	print_cap("height", &inst->capability.height);
-	print_cap("mbs_per_frame", &inst->capability.mbs_per_frame);
-	print_cap("mbs_per_sec", &inst->capability.mbs_per_sec);
-	print_cap("frame_rate", &inst->capability.frame_rate);
-	print_cap("bitrate", &inst->capability.bitrate);
-	print_cap("peak_bitrate", &inst->capability.peakbitrate);
-	print_cap("scale_x", &inst->capability.scale_x);
-	print_cap("scale_y", &inst->capability.scale_y);
-	print_cap("hier_p", &inst->capability.hier_p);
-	print_cap("ltr_count", &inst->capability.ltr_count);
-	print_cap("bframe", &inst->capability.bframe);
-	print_cap("secure_output2_threshold",
-		&inst->capability.secure_output2_threshold);
-	print_cap("hier_b", &inst->capability.hier_b);
-	print_cap("lcu_size", &inst->capability.lcu_size);
-	print_cap("hier_p_hybrid", &inst->capability.hier_p_hybrid);
-	print_cap("mbs_per_sec_low_power",
-		&inst->capability.mbs_per_sec_power_save);
-	print_cap("extradata", &inst->capability.extradata);
-	print_cap("profile", &inst->capability.profile);
-	print_cap("level", &inst->capability.level);
-	print_cap("i_qp", &inst->capability.i_qp);
-	print_cap("p_qp", &inst->capability.p_qp);
-	print_cap("b_qp", &inst->capability.b_qp);
-	print_cap("rc_modes", &inst->capability.rc_modes);
-	print_cap("blur_width", &inst->capability.blur_width);
-	print_cap("blur_height", &inst->capability.blur_height);
-	print_cap("slice_delivery_mode", &inst->capability.slice_delivery_mode);
-	print_cap("slice_bytes", &inst->capability.slice_bytes);
-	print_cap("slice_mbs", &inst->capability.slice_mbs);
-	print_cap("secure", &inst->capability.secure);
-	print_cap("max_num_b_frames", &inst->capability.max_num_b_frames);
-	print_cap("max_video_cores", &inst->capability.max_video_cores);
-	print_cap("max_work_modes", &inst->capability.max_work_modes);
-	print_cap("ubwc_cr_stats", &inst->capability.ubwc_cr_stats);
-
-	dprintk(CVP_DBG, "profile count : %u\n",
-		inst->capability.profile_level.profile_count);
-	for (i = 0; i < inst->capability.profile_level.profile_count; i++) {
-		profile_level =
-			&inst->capability.profile_level.profile_level[i];
-		dprintk(CVP_DBG, "profile : %u\n", profile_level->profile);
-		dprintk(CVP_DBG, "level   : %u\n", profile_level->level);
-	}
-
-	signal_session_msg_receipt(cmd, inst);
-
-	/*
-	 * Update controls after informing session_init_done to avoid
-	 * timeouts.
-	 */
-
-	msm_cvp_comm_update_ctrl_limits(inst);
-	cvp_put_inst(inst);
+	dprintk(CVP_ERR, "%s Session type must be CVP\n", __func__);
+	return;
 }
 
 static void handle_event_change(enum hal_command_response cmd, void *data)
 {
-	struct msm_cvp_inst *inst = NULL;
-	struct msm_cvp_cb_event *event_notify = data;
-	int event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
-	struct v4l2_event seq_changed_event = {0};
-	int rc = 0;
-	struct hfi_device *hdev;
-	u32 *ptr = NULL;
-	struct hal_buffer_requirements *bufreq;
-	int extra_buff_count = 0;
-
-	if (!event_notify) {
-		dprintk(CVP_WARN, "Got an empty event from hfi\n");
-		return;
-	}
-
-	inst = cvp_get_inst(get_cvp_core(event_notify->device_id),
-			event_notify->session_id);
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_WARN, "Got a response for an inactive session\n");
-		goto err_bad_event;
-	}
-	hdev = inst->core->device;
-
-	switch (event_notify->hal_event_type) {
-	case HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES:
-		event = V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
-		break;
-	case HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES:
-		event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
-		break;
-	case HAL_EVENT_RELEASE_BUFFER_REFERENCE:
-	{
-		struct msm_video_buffer *mbuf;
-		u32 planes[VIDEO_MAX_PLANES] = {0};
-
-		dprintk(CVP_DBG,
-			"%s: inst: %pK data_buffer: %x extradata_buffer: %x\n",
-			__func__, inst, event_notify->packet_buffer,
-			event_notify->extra_data_buffer);
-
-		planes[0] = event_notify->packet_buffer;
-		planes[1] = event_notify->extra_data_buffer;
-		mbuf = msm_cvp_comm_get_buffer_using_device_planes(inst,
-				V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, planes);
-		if (!mbuf || !kref_cvp_get_mbuf(inst, mbuf)) {
-			dprintk(CVP_ERR,
-				"%s: data_addr %x, extradata_addr %x not found\n",
-				__func__, planes[0], planes[1]);
-		} else {
-			//handle_release_buffer_reference(inst, mbuf);
-			kref_cvp_put_mbuf(mbuf);
-		}
-		goto err_bad_event;
-	}
-	default:
-		break;
-	}
-
-	/* Bit depth and pic struct changed event are combined into a single
-	 * event (insufficient event) for the userspace. Currently bitdepth
-	 * changes is only for HEVC and interlaced support is for all
-	 * codecs except HEVC
-	 * event data is now as follows:
-	 * u32 *ptr = seq_changed_event.u.data;
-	 * ptr[0] = height
-	 * ptr[1] = width
-	 * ptr[2] = bit depth
-	 * ptr[3] = pic struct (progressive or interlaced)
-	 * ptr[4] = colour space
-	 * ptr[5] = crop_data(top)
-	 * ptr[6] = crop_data(left)
-	 * ptr[7] = crop_data(height)
-	 * ptr[8] = crop_data(width)
-	 * ptr[9] = profile
-	 * ptr[10] = level
-	 */
-
-	inst->entropy_mode = event_notify->entropy_mode;
-	inst->profile = event_notify->profile;
-	inst->level = event_notify->level;
-	inst->prop.crop_info.left =
-		event_notify->crop_data.left;
-	inst->prop.crop_info.top =
-		event_notify->crop_data.top;
-	inst->prop.crop_info.height =
-		event_notify->crop_data.height;
-	inst->prop.crop_info.width =
-		event_notify->crop_data.width;
-	/* HW returns progressive_only flag in pic_struct. */
-	inst->pic_struct =
-		event_notify->pic_struct ?
-		MSM_CVP_PIC_STRUCT_PROGRESSIVE :
-		MSM_CVP_PIC_STRUCT_MAYBE_INTERLACED;
-
-	ptr = (u32 *)seq_changed_event.u.data;
-	ptr[0] = event_notify->height;
-	ptr[1] = event_notify->width;
-	ptr[2] = event_notify->bit_depth;
-	ptr[3] = event_notify->pic_struct;
-	ptr[4] = event_notify->colour_space;
-	ptr[5] = event_notify->crop_data.top;
-	ptr[6] = event_notify->crop_data.left;
-	ptr[7] = event_notify->crop_data.height;
-	ptr[8] = event_notify->crop_data.width;
-	ptr[9] = msm_cvp_comm_get_v4l2_profile(
-		inst->fmts[OUTPUT_PORT].fourcc,
-		event_notify->profile);
-	ptr[10] = msm_cvp_comm_get_v4l2_level(
-		inst->fmts[OUTPUT_PORT].fourcc,
-		event_notify->level);
-
-	dprintk(CVP_DBG,
-		"Event payload: height = %u width = %u profile = %u level = %u\n",
-			event_notify->height, event_notify->width,
-			ptr[9], ptr[10]);
-
-	dprintk(CVP_DBG,
-		"Event payload: bit_depth = %u pic_struct = %u colour_space = %u\n",
-		event_notify->bit_depth, event_notify->pic_struct,
-			event_notify->colour_space);
-
-	dprintk(CVP_DBG,
-		"Event payload: CROP top = %u left = %u Height = %u Width = %u\n",
-			event_notify->crop_data.top,
-			event_notify->crop_data.left,
-			event_notify->crop_data.height,
-			event_notify->crop_data.width);
-
-	mutex_lock(&inst->lock);
-	inst->in_reconfig = true;
-	inst->reconfig_height = event_notify->height;
-	inst->reconfig_width = event_notify->width;
-	inst->bit_depth = event_notify->bit_depth;
-
-	if (msm_cvp_comm_get_stream_output_mode(inst) ==
-			HAL_VIDEO_DECODER_SECONDARY) {
-		bufreq = get_cvp_buff_req_buffer(inst,
-				HAL_BUFFER_OUTPUT);
-		if (!bufreq) {
-			mutex_unlock(&inst->lock);
-			return;
-		}
-
-		/* No need to add extra buffers to DPBs */
-		bufreq->buffer_count_min = event_notify->capture_buf_count;
-		bufreq->buffer_count_min_host = bufreq->buffer_count_min;
-
-		bufreq = get_cvp_buff_req_buffer(inst,
-				HAL_BUFFER_OUTPUT2);
-		if (!bufreq) {
-			mutex_unlock(&inst->lock);
-			return;
-		}
-
-		extra_buff_count = msm_cvp_get_extra_buff_count(inst,
-						HAL_BUFFER_OUTPUT2);
-		bufreq->buffer_count_min = event_notify->capture_buf_count;
-		bufreq->buffer_count_min_host = bufreq->buffer_count_min +
-							extra_buff_count;
-	} else {
-
-		bufreq = get_cvp_buff_req_buffer(inst,
-				HAL_BUFFER_OUTPUT);
-		if (!bufreq) {
-			mutex_unlock(&inst->lock);
-			return;
-		}
-
-		extra_buff_count = msm_cvp_get_extra_buff_count(inst,
-						HAL_BUFFER_OUTPUT);
-		bufreq->buffer_count_min = event_notify->capture_buf_count;
-		bufreq->buffer_count_min_host = bufreq->buffer_count_min +
-							extra_buff_count;
-	}
-	dprintk(CVP_DBG, "%s: buffer[%d] count: min %d min_host %d\n",
-		__func__, bufreq->buffer_type, bufreq->buffer_count_min,
-		bufreq->buffer_count_min_host);
-
-	mutex_unlock(&inst->lock);
-
-	rc = msm_cvp_check_session_supported(inst);
-	if (!rc) {
-		seq_changed_event.type = event;
-		v4l2_event_queue_fh(&inst->event_handler, &seq_changed_event);
-	} else if (rc == -ENOTSUPP) {
-		msm_cvp_queue_v4l2_event(inst,
-				V4L2_EVENT_MSM_CVP_HW_UNSUPPORTED);
-	} else if (rc == -EBUSY) {
-		msm_cvp_queue_v4l2_event(inst,
-				V4L2_EVENT_MSM_CVP_HW_OVERLOAD);
-	}
-
-err_bad_event:
-	cvp_put_inst(inst);
+	dprintk(CVP_WARN, "%s is not supported on CVP!\n", __func__);
 }
 
 static void handle_release_res_done(enum hal_command_response cmd, void *data)
@@ -1215,166 +547,9 @@
 	cvp_put_inst(inst);
 }
 
-void msm_cvp_comm_validate_output_buffers(struct msm_cvp_inst *inst)
-{
-	struct internal_buf *binfo;
-	u32 buffers_owned_by_driver = 0;
-	struct hal_buffer_requirements *output_buf;
-
-	output_buf = get_cvp_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
-
-	if (!output_buf) {
-		dprintk(CVP_DBG,
-			"This output buffer not required, buffer_type: %x\n",
-			HAL_BUFFER_OUTPUT);
-		return;
-	}
-	mutex_lock(&inst->outputbufs.lock);
-	if (list_empty(&inst->outputbufs.list)) {
-		dprintk(CVP_DBG, "%s: no OUTPUT buffers allocated\n",
-			__func__);
-		mutex_unlock(&inst->outputbufs.lock);
-		return;
-	}
-	list_for_each_entry(binfo, &inst->outputbufs.list, list) {
-		if (binfo->buffer_ownership != DRIVER) {
-			dprintk(CVP_DBG,
-				"This buffer is with FW %x\n",
-				binfo->smem.device_addr);
-			continue;
-		}
-		buffers_owned_by_driver++;
-	}
-	mutex_unlock(&inst->outputbufs.lock);
-
-	if (buffers_owned_by_driver != output_buf->buffer_count_actual) {
-		dprintk(CVP_WARN,
-			"OUTPUT Buffer count mismatch %d of %d\n",
-			buffers_owned_by_driver,
-			output_buf->buffer_count_actual);
-		msm_cvp_handle_hw_error(inst->core);
-	}
-}
-
-int msm_cvp_comm_queue_output_buffers(struct msm_cvp_inst *inst)
-{
-	struct internal_buf *binfo;
-	struct hfi_device *hdev;
-	struct cvp_frame_data frame_data = {0};
-	struct hal_buffer_requirements *output_buf, *extra_buf;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-
-	output_buf = get_cvp_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
-	if (!output_buf) {
-		dprintk(CVP_DBG,
-			"This output buffer not required, buffer_type: %x\n",
-			HAL_BUFFER_OUTPUT);
-		return 0;
-	}
-	dprintk(CVP_DBG,
-		"output: num = %d, size = %d\n",
-		output_buf->buffer_count_actual,
-		output_buf->buffer_size);
-
-	extra_buf = get_cvp_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
-
-	mutex_lock(&inst->outputbufs.lock);
-	list_for_each_entry(binfo, &inst->outputbufs.list, list) {
-		if (binfo->buffer_ownership != DRIVER)
-			continue;
-		if (binfo->mark_remove)
-			continue;
-		frame_data.alloc_len = output_buf->buffer_size;
-		frame_data.filled_len = 0;
-		frame_data.offset = 0;
-		frame_data.device_addr = binfo->smem.device_addr;
-		frame_data.flags = 0;
-		frame_data.extradata_addr = binfo->smem.device_addr +
-		output_buf->buffer_size;
-		frame_data.buffer_type = HAL_BUFFER_OUTPUT;
-		frame_data.extradata_size = extra_buf ?
-			extra_buf->buffer_size : 0;
-		//rc = call_hfi_op(hdev, session_ftb,
-		//	(void *) inst->session, &frame_data);
-		binfo->buffer_ownership = FIRMWARE;
-	}
-	mutex_unlock(&inst->outputbufs.lock);
-
-	return 0;
-}
-
 static void handle_session_flush(enum hal_command_response cmd, void *data)
 {
-	struct msm_cvp_cb_cmd_done *response = data;
-	struct msm_cvp_inst *inst;
-	struct v4l2_event flush_event = {0};
-	u32 *ptr = NULL;
-	enum hal_flush flush_type;
-	int rc;
-
-	if (!response) {
-		dprintk(CVP_ERR, "Failed to get valid response for flush\n");
-		return;
-	}
-
-	inst = cvp_get_inst(get_cvp_core(response->device_id),
-			response->session_id);
-	if (!inst) {
-		dprintk(CVP_WARN, "Got a response for an inactive session\n");
-		return;
-	}
-
-	mutex_lock(&inst->flush_lock);
-	if (msm_cvp_comm_get_stream_output_mode(inst) ==
-			HAL_VIDEO_DECODER_SECONDARY) {
-
-		if (!(inst->fmts[OUTPUT_PORT].defer_outputs &&
-				inst->in_reconfig))
-			msm_cvp_comm_validate_output_buffers(inst);
-
-		if (!inst->in_reconfig) {
-			rc = msm_cvp_comm_queue_output_buffers(inst);
-			if (rc) {
-				dprintk(CVP_ERR,
-						"Failed to queue output buffers: %d\n",
-						rc);
-			}
-		}
-	}
-	inst->in_flush = false;
-	flush_event.type = V4L2_EVENT_MSM_CVP_FLUSH_DONE;
-	ptr = (u32 *)flush_event.u.data;
-
-	flush_type = response->data.flush_type;
-	switch (flush_type) {
-	case HAL_FLUSH_INPUT:
-		ptr[0] = V4L2_CMD_FLUSH_OUTPUT;
-		break;
-	case HAL_FLUSH_OUTPUT:
-		ptr[0] = V4L2_CMD_FLUSH_CAPTURE;
-		break;
-	case HAL_FLUSH_ALL:
-		ptr[0] |= V4L2_CMD_FLUSH_CAPTURE;
-		ptr[0] |= V4L2_CMD_FLUSH_OUTPUT;
-		break;
-	default:
-		dprintk(CVP_ERR, "Invalid flush type received!");
-		goto exit;
-	}
-
-	dprintk(CVP_DBG,
-		"Notify flush complete, flush_type: %x\n", flush_type);
-	v4l2_event_queue_fh(&inst->event_handler, &flush_event);
-
-exit:
-	mutex_unlock(&inst->flush_lock);
-	cvp_put_inst(inst);
+	dprintk(CVP_WARN, "%s is not supported on CVP!\n", __func__);
 }
 
 static void handle_session_error(enum hal_command_response cmd, void *data)
@@ -1485,7 +660,7 @@
 	}
 
 	dprintk(CVP_WARN, "SYS_ERROR received for core %pK\n", core);
-	msm_cvp_noc_error_info(core);
+	/* msm_cvp_noc_error_info(core) is disabled as of now */
 	call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
 	list_for_each_entry(inst, &core->instances, list) {
 		dprintk(CVP_WARN,
@@ -1567,102 +742,6 @@
 	cvp_put_inst(inst);
 }
 
-struct vb2_buffer *msm_cvp_comm_get_vb_using_video_buffer(
-		struct msm_cvp_inst *inst, struct msm_video_buffer *mbuf)
-{
-	u32 port = 0;
-	struct vb2_buffer *vb = NULL;
-	struct vb2_queue *q = NULL;
-	bool found = false;
-
-	if (mbuf->vvb.vb2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-		port = CAPTURE_PORT;
-	} else if (mbuf->vvb.vb2_buf.type ==
-			V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		port = OUTPUT_PORT;
-	} else {
-		dprintk(CVP_ERR, "%s: invalid type %d\n",
-			__func__, mbuf->vvb.vb2_buf.type);
-		return NULL;
-	}
-
-	mutex_lock(&inst->bufq[port].lock);
-	found = false;
-	q = &inst->bufq[port].vb2_bufq;
-	if (!q->streaming) {
-		dprintk(CVP_ERR, "port %d is not streaming", port);
-		goto unlock;
-	}
-	list_for_each_entry(vb, &q->queued_list, queued_entry) {
-		if (vb->state != VB2_BUF_STATE_ACTIVE)
-			continue;
-		if (msm_cvp_comm_compare_vb2_planes(inst, mbuf, vb)) {
-			found = true;
-			break;
-		}
-	}
-unlock:
-	mutex_unlock(&inst->bufq[port].lock);
-	if (!found) {
-		print_video_buffer(CVP_ERR, "vb2 not found for", inst, mbuf);
-		return NULL;
-	}
-
-	return vb;
-}
-
-int msm_cvp_comm_vb2_buffer_done(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	struct vb2_buffer *vb2;
-	struct vb2_v4l2_buffer *vbuf;
-	u32 i, port;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK\n",
-			__func__, inst, mbuf);
-		return -EINVAL;
-	}
-
-	if (mbuf->vvb.vb2_buf.type ==
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-		port = CAPTURE_PORT;
-	else if (mbuf->vvb.vb2_buf.type ==
-			V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-		port = OUTPUT_PORT;
-	else
-		return -EINVAL;
-
-	vb2 = msm_cvp_comm_get_vb_using_video_buffer(inst, mbuf);
-	if (!vb2)
-		return -EINVAL;
-
-	/*
-	 * access vb2 buffer under q->lock and if streaming only to
-	 * ensure the buffer was not free'd by vb2 framework while
-	 * we are accessing it here.
-	 */
-	mutex_lock(&inst->bufq[port].lock);
-	if (inst->bufq[port].vb2_bufq.streaming) {
-		vbuf = to_vb2_v4l2_buffer(vb2);
-		vbuf->flags = mbuf->vvb.flags;
-		vb2->timestamp = mbuf->vvb.vb2_buf.timestamp;
-		for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
-			vb2->planes[i].bytesused =
-				mbuf->vvb.vb2_buf.planes[i].bytesused;
-			vb2->planes[i].data_offset =
-				mbuf->vvb.vb2_buf.planes[i].data_offset;
-		}
-		vb2_buffer_done(vb2, VB2_BUF_STATE_DONE);
-	} else {
-		dprintk(CVP_ERR, "%s: port %d is not streaming\n",
-			__func__, port);
-	}
-	mutex_unlock(&inst->bufq[port].lock);
-
-	return 0;
-}
-
 static void handle_operation_config(enum hal_command_response cmd, void *data)
 {
 	dprintk(CVP_ERR,
@@ -1679,11 +758,6 @@
 		return HAL_BUFFER_OUTPUT;
 }
 
-static void handle_dfs(enum hal_command_response cmd, void *data)
-{
-	dprintk(CVP_ERR, "%s: is called\n", __func__);
-}
-
 void cvp_handle_cmd_response(enum hal_command_response cmd, void *data)
 {
 	dprintk(CVP_DBG, "Command response = %d\n", cmd);
@@ -1700,9 +774,6 @@
 	case HAL_SESSION_CVP_OPERATION_CONFIG:
 		handle_operation_config(cmd, data);
 		break;
-	case HAL_SESSION_CVP_DFS:
-		handle_dfs(cmd, data);
-		break;
 	case HAL_SESSION_RELEASE_RESOURCE_DONE:
 		handle_release_res_done(cmd, data);
 		break;
@@ -1729,15 +800,13 @@
 	case HAL_SESSION_RELEASE_BUFFER_DONE:
 		handle_session_release_buf_done(cmd, data);
 		break;
-	case HAL_SESSION_REGISTER_BUFFER_DONE:
-		cvp_handle_session_register_buffer_done(cmd, data);
-		break;
-	case HAL_SESSION_UNREGISTER_BUFFER_DONE:
-		cvp_handle_session_unregister_buffer_done(cmd, data);
-		break;
 	case HAL_SESSION_DFS_CONFIG_CMD_DONE:
 	case HAL_SESSION_DFS_FRAME_CMD_DONE:
-		cvp_handle_session_dfs_cmd_done(cmd, data);
+	case HAL_SESSION_DME_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_FRAME_CMD_DONE:
+	case HAL_SESSION_PERSIST_CMD_DONE:
+		cvp_handle_session_cmd_done(cmd, data);
 		break;
 	default:
 		dprintk(CVP_DBG, "response unhandled: %d\n", cmd);
@@ -1806,37 +875,6 @@
 	return true;
 }
 
-bool cvp_is_batching_allowed(struct msm_cvp_inst *inst)
-{
-	bool allowed = false;
-
-	if (!inst || !inst->core)
-		return false;
-
-	/*
-	 * Enable decode batching based on below conditions
-	 * - platform supports batching
-	 * - decode session and H264/HEVC/VP9 format
-	 * - session resolution <= 1080p
-	 * - low latency not enabled
-	 * - not a thumbnail session
-	 * - UBWC color format
-	 */
-	if (inst->core->resources.decode_batching && is_decode_session(inst) &&
-		(inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264 ||
-		inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC ||
-		inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9) &&
-		(msm_cvp_get_mbs_per_frame(inst) <=
-		NUM_MBS_PER_FRAME(MAX_DEC_BATCH_HEIGHT, MAX_DEC_BATCH_WIDTH)) &&
-		!inst->clk_data.low_latency_mode &&
-		!is_thumbnail_session(inst) &&
-		(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_NV12_UBWC ||
-		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_NV12_TP10_UBWC))
-		allowed = true;
-
-	return allowed;
-}
-
 static int msm_comm_session_abort(struct msm_cvp_inst *inst)
 {
 	int rc = 0, abort_completion = 0;
@@ -1986,7 +1024,7 @@
 	hdev = core->device;
 	mutex_lock(&core->lock);
 	if (core->state >= CVP_CORE_INIT) {
-		dprintk(CVP_INFO, "Video core: %d is already in state: %d\n",
+		dprintk(CVP_DBG, "CVP core: %d is already in state: %d\n",
 				core->id, core->state);
 		goto core_already_inited;
 	}
@@ -2104,96 +1142,6 @@
 	return rc;
 }
 
-static int msm_comm_init_buffer_count(struct msm_cvp_inst *inst)
-{
-	int extra_buff_count = 0;
-	struct hal_buffer_requirements *bufreq;
-	int rc = 0;
-	int port;
-
-	if (!is_decode_session(inst) && !is_encode_session(inst))
-		return 0;
-
-	if (is_decode_session(inst))
-		port = OUTPUT_PORT;
-	else
-		port = CAPTURE_PORT;
-
-	/* Update input buff counts */
-	bufreq = get_cvp_buff_req_buffer(inst, HAL_BUFFER_INPUT);
-	if (!bufreq)
-		return -EINVAL;
-
-	extra_buff_count = msm_cvp_get_extra_buff_count(inst,
-				HAL_BUFFER_INPUT);
-	bufreq->buffer_count_min = inst->fmts[port].input_min_count;
-	/* batching needs minimum batch size count of input buffers */
-	if (inst->core->resources.decode_batching &&
-		is_decode_session(inst) &&
-		bufreq->buffer_count_min < inst->batch.size)
-		bufreq->buffer_count_min = inst->batch.size;
-	bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
-				bufreq->buffer_count_min + extra_buff_count;
-
-	dprintk(CVP_DBG, "%s: %x : input min %d min_host %d actual %d\n",
-		__func__, hash32_ptr(inst->session),
-		bufreq->buffer_count_min, bufreq->buffer_count_min_host,
-		bufreq->buffer_count_actual);
-
-	rc = msm_cvp_comm_set_buffer_count(inst,
-			bufreq->buffer_count_min,
-			bufreq->buffer_count_actual, HAL_BUFFER_INPUT);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: Failed to set in buffer count to FW\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	bufreq = get_cvp_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_INPUT);
-	if (!bufreq)
-		return -EINVAL;
-
-	bufreq->buffer_count_min = inst->fmts[port].input_min_count;
-	bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
-				bufreq->buffer_count_min + extra_buff_count;
-
-	/* Update output buff count */
-	bufreq = get_cvp_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
-	if (!bufreq)
-		return -EINVAL;
-
-	extra_buff_count = msm_cvp_get_extra_buff_count(inst,
-				HAL_BUFFER_OUTPUT);
-	bufreq->buffer_count_min = inst->fmts[port].output_min_count;
-	bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
-		bufreq->buffer_count_min + extra_buff_count;
-
-	dprintk(CVP_DBG, "%s: %x : output min %d min_host %d actual %d\n",
-		__func__, hash32_ptr(inst->session),
-		bufreq->buffer_count_min, bufreq->buffer_count_min_host,
-		bufreq->buffer_count_actual);
-
-	rc = msm_cvp_comm_set_buffer_count(inst,
-		bufreq->buffer_count_min,
-		bufreq->buffer_count_actual, HAL_BUFFER_OUTPUT);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"Failed to set out buffer count to FW\n");
-		return -EINVAL;
-	}
-
-	bufreq = get_cvp_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
-	if (!bufreq)
-		return -EINVAL;
-
-	bufreq->buffer_count_min = inst->fmts[port].output_min_count;
-	bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
-		bufreq->buffer_count_min + extra_buff_count;
-
-	return 0;
-}
-
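/*
 * A standalone sketch of the buffer-count arithmetic the removed
 * msm_comm_init_buffer_count() performed: the host minimum is the format's
 * minimum count plus the extra-buffer count, and decode batching first
 * raises the minimum to the batch size. All values below are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	int fmt_min = 4;	/* inst->fmts[port].input_min_count */
	int extra = 2;		/* msm_cvp_get_extra_buff_count()   */
	int batch_size = 6;	/* inst->batch.size                 */
	int min = fmt_min;

	if (min < batch_size)	/* batching needs a full batch of inputs */
		min = batch_size;

	printf("min %d, min_host/actual %d\n", min, min + extra);
	return 0;
}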
 static int msm_comm_session_init(int flipped_state,
 	struct msm_cvp_inst *inst)
 {
@@ -2239,180 +1187,12 @@
 		rc = -EINVAL;
 		goto exit;
 	}
-
-	rc = msm_comm_init_buffer_count(inst);
-	if (rc) {
-		dprintk(CVP_ERR, "Failed to initialize buff counts\n");
-		goto exit;
-	}
 	change_cvp_inst_state(inst, MSM_CVP_OPEN);
 
 exit:
 	return rc;
 }
 
-static void msm_cvp_print_running_insts(struct msm_cvp_core *core)
-{
-	struct msm_cvp_inst *temp;
-	int op_rate = 0;
-
-	dprintk(CVP_ERR, "Running instances:\n");
-	dprintk(CVP_ERR, "%4s|%4s|%4s|%4s|%4s|%4s\n",
-			"type", "w", "h", "fps", "opr", "prop");
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(temp, &core->instances, list) {
-		if (temp->state >= MSM_CVP_OPEN_DONE &&
-				temp->state < MSM_CVP_STOP_DONE) {
-			char properties[4] = "";
-
-			if (is_thumbnail_session(temp))
-				strlcat(properties, "N", sizeof(properties));
-
-			if (is_turbo_session(temp))
-				strlcat(properties, "T", sizeof(properties));
-
-			if (is_realtime_session(temp))
-				strlcat(properties, "R", sizeof(properties));
-
-			if (temp->clk_data.operating_rate)
-				op_rate = temp->clk_data.operating_rate >> 16;
-			else
-				op_rate = temp->prop.fps;
-
-			dprintk(CVP_ERR, "%4d|%4d|%4d|%4d|%4d|%4s\n",
-					temp->session_type,
-					max(temp->prop.width[CAPTURE_PORT],
-						temp->prop.width[OUTPUT_PORT]),
-					max(temp->prop.height[CAPTURE_PORT],
-						temp->prop.height[OUTPUT_PORT]),
-					temp->prop.fps, op_rate, properties);
-		}
-	}
-	mutex_unlock(&core->lock);
-}
-
-static int msm_cvp_load_resources(int flipped_state,
-	struct msm_cvp_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	int num_mbs_per_sec = 0, max_load_adj = 0;
-	struct msm_cvp_core *core;
-	enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
-		LOAD_CALC_IGNORE_THUMBNAIL_LOAD |
-		LOAD_CALC_IGNORE_NON_REALTIME_LOAD;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR,
-			"%s: inst %pK is in invalid state\n", __func__, inst);
-		return -EINVAL;
-	}
-	if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_LOAD_RESOURCES)) {
-		dprintk(CVP_INFO, "inst: %pK is already in state: %d\n",
-						inst, inst->state);
-		goto exit;
-	}
-	core = inst->core;
-
-	num_mbs_per_sec =
-		msm_cvp_comm_get_load(core, MSM_CVP_DECODER, quirks) +
-		msm_cvp_comm_get_load(core, MSM_CVP_ENCODER, quirks);
-
-	max_load_adj = core->resources.max_load +
-		inst->capability.mbs_per_frame.max;
-
-	if (num_mbs_per_sec > max_load_adj) {
-		dprintk(CVP_ERR, "HW is overloaded, needed: %d max: %d\n",
-			num_mbs_per_sec, max_load_adj);
-		msm_cvp_print_running_insts(core);
-		msm_cvp_comm_kill_session(inst);
-		return -EBUSY;
-	}
-
-	hdev = core->device;
-	dprintk(CVP_DBG, "%s: inst %pK\n", __func__, inst);
-	rc = call_hfi_op(hdev, session_load_res, (void *) inst->session);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"Failed to send load resources\n");
-		goto exit;
-	}
-	change_cvp_inst_state(inst, MSM_CVP_LOAD_RESOURCES);
-exit:
-	return rc;
-}
-
-static int msm_cvp_start(int flipped_state, struct msm_cvp_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR,
-			"%s: inst %pK is in invalid\n", __func__, inst);
-		return -EINVAL;
-	}
-	if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_START)) {
-		dprintk(CVP_INFO,
-			"inst: %pK is already in state: %d\n",
-			inst, inst->state);
-		goto exit;
-	}
-	hdev = inst->core->device;
-	dprintk(CVP_DBG, "%s: inst %pK\n", __func__, inst);
-	rc = call_hfi_op(hdev, session_start, (void *) inst->session);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"Failed to send start\n");
-		goto exit;
-	}
-	change_cvp_inst_state(inst, MSM_CVP_START);
-exit:
-	return rc;
-}
-
-static int msm_cvp_stop(int flipped_state, struct msm_cvp_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR,
-			"%s: inst %pK is in invalid state\n", __func__, inst);
-		return -EINVAL;
-	}
-	if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_STOP)) {
-		dprintk(CVP_INFO,
-			"inst: %pK is already in state: %d\n",
-			inst, inst->state);
-		goto exit;
-	}
-	hdev = inst->core->device;
-	dprintk(CVP_DBG, "%s: inst %pK\n", __func__, inst);
-	rc = call_hfi_op(hdev, session_stop, (void *) inst->session);
-	if (rc) {
-		dprintk(CVP_ERR, "%s: inst %pK session_stop failed\n",
-				__func__, inst);
-		goto exit;
-	}
-	change_cvp_inst_state(inst, MSM_CVP_STOP);
-exit:
-	return rc;
-}
-
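/*
 * The removed msm_cvp_load_resources()/msm_cvp_start()/msm_cvp_stop()
 * helpers above all follow one shape. A hedged, userspace sketch of that
 * pattern (fake_hfi_start() is a stub, not a real HFI call): skip when the
 * flipped state already covers the target, issue the HFI op, and record
 * the new state only on success.
 */
#include <stdio.h>

static int fake_hfi_start(void *session) { (void)session; return 0; }

static int state_step(int flipped_state, int target,
		      int (*hfi_op)(void *), void *session)
{
	if (flipped_state >= target)	/* IS_ALREADY_IN_STATE analogue */
		return 0;
	return hfi_op(session);	/* caller moves to `target` on rc == 0 */
}

int main(void)
{
	printf("rc = %d\n", state_step(1, 2, fake_hfi_start, NULL));
	return 0;
}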
 static int msm_comm_session_close(int flipped_state,
 			struct msm_cvp_inst *inst)
 {
@@ -2489,59 +1269,6 @@
 	return flipped_state;
 }
 
-int msm_cvp_comm_reset_bufreqs(struct msm_cvp_inst *inst,
-	enum hal_buffer buf_type)
-{
-	struct hal_buffer_requirements *bufreqs;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	bufreqs = get_cvp_buff_req_buffer(inst, buf_type);
-	if (!bufreqs) {
-		dprintk(CVP_ERR, "%s: invalid buf type %d\n",
-			__func__, buf_type);
-		return -EINVAL;
-	}
-	bufreqs->buffer_size = bufreqs->buffer_region_size =
-	bufreqs->buffer_count_min = bufreqs->buffer_count_min_host =
-	bufreqs->buffer_count_actual = bufreqs->contiguous =
-	bufreqs->buffer_alignment = 0;
-
-	return 0;
-}
-
-int msm_cvp_comm_copy_bufreqs(struct msm_cvp_inst *inst,
-	enum hal_buffer src_type, enum hal_buffer dst_type)
-{
-	struct hal_buffer_requirements *src_bufreqs;
-	struct hal_buffer_requirements *dst_bufreqs;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	src_bufreqs = get_cvp_buff_req_buffer(inst, src_type);
-	dst_bufreqs = get_cvp_buff_req_buffer(inst, dst_type);
-	if (!src_bufreqs || !dst_bufreqs) {
-		dprintk(CVP_ERR, "%s: invalid buf type: src %d dst %d\n",
-			__func__, src_type, dst_type);
-		return -EINVAL;
-	}
-	dst_bufreqs->buffer_size = src_bufreqs->buffer_size;
-	dst_bufreqs->buffer_region_size = src_bufreqs->buffer_region_size;
-	dst_bufreqs->buffer_count_min = src_bufreqs->buffer_count_min;
-	dst_bufreqs->buffer_count_min_host = src_bufreqs->buffer_count_min_host;
-	dst_bufreqs->buffer_count_actual = src_bufreqs->buffer_count_actual;
-	dst_bufreqs->contiguous = src_bufreqs->contiguous;
-	dst_bufreqs->buffer_alignment = src_bufreqs->buffer_alignment;
-
-	return 0;
-}
-
 struct hal_buffer_requirements *get_cvp_buff_req_buffer(
 		struct msm_cvp_inst *inst, enum hal_buffer buffer_type)
 {
@@ -2599,31 +1326,21 @@
 		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_CVP_LOAD_RESOURCES:
-		rc = msm_cvp_load_resources(flipped_state, inst);
-		if (rc || state <= get_flipped_state(inst->state, state))
-			break;
+		dprintk(CVP_WARN, "Deprecated state LOAD_RESOURCES\n");
 	case MSM_CVP_LOAD_RESOURCES_DONE:
+		dprintk(CVP_WARN, "Deprecated state LOAD_RESOURCES_DONE\n");
 	case MSM_CVP_START:
-		rc = msm_cvp_start(flipped_state, inst);
-		if (rc || state <= get_flipped_state(inst->state, state))
-			break;
+		dprintk(CVP_WARN, "Deprecated state START\n");
 	case MSM_CVP_START_DONE:
-		dprintk(CVP_ERR, "Deprecated HFI packet: START_DONE\n");
-			break;
+		dprintk(CVP_WARN, "Deprecated state START_DONE\n");
 	case MSM_CVP_STOP:
-		rc = msm_cvp_stop(flipped_state, inst);
-		if (rc || state <= get_flipped_state(inst->state, state))
-			break;
+		dprintk(CVP_WARN, "Deprecated state STOP\n");
 	case MSM_CVP_STOP_DONE:
-		rc = wait_for_state(inst, flipped_state, MSM_CVP_STOP_DONE,
-				HAL_SESSION_STOP_DONE);
-		if (rc || state <= get_flipped_state(inst->state, state))
-			break;
-		dprintk(CVP_DBG, "Moving to Stop Done state\n");
+		dprintk(CVP_WARN, "Deprecated state STOP_DONE\n");
 	case MSM_CVP_RELEASE_RESOURCES:
-		dprintk(CVP_ERR, "Deprecated state RELEASE_SOURCES\n");
+		dprintk(CVP_WARN, "Deprecated state RELEASE_SOURCES\n");
 	case MSM_CVP_RELEASE_RESOURCES_DONE:
-		dprintk(CVP_ERR, "Deprecated state RELEASE_SOURCES_DONE\n");
+		dprintk(CVP_WARN, "Deprecated state RELEASE_RESOURCES_DONE\n");
 	case MSM_CVP_CLOSE:
 		rc = msm_comm_session_close(flipped_state, inst);
 		if (rc || state <= get_flipped_state(inst->state, state))
@@ -2661,65 +1378,6 @@
 	return rc;
 }
 
-int msm_cvp_comm_cmd(void *instance, union msm_v4l2_cmd *cmd)
-{
-	return 0;
-}
-
-static void populate_frame_data(struct cvp_frame_data *data,
-		struct msm_video_buffer *mbuf, struct msm_cvp_inst *inst)
-{
-	u64 time_usec;
-	int extra_idx;
-	struct vb2_buffer *vb;
-	struct vb2_v4l2_buffer *vbuf;
-
-	if (!inst || !mbuf || !data) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK %pK\n",
-			__func__, inst, mbuf, data);
-		return;
-	}
-
-	vb = &mbuf->vvb.vb2_buf;
-	vbuf = to_vb2_v4l2_buffer(vb);
-
-	time_usec = vb->timestamp;
-	do_div(time_usec, NSEC_PER_USEC);
-
-	data->alloc_len = vb->planes[0].length;
-	data->device_addr = mbuf->smem[0].device_addr;
-	data->timestamp = time_usec;
-	data->flags = 0;
-	data->clnt_data = data->device_addr;
-
-	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		data->buffer_type = HAL_BUFFER_INPUT;
-		data->filled_len = vb->planes[0].bytesused;
-		data->offset = vb->planes[0].data_offset;
-
-		if (vbuf->flags & V4L2_BUF_FLAG_EOS)
-			data->flags |= HAL_BUFFERFLAG_EOS;
-
-		if (vbuf->flags & V4L2_BUF_FLAG_CODECCONFIG)
-			data->flags |= HAL_BUFFERFLAG_CODECCONFIG;
-
-		if (inst->session_type == MSM_CVP_DECODER) {
-			msm_cvp_comm_fetch_mark_data(&inst->etb_data, vb->index,
-				&data->mark_data, &data->mark_target);
-		}
-
-	} else if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-		data->buffer_type = msm_cvp_comm_get_hal_output_buffer(inst);
-	}
-
-	extra_idx = EXTRADATA_IDX(vb->num_planes);
-	if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
-		data->extradata_addr = mbuf->smem[extra_idx].device_addr;
-		data->extradata_size = vb->planes[extra_idx].length;
-		data->flags |= HAL_BUFFERFLAG_EXTRADATA;
-	}
-}
-
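/*
 * Sketch of the ns-to-us timestamp conversion populate_frame_data() did
 * via do_div(); in userspace the same result is a plain 64-bit divide.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long ns = 33366667ULL;	/* one ~29.97 fps frame */
	unsigned long long us = ns / 1000;	/* NSEC_PER_USEC        */

	printf("%llu ns -> %llu us\n", ns, us);
	return 0;
}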
 enum hal_buffer cvp_get_hal_buffer_type(unsigned int type,
 		unsigned int plane_num)
 {
@@ -2761,512 +1419,6 @@
 	return count;
 }
 
-static int num_pending_qbufs(struct msm_cvp_inst *inst, u32 type)
-{
-	int count = 0;
-	struct msm_video_buffer *mbuf;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return 0;
-	}
-
-	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-		if (mbuf->vvb.vb2_buf.type != type)
-			continue;
-		/* Count only deferred buffers */
-		if (!(mbuf->flags & MSM_CVP_FLAG_DEFERRED))
-			continue;
-		count++;
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
-
-	return count;
-}
-
-static int msm_comm_qbuf_to_hfi(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	enum msm_cvp_debugfs_event e = { 0 };
-	struct cvp_frame_data frame_data = {0};
-
-	if (!inst || !inst->core || !inst->core->device || !mbuf) {
-		dprintk(CVP_ERR, "%s: Invalid arguments\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	populate_frame_data(&frame_data, mbuf, inst);
-	/* mbuf is not deferred anymore */
-	mbuf->flags &= ~MSM_CVP_FLAG_DEFERRED;
-	mbuf->flags |= MSM_CVP_FLAG_QUEUED;
-	msm_cvp_debugfs_update(inst, e);
-
-//err_bad_input:
-	return rc;
-}
-
-int msm_cvp_comm_qbuf(struct msm_cvp_inst *inst, struct msm_video_buffer *mbuf)
-{
-	int rc = 0;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: Invalid arguments\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR, "%s: inst is in bad state\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state != MSM_CVP_START_DONE) {
-		mbuf->flags |= MSM_CVP_FLAG_DEFERRED;
-		print_video_buffer(CVP_DBG, "qbuf deferred", inst, mbuf);
-		return 0;
-	}
-
-	rc = msm_cvp_comm_scale_clocks_and_bus(inst);
-	if (rc)
-		dprintk(CVP_ERR, "%s: scale clocks failed\n", __func__);
-
-	print_video_buffer(CVP_DBG, "qbuf", inst, mbuf);
-	rc = msm_comm_qbuf_to_hfi(inst, mbuf);
-	if (rc)
-		dprintk(CVP_ERR, "%s: Failed qbuf to hfi: %d\n", __func__, rc);
-
-	return rc;
-}
-
-/*
- * msm_comm_qbuf_decode_batch - count the buffers which are not queued to
- *              firmware yet (count includes rbr pending buffers too) and
- *              queue the buffers at once if full batch count reached.
- *              Don't queue rbr pending buffers as they would be queued
- *              when rbr event arrived from firmware.
- */
-int msm_cvp_comm_qbuf_decode_batch(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	int rc = 0;
-	u32 count = 0;
-	struct msm_video_buffer *buf;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: Invalid arguments\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR, "%s: inst is in bad state\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state != MSM_CVP_START_DONE) {
-		mbuf->flags |= MSM_CVP_FLAG_DEFERRED;
-		print_video_buffer(CVP_DBG, "qbuf deferred", inst, mbuf);
-		return 0;
-	}
-
-	/*
-	 * Don't defer buffers initially to avoid startup
-	 * latency increase due to batching
-	 */
-	if (inst->clk_data.buffer_counter > SKIP_BATCH_WINDOW) {
-		count = num_pending_qbufs(inst,
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
-		if (count < inst->batch.size) {
-			print_video_buffer(CVP_DBG,
-				"batch-qbuf deferred", inst, mbuf);
-			return 0;
-		}
-	}
-
-	rc = msm_cvp_comm_scale_clocks_and_bus(inst);
-	if (rc)
-		dprintk(CVP_ERR, "%s: scale clocks failed\n", __func__);
-
-	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry(buf, &inst->registeredbufs.list, list) {
-		/* Don't queue if buffer is not CAPTURE_MPLANE */
-		if (buf->vvb.vb2_buf.type !=
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-			goto loop_end;
-		/* Don't queue if buffer is not a deferred buffer */
-		if (!(buf->flags & MSM_CVP_FLAG_DEFERRED))
-			goto loop_end;
-		/* Don't queue if RBR event is pending on this buffer */
-		if (buf->flags & MSM_CVP_FLAG_RBR_PENDING)
-			goto loop_end;
-
-		print_video_buffer(CVP_DBG, "batch-qbuf", inst, buf);
-		rc = msm_comm_qbuf_to_hfi(inst, buf);
-		if (rc) {
-			dprintk(CVP_ERR, "%s: Failed qbuf to hfi: %d\n",
-				__func__, rc);
-			break;
-		}
-loop_end:
-		/* Queue pending buffers till the current buffer only */
-		if (buf == mbuf)
-			break;
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
-
-	return rc;
-}
-
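/*
 * Sketch of the batching decision documented above: after the startup
 * window, capture buffers stay deferred until a full batch is pending.
 * SKIP_BATCH_WINDOW and the counts below are assumed values.
 */
#include <stdio.h>

#define SKIP_BATCH_WINDOW 5

int main(void)
{
	int buffer_counter = 10;	/* frames processed so far */
	int pending = 3, batch_size = 6;

	if (buffer_counter > SKIP_BATCH_WINDOW && pending < batch_size)
		printf("defer: %d of %d pending\n", pending, batch_size);
	else
		printf("queue the batch now\n");
	return 0;
}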
-int msm_cvp_comm_try_get_prop(struct msm_cvp_inst *inst,
-	enum hal_property ptype, union hal_get_property *hprop)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct getprop_buf *buf;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-	mutex_lock(&inst->sync_lock);
-	if (inst->state < MSM_CVP_OPEN_DONE ||
-			inst->state >= MSM_CVP_CLOSE) {
-
-		/* No need to check inst->state == MSM_CVP_INVALID since
-		 * INVALID is > CLOSE_DONE. When core went to INVALID state,
-		 * we put all the active instances in INVALID. So > CLOSE_DONE
-		 * is enough check to have.
-		 */
-
-		dprintk(CVP_ERR,
-			"In Wrong state to call Buf Req: Inst %pK or Core %pK\n",
-				inst, inst->core);
-		rc = -EAGAIN;
-		mutex_unlock(&inst->sync_lock);
-		goto exit;
-	}
-	mutex_unlock(&inst->sync_lock);
-
-	switch (ptype) {
-	case HAL_PARAM_GET_BUFFER_REQUIREMENTS:
-		rc = call_hfi_op(hdev, session_get_buf_req, inst->session);
-		break;
-	default:
-		rc = -EAGAIN;
-		break;
-	}
-
-	if (rc) {
-		dprintk(CVP_ERR, "Can't query hardware for property: %d\n",
-				rc);
-		goto exit;
-	}
-
-	rc = wait_for_completion_timeout(&inst->completions[
-			SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO)],
-		msecs_to_jiffies(
-			inst->core->resources.msm_cvp_hw_rsp_timeout));
-	if (!rc) {
-		dprintk(CVP_ERR,
-			"%s: Wait interrupted or timed out [%pK]: %d\n",
-			__func__, inst,
-			SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO));
-		msm_cvp_comm_kill_session(inst);
-		rc = -ETIMEDOUT;
-		goto exit;
-	} else {
-		/* wait_for_completion_timeout returns jiffies before expiry */
-		rc = 0;
-	}
-
-	mutex_lock(&inst->pending_getpropq.lock);
-	if (!list_empty(&inst->pending_getpropq.list)) {
-		buf = list_first_entry(&inst->pending_getpropq.list,
-					struct getprop_buf, list);
-		*hprop = *(union hal_get_property *)buf->data;
-		kfree(buf->data);
-		list_del(&buf->list);
-		kfree(buf);
-	} else {
-		dprintk(CVP_ERR, "%s getprop list empty\n", __func__);
-		rc = -EINVAL;
-	}
-	mutex_unlock(&inst->pending_getpropq.lock);
-exit:
-	return rc;
-}
-
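/*
 * The wait above relies on the kernel convention that
 * wait_for_completion_timeout() returns 0 on timeout and the remaining
 * jiffies otherwise, so the driver inverts `rc` into an errno. A userspace
 * sketch of that inversion with a stand-in wait function:
 */
#include <stdio.h>

static unsigned long fake_wait(int completed)
{
	return completed ? 5 : 0;	/* jiffies left, or 0 on timeout */
}

int main(void)
{
	unsigned long left = fake_wait(0);
	int rc = left ? 0 : -1;		/* -ETIMEDOUT in the driver */

	printf("remaining = %lu, rc = %d\n", left, rc);
	return 0;
}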
-int msm_cvp_comm_release_output_buffers(struct msm_cvp_inst *inst,
-	bool force_release)
-{
-	struct msm_smem *handle;
-	struct internal_buf *buf, *dummy;
-	struct cvp_buffer_addr_info buffer_info;
-	int rc = 0;
-	struct msm_cvp_core *core;
-	struct hfi_device *hdev;
-
-	if (!inst) {
-		dprintk(CVP_ERR,
-				"Invalid instance pointer = %pK\n", inst);
-		return -EINVAL;
-	}
-	mutex_lock(&inst->outputbufs.lock);
-	if (list_empty(&inst->outputbufs.list)) {
-		dprintk(CVP_DBG, "%s - No OUTPUT buffers allocated\n",
-			__func__);
-		mutex_unlock(&inst->outputbufs.lock);
-		return 0;
-	}
-	mutex_unlock(&inst->outputbufs.lock);
-
-	core = inst->core;
-	if (!core) {
-		dprintk(CVP_ERR,
-				"Invalid core pointer = %pK\n", core);
-		return -EINVAL;
-	}
-	hdev = core->device;
-	if (!hdev) {
-		dprintk(CVP_ERR, "Invalid device pointer = %pK\n", hdev);
-		return -EINVAL;
-	}
-	mutex_lock(&inst->outputbufs.lock);
-	list_for_each_entry_safe(buf, dummy, &inst->outputbufs.list, list) {
-		handle = &buf->smem;
-
-		if ((buf->buffer_ownership == FIRMWARE) && !force_release) {
-			dprintk(CVP_INFO, "DPB is with f/w. Can't free it\n");
-			/*
-			 * mark this buffer to avoid sending it to video h/w
-			 * again, this buffer belongs to old resolution and
-			 * it will be removed when video h/w returns it.
-			 */
-			buf->mark_remove = true;
-			continue;
-		}
-
-		buffer_info.buffer_size = handle->size;
-		buffer_info.buffer_type = buf->buffer_type;
-		buffer_info.num_buffers = 1;
-		buffer_info.align_device_addr = handle->device_addr;
-		if (inst->buffer_mode_set[CAPTURE_PORT] ==
-				HAL_BUFFER_MODE_STATIC) {
-			buffer_info.response_required = false;
-			rc = call_hfi_op(hdev, session_release_buffers,
-				(void *)inst->session, &buffer_info);
-			if (rc) {
-				dprintk(CVP_WARN,
-					"Rel output buf fail:%x, %d\n",
-					buffer_info.align_device_addr,
-					buffer_info.buffer_size);
-			}
-		}
-
-		list_del(&buf->list);
-		msm_cvp_comm_smem_free(inst, &buf->smem);
-		kfree(buf);
-	}
-
-	mutex_unlock(&inst->outputbufs.lock);
-	return rc;
-}
-
-static enum hal_buffer scratch_buf_sufficient(struct msm_cvp_inst *inst,
-				enum hal_buffer buffer_type)
-{
-	struct hal_buffer_requirements *bufreq = NULL;
-	struct internal_buf *buf;
-	int count = 0;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "%s - invalid param\n", __func__);
-		goto not_sufficient;
-	}
-
-	bufreq = get_cvp_buff_req_buffer(inst, buffer_type);
-	if (!bufreq)
-		goto not_sufficient;
-
-	/* Check if current scratch buffers are sufficient */
-	mutex_lock(&inst->scratchbufs.lock);
-
-	list_for_each_entry(buf, &inst->scratchbufs.list, list) {
-		if (buf->buffer_type == buffer_type &&
-			buf->smem.size >= bufreq->buffer_size)
-			count++;
-	}
-	mutex_unlock(&inst->scratchbufs.lock);
-
-	if (count != bufreq->buffer_count_actual)
-		goto not_sufficient;
-
-	dprintk(CVP_DBG,
-		"Existing scratch buffer is sufficient for buffer type %#x\n",
-		buffer_type);
-
-	return buffer_type;
-
-not_sufficient:
-	return HAL_BUFFER_NONE;
-}
-
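/*
 * The scratch-release path ORs scratch_buf_sufficient() results into a
 * bitmask and later tests `sufficiency & buf->buffer_type` to decide
 * reuse. Standalone sketch with made-up flag values (the real values come
 * from enum hal_buffer):
 */
#include <stdio.h>

enum { SCRATCH = 1 << 0, SCRATCH_1 = 1 << 1, SCRATCH_2 = 1 << 2 };

int main(void)
{
	int sufficiency = 0;			/* HAL_BUFFER_NONE    */

	sufficiency |= SCRATCH;			/* sufficient: reuse  */
	sufficiency |= 0;			/* SCRATCH_1: free it */

	printf("reuse SCRATCH?   %s\n", (sufficiency & SCRATCH) ? "yes" : "no");
	printf("reuse SCRATCH_1? %s\n", (sufficiency & SCRATCH_1) ? "yes" : "no");
	return 0;
}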
-int msm_cvp_comm_release_scratch_buffers(struct msm_cvp_inst *inst,
-					bool check_for_reuse)
-{
-	struct msm_smem *handle;
-	struct internal_buf *buf, *dummy;
-	struct cvp_buffer_addr_info buffer_info;
-	int rc = 0;
-	struct msm_cvp_core *core;
-	struct hfi_device *hdev;
-	enum hal_buffer sufficiency = HAL_BUFFER_NONE;
-
-	if (!inst) {
-		dprintk(CVP_ERR,
-				"Invalid instance pointer = %pK\n", inst);
-		return -EINVAL;
-	}
-	core = inst->core;
-	if (!core) {
-		dprintk(CVP_ERR,
-				"Invalid core pointer = %pK\n", core);
-		return -EINVAL;
-	}
-	hdev = core->device;
-	if (!hdev) {
-		dprintk(CVP_ERR, "Invalid device pointer = %pK\n", hdev);
-		return -EINVAL;
-	}
-
-	if (check_for_reuse) {
-		sufficiency |= scratch_buf_sufficient(inst,
-					HAL_BUFFER_INTERNAL_SCRATCH);
-
-		sufficiency |= scratch_buf_sufficient(inst,
-					HAL_BUFFER_INTERNAL_SCRATCH_1);
-
-		sufficiency |= scratch_buf_sufficient(inst,
-					HAL_BUFFER_INTERNAL_SCRATCH_2);
-	}
-
-	mutex_lock(&inst->scratchbufs.lock);
-	list_for_each_entry_safe(buf, dummy, &inst->scratchbufs.list, list) {
-		handle = &buf->smem;
-		buffer_info.buffer_size = handle->size;
-		buffer_info.buffer_type = buf->buffer_type;
-		buffer_info.num_buffers = 1;
-		buffer_info.align_device_addr = handle->device_addr;
-		buffer_info.response_required = true;
-		rc = call_hfi_op(hdev, session_release_buffers,
-				(void *)inst->session, &buffer_info);
-		if (!rc) {
-			mutex_unlock(&inst->scratchbufs.lock);
-			rc = wait_for_sess_signal_receipt(inst,
-				HAL_SESSION_RELEASE_BUFFER_DONE);
-			if (rc)
-				dprintk(CVP_WARN,
-					"%s: wait for signal failed, rc %d\n",
-					__func__, rc);
-			mutex_lock(&inst->scratchbufs.lock);
-		} else {
-			dprintk(CVP_WARN,
-				"Rel scrtch buf fail:%x, %d\n",
-				buffer_info.align_device_addr,
-				buffer_info.buffer_size);
-		}
-
-		/*If scratch buffers can be reused, do not free the buffers*/
-		if (sufficiency & buf->buffer_type)
-			continue;
-
-		list_del(&buf->list);
-		msm_cvp_comm_smem_free(inst, handle);
-		kfree(buf);
-	}
-
-	mutex_unlock(&inst->scratchbufs.lock);
-	return rc;
-}
-
-void msm_cvp_comm_release_eos_buffers(struct msm_cvp_inst *inst)
-{
-	struct eos_buf *buf, *next;
-
-	if (!inst) {
-		dprintk(CVP_ERR,
-			"Invalid instance pointer = %pK\n", inst);
-		return;
-	}
-
-	mutex_lock(&inst->eosbufs.lock);
-	list_for_each_entry_safe(buf, next, &inst->eosbufs.list, list) {
-		list_del(&buf->list);
-		msm_cvp_comm_smem_free(inst, &buf->smem);
-		kfree(buf);
-	}
-	INIT_LIST_HEAD(&inst->eosbufs.list);
-	mutex_unlock(&inst->eosbufs.lock);
-}
-
-
-int msm_cvp_comm_release_recon_buffers(struct msm_cvp_inst *inst)
-{
-	struct recon_buf *buf, *next;
-
-	if (!inst) {
-		dprintk(CVP_ERR,
-			"Invalid instance pointer = %pK\n", inst);
-		return -EINVAL;
-	}
-
-	mutex_lock(&inst->reconbufs.lock);
-	list_for_each_entry_safe(buf, next, &inst->reconbufs.list, list) {
-		list_del(&buf->list);
-		kfree(buf);
-	}
-	INIT_LIST_HEAD(&inst->reconbufs.list);
-	mutex_unlock(&inst->reconbufs.lock);
-
-	return 0;
-}
-
-int msm_cvp_comm_try_set_prop(struct msm_cvp_inst *inst,
-	enum hal_property ptype, void *pdata)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "Invalid input: %pK\n", inst);
-		return -EINVAL;
-	}
-
-	if (!inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	mutex_lock(&inst->sync_lock);
-	if (inst->state < MSM_CVP_OPEN_DONE || inst->state >= MSM_CVP_CLOSE) {
-		dprintk(CVP_ERR, "Not in proper state to set property\n");
-		rc = -EAGAIN;
-		goto exit;
-	}
-	rc = call_hfi_op(hdev, session_set_property, (void *)inst->session,
-			ptype, pdata);
-	if (rc)
-		dprintk(CVP_ERR, "Failed to set hal property for framesize\n");
-exit:
-	mutex_unlock(&inst->sync_lock);
-	return rc;
-}
-
 int msm_cvp_comm_set_buffer_count(struct msm_cvp_inst *inst,
 	int host_count, int act_count, enum hal_buffer type)
 {
@@ -3291,218 +1443,6 @@
 	return rc;
 }
 
-static void msm_comm_flush_in_invalid_state(struct msm_cvp_inst *inst)
-{
-	struct list_head *ptr, *next;
-	enum cvp_ports ports[] = {OUTPUT_PORT, CAPTURE_PORT};
-	int c = 0;
-
-	/* before flush ensure venus released all buffers */
-	msm_cvp_comm_try_state(inst, MSM_CVP_RELEASE_RESOURCES_DONE);
-
-	for (c = 0; c < ARRAY_SIZE(ports); ++c) {
-		enum cvp_ports port = ports[c];
-
-		mutex_lock(&inst->bufq[port].lock);
-		list_for_each_safe(ptr, next,
-				&inst->bufq[port].vb2_bufq.queued_list) {
-			struct vb2_buffer *vb = container_of(ptr,
-					struct vb2_buffer, queued_entry);
-			if (vb->state == VB2_BUF_STATE_ACTIVE) {
-				vb->planes[0].bytesused = 0;
-				print_cvp_vb2_buffer(CVP_ERR,
-					"flush in invalid", inst, vb);
-				vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-			} else {
-				dprintk(CVP_WARN,
-					"%s VB is in state %d not in ACTIVE state\n"
-					, __func__, vb->state);
-			}
-		}
-		mutex_unlock(&inst->bufq[port].lock);
-	}
-	msm_cvp_queue_v4l2_event(inst, V4L2_EVENT_MSM_CVP_FLUSH_DONE);
-}
-
-int msm_cvp_comm_flush(struct msm_cvp_inst *inst, u32 flags)
-{
-	int i, rc =  0;
-	bool ip_flush = false;
-	bool op_flush = false;
-	struct msm_video_buffer *mbuf, *next;
-	struct msm_cvp_core *core;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR,
-				"Invalid params, inst %pK\n", inst);
-		return -EINVAL;
-	}
-	core = inst->core;
-	hdev = core->device;
-
-	ip_flush = flags & V4L2_CMD_FLUSH_OUTPUT;
-	op_flush = flags & V4L2_CMD_FLUSH_CAPTURE;
-
-	if (ip_flush && !op_flush) {
-		dprintk(CVP_WARN,
-			"Input only flush not supported, making it flush all\n");
-		op_flush = true;
-		return 0;
-	}
-
-	msm_cvp_clock_data_reset(inst);
-
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR,
-				"Core %pK and inst %pK are in bad state\n",
-					core, inst);
-		msm_comm_flush_in_invalid_state(inst);
-		return 0;
-	}
-
-	mutex_lock(&inst->flush_lock);
-	/* enable in flush */
-	inst->in_flush = true;
-
-	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry_safe(mbuf, next, &inst->registeredbufs.list, list) {
-		/* don't flush input buffers if input flush is not requested */
-		if (!ip_flush && mbuf->vvb.vb2_buf.type ==
-				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-			continue;
-
-		/* flush only deferred or rbr pending buffers */
-		if (!(mbuf->flags & MSM_CVP_FLAG_DEFERRED ||
-			mbuf->flags & MSM_CVP_FLAG_RBR_PENDING))
-			continue;
-
-		/*
-		 * flush buffers which are queued by client already,
-		 * the refcount will be two or more for those buffers.
-		 */
-		if (!(mbuf->smem[0].refcount >= 2))
-			continue;
-
-		print_video_buffer(CVP_DBG, "flush buf", inst, mbuf);
-		msm_cvp_comm_flush_video_buffer(inst, mbuf);
-
-		for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
-			if (msm_cvp_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
-				print_video_buffer(CVP_ERR,
-					"dqbuf: unmap failed.", inst, mbuf);
-			if (msm_cvp_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
-				print_video_buffer(CVP_ERR,
-					"dqbuf: unmap failed..", inst, mbuf);
-		}
-		if (!mbuf->smem[0].refcount) {
-			list_del(&mbuf->list);
-			kref_cvp_put_mbuf(mbuf);
-		} else {
-			/* buffer is no more a deferred buffer */
-			mbuf->flags &= ~MSM_CVP_FLAG_DEFERRED;
-		}
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
-
-	hdev = inst->core->device;
-	if (ip_flush) {
-		dprintk(CVP_DBG, "Send flush on all ports to firmware\n");
-		rc = call_hfi_op(hdev, session_flush, inst->session,
-			HAL_FLUSH_ALL);
-	} else {
-		dprintk(CVP_DBG, "Send flush on output port to firmware\n");
-		rc = call_hfi_op(hdev, session_flush, inst->session,
-			HAL_FLUSH_OUTPUT);
-	}
-	mutex_unlock(&inst->flush_lock);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"Sending flush to firmware failed, flush out all buffers\n");
-		msm_comm_flush_in_invalid_state(inst);
-		/* disable in_flush */
-		inst->in_flush = false;
-	}
-
-	return rc;
-}
-
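/*
 * Sketch of the flag decode at the top of the removed flush path: the two
 * V4L2 flush command bits select input and/or output flush, and an
 * input-only request is promoted to a full flush. The bit values here are
 * stand-ins, not the real V4L2 constants.
 */
#include <stdio.h>

#define FLUSH_OUTPUT  (1 << 0)	/* stand-in for V4L2_CMD_FLUSH_OUTPUT  */
#define FLUSH_CAPTURE (1 << 1)	/* stand-in for V4L2_CMD_FLUSH_CAPTURE */

int main(void)
{
	unsigned int flags = FLUSH_OUTPUT;	/* input-only request */
	int ip_flush = !!(flags & FLUSH_OUTPUT);
	int op_flush = !!(flags & FLUSH_CAPTURE);

	if (ip_flush && !op_flush)
		op_flush = 1;	/* input-only flush unsupported */

	printf("ip_flush=%d op_flush=%d\n", ip_flush, op_flush);
	return 0;
}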
-enum hal_extradata_id msm_cvp_comm_get_hal_extradata_index(
-	enum v4l2_mpeg_cvp_extradata index)
-{
-	int ret = 0;
-
-	switch (index) {
-	case V4L2_MPEG_CVP_EXTRADATA_NONE:
-		ret = HAL_EXTRADATA_NONE;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_INTERLACE_VIDEO:
-		ret = HAL_EXTRADATA_INTERLACE_VIDEO;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_TIMESTAMP:
-		ret = HAL_EXTRADATA_TIMESTAMP;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_S3D_FRAME_PACKING:
-		ret = HAL_EXTRADATA_S3D_FRAME_PACKING;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_FRAME_RATE:
-		ret = HAL_EXTRADATA_FRAME_RATE;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_PANSCAN_WINDOW:
-		ret = HAL_EXTRADATA_PANSCAN_WINDOW;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_RECOVERY_POINT_SEI:
-		ret = HAL_EXTRADATA_RECOVERY_POINT_SEI;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_NUM_CONCEALED_MB:
-		ret = HAL_EXTRADATA_NUM_CONCEALED_MB;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_ASPECT_RATIO:
-		ret = HAL_EXTRADATA_ASPECT_RATIO;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_MPEG2_SEQDISP:
-		ret = HAL_EXTRADATA_MPEG2_SEQDISP;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_STREAM_USERDATA:
-		ret = HAL_EXTRADATA_STREAM_USERDATA;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_FRAME_QP:
-		ret = HAL_EXTRADATA_FRAME_QP;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_LTR:
-		ret = HAL_EXTRADATA_LTR_INFO;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_ROI_QP:
-		ret = HAL_EXTRADATA_ROI_QP;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_OUTPUT_CROP:
-		ret = HAL_EXTRADATA_OUTPUT_CROP;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_DISPLAY_COLOUR_SEI:
-		ret = HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
-		ret = HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_VUI_DISPLAY:
-		ret = HAL_EXTRADATA_VUI_DISPLAY_INFO;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_VPX_COLORSPACE:
-		ret = HAL_EXTRADATA_VPX_COLORSPACE;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_UBWC_CR_STATS_INFO:
-		ret = HAL_EXTRADATA_UBWC_CR_STATS_INFO;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_HDR10PLUS_METADATA:
-		ret = HAL_EXTRADATA_HDR10PLUS_METADATA;
-		break;
-	default:
-		dprintk(CVP_WARN, "Extradata not found: %d\n", index);
-		break;
-	}
-	return ret;
-};
-
 int msm_cvp_noc_error_info(struct msm_cvp_core *core)
 {
 	struct hfi_device *hdev;
@@ -3575,196 +1515,6 @@
 	mutex_unlock(&core->lock);
 }
 
-static int msm_cvp_load_supported(struct msm_cvp_inst *inst)
-{
-	int num_mbs_per_sec = 0, max_load_adj = 0;
-	enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
-		LOAD_CALC_IGNORE_THUMBNAIL_LOAD |
-		LOAD_CALC_IGNORE_NON_REALTIME_LOAD;
-
-	if (inst->state == MSM_CVP_OPEN_DONE) {
-		max_load_adj = inst->core->resources.max_load;
-		num_mbs_per_sec = msm_cvp_comm_get_load(inst->core,
-					MSM_CVP_DECODER, quirks);
-		num_mbs_per_sec += msm_cvp_comm_get_load(inst->core,
-					MSM_CVP_ENCODER, quirks);
-		if (num_mbs_per_sec > max_load_adj) {
-			dprintk(CVP_ERR,
-				"H/W is overloaded. needed: %d max: %d\n",
-				num_mbs_per_sec,
-				max_load_adj);
-			msm_cvp_print_running_insts(inst->core);
-			return -EBUSY;
-		}
-	}
-	return 0;
-}
-
-int msm_cvp_check_scaling_supported(struct msm_cvp_inst *inst)
-{
-	u32 x_min, x_max, y_min, y_max;
-	u32 input_height, input_width, output_height, output_width;
-
-	if (inst->grid_enable > 0) {
-		dprintk(CVP_DBG, "Skip scaling check for HEIC\n");
-		return 0;
-	}
-
-	input_height = inst->prop.height[OUTPUT_PORT];
-	input_width = inst->prop.width[OUTPUT_PORT];
-	output_height = inst->prop.height[CAPTURE_PORT];
-	output_width = inst->prop.width[CAPTURE_PORT];
-
-	if (!input_height || !input_width || !output_height || !output_width) {
-		dprintk(CVP_ERR,
-			"Invalid : Input height = %d width = %d",
-			input_height, input_width);
-		dprintk(CVP_ERR,
-			" output height = %d width = %d\n",
-			output_height, output_width);
-		return -ENOTSUPP;
-	}
-
-	if (!inst->capability.scale_x.min ||
-		!inst->capability.scale_x.max ||
-		!inst->capability.scale_y.min ||
-		!inst->capability.scale_y.max) {
-
-		if (input_width * input_height !=
-			output_width * output_height) {
-			dprintk(CVP_ERR,
-				"%s: scaling is not supported (%dx%d != %dx%d)\n",
-				__func__, input_width, input_height,
-				output_width, output_height);
-			return -ENOTSUPP;
-		}
-
-		dprintk(CVP_DBG, "%s: supported WxH = %dx%d\n",
-			__func__, input_width, input_height);
-		return 0;
-	}
-
-	x_min = (1<<16)/inst->capability.scale_x.min;
-	y_min = (1<<16)/inst->capability.scale_y.min;
-	x_max = inst->capability.scale_x.max >> 16;
-	y_max = inst->capability.scale_y.max >> 16;
-
-	if (input_height > output_height) {
-		if (input_height > x_min * output_height) {
-			dprintk(CVP_ERR,
-				"Unsupported height min height %d vs %d\n",
-				input_height / x_min, output_height);
-			return -ENOTSUPP;
-		}
-	} else {
-		if (output_height > x_max * input_height) {
-			dprintk(CVP_ERR,
-				"Unsupported height max height %d vs %d\n",
-				x_max * input_height, output_height);
-			return -ENOTSUPP;
-		}
-	}
-	if (input_width > output_width) {
-		if (input_width > y_min * output_width) {
-			dprintk(CVP_ERR,
-				"Unsupported width min width %d vs %d\n",
-				input_width / y_min, output_width);
-			return -ENOTSUPP;
-		}
-	} else {
-		if (output_width > y_max * input_width) {
-			dprintk(CVP_ERR,
-				"Unsupported width max width %d vs %d\n",
-				y_max * input_width, output_width);
-			return -ENOTSUPP;
-		}
-	}
-	return 0;
-}
-
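/*
 * Worked example of the Q16 fixed-point math in the removed scaling check:
 * capability.scale_{x,y} hold ratios scaled by 1 << 16, so (1 << 16) / min
 * is the maximum downscale factor and max >> 16 the maximum upscale
 * factor. The capability values below are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int scale_min = 1 << 14;	/* 0.25 in Q16 */
	unsigned int scale_max = 8 << 16;	/* 8.0 in Q16  */
	unsigned int down = (1 << 16) / scale_min;	/* 4x */
	unsigned int up = scale_max >> 16;		/* 8x */

	printf("max downscale %ux, max upscale %ux\n", down, up);
	return 0;
}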
-int msm_cvp_check_session_supported(struct msm_cvp_inst *inst)
-{
-	struct msm_cvp_capability *capability;
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_core *core;
-	u32 output_height, output_width, input_height, input_width;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_WARN, "%s: Invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-	capability = &inst->capability;
-	hdev = inst->core->device;
-	core = inst->core;
-	rc = msm_cvp_load_supported(inst);
-	if (rc) {
-		dprintk(CVP_WARN,
-			"%s: Hardware is overloaded\n", __func__);
-		return rc;
-	}
-
-	if (!is_thermal_permissible(core)) {
-		dprintk(CVP_WARN,
-			"Thermal level critical, stop all active sessions!\n");
-		return -ENOTSUPP;
-	}
-
-	output_height = inst->prop.height[CAPTURE_PORT];
-	output_width = inst->prop.width[CAPTURE_PORT];
-	input_height = inst->prop.height[OUTPUT_PORT];
-	input_width = inst->prop.width[OUTPUT_PORT];
-
-	if (inst->session_type == MSM_CVP_ENCODER && (input_width % 2 != 0 ||
-			input_height % 2 != 0 || output_width % 2 != 0 ||
-			output_height % 2 != 0)) {
-		dprintk(CVP_ERR,
-			"Height and Width should be even numbers for NV12\n");
-		dprintk(CVP_ERR,
-			"Input WxH = (%u)x(%u), Output WxH = (%u)x(%u)\n",
-			input_width, input_height,
-			output_width, output_height);
-		rc = -ENOTSUPP;
-	}
-
-	output_height = ALIGN(inst->prop.height[CAPTURE_PORT], 16);
-	output_width = ALIGN(inst->prop.width[CAPTURE_PORT], 16);
-
-	if (!rc) {
-		if (output_width < capability->width.min ||
-			output_height < capability->height.min) {
-			dprintk(CVP_ERR,
-				"Unsupported WxH = (%u)x(%u), min supported is - (%u)x(%u)\n",
-				output_width,
-				output_height,
-				capability->width.min,
-				capability->height.min);
-			rc = -ENOTSUPP;
-		}
-		if (!rc && output_width > capability->width.max) {
-			dprintk(CVP_ERR,
-				"Unsupported width = %u supported max width = %u\n",
-				output_width,
-				capability->width.max);
-				rc = -ENOTSUPP;
-		}
-
-		if (!rc && output_height * output_width >
-			capability->width.max * capability->height.max) {
-			dprintk(CVP_ERR,
-			"Unsupported WxH = (%u)x(%u), max supported is - (%u)x(%u)\n",
-			output_width, output_height,
-			capability->width.max, capability->height.max);
-			rc = -ENOTSUPP;
-		}
-	}
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: Resolution unsupported\n", __func__);
-	}
-	return rc;
-}
-
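/*
 * The removed session check aligns the capture resolution to a 16-pixel
 * grid before comparing against capability limits. Sketch of the kernel's
 * power-of-two ALIGN() arithmetic:
 */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	printf("1080 -> %u\n", (unsigned int)ALIGN(1080, 16));	/* 1088 */
	printf("1088 -> %u\n", (unsigned int)ALIGN(1088, 16));	/* 1088 */
	return 0;
}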
 void msm_cvp_comm_generate_session_error(struct msm_cvp_inst *inst)
 {
 	enum hal_command_response cmd = HAL_SESSION_ERROR;
@@ -3900,123 +1650,6 @@
 	mutex_unlock(&core->lock);
 }
 
-int msm_cvp_comm_set_color_format(struct msm_cvp_inst *inst,
-		enum hal_buffer buffer_type, int fourcc)
-{
-	struct hal_uncompressed_format_select hal_fmt = {0};
-	enum hal_uncompressed_format format = HAL_UNUSED_COLOR;
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s - invalid param\n", __func__);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-
-	format = msm_cvp_comm_get_hal_uncompressed(fourcc);
-	if (format == HAL_UNUSED_COLOR) {
-		dprintk(CVP_ERR, "Using unsupported colorformat %#x\n",
-				fourcc);
-		rc = -ENOTSUPP;
-		goto exit;
-	}
-
-	hal_fmt.buffer_type = buffer_type;
-	hal_fmt.format = format;
-
-	rc = call_hfi_op(hdev, session_set_property, inst->session,
-		HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT, &hal_fmt);
-	if (rc)
-		dprintk(CVP_ERR,
-			"Failed to set input color format\n");
-	else
-		dprintk(CVP_DBG, "Setting uncompressed colorformat to %#x\n",
-				format);
-
-exit:
-	return rc;
-}
-
-int msm_cvp_comm_s_parm(struct msm_cvp_inst *inst, struct v4l2_streamparm *a)
-{
-	u32 property_id = 0;
-	u64 us_per_frame = 0;
-	void *pdata;
-	int rc = 0, fps = 0;
-	struct hal_frame_rate frame_rate;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device || !a) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-	property_id = HAL_CONFIG_FRAME_RATE;
-
-	if (a->parm.output.timeperframe.denominator) {
-		switch (a->type) {
-		case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-			us_per_frame = a->parm.output.timeperframe.numerator *
-				(u64)USEC_PER_SEC;
-			do_div(us_per_frame,
-				a->parm.output.timeperframe.denominator);
-			break;
-		default:
-			dprintk(CVP_ERR,
-					"Scale clocks : Unknown buffer type %d\n",
-					a->type);
-			break;
-		}
-	}
-
-	if (!us_per_frame) {
-		dprintk(CVP_ERR,
-				"Failed to scale clocks : time between frames is 0\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	fps = us_per_frame > USEC_PER_SEC ?
-		0 : USEC_PER_SEC / (u32)us_per_frame;
-
-	if (fps % 15 == 14 || fps % 24 == 23)
-		fps = fps + 1;
-	else if ((fps > 1) && (fps % 24 == 1 || fps % 15 == 1))
-		fps = fps - 1;
-
-	if (fps < inst->capability.frame_rate.min ||
-			fps > inst->capability.frame_rate.max) {
-		dprintk(CVP_ERR,
-			"FPS is out of limits : fps = %d Min = %d, Max = %d\n",
-			fps, inst->capability.frame_rate.min,
-			inst->capability.frame_rate.max);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	dprintk(CVP_PROF, "reported fps changed for %pK: %d->%d\n",
-			inst, inst->prop.fps, fps);
-	inst->prop.fps = fps;
-	if (inst->session_type == MSM_CVP_ENCODER &&
-		get_cvp_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) !=
-			HAL_VIDEO_CODEC_TME) {
-		frame_rate.frame_rate = inst->prop.fps * BIT(16);
-		frame_rate.buffer_type = HAL_BUFFER_OUTPUT;
-		pdata = &frame_rate;
-		rc = call_hfi_op(hdev, session_set_property,
-			inst->session, property_id, pdata);
-		if (rc)
-			dprintk(CVP_WARN,
-				"Failed to set frame rate %d\n", rc);
-	}
-exit:
-	return rc;
-}
-
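/*
 * Worked example of the removed s_parm() fps derivation: a timeperframe of
 * 1001/30000 gives 33366 us per frame and an integer fps of 29, which the
 * `fps % 15 == 14` nudge rounds up to the conventional 30.
 */
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL

int main(void)
{
	unsigned long long us = 1001ULL * USEC_PER_SEC / 30000;	/* 33366 */
	int fps = (int)(USEC_PER_SEC / us);			/* 29 */

	if (fps % 15 == 14 || fps % 24 == 23)
		fps += 1;
	else if (fps > 1 && (fps % 24 == 1 || fps % 15 == 1))
		fps -= 1;

	printf("us_per_frame=%llu fps=%d\n", us, fps);
	return 0;
}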
 void msm_cvp_comm_print_inst_info(struct msm_cvp_inst *inst)
 {
 	struct msm_video_buffer *mbuf;
@@ -4035,15 +1668,6 @@
 	port = is_decode ? OUTPUT_PORT : CAPTURE_PORT;
 	is_secure = inst->flags & CVP_SECURE;
 	dprintk(CVP_ERR,
-			"%s session, %s, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
-			is_decode ? "Decode" : "Encode",
-			is_secure ? "Secure" : "Non-Secure",
-			inst->fmts[port].name,
-			inst->prop.height[port], inst->prop.width[port],
-			inst->prop.fps, inst->prop.bitrate,
-			!inst->bit_depth ? "8" : "10");
-
-	dprintk(CVP_ERR,
 			"---Buffer details for inst: %pK of type: %d---\n",
 			inst, inst->session_type);
 	mutex_lock(&inst->registeredbufs.lock);
@@ -4052,14 +1676,6 @@
 		print_video_buffer(CVP_ERR, "buf", inst, mbuf);
 	mutex_unlock(&inst->registeredbufs.lock);
 
-	mutex_lock(&inst->scratchbufs.lock);
-	dprintk(CVP_ERR, "scratch buffer list:\n");
-	list_for_each_entry(buf, &inst->scratchbufs.list, list)
-		dprintk(CVP_ERR, "type: %d addr: %x size: %u\n",
-				buf->buffer_type, buf->smem.device_addr,
-				buf->smem.size);
-	mutex_unlock(&inst->scratchbufs.lock);
-
 	mutex_lock(&inst->persistbufs.lock);
 	dprintk(CVP_ERR, "persist buffer list:\n");
 	list_for_each_entry(buf, &inst->persistbufs.list, list)
@@ -4067,74 +1683,6 @@
 				buf->buffer_type, buf->smem.device_addr,
 				buf->smem.size);
 	mutex_unlock(&inst->persistbufs.lock);
-
-	mutex_lock(&inst->outputbufs.lock);
-	dprintk(CVP_ERR, "dpb buffer list:\n");
-	list_for_each_entry(buf, &inst->outputbufs.list, list)
-		dprintk(CVP_ERR, "type: %d addr: %x size: %u\n",
-				buf->buffer_type, buf->smem.device_addr,
-				buf->smem.size);
-	mutex_unlock(&inst->outputbufs.lock);
-}
-
-int msm_cvp_comm_session_continue(void *instance)
-{
-	struct msm_cvp_inst *inst = instance;
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device)
-		return -EINVAL;
-	hdev = inst->core->device;
-	mutex_lock(&inst->lock);
-	if (inst->state >= MSM_CVP_RELEASE_RESOURCES_DONE ||
-			inst->state < MSM_CVP_START_DONE) {
-		dprintk(CVP_DBG,
-			"Inst %pK : Not in valid state to call %s\n",
-				inst, __func__);
-		goto sess_continue_fail;
-	}
-	dprintk(CVP_ERR,
-				"session_continue called in wrong state for decoder");
-
-sess_continue_fail:
-	mutex_unlock(&inst->lock);
-	return rc;
-}
-
-u32 cvp_get_frame_size_nv12(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
-}
-
-u32 cvp_get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
-}
-
-u32 cvp_get_frame_size_rgba(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_RGBA8888, width, height);
-}
-
-u32 cvp_get_frame_size_nv21(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV21, width, height);
-}
-
-u32 cvp_get_frame_size_tp10_ubwc(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
-}
-
-u32 cvp_get_frame_size_p010(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_P010, width, height);
-}
-
-u32 cvp_get_frame_size_nv12_512(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_512, width, height);
 }
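/*
 * The helpers above all delegate to the VENUS_BUFFER_SIZE() macro from
 * msm_media_info.h. A deliberately simplified sketch of an NV12 size
 * computation (the real macro also adds stride/scanline alignment and,
 * for UBWC formats, metadata planes, which this omits):
 */
#include <stdio.h>

static unsigned int nv12_size_approx(unsigned int w, unsigned int h)
{
	return w * h + (w * h) / 2;	/* Y plane + interleaved UV plane */
}

int main(void)
{
	printf("1920x1080 NV12 ~ %u bytes\n", nv12_size_approx(1920, 1080));
	return 0;
}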
 
 void print_video_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
@@ -4172,552 +1720,6 @@
 			vb2->planes[1].bytesused, mbuf->smem[1].refcount);
 }
 
-void print_cvp_vb2_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
-		struct vb2_buffer *vb2)
-{
-	if (!(tag & msm_cvp_debug) || !inst || !vb2)
-		return;
-
-	if (vb2->num_planes == 1)
-		dprintk(tag,
-			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d\n",
-			str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
-			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
-			vb2->index, vb2->planes[0].m.fd,
-			vb2->planes[0].data_offset, vb2->planes[0].length,
-			vb2->planes[0].bytesused);
-	else
-		dprintk(tag,
-			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d filled %d\n",
-			str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
-			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
-			vb2->index, vb2->planes[0].m.fd,
-			vb2->planes[0].data_offset, vb2->planes[0].length,
-			vb2->planes[0].bytesused, vb2->planes[1].m.fd,
-			vb2->planes[1].data_offset, vb2->planes[1].length,
-			vb2->planes[1].bytesused);
-}
-
-void print_cvp_v4l2_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
-		struct v4l2_buffer *v4l2)
-{
-	if (!(tag & msm_cvp_debug) || !inst || !v4l2)
-		return;
-
-	if (v4l2->length == 1)
-		dprintk(tag,
-			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d\n",
-			str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
-			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
-			v4l2->index, v4l2->m.planes[0].m.fd,
-			v4l2->m.planes[0].data_offset,
-			v4l2->m.planes[0].length,
-			v4l2->m.planes[0].bytesused);
-	else
-		dprintk(tag,
-			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d filled %d\n",
-			str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
-			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
-			v4l2->index, v4l2->m.planes[0].m.fd,
-			v4l2->m.planes[0].data_offset,
-			v4l2->m.planes[0].length,
-			v4l2->m.planes[0].bytesused,
-			v4l2->m.planes[1].m.fd,
-			v4l2->m.planes[1].data_offset,
-			v4l2->m.planes[1].length,
-			v4l2->m.planes[1].bytesused);
-}
-
-bool msm_cvp_comm_compare_vb2_plane(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, struct vb2_buffer *vb2, u32 i)
-{
-	struct vb2_buffer *vb;
-
-	if (!inst || !mbuf || !vb2) {
-		dprintk(CVP_ERR, "%s: invalid params, %pK %pK %pK\n",
-			__func__, inst, mbuf, vb2);
-		return false;
-	}
-
-	vb = &mbuf->vvb.vb2_buf;
-	if (vb->planes[i].m.fd == vb2->planes[i].m.fd &&
-		vb->planes[i].length == vb2->planes[i].length) {
-		return true;
-	}
-
-	return false;
-}
-
-bool msm_cvp_comm_compare_vb2_planes(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, struct vb2_buffer *vb2)
-{
-	int i = 0;
-	struct vb2_buffer *vb;
-
-	if (!inst || !mbuf || !vb2) {
-		dprintk(CVP_ERR, "%s: invalid params, %pK %pK %pK\n",
-			__func__, inst, mbuf, vb2);
-		return false;
-	}
-
-	vb = &mbuf->vvb.vb2_buf;
-
-	if (vb->num_planes != vb2->num_planes)
-		return false;
-
-	for (i = 0; i < vb->num_planes; i++) {
-		if (!msm_cvp_comm_compare_vb2_plane(inst, mbuf, vb2, i))
-			return false;
-	}
-
-	return true;
-}
-
-bool msm_cvp_comm_compare_dma_plane(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, unsigned long *dma_planes, u32 i)
-{
-	if (!inst || !mbuf || !dma_planes) {
-		dprintk(CVP_ERR, "%s: invalid params, %pK %pK %pK\n",
-			__func__, inst, mbuf, dma_planes);
-		return false;
-	}
-
-	if ((unsigned long)mbuf->smem[i].dma_buf == dma_planes[i])
-		return true;
-
-	return false;
-}
-
-bool msm_cvp_comm_compare_dma_planes(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, unsigned long *dma_planes)
-{
-	int i = 0;
-	struct vb2_buffer *vb;
-
-	if (!inst || !mbuf || !dma_planes) {
-		dprintk(CVP_ERR, "%s: invalid params, %pK %pK %pK\n",
-			__func__, inst, mbuf, dma_planes);
-		return false;
-	}
-
-	vb = &mbuf->vvb.vb2_buf;
-	for (i = 0; i < vb->num_planes; i++) {
-		if (!msm_cvp_comm_compare_dma_plane(inst, mbuf, dma_planes, i))
-			return false;
-	}
-
-	return true;
-}
-
-
-bool msm_cvp_comm_compare_device_plane(struct msm_video_buffer *mbuf,
-		u32 type, u32 *planes, u32 i)
-{
-	if (!mbuf || !planes) {
-		dprintk(CVP_ERR, "%s: invalid params, %pK %pK\n",
-			__func__, mbuf, planes);
-		return false;
-	}
-
-	if (mbuf->vvb.vb2_buf.type == type &&
-		mbuf->smem[i].device_addr == planes[i])
-		return true;
-
-	return false;
-}
-
-bool msm_cvp_comm_compare_device_planes(struct msm_video_buffer *mbuf,
-		u32 type, u32 *planes)
-{
-	int i = 0;
-
-	if (!mbuf || !planes)
-		return false;
-
-	for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
-		if (!msm_cvp_comm_compare_device_plane(mbuf, type, planes, i))
-			return false;
-	}
-
-	return true;
-}
-
-struct msm_video_buffer *msm_cvp_comm_get_buffer_using_device_planes(
-		struct msm_cvp_inst *inst, u32 type, u32 *planes)
-{
-	struct msm_video_buffer *mbuf;
-	bool found = false;
-
-	mutex_lock(&inst->registeredbufs.lock);
-	found = false;
-	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-		if (msm_cvp_comm_compare_device_planes(mbuf, type, planes)) {
-			found = true;
-			break;
-		}
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
-	if (!found) {
-		dprintk(CVP_ERR,
-			"%s: data_addr %x, extradata_addr %x not found\n",
-			__func__, planes[0], planes[1]);
-		mbuf = NULL;
-	}
-
-	return mbuf;
-}
-
-int msm_cvp_comm_flush_video_buffer(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	struct vb2_buffer *vb;
-	u32 port;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK\n",
-			__func__, inst, mbuf);
-		return -EINVAL;
-	}
-
-	vb = msm_cvp_comm_get_vb_using_video_buffer(inst, mbuf);
-	if (!vb) {
-		print_video_buffer(CVP_ERR,
-			"vb not found for buf", inst, mbuf);
-		return -EINVAL;
-	}
-
-	if (mbuf->vvb.vb2_buf.type ==
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-		port = CAPTURE_PORT;
-	else if (mbuf->vvb.vb2_buf.type ==
-			V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-		port = OUTPUT_PORT;
-	else
-		return -EINVAL;
-
-	mutex_lock(&inst->bufq[port].lock);
-	if (inst->bufq[port].vb2_bufq.streaming) {
-		vb->planes[0].bytesused = 0;
-		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-	} else {
-		dprintk(CVP_ERR, "%s: port %d is not streaming\n",
-			__func__, port);
-	}
-	mutex_unlock(&inst->bufq[port].lock);
-
-	return 0;
-}
-
-int msm_cvp_comm_qbuf_cache_operations(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	int rc = 0, i;
-	struct vb2_buffer *vb;
-	bool skip;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK\n",
-			__func__, inst, mbuf);
-		return -EINVAL;
-	}
-	vb = &mbuf->vvb.vb2_buf;
-
-	for (i = 0; i < vb->num_planes; i++) {
-		unsigned long offset, size;
-		enum smem_cache_ops cache_op;
-
-		skip = true;
-		if (inst->session_type == MSM_CVP_DECODER) {
-			if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-				if (!i) { /* bitstream */
-					skip = false;
-					offset = vb->planes[i].data_offset;
-					size = vb->planes[i].bytesused;
-					cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
-				}
-			} else if (vb->type ==
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-				if (!i) { /* yuv */
-					skip = false;
-					offset = 0;
-					size = vb->planes[i].length;
-					cache_op = SMEM_CACHE_INVALIDATE;
-				}
-			}
-		} else if (inst->session_type == MSM_CVP_ENCODER) {
-			if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-				if (!i) { /* yuv */
-					skip = false;
-					offset = vb->planes[i].data_offset;
-					size = vb->planes[i].bytesused;
-					cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
-				}
-			} else if (vb->type ==
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-				if (!i) { /* bitstream */
-					skip = false;
-					offset = 0;
-					size = vb->planes[i].length;
-					cache_op = SMEM_CACHE_INVALIDATE;
-				}
-			}
-		}
-
-		if (!skip) {
-			rc = msm_cvp_smem_cache_operations(
-					mbuf->smem[i].dma_buf,
-					cache_op, offset, size);
-			if (rc)
-				print_video_buffer(CVP_ERR,
-					"qbuf cache ops failed", inst, mbuf);
-		}
-	}
-
-	return rc;
-}
-
-int msm_cvp_comm_dqbuf_cache_operations(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	int rc = 0, i;
-	struct vb2_buffer *vb;
-	bool skip;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK\n",
-			__func__, inst, mbuf);
-		return -EINVAL;
-	}
-	vb = &mbuf->vvb.vb2_buf;
-
-	for (i = 0; i < vb->num_planes; i++) {
-		unsigned long offset, size;
-		enum smem_cache_ops cache_op;
-
-		skip = true;
-		if (inst->session_type == MSM_CVP_DECODER) {
-			if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-				/* bitstream and extradata */
-				/* we do not need cache operations */
-			} else if (vb->type ==
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-				if (!i) { /* yuv */
-					skip = false;
-					offset = vb->planes[i].data_offset;
-					size = vb->planes[i].bytesused;
-					cache_op = SMEM_CACHE_INVALIDATE;
-				}
-			}
-		} else if (inst->session_type == MSM_CVP_ENCODER) {
-			if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-				/* yuv and extradata */
-				/* we do not need cache operations */
-			} else if (vb->type ==
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-				if (!i) { /* bitstream */
-					skip = false;
-					/*
-					 * Include vp8e header bytes as well
-					 * by making offset equal to zero
-					 */
-					offset = 0;
-					size = vb->planes[i].bytesused +
-						vb->planes[i].data_offset;
-					cache_op = SMEM_CACHE_INVALIDATE;
-				}
-			}
-		}
-
-		if (!skip) {
-			rc = msm_cvp_smem_cache_operations(
-					mbuf->smem[i].dma_buf,
-					cache_op, offset, size);
-			if (rc)
-				print_video_buffer(CVP_ERR,
-					"dqbuf cache ops failed", inst, mbuf);
-		}
-	}
-
-	return rc;
-}
-
-struct msm_video_buffer *msm_cvp_comm_get_video_buffer(
-		struct msm_cvp_inst *inst,
-		struct vb2_buffer *vb2)
-{
-	int rc = 0;
-	struct vb2_v4l2_buffer *vbuf;
-	struct vb2_buffer *vb;
-	unsigned long dma_planes[VB2_MAX_PLANES] = {0};
-	struct msm_video_buffer *mbuf;
-	bool found = false;
-	int i;
-
-	if (!inst || !vb2) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return NULL;
-	}
-
-	for (i = 0; i < vb2->num_planes; i++) {
-		/*
-		 * always compare dma_buf addresses which is guaranteed
-		 * to be same across the processes (duplicate fds).
-		 */
-		dma_planes[i] = (unsigned long)msm_cvp_smem_get_dma_buf(
-				vb2->planes[i].m.fd);
-		if (!dma_planes[i])
-			return NULL;
-		msm_cvp_smem_put_dma_buf((struct dma_buf *)dma_planes[i]);
-	}
-
-	mutex_lock(&inst->registeredbufs.lock);
-	/*
-	 * for encoder input, client may queue the same buffer with different
-	 * fd before driver returned old buffer to the client. This buffer
-	 * should be treated as new buffer Search the list with fd so that
-	 * it will be treated as new msm_video_buffer.
-	 */
-	if (is_encode_session(inst) && vb2->type ==
-			V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-			if (msm_cvp_comm_compare_vb2_planes(inst, mbuf, vb2)) {
-				found = true;
-				break;
-			}
-		}
-	} else {
-		list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-			if (msm_cvp_comm_compare_dma_planes(inst, mbuf,
-					dma_planes)) {
-				found = true;
-				break;
-			}
-		}
-	}
-
-	if (!found) {
-		/* this is new vb2_buffer */
-		mbuf = kzalloc(sizeof(struct msm_video_buffer), GFP_KERNEL);
-		if (!mbuf) {
-			dprintk(CVP_ERR, "%s: alloc msm_video_buffer failed\n",
-				__func__);
-			rc = -ENOMEM;
-			goto exit;
-		}
-		kref_init(&mbuf->kref);
-	}
-
-	/* Initially assume all the buffer are going to be deferred */
-	mbuf->flags |= MSM_CVP_FLAG_DEFERRED;
-
-	vbuf = to_vb2_v4l2_buffer(vb2);
-	memcpy(&mbuf->vvb, vbuf, sizeof(struct vb2_v4l2_buffer));
-	vb = &mbuf->vvb.vb2_buf;
-
-	for (i = 0; i < vb->num_planes; i++) {
-		mbuf->smem[i].buffer_type =
-			cvp_get_hal_buffer_type(vb->type, i);
-		mbuf->smem[i].fd = vb->planes[i].m.fd;
-		mbuf->smem[i].offset = vb->planes[i].data_offset;
-		mbuf->smem[i].size = vb->planes[i].length;
-		rc = msm_cvp_smem_map_dma_buf(inst, &mbuf->smem[i]);
-		if (rc) {
-			dprintk(CVP_ERR, "%s: map failed.\n", __func__);
-			goto exit;
-		}
-		/* increase refcount as we get both fbd and rbr */
-		rc = msm_cvp_smem_map_dma_buf(inst, &mbuf->smem[i]);
-		if (rc) {
-			dprintk(CVP_ERR, "%s: map failed..\n", __func__);
-			goto exit;
-		}
-	}
-	/* dma cache operations need to be performed after dma_map */
-	msm_cvp_comm_qbuf_cache_operations(inst, mbuf);
-
-	/* add the new buffer to list */
-	if (!found)
-		list_add_tail(&mbuf->list, &inst->registeredbufs.list);
-
-	mutex_unlock(&inst->registeredbufs.lock);
-
-	/*
-	 * Return mbuf if decode batching is enabled as this buffer
-	 * may trigger queuing full batch to firmware, also this buffer
-	 * will not be queued to firmware while full batch queuing,
-	 * it will be queued when rbr event arrived from firmware.
-	 */
-	if (rc == -EEXIST && !inst->batch.enable)
-		return ERR_PTR(rc);
-
-	return mbuf;
-
-exit:
-	dprintk(CVP_ERR, "%s: rc %d\n", __func__, rc);
-	msm_cvp_comm_unmap_video_buffer(inst, mbuf);
-	if (!found)
-		kref_cvp_put_mbuf(mbuf);
-	mutex_unlock(&inst->registeredbufs.lock);
-
-	return ERR_PTR(rc);
-}
-
-void msm_cvp_comm_put_video_buffer(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	struct msm_video_buffer *temp;
-	bool found = false;
-	int i = 0;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK\n",
-			__func__, inst, mbuf);
-		return;
-	}
-
-	mutex_lock(&inst->registeredbufs.lock);
-	/* check if mbuf was not removed by any chance */
-	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
-		if (msm_cvp_comm_compare_vb2_planes(inst, mbuf,
-				&temp->vvb.vb2_buf)) {
-			found = true;
-			break;
-		}
-	}
-	if (!found) {
-		print_video_buffer(CVP_ERR, "buf was removed", inst, mbuf);
-		goto unlock;
-	}
-
-	print_video_buffer(CVP_DBG, "dqbuf", inst, mbuf);
-	for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
-		if (msm_cvp_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
-			print_video_buffer(CVP_ERR,
-				"dqbuf: unmap failed.", inst, mbuf);
-
-		if (!(mbuf->vvb.flags & V4L2_BUF_FLAG_READONLY)) {
-			/* rbr won't come for this buffer */
-			if (msm_cvp_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
-				print_video_buffer(CVP_ERR,
-					"dqbuf: unmap failed..", inst, mbuf);
-		} else {
-			/* RBR event expected */
-			mbuf->flags |= MSM_CVP_FLAG_RBR_PENDING;
-		}
-	}
-	/*
-	 * remove the entry if plane[0].refcount is zero else
-	 * don't remove as client queued same buffer that's why
-	 * plane[0].refcount is not zero
-	 */
-	if (!mbuf->smem[0].refcount) {
-		list_del(&mbuf->list);
-		kref_cvp_put_mbuf(mbuf);
-	}
-unlock:
-	mutex_unlock(&inst->registeredbufs.lock);
-}
-
 int msm_cvp_comm_unmap_video_buffer(struct msm_cvp_inst *inst,
 		struct msm_video_buffer *mbuf)
 {
@@ -4786,167 +1788,6 @@
 	return ret;
 }
 
-void msm_cvp_comm_store_mark_data(struct msm_cvp_list *data_list,
-		u32 index, u32 mark_data, u32 mark_target)
-{
-	struct msm_cvp_buf_data *pdata = NULL;
-	bool found = false;
-
-	if (!data_list) {
-		dprintk(CVP_ERR, "%s: invalid params %pK\n",
-			__func__, data_list);
-		return;
-	}
-
-	mutex_lock(&data_list->lock);
-	list_for_each_entry(pdata, &data_list->list, list) {
-		if (pdata->index == index) {
-			pdata->mark_data = mark_data;
-			pdata->mark_target = mark_target;
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
-		if (!pdata)  {
-			dprintk(CVP_WARN, "%s: malloc failure.\n", __func__);
-			goto exit;
-		}
-		pdata->index = index;
-		pdata->mark_data = mark_data;
-		pdata->mark_target = mark_target;
-		list_add_tail(&pdata->list, &data_list->list);
-	}
-
-exit:
-	mutex_unlock(&data_list->lock);
-}
-
-void msm_cvp_comm_fetch_mark_data(struct msm_cvp_list *data_list,
-		u32 index, u32 *mark_data, u32 *mark_target)
-{
-	struct msm_cvp_buf_data *pdata = NULL;
-
-	if (!data_list || !mark_data || !mark_target) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK %pK\n",
-			__func__, data_list, mark_data, mark_target);
-		return;
-	}
-
-	*mark_data = *mark_target = 0;
-	mutex_lock(&data_list->lock);
-	list_for_each_entry(pdata, &data_list->list, list) {
-		if (pdata->index == index) {
-			*mark_data = pdata->mark_data;
-			*mark_target = pdata->mark_target;
-			/* clear after fetch */
-			pdata->mark_data = pdata->mark_target = 0;
-			break;
-		}
-	}
-	mutex_unlock(&data_list->lock);
-}
-
-int msm_cvp_comm_release_mark_data(struct msm_cvp_inst *inst)
-{
-	struct msm_cvp_buf_data *pdata, *next;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid params %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	mutex_lock(&inst->etb_data.lock);
-	list_for_each_entry_safe(pdata, next, &inst->etb_data.list, list) {
-		list_del(&pdata->list);
-		kfree(pdata);
-	}
-	mutex_unlock(&inst->etb_data.lock);
-
-	mutex_lock(&inst->fbd_data.lock);
-	list_for_each_entry_safe(pdata, next, &inst->fbd_data.list, list) {
-		list_del(&pdata->list);
-		kfree(pdata);
-	}
-	mutex_unlock(&inst->fbd_data.lock);
-
-	return 0;
-}
-
-int msm_cvp_comm_set_color_format_constraints(struct msm_cvp_inst *inst,
-		enum hal_buffer buffer_type,
-		struct msm_cvp_format_constraint *pix_constraint)
-{
-	struct hal_uncompressed_plane_actual_constraints_info
-		*pconstraint = NULL;
-	u32 num_planes = 2;
-	u32 size = 0;
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s - invalid param\n", __func__);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-
-	size = sizeof(buffer_type)
-			+ sizeof(u32)
-			+ num_planes
-			* sizeof(struct hal_uncompressed_plane_constraints);
-
-	pconstraint = kzalloc(size, GFP_KERNEL);
-	if (!pconstraint) {
-		dprintk(CVP_ERR, "No memory cannot alloc constrain\n");
-		rc = -ENOMEM;
-		goto exit;
-	}
-
-	pconstraint->buffer_type = buffer_type;
-	pconstraint->num_planes = pix_constraint->num_planes;
-	//set Y plan constraints
-	dprintk(CVP_INFO, "Set Y plan constraints.\n");
-	pconstraint->rg_plane_format[0].stride_multiples =
-			pix_constraint->y_stride_multiples;
-	pconstraint->rg_plane_format[0].max_stride =
-			pix_constraint->y_max_stride;
-	pconstraint->rg_plane_format[0].min_plane_buffer_height_multiple =
-			pix_constraint->y_min_plane_buffer_height_multiple;
-	pconstraint->rg_plane_format[0].buffer_alignment =
-			pix_constraint->y_buffer_alignment;
-
-	//set UV plan constraints
-	dprintk(CVP_INFO, "Set UV plan constraints.\n");
-	pconstraint->rg_plane_format[1].stride_multiples =
-			pix_constraint->uv_stride_multiples;
-	pconstraint->rg_plane_format[1].max_stride =
-			pix_constraint->uv_max_stride;
-	pconstraint->rg_plane_format[1].min_plane_buffer_height_multiple =
-			pix_constraint->uv_min_plane_buffer_height_multiple;
-	pconstraint->rg_plane_format[1].buffer_alignment =
-			pix_constraint->uv_buffer_alignment;
-
-	rc = call_hfi_op(hdev,
-			session_set_property,
-			inst->session,
-			HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
-			pconstraint);
-	if (rc)
-		dprintk(CVP_ERR,
-			"Failed to set input color format constraint\n");
-	else
-		dprintk(CVP_DBG, "Set color format constraint success\n");
-
-exit:
-	if (!pconstraint)
-		kfree(pconstraint);
-	return rc;
-}
-
 static int set_internal_buf_on_fw(struct msm_cvp_inst *inst,
 				enum hal_buffer buffer_type,
 				struct msm_smem *handle, bool reuse)
@@ -5037,7 +1878,7 @@
 
 
 /* Set ARP buffer for CVP firmware to handle concurrency */
-int cvp_comm_set_persist_buffers(struct msm_cvp_inst *inst)
+int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst)
 {
 	int rc = 0, idx = 0;
 	struct hal_buffer_requirements *internal_buf = NULL;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.h b/drivers/media/platform/msm/cvp/msm_cvp_common.h
index f1cc762..240e43f 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.h
@@ -8,30 +8,6 @@
 #define _MSM_CVP_COMMON_H_
 #include "msm_cvp_internal.h"
 
-#define MAX_DEC_BATCH_SIZE                     6
-#define MAX_DEC_BATCH_WIDTH                    1920
-#define MAX_DEC_BATCH_HEIGHT                   1088
-#define SKIP_BATCH_WINDOW                      100
-#define MIN_FRAME_QUALITY 0
-#define MAX_FRAME_QUALITY 100
-#define DEFAULT_FRAME_QUALITY 80
-#define FRAME_QUALITY_STEP 1
-#define HEIC_GRID_DIMENSION 512
-#define CBR_MB_LIMIT                           (((1280+15)/16)*((720+15)/16)*30)
-#define CBR_VFR_MB_LIMIT                       (((640+15)/16)*((480+15)/16)*30)
-
-struct vb2_buf_entry {
-	struct list_head list;
-	struct vb2_buffer *vb;
-};
-
-struct getprop_buf {
-	struct list_head list;
-	void *data;
-};
-
-extern const char *const mpeg_video_cvp_extradata[];
-
 enum load_calc_quirks {
 	LOAD_CALC_NO_QUIRKS = 0,
 	LOAD_CALC_IGNORE_TURBO_LOAD = 1 << 0,
@@ -39,26 +15,11 @@
 	LOAD_CALC_IGNORE_NON_REALTIME_LOAD = 1 << 2,
 };
 
-static inline bool is_turbo_session(struct msm_cvp_inst *inst)
-{
-	return !!(inst->flags & CVP_TURBO);
-}
-
 static inline bool is_thumbnail_session(struct msm_cvp_inst *inst)
 {
 	return !!(inst->flags & CVP_THUMBNAIL);
 }
 
-static inline bool is_low_power_session(struct msm_cvp_inst *inst)
-{
-	return !!(inst->flags & CVP_LOW_POWER);
-}
-
-static inline bool is_realtime_session(struct msm_cvp_inst *inst)
-{
-	return !!(inst->flags & CVP_REALTIME);
-}
-
 static inline bool is_decode_session(struct msm_cvp_inst *inst)
 {
 	return inst->session_type == MSM_CVP_DECODER;
@@ -69,28 +30,6 @@
 	return inst->session_type == MSM_CVP_ENCODER;
 }
 
-static inline bool is_primary_output_mode(struct msm_cvp_inst *inst)
-{
-	return inst->stream_output_mode == HAL_VIDEO_DECODER_PRIMARY;
-}
-
-static inline bool is_secondary_output_mode(struct msm_cvp_inst *inst)
-{
-	return inst->stream_output_mode == HAL_VIDEO_DECODER_SECONDARY;
-}
-
-static inline int msm_comm_g_ctrl(struct msm_cvp_inst *inst,
-		struct v4l2_control *ctrl)
-{
-	return v4l2_g_ctrl(&inst->ctrl_handler, ctrl);
-}
-
-static inline int msm_comm_s_ctrl(struct msm_cvp_inst *inst,
-		struct v4l2_control *ctrl)
-{
-	return v4l2_s_ctrl(NULL, &inst->ctrl_handler, ctrl);
-}
-bool cvp_is_batching_allowed(struct msm_cvp_inst *inst);
 enum hal_buffer cvp_get_hal_buffer_type(unsigned int type,
 		unsigned int plane_num);
 void cvp_put_inst(struct msm_cvp_inst *inst);
@@ -99,52 +38,21 @@
 void cvp_change_inst_state(struct msm_cvp_inst *inst,
 		enum instance_state state);
 struct msm_cvp_core *get_cvp_core(int core_id);
-struct msm_cvp_format_constraint *msm_cvp_comm_get_pixel_fmt_constraints(
-	struct msm_cvp_format_constraint fmt[], int size, int fourcc);
-int msm_cvp_comm_set_color_format_constraints(struct msm_cvp_inst *inst,
-		enum hal_buffer buffer_type,
-		struct msm_cvp_format_constraint *pix_constraint);
 struct buf_queue *msm_cvp_comm_get_vb2q(
 		struct msm_cvp_inst *inst, enum v4l2_buf_type type);
 int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state);
-int msm_cvp_comm_try_set_prop(struct msm_cvp_inst *inst,
-	enum hal_property ptype, void *pdata);
-int msm_cvp_comm_try_get_prop(struct msm_cvp_inst *inst,
-	enum hal_property ptype, union hal_get_property *hprop);
 int msm_cvp_comm_set_buffer_count(struct msm_cvp_inst *inst,
 	int host_count, int act_count, enum hal_buffer type);
-int msm_cvp_comm_queue_output_buffers(struct msm_cvp_inst *inst);
-int msm_cvp_comm_qbuf(struct msm_cvp_inst *inst, struct msm_video_buffer *mbuf);
-void msm_cvp_comm_flush_dynamic_buffers(struct msm_cvp_inst *inst);
-int msm_cvp_comm_flush(struct msm_cvp_inst *inst, u32 flags);
-int msm_cvp_comm_release_scratch_buffers(struct msm_cvp_inst *inst,
-					bool check_for_reuse);
-int msm_cvp_comm_release_recon_buffers(struct msm_cvp_inst *inst);
-void msm_cvp_comm_release_eos_buffers(struct msm_cvp_inst *inst);
-int msm_cvp_comm_release_output_buffers(struct msm_cvp_inst *inst,
-	bool force_release);
-void msm_cvp_comm_validate_output_buffers(struct msm_cvp_inst *inst);
 int msm_cvp_comm_force_cleanup(struct msm_cvp_inst *inst);
 int msm_cvp_comm_suspend(int core_id);
-enum hal_extradata_id msm_cvp_comm_get_hal_extradata_index(
-	enum v4l2_mpeg_cvp_extradata index);
-int msm_cvp_comm_reset_bufreqs(struct msm_cvp_inst *inst,
-	enum hal_buffer buf_type);
-int msm_cvp_comm_copy_bufreqs(struct msm_cvp_inst *inst,
-	enum hal_buffer src_type, enum hal_buffer dst_type);
 struct hal_buffer_requirements *get_cvp_buff_req_buffer(
 			struct msm_cvp_inst *inst, u32 buffer_type);
-#define IS_PRIV_CTRL(idx) (\
-		(V4L2_CTRL_ID2WHICH(idx) == V4L2_CTRL_CLASS_MPEG) && \
-		V4L2_CTRL_DRIVER_PRIV(idx))
 void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst);
 int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst);
 void msm_cvp_comm_generate_session_error(struct msm_cvp_inst *inst);
 void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst);
 enum multi_stream msm_cvp_comm_get_stream_output_mode(
 		struct msm_cvp_inst *inst);
-int msm_cvp_comm_set_stream_output_mode(struct msm_cvp_inst *inst,
-		enum multi_stream mode);
 enum hal_buffer msm_cvp_comm_get_hal_output_buffer(struct msm_cvp_inst *inst);
 int msm_cvp_comm_smem_alloc(struct msm_cvp_inst *inst, size_t size, u32 align,
 		u32 flags, enum hal_buffer buffer_type, int map_kernel,
@@ -159,83 +67,19 @@
 			enum load_calc_quirks quirks);
 int msm_cvp_comm_get_inst_load_per_core(struct msm_cvp_inst *inst,
 			enum load_calc_quirks quirks);
-int msm_cvp_comm_get_load(struct msm_cvp_core *core,
-			enum session_type type, enum load_calc_quirks quirks);
-int msm_cvp_comm_set_color_format(struct msm_cvp_inst *inst,
-		enum hal_buffer buffer_type, int fourcc);
-int msm_comm_g_ctrl(struct msm_cvp_inst *inst, struct v4l2_control *ctrl);
-int msm_comm_s_ctrl(struct msm_cvp_inst *inst, struct v4l2_control *ctrl);
-int msm_cvp_comm_g_ctrl_for_id(struct msm_cvp_inst *inst, int id);
-int msm_cvp_comm_ctrl_init(struct msm_cvp_inst *inst,
-		struct msm_cvp_ctrl *drv_ctrls, u32 num_ctrls,
-		const struct v4l2_ctrl_ops *ctrl_ops);
-int msm_cvp_comm_ctrl_deinit(struct msm_cvp_inst *inst);
-void msm_cvp_comm_cleanup_internal_buffers(struct msm_cvp_inst *inst);
-int msm_cvp_comm_s_parm(struct msm_cvp_inst *inst, struct v4l2_streamparm *a);
-bool msm_cvp_comm_turbo_session(struct msm_cvp_inst *inst);
 void msm_cvp_comm_print_inst_info(struct msm_cvp_inst *inst);
-int msm_cvp_comm_hal_to_v4l2(int id, int value);
-int msm_cvp_comm_get_v4l2_profile(int fourcc, int profile);
-int msm_cvp_comm_get_v4l2_level(int fourcc, int level);
-int msm_cvp_comm_session_continue(void *instance);
-enum hal_uncompressed_format msm_cvp_comm_get_hal_uncompressed(int fourcc);
-u32 cvp_get_frame_size_nv12(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_nv12_512(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_nv12_ubwc(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_rgba(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_nv21(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_tp10_ubwc(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_p010(int plane, u32 height, u32 width);
-struct vb2_buffer *msm_cvp_comm_get_vb_using_video_buffer(
-		struct msm_cvp_inst *inst, struct msm_video_buffer *mbuf);
-struct msm_video_buffer *msm_cvp_comm_get_buffer_using_device_planes(
-		struct msm_cvp_inst *inst, u32 type, u32 *planes);
 struct msm_video_buffer *msm_cvp_comm_get_video_buffer(
 		struct msm_cvp_inst *inst, struct vb2_buffer *vb2);
-void msm_cvp_comm_put_video_buffer(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
-int msm_cvp_comm_vb2_buffer_done(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
-int msm_cvp_comm_flush_video_buffer(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
 int msm_cvp_comm_unmap_video_buffer(struct msm_cvp_inst *inst,
 		struct msm_video_buffer *mbuf);
-bool msm_cvp_comm_compare_dma_plane(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf,
-		unsigned long *dma_planes, u32 i);
-bool msm_cvp_comm_compare_dma_planes(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, unsigned long *dma_planes);
-bool msm_cvp_comm_compare_vb2_plane(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, struct vb2_buffer *vb2, u32 i);
-bool msm_cvp_comm_compare_vb2_planes(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, struct vb2_buffer *vb2);
-bool msm_cvp_comm_compare_device_plane(struct msm_video_buffer *mbuf,
-		u32 type, u32 *planes, u32 i);
-bool msm_cvp_comm_compare_device_planes(struct msm_video_buffer *mbuf,
-		u32 type, u32 *planes);
-int msm_cvp_comm_qbuf_cache_operations(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
-int msm_cvp_comm_dqbuf_cache_operations(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
 void print_video_buffer(u32 tag, const char *str,
 		struct msm_cvp_inst *inst, struct msm_video_buffer *mbuf);
-void print_cvp_vb2_buffer(u32 tag, const char *str,
-		struct msm_cvp_inst *inst, struct vb2_buffer *vb2);
-void print_cvp_v4l2_buffer(u32 tag, const char *str,
-		struct msm_cvp_inst *inst, struct v4l2_buffer *v4l2);
 void kref_cvp_put_mbuf(struct msm_video_buffer *mbuf);
 bool kref_cvp_get_mbuf(struct msm_cvp_inst *inst,
 	struct msm_video_buffer *mbuf);
-void msm_cvp_comm_store_mark_data(struct msm_cvp_list *data_list,
-		u32 index, u32 mark_data, u32 mark_target);
-void msm_cvp_comm_fetch_mark_data(struct msm_cvp_list *data_list,
-		u32 index, u32 *mark_data, u32 *mark_target);
-int msm_cvp_comm_release_mark_data(struct msm_cvp_inst *inst);
-int msm_cvp_comm_qbuf_decode_batch(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
 int msm_cvp_comm_num_queued_bufs(struct msm_cvp_inst *inst, u32 type);
 int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
 	enum hal_command_response cmd);
-int cvp_comm_set_persist_buffers(struct msm_cvp_inst *inst);
+int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst);
 int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst);
 #endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.c b/drivers/media/platform/msm/cvp/msm_cvp_core.c
index 1453401..167c0d1 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.c
@@ -18,9 +18,6 @@
 
 #define MAX_EVENTS 30
 
-static int try_get_ctrl(struct msm_cvp_inst *inst,
-	struct v4l2_ctrl *ctrl);
-
 static int get_poll_flags(void *instance)
 {
 	struct msm_cvp_inst *inst = instance;
@@ -75,48 +72,6 @@
 }
 EXPORT_SYMBOL(msm_cvp_poll);
 
-int msm_cvp_querycap(void *instance, struct v4l2_capability *cap)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(msm_cvp_querycap);
-
-int msm_cvp_enum_fmt(void *instance, struct v4l2_fmtdesc *f)
-{
-	struct msm_cvp_inst *inst = instance;
-
-	if (!inst || !f)
-		return -EINVAL;
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL(msm_cvp_enum_fmt);
-
-int msm_cvp_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(msm_cvp_query_ctrl);
-
-int msm_cvp_s_fmt(void *instance, struct v4l2_format *f)
-{
-	int rc = 0;
-	struct msm_cvp_inst *inst = instance;
-
-	if (!inst || !f)
-		return -EINVAL;
-
-	dprintk(CVP_DBG,
-		"s_fmt: %x : type %d wxh %dx%d pixelfmt %#x num_planes %d size[0] %d size[1] %d in_reconfig %d\n",
-		hash32_ptr(inst->session), f->type,
-		f->fmt.pix_mp.width, f->fmt.pix_mp.height,
-		f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.num_planes,
-		f->fmt.pix_mp.plane_fmt[0].sizeimage,
-		f->fmt.pix_mp.plane_fmt[1].sizeimage, inst->in_reconfig);
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_s_fmt);
-
 int msm_cvp_g_fmt(void *instance, struct v4l2_format *f)
 {
 	struct msm_cvp_inst *inst = instance;
@@ -129,10 +84,6 @@
 			"Invalid input, inst = %pK, format = %pK\n", inst, f);
 		return -EINVAL;
 	}
-	if (inst->in_reconfig) {
-		inst->prop.height[OUTPUT_PORT] = inst->reconfig_height;
-		inst->prop.width[OUTPUT_PORT] = inst->reconfig_width;
-	}
 
 	port = f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
 		OUTPUT_PORT : CAPTURE_PORT;
@@ -174,91 +125,11 @@
 			inst->prop.height[port]);
 	f->fmt.pix_mp.plane_fmt[0].sizeimage = VENUS_BUFFER_SIZE(color_format,
 			inst->prop.width[port], inst->prop.height[port]);
-
-	dprintk(CVP_DBG,
-		"g_fmt: %x : type %d wxh %dx%d pixelfmt %#x num_planes %d size[0] %d size[1] %d in_reconfig %d\n",
-		hash32_ptr(inst->session), f->type,
-		f->fmt.pix_mp.width, f->fmt.pix_mp.height,
-		f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.num_planes,
-		f->fmt.pix_mp.plane_fmt[0].sizeimage,
-		f->fmt.pix_mp.plane_fmt[1].sizeimage, inst->in_reconfig);
 exit:
 	return rc;
 }
 EXPORT_SYMBOL(msm_cvp_g_fmt);
 
-int msm_cvp_s_ctrl(void *instance, struct v4l2_control *control)
-{
-	struct msm_cvp_inst *inst = instance;
-
-	if (!inst || !control)
-		return -EINVAL;
-
-	return msm_comm_s_ctrl(instance, control);
-}
-EXPORT_SYMBOL(msm_cvp_s_ctrl);
-
-int msm_cvp_g_crop(void *instance, struct v4l2_crop *crop)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(msm_cvp_g_crop);
-
-int msm_cvp_g_ctrl(void *instance, struct v4l2_control *control)
-{
-	struct msm_cvp_inst *inst = instance;
-	struct v4l2_ctrl *ctrl = NULL;
-	int rc = 0;
-
-	if (!inst || !control)
-		return -EINVAL;
-
-	ctrl = v4l2_ctrl_find(&inst->ctrl_handler, control->id);
-	if (ctrl) {
-		rc = try_get_ctrl(inst, ctrl);
-		if (!rc)
-			control->value = ctrl->val;
-	}
-
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_g_ctrl);
-
-int msm_cvp_g_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
-{
-	struct msm_cvp_inst *inst = instance;
-	struct v4l2_ext_control *ext_control;
-	int i = 0, rc = 0;
-
-	if (!inst || !control)
-		return -EINVAL;
-
-	ext_control = control->controls;
-
-	for (i = 0; i < control->count; i++) {
-		switch (ext_control[i].id) {
-		default:
-			dprintk(CVP_ERR,
-				"This control %x is not supported yet\n",
-					ext_control[i].id);
-			break;
-		}
-	}
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_g_ext_ctrl);
-
-int msm_cvp_s_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
-{
-	struct msm_cvp_inst *inst = instance;
-
-	if (!inst || !control)
-		return -EINVAL;
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL(msm_cvp_s_ext_ctrl);
-
 int msm_cvp_reqbufs(void *instance, struct v4l2_requestbuffers *b)
 {
 	struct msm_cvp_inst *inst = instance;
@@ -285,19 +156,6 @@
 }
 EXPORT_SYMBOL(msm_cvp_reqbufs);
 
-static bool valid_v4l2_buffer(struct v4l2_buffer *b,
-		struct msm_cvp_inst *inst)
-{
-	enum cvp_ports port =
-		!V4L2_TYPE_IS_MULTIPLANAR(b->type) ? MAX_PORT_NUM :
-		b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ? CAPTURE_PORT :
-		b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? OUTPUT_PORT :
-								MAX_PORT_NUM;
-
-	return port != MAX_PORT_NUM &&
-		inst->bufq[port].num_planes == b->length;
-}
-
 int msm_cvp_release_buffer(void *instance, int type, unsigned int index)
 {
 	int rc = 0;
@@ -309,18 +167,6 @@
 		return -EINVAL;
 	}
 
-	if (!inst->in_reconfig &&
-		inst->state > MSM_CVP_LOAD_RESOURCES &&
-		inst->state < MSM_CVP_RELEASE_RESOURCES_DONE) {
-		rc = msm_cvp_comm_try_state(inst,
-			MSM_CVP_RELEASE_RESOURCES_DONE);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"%s: Failed to move inst: %pK to rel res done\n",
-					__func__, inst);
-		}
-	}
-
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(mbuf, dummy, &inst->registeredbufs.list,
 			list) {
@@ -346,142 +192,6 @@
 }
 EXPORT_SYMBOL(msm_cvp_release_buffer);
 
-int msm_cvp_qbuf(void *instance, struct v4l2_buffer *b)
-{
-	struct msm_cvp_inst *inst = instance;
-	int rc = 0, i = 0;
-	struct buf_queue *q = NULL;
-
-	if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst)) {
-		dprintk(CVP_ERR, "%s: invalid params, inst %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < b->length; i++) {
-		b->m.planes[i].m.fd = b->m.planes[i].reserved[0];
-		b->m.planes[i].data_offset = b->m.planes[i].reserved[1];
-	}
-
-	q = msm_cvp_comm_get_vb2q(inst, b->type);
-	if (!q) {
-		dprintk(CVP_ERR,
-			"Failed to find buffer queue for type = %d\n", b->type);
-		return -EINVAL;
-	}
-
-	mutex_lock(&q->lock);
-	rc = vb2_qbuf(&q->vb2_bufq, b);
-	mutex_unlock(&q->lock);
-	if (rc)
-		dprintk(CVP_ERR, "Failed to qbuf, %d\n", rc);
-
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_qbuf);
-
-int msm_cvp_dqbuf(void *instance, struct v4l2_buffer *b)
-{
-	struct msm_cvp_inst *inst = instance;
-	int rc = 0, i = 0;
-	struct buf_queue *q = NULL;
-
-	if (!inst || !b || !valid_v4l2_buffer(b, inst)) {
-		dprintk(CVP_ERR, "%s: invalid params, inst %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	q = msm_cvp_comm_get_vb2q(inst, b->type);
-	if (!q) {
-		dprintk(CVP_ERR,
-			"Failed to find buffer queue for type = %d\n", b->type);
-		return -EINVAL;
-	}
-
-	mutex_lock(&q->lock);
-	rc = vb2_dqbuf(&q->vb2_bufq, b, true);
-	mutex_unlock(&q->lock);
-	if (rc == -EAGAIN) {
-		return rc;
-	} else if (rc) {
-		dprintk(CVP_ERR, "Failed to dqbuf, %d\n", rc);
-		return rc;
-	}
-
-	for (i = 0; i < b->length; i++) {
-		b->m.planes[i].reserved[0] = b->m.planes[i].m.fd;
-		b->m.planes[i].reserved[1] = b->m.planes[i].data_offset;
-	}
-
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_dqbuf);
-
-int msm_cvp_streamon(void *instance, enum v4l2_buf_type i)
-{
-	struct msm_cvp_inst *inst = instance;
-	int rc = 0;
-	struct buf_queue *q;
-
-	if (!inst)
-		return -EINVAL;
-
-	q = msm_cvp_comm_get_vb2q(inst, i);
-	if (!q) {
-		dprintk(CVP_ERR,
-			"Failed to find buffer queue for type = %d\n", i);
-		return -EINVAL;
-	}
-	dprintk(CVP_DBG, "Calling streamon\n");
-	mutex_lock(&q->lock);
-	rc = vb2_streamon(&q->vb2_bufq, i);
-	mutex_unlock(&q->lock);
-	if (rc) {
-		dprintk(CVP_ERR, "streamon failed on port: %d\n", i);
-		msm_cvp_comm_kill_session(inst);
-	}
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_streamon);
-
-int msm_cvp_streamoff(void *instance, enum v4l2_buf_type i)
-{
-	struct msm_cvp_inst *inst = instance;
-	int rc = 0;
-	struct buf_queue *q;
-
-	if (!inst)
-		return -EINVAL;
-
-	q = msm_cvp_comm_get_vb2q(inst, i);
-	if (!q) {
-		dprintk(CVP_ERR,
-			"Failed to find buffer queue for type = %d\n", i);
-		return -EINVAL;
-	}
-
-	if (!inst->in_reconfig) {
-		dprintk(CVP_DBG, "%s: inst %pK release resources\n",
-			__func__, inst);
-		rc = msm_cvp_comm_try_state(inst,
-			MSM_CVP_RELEASE_RESOURCES_DONE);
-		if (rc)
-			dprintk(CVP_ERR,
-				"%s: inst %pK move to rel res done failed\n",
-				__func__, inst);
-	}
-
-	dprintk(CVP_DBG, "Calling streamoff\n");
-	mutex_lock(&q->lock);
-	rc = vb2_streamoff(&q->vb2_bufq, i);
-	mutex_unlock(&q->lock);
-	if (rc)
-		dprintk(CVP_ERR, "streamoff failed on port: %d\n", i);
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_streamoff);
-
 int msm_cvp_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
 {
 	struct msm_cvp_inst *inst = instance;
@@ -661,85 +371,18 @@
 
 static int msm_cvp_start_streaming(struct vb2_queue *q, unsigned int count)
 {
-	dprintk(CVP_ERR, "Invalid input, q = %pK\n", q);
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
 	return -EINVAL;
 }
 
 static void msm_cvp_stop_streaming(struct vb2_queue *q)
 {
-	dprintk(CVP_INFO, "%s: No streaming use case supported\n",
-		__func__);
-}
-
-static int msm_cvp_queue_buf(struct msm_cvp_inst *inst,
-		struct vb2_buffer *vb2)
-{
-	int rc = 0;
-	struct msm_video_buffer *mbuf;
-
-	if (!inst || !vb2) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	mbuf = msm_cvp_comm_get_video_buffer(inst, vb2);
-	if (IS_ERR_OR_NULL(mbuf)) {
-		/*
-		 * if the buffer has RBR_PENDING flag (-EEXIST) then don't queue
-		 * it now, it will be queued via msm_cvp_comm_qbuf_rbr() as
-		 * part of RBR event processing.
-		 */
-		if (PTR_ERR(mbuf) == -EEXIST)
-			return 0;
-		dprintk(CVP_ERR, "%s: failed to get cvp-buf\n", __func__);
-		return -EINVAL;
-	}
-	if (!kref_cvp_get_mbuf(inst, mbuf)) {
-		dprintk(CVP_ERR, "%s: mbuf not found\n", __func__);
-		return -EINVAL;
-	}
-	rc = msm_cvp_comm_qbuf(inst, mbuf);
-	if (rc)
-		dprintk(CVP_ERR, "%s: failed qbuf\n", __func__);
-	kref_cvp_put_mbuf(mbuf);
-
-	return rc;
-}
-
-static int msm_cvp_queue_buf_batch(struct msm_cvp_inst *inst,
-		struct vb2_buffer *vb2)
-{
-	int rc;
-
-	if (!inst || !vb2) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	rc = msm_cvp_queue_buf(inst, vb2);
-
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
 }
 
 static void msm_cvp_buf_queue(struct vb2_buffer *vb2)
 {
-	int rc = 0;
-	struct msm_cvp_inst *inst = NULL;
-
-	inst = vb2_get_drv_priv(vb2->vb2_queue);
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid inst\n", __func__);
-		return;
-	}
-
-	if (inst->batch.enable)
-		rc = msm_cvp_queue_buf_batch(inst, vb2);
-	else
-		rc = msm_cvp_queue_buf(inst, vb2);
-	if (rc) {
-		print_cvp_vb2_buffer(CVP_ERR, "failed vb2-qbuf", inst, vb2);
-		msm_cvp_comm_generate_session_error(inst);
-	}
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
 }
 
 static const struct vb2_ops msm_cvp_vb2q_ops = {
@@ -816,21 +459,8 @@
 }
 EXPORT_SYMBOL(msm_cvp_unsubscribe_event);
 
-int msm_cvp_dqevent(void *inst, struct v4l2_event *event)
-{
-	int rc = 0;
-	struct msm_cvp_inst *cvp_inst = (struct msm_cvp_inst *)inst;
-
-	if (!inst || !event)
-		return -EINVAL;
-
-	rc = v4l2_event_dequeue(&cvp_inst->event_handler, event, false);
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_dqevent);
-
 int msm_cvp_private(void *cvp_inst, unsigned int cmd,
-		struct msm_cvp_arg *arg)
+		struct cvp_kmd_arg *arg)
 {
 	int rc = 0;
 	struct msm_cvp_inst *inst = (struct msm_cvp_inst *)cvp_inst;
@@ -877,104 +507,38 @@
 	return overload;
 }
 
-static int msm_cvp_try_set_ctrl(void *instance, struct v4l2_ctrl *ctrl)
+static int _init_session_queue(struct msm_cvp_inst *inst)
 {
-	return -EINVAL;
-}
-
-static int msm_cvp_op_s_ctrl(struct v4l2_ctrl *ctrl)
-{
-
-	int rc = 0, c = 0;
-	struct msm_cvp_inst *inst;
-
-	if (!ctrl) {
-		dprintk(CVP_ERR, "%s invalid parameters for ctrl\n", __func__);
-		return -EINVAL;
+	spin_lock_init(&inst->session_queue.lock);
+	INIT_LIST_HEAD(&inst->session_queue.msgs);
+	inst->session_queue.msg_count = 0;
+	init_waitqueue_head(&inst->session_queue.wq);
+	inst->session_queue.msg_cache = KMEM_CACHE(session_msg, 0);
+	if (!inst->session_queue.msg_cache) {
+		dprintk(CVP_ERR, "Failed to allocate msg quque\n");
+		return -ENOMEM;
 	}
-
-	inst = container_of(ctrl->handler,
-		struct msm_cvp_inst, ctrl_handler);
-	if (!inst) {
-		dprintk(CVP_ERR, "%s invalid parameters for inst\n", __func__);
-		return -EINVAL;
-	}
-
-	for (c = 0; c < ctrl->ncontrols; ++c) {
-		if (ctrl->cluster[c]->is_new) {
-			rc = msm_cvp_try_set_ctrl(inst, ctrl->cluster[c]);
-			if (rc) {
-				dprintk(CVP_ERR, "Failed setting %x\n",
-					ctrl->cluster[c]->id);
-				break;
-			}
-		}
-	}
-	if (rc)
-		dprintk(CVP_ERR, "Failed setting control: Inst = %pK (%s)\n",
-				inst, v4l2_ctrl_get_name(ctrl->id));
-	return rc;
-}
-static int try_get_ctrl(struct msm_cvp_inst *inst, struct v4l2_ctrl *ctrl)
-{
-	switch (ctrl->id) {
-	default:
-		/*
-		 * Other controls aren't really volatile, shouldn't need to
-		 * modify ctrl->value
-		 */
-		break;
-	}
-
 	return 0;
 }
 
-static int msm_cvp_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+static void _deinit_session_queue(struct msm_cvp_inst *inst)
 {
-	int rc = 0, c = 0;
-	struct msm_cvp_inst *inst;
-	struct v4l2_ctrl *master;
+	struct session_msg *msg, *tmpmsg;
 
-	if (!ctrl) {
-		dprintk(CVP_ERR, "%s invalid parameters for ctrl\n", __func__);
-		return -EINVAL;
+	/* free all messages */
+	spin_lock(&inst->session_queue.lock);
+	list_for_each_entry_safe(msg, tmpmsg, &inst->session_queue.msgs, node) {
+		list_del_init(&msg->node);
+		kmem_cache_free(inst->session_queue.msg_cache, msg);
 	}
+	inst->session_queue.msg_count = 0;
+	spin_unlock(&inst->session_queue.lock);
 
-	inst = container_of(ctrl->handler,
-		struct msm_cvp_inst, ctrl_handler);
-	if (!inst) {
-		dprintk(CVP_ERR, "%s invalid parameters for inst\n", __func__);
-		return -EINVAL;
-	}
-	master = ctrl->cluster[0];
-	if (!master) {
-		dprintk(CVP_ERR, "%s invalid parameters for master\n",
-			__func__);
-		return -EINVAL;
-	}
+	wake_up_all(&inst->session_queue.wq);
 
-	for (c = 0; c < master->ncontrols; ++c) {
-		if (master->cluster[c]->flags & V4L2_CTRL_FLAG_VOLATILE) {
-			rc = try_get_ctrl(inst, master->cluster[c]);
-			if (rc) {
-				dprintk(CVP_ERR, "Failed getting %x\n",
-					master->cluster[c]->id);
-				return rc;
-			}
-		}
-	}
-	if (rc)
-		dprintk(CVP_ERR, "Failed getting control: Inst = %pK (%s)\n",
-				inst, v4l2_ctrl_get_name(ctrl->id));
-	return rc;
+	kmem_cache_destroy(inst->session_queue.msg_cache);
 }
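The two helpers above replace the removed V4L2 control plumbing with a per-instance message queue (see the session_msg and cvp_session_queue definitions this patch adds to msm_cvp_internal.h). A minimal producer-side sketch, assuming a hypothetical helper name cvp_enqueue_session_msg() that is not part of this patch:

static int cvp_enqueue_session_msg(struct msm_cvp_inst *inst,
		struct hfi_msg_session_hdr *hdr)
{
	struct cvp_session_queue *sq = &inst->session_queue;
	struct session_msg *msg;

	/* may be called from the response path, hence GFP_ATOMIC */
	msg = kmem_cache_alloc(sq->msg_cache, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	memcpy(&msg->pkt, hdr, sizeof(msg->pkt));

	spin_lock(&sq->lock);
	if (sq->msg_count >= MAX_NUM_MSGS_PER_SESSION) {
		/* bound the queue so a stuck reader cannot exhaust memory */
		spin_unlock(&sq->lock);
		kmem_cache_free(sq->msg_cache, msg);
		return -ENOSPC;
	}
	list_add_tail(&msg->node, &sq->msgs);
	sq->msg_count++;
	spin_unlock(&sq->lock);

	wake_up_all(&sq->wq);
	return 0;
}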
 
-static const struct v4l2_ctrl_ops msm_cvp_ctrl_ops = {
-
-	.s_ctrl = msm_cvp_op_s_ctrl,
-	.g_volatile_ctrl = msm_cvp_op_g_volatile_ctrl,
-};
-
 void *msm_cvp_open(int core_id, int session_type)
 {
 	struct msm_cvp_inst *inst = NULL;
@@ -1010,19 +574,10 @@
 	mutex_init(&inst->lock);
 	mutex_init(&inst->flush_lock);
 
-	INIT_MSM_CVP_LIST(&inst->scratchbufs);
 	INIT_MSM_CVP_LIST(&inst->freqs);
-	INIT_MSM_CVP_LIST(&inst->input_crs);
 	INIT_MSM_CVP_LIST(&inst->persistbufs);
-	INIT_MSM_CVP_LIST(&inst->pending_getpropq);
-	INIT_MSM_CVP_LIST(&inst->outputbufs);
 	INIT_MSM_CVP_LIST(&inst->registeredbufs);
 	INIT_MSM_CVP_LIST(&inst->cvpbufs);
-	INIT_MSM_CVP_LIST(&inst->reconbufs);
-	INIT_MSM_CVP_LIST(&inst->eosbufs);
-	INIT_MSM_CVP_LIST(&inst->etb_data);
-	INIT_MSM_CVP_LIST(&inst->fbd_data);
-	INIT_MSM_CVP_LIST(&inst->dfs_config);
 
 	kref_init(&inst->kref);
 
@@ -1035,12 +590,6 @@
 	inst->clk_data.sys_cache_bw = 0;
 	inst->clk_data.bitrate = 0;
 	inst->clk_data.core_id = CVP_CORE_ID_DEFAULT;
-	inst->bit_depth = MSM_CVP_BIT_DEPTH_8;
-	inst->pic_struct = MSM_CVP_PIC_STRUCT_PROGRESSIVE;
-	inst->colour_space = MSM_CVP_BT601_6_525;
-	inst->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
-	inst->level = V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
-	inst->entropy_mode = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;
 
 	for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
 		i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
@@ -1049,11 +598,6 @@
 
 	if (session_type == MSM_CVP_CORE) {
 		msm_cvp_session_init(inst);
-		rc = msm_cvp_control_init(inst, &msm_cvp_ctrl_ops);
-	}
-	if (rc) {
-		dprintk(CVP_ERR, "Failed control initialization\n");
-		goto fail_bufq_capture;
 	}
 
 	rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
@@ -1077,6 +621,11 @@
 	list_add_tail(&inst->list, &core->instances);
 	mutex_unlock(&core->lock);
 
+	rc = _init_session_queue(inst);
+	if (rc)
+		goto fail_init;
+
 	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_INIT_DONE);
 	if (rc) {
 		dprintk(CVP_ERR,
@@ -1104,7 +653,7 @@
 				"Failed to move video instance to open done state\n");
 			goto fail_init;
 		}
-		rc = cvp_comm_set_persist_buffers(inst);
+		rc = cvp_comm_set_arp_buffers(inst);
 		if (rc) {
 			dprintk(CVP_ERR,
 				"Failed to set ARP buffers\n");
@@ -1115,6 +664,7 @@
 
 	return inst;
 fail_init:
+	_deinit_session_queue(inst);
 	mutex_lock(&core->lock);
 	list_del(&inst->list);
 	mutex_unlock(&core->lock);
@@ -1125,25 +675,16 @@
 fail_bufq_output:
 	vb2_queue_release(&inst->bufq[CAPTURE_PORT].vb2_bufq);
 fail_bufq_capture:
-	msm_cvp_comm_ctrl_deinit(inst);
 	mutex_destroy(&inst->sync_lock);
 	mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
 	mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);
 	mutex_destroy(&inst->lock);
 	mutex_destroy(&inst->flush_lock);
 
-	DEINIT_MSM_CVP_LIST(&inst->scratchbufs);
 	DEINIT_MSM_CVP_LIST(&inst->persistbufs);
-	DEINIT_MSM_CVP_LIST(&inst->pending_getpropq);
-	DEINIT_MSM_CVP_LIST(&inst->outputbufs);
 	DEINIT_MSM_CVP_LIST(&inst->cvpbufs);
 	DEINIT_MSM_CVP_LIST(&inst->registeredbufs);
-	DEINIT_MSM_CVP_LIST(&inst->eosbufs);
 	DEINIT_MSM_CVP_LIST(&inst->freqs);
-	DEINIT_MSM_CVP_LIST(&inst->input_crs);
-	DEINIT_MSM_CVP_LIST(&inst->etb_data);
-	DEINIT_MSM_CVP_LIST(&inst->fbd_data);
-	DEINIT_MSM_CVP_LIST(&inst->dfs_config);
 
 	kfree(inst);
 	inst = NULL;
@@ -1155,7 +696,6 @@
 static void msm_cvp_cleanup_instance(struct msm_cvp_inst *inst)
 {
 	struct msm_video_buffer *temp, *dummy;
-	struct getprop_buf *temp_prop, *dummy_prop;
 
 	if (!inst) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -1174,46 +714,13 @@
 
 	msm_cvp_comm_free_freq_table(inst);
 
-	msm_cvp_comm_free_input_cr_table(inst);
-
-	if (msm_cvp_comm_release_scratch_buffers(inst, false))
-		dprintk(CVP_ERR,
-			"Failed to release scratch buffers\n");
-
-	if (msm_cvp_comm_release_recon_buffers(inst))
-		dprintk(CVP_ERR,
-			"Failed to release recon buffers\n");
-
 	if (cvp_comm_release_persist_buffers(inst))
 		dprintk(CVP_ERR,
 			"Failed to release persist buffers\n");
 
-	if (msm_cvp_comm_release_mark_data(inst))
-		dprintk(CVP_ERR,
-			"Failed to release mark_data buffers\n");
-
-	msm_cvp_comm_release_eos_buffers(inst);
-
-	if (msm_cvp_comm_release_output_buffers(inst, true))
-		dprintk(CVP_ERR,
-			"Failed to release output buffers\n");
-
+	/* cvpbufs are released separately via cvp_comm_release_cvp_buffers() */
 	if (inst->extradata_handle)
 		msm_cvp_comm_smem_free(inst, inst->extradata_handle);
-
-	mutex_lock(&inst->pending_getpropq.lock);
-	if (!list_empty(&inst->pending_getpropq.list)) {
-		dprintk(CVP_ERR,
-			"pending_getpropq not empty for instance %pK\n",
-			inst);
-		list_for_each_entry_safe(temp_prop, dummy_prop,
-			&inst->pending_getpropq.list, list) {
-			kfree(temp_prop->data);
-			list_del(&temp_prop->list);
-			kfree(temp_prop);
-		}
-	}
-	mutex_unlock(&inst->pending_getpropq.lock);
 }
 
 int msm_cvp_destroy(struct msm_cvp_inst *inst)
@@ -1233,26 +740,16 @@
 	list_del(&inst->list);
 	mutex_unlock(&core->lock);
 
-	msm_cvp_comm_ctrl_deinit(inst);
-
 	v4l2_fh_del(&inst->event_handler);
 	v4l2_fh_exit(&inst->event_handler);
 
 	for (i = 0; i < MAX_PORT_NUM; i++)
 		vb2_queue_release(&inst->bufq[i].vb2_bufq);
 
-	DEINIT_MSM_CVP_LIST(&inst->scratchbufs);
 	DEINIT_MSM_CVP_LIST(&inst->persistbufs);
-	DEINIT_MSM_CVP_LIST(&inst->pending_getpropq);
-	DEINIT_MSM_CVP_LIST(&inst->outputbufs);
 	DEINIT_MSM_CVP_LIST(&inst->cvpbufs);
 	DEINIT_MSM_CVP_LIST(&inst->registeredbufs);
-	DEINIT_MSM_CVP_LIST(&inst->eosbufs);
 	DEINIT_MSM_CVP_LIST(&inst->freqs);
-	DEINIT_MSM_CVP_LIST(&inst->input_crs);
-	DEINIT_MSM_CVP_LIST(&inst->etb_data);
-	DEINIT_MSM_CVP_LIST(&inst->fbd_data);
-	DEINIT_MSM_CVP_LIST(&inst->dfs_config);
 
 	mutex_destroy(&inst->sync_lock);
 	mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
@@ -1261,8 +758,9 @@
 	mutex_destroy(&inst->flush_lock);
 
 	msm_cvp_debugfs_deinit_inst(inst);
+	_deinit_session_queue(inst);
 
-	pr_info(CVP_DBG_TAG "Closed video instance: %pK\n",
+	pr_info(CVP_DBG_TAG "Closed cvp instance: %pK\n",
 			"info", inst);
 	kfree(inst);
 	return 0;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.h b/drivers/media/platform/msm/cvp/msm_cvp_core.h
index 97339ed..92290d4 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.h
@@ -97,22 +97,10 @@
 void *msm_cvp_open(int core_id, int session_type);
 int msm_cvp_close(void *instance);
 int msm_cvp_suspend(int core_id);
-int msm_cvp_querycap(void *instance, struct v4l2_capability *cap);
-int msm_cvp_enum_fmt(void *instance, struct v4l2_fmtdesc *f);
-int msm_cvp_s_fmt(void *instance, struct v4l2_format *f);
 int msm_cvp_g_fmt(void *instance, struct v4l2_format *f);
-int msm_cvp_s_ctrl(void *instance, struct v4l2_control *a);
-int msm_cvp_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
-int msm_cvp_g_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
-int msm_cvp_g_ctrl(void *instance, struct v4l2_control *a);
 int msm_cvp_reqbufs(void *instance, struct v4l2_requestbuffers *b);
 int msm_cvp_release_buffer(void *instance, int buffer_type,
 		unsigned int buffer_index);
-int msm_cvp_qbuf(void *instance, struct v4l2_buffer *b);
-int msm_cvp_dqbuf(void *instance, struct v4l2_buffer *b);
-int msm_cvp_streamon(void *instance, enum v4l2_buf_type i);
-int msm_cvp_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl);
-int msm_cvp_streamoff(void *instance, enum v4l2_buf_type i);
 int msm_cvp_comm_cmd(void *instance, union msm_v4l2_cmd *cmd);
 int msm_cvp_poll(void *instance, struct file *filp,
 		struct poll_table_struct *pt);
@@ -120,9 +108,7 @@
 		const struct v4l2_event_subscription *sub);
 int msm_cvp_unsubscribe_event(void *instance,
 		const struct v4l2_event_subscription *sub);
-int msm_cvp_dqevent(void *instance, struct v4l2_event *event);
-int msm_cvp_g_crop(void *instance, struct v4l2_crop *a);
 int msm_cvp_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
 int msm_cvp_private(void *cvp_inst, unsigned int cmd,
-		struct msm_cvp_arg *arg);
+		struct cvp_kmd_arg *arg);
 #endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_debug.c b/drivers/media/platform/msm/cvp/msm_cvp_debug.c
index 2ab6d44..8d5d7c5 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_debug.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_debug.c
@@ -8,7 +8,7 @@
 #include "msm_cvp_debug.h"
 #include "cvp_hfi_api.h"
 
-int msm_cvp_debug = CVP_ERR | CVP_WARN | CVP_DBG;
+int msm_cvp_debug = CVP_ERR | CVP_WARN;
 EXPORT_SYMBOL(msm_cvp_debug);
 
 int msm_cvp_debug_out = CVP_OUT_PRINTK;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
index 6eeb481..e1e641c 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/rpmsg.h>
 #include <linux/of_platform.h>
+#include <linux/of_fdt.h>
 #include <soc/qcom/secure_buffer.h>
 #include "msm_cvp_dsp.h"
 
@@ -21,39 +22,50 @@
 #define STATUS_DEINIT 1
 #define STATUS_OK 2
 #define STATUS_SSR 3
+#define CVP_DSP_MAX_RESERVED 5
 
-struct cvpd_cmd_msg {
+struct cvp_dsp_cmd_msg {
 	uint32_t cmd_msg_type;
 	int32_t ret_val;
 	uint64_t msg_ptr;
 	uint32_t msg_ptr_len;
 	uint32_t iova_buff_addr;
 	uint32_t buff_index;
-	uint32_t buf_size;
+	uint32_t buff_size;
 	uint32_t session_id;
-	uint32_t context;
+	int32_t ddr_type;
+	uint32_t reserved[CVP_DSP_MAX_RESERVED];
 };
 
-struct cvpd_rsp_msg {
-	uint32_t context;
+struct cvp_dsp_rsp_msg {
+	uint32_t cmd_msg_type;
 	int32_t ret_val;
+	uint32_t reserved[CVP_DSP_MAX_RESERVED];
+};
+
+struct cvp_dsp_rsp_context {
+	struct completion work;
 };
 
 struct cvp_dsp_apps {
 	struct rpmsg_device *chan;
 	struct mutex smd_mutex;
+	struct mutex reg_buffer_mutex;
+	struct mutex dereg_buffer_mutex;
 	int rpmsg_register;
 	uint32_t cdsp_state;
 	uint32_t cvp_shutdown;
+	struct completion reg_buffer_work;
+	struct completion dereg_buffer_work;
+	struct completion shutdown_work;
 };
 
-static struct completion work;
 
 static struct cvp_dsp_apps gfa_cv;
 
-static struct cvpd_cmd_msg cmd_msg;
+static struct cvp_dsp_cmd_msg cmd_msg;
 
-static struct cvpd_rsp_msg cmd_msg_rsp;
+static struct cvp_dsp_rsp_msg cmd_msg_rsp;
 
 static int cvp_dsp_send_cmd(void *msg, uint32_t len)
 {
@@ -82,7 +94,8 @@
 	int destVMperm[SRC_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC };
 
 	if (strcmp(rpdev->dev.parent->of_node->name, "cdsp")) {
-		pr_err("%s: Failed to probe rpmsg device.Node name:%s\n",
+		dprintk(CVP_ERR,
+			"%s: Failed to probe rpmsg device.Node name:%s\n",
 			__func__, rpdev->dev.parent->of_node->name);
 		err = -EINVAL;
 		goto bail;
@@ -100,15 +113,17 @@
 			msg_ptr_len, srcVM, DEST_VM_NUM, destVM,
 			destVMperm, SRC_VM_NUM);
 		if (err) {
-			pr_err("%s: Failed to hyp_assign. err=%d\n",
+			dprintk(CVP_ERR,
+				"%s: Failed to hyp_assign. err=%d\n",
 				__func__, err);
 			return err;
 		}
 		err = cvp_dsp_send_cmd_hfi_queue(
 			(phys_addr_t *)msg_ptr, msg_ptr_len);
 		if (err) {
-			pr_err("%s: Failed to send HFI Queue address. err=%d\n",
-			__func__, err);
+			dprintk(CVP_ERR,
+				"%s: Failed to send HFI Queue address. err=%d\n",
+				__func__, err);
 			goto bail;
 		}
 		mutex_lock(&me->smd_mutex);
@@ -116,7 +131,8 @@
 		mutex_unlock(&me->smd_mutex);
 	}
 
-	pr_info("%s: Successfully probed. cdsp_state=%d cvp_shutdown=%d\n",
+	dprintk(CVP_INFO,
+		"%s: Successfully probed. cdsp_state=%d cvp_shutdown=%d\n",
 		__func__, cdsp_state, cvp_shutdown);
 bail:
 	return err;
@@ -130,17 +146,36 @@
 	me->chan = NULL;
 	me->cdsp_state = STATUS_SSR;
 	mutex_unlock(&me->smd_mutex);
-	pr_info("%s: CDSP SSR triggered\n", __func__);
+	dprintk(CVP_INFO,
+		"%s: CDSP SSR triggered\n", __func__);
 }
 
 static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
 	void *data, int len, void *priv, u32 addr)
 {
-	int *rpmsg_resp = (int *)data;
+	struct cvp_dsp_rsp_msg *dsp_response =
+		(struct cvp_dsp_rsp_msg *)data;
+	struct cvp_dsp_apps *me = &gfa_cv;
 
-	cmd_msg_rsp.ret_val = *rpmsg_resp;
-	complete(&work);
-
+	dprintk(CVP_DBG,
+		"%s: cmd_msg_type=0x%x dsp_response->ret_val =0x%x\n"
+		, __func__, dsp_response->cmd_msg_type, dsp_response->ret_val);
+	switch (dsp_response->cmd_msg_type) {
+	case CVP_DSP_REGISTER_BUFFER:
+		complete(&me->reg_buffer_work);
+		break;
+	case CVP_DSP_DEREGISTER_BUFFER:
+		complete(&me->dereg_buffer_work);
+		break;
+	case CVP_DSP_SHUTDOWN:
+		complete(&me->shutdown_work);
+		break;
+	default:
+		dprintk(CVP_ERR,
+		"%s: Invalid cmd_msg_type received from dsp: %d\n",
+		__func__, dsp_response->cmd_msg_type);
+		break;
+	}
 	return 0;
 }
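The reworked callback dispatches on cmd_msg_type so that each blocking command waits on its own completion rather than the single global `work`. The resulting send-and-wait pattern, as a minimal sketch with the assumed helper name cvp_dsp_sync_cmd() (the real code open-codes this in cvp_dsp_register_buffer() and cvp_dsp_deregister_buffer() below):

static int cvp_dsp_sync_cmd(struct cvp_dsp_cmd_msg *msg,
		struct mutex *lock, struct completion *done)
{
	int err;

	/* serialize: one outstanding command of this type at a time */
	mutex_lock(lock);
	err = cvp_dsp_send_cmd(msg, sizeof(*msg));
	if (!err)
		wait_for_completion(done); /* signaled by the rpmsg callback */
	mutex_unlock(lock);
	return err;
}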
 
@@ -148,7 +183,7 @@
 	uint32_t size_in_bytes)
 {
 	int err;
-	struct cvpd_cmd_msg local_cmd_msg;
+	struct cvp_dsp_cmd_msg local_cmd_msg;
 	struct cvp_dsp_apps *me = &gfa_cv;
 	int srcVM[SRC_VM_NUM] = {VMID_HLOS};
 	int destVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6};
@@ -158,27 +193,37 @@
 	local_cmd_msg.cmd_msg_type = CVP_DSP_SEND_HFI_CMD_QUEUE;
 	local_cmd_msg.msg_ptr = (uint64_t)phys_addr;
 	local_cmd_msg.msg_ptr_len = size_in_bytes;
+	local_cmd_msg.ddr_type = of_fdt_get_ddrtype();
+	if (local_cmd_msg.ddr_type < 0) {
+		dprintk(CVP_ERR,
+			"%s: Incorrect DDR type value %d\n",
+			__func__, local_cmd_msg.ddr_type);
+	}
+
 	mutex_lock(&me->smd_mutex);
 	cmd_msg.msg_ptr = (uint64_t)phys_addr;
 	cmd_msg.msg_ptr_len = (size_in_bytes);
 	mutex_unlock(&me->smd_mutex);
 
-	pr_debug("%s :: address of buffer, PA=0x%pK  size_buff=%d\n",
-		__func__, phys_addr, size_in_bytes);
+	dprintk(CVP_DBG,
+		"%s :: address of buffer, PA=0x%pK  size_buff=%d ddr_type=%d\n",
+		__func__, phys_addr, size_in_bytes, local_cmd_msg.ddr_type);
 
 	err = hyp_assign_phys((uint64_t)local_cmd_msg.msg_ptr,
 		local_cmd_msg.msg_ptr_len, srcVM, SRC_VM_NUM, destVM,
 		destVMperm, DEST_VM_NUM);
 	if (err) {
-		pr_err("%s: Failed in hyp_assign. err=%d\n",
+		dprintk(CVP_ERR,
+			"%s: Failed in hyp_assign. err=%d\n",
 			__func__, err);
 		return err;
 	}
 
 	err = cvp_dsp_send_cmd
-			 (&local_cmd_msg, sizeof(struct cvpd_cmd_msg));
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
 	if (err != 0)
-		pr_err("%s: cvp_dsp_send_cmd failed with err=%d\n",
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
 	else {
 		mutex_lock(&me->smd_mutex);
@@ -193,7 +238,7 @@
 int cvp_dsp_suspend(uint32_t session_flag)
 {
 	int err = 0;
-	struct cvpd_cmd_msg local_cmd_msg;
+	struct cvp_dsp_cmd_msg local_cmd_msg;
 	struct cvp_dsp_apps *me = &gfa_cv;
 	uint32_t cdsp_state;
 
@@ -206,9 +251,10 @@
 
 	local_cmd_msg.cmd_msg_type = CVP_DSP_SUSPEND;
 	err = cvp_dsp_send_cmd
-			 (&local_cmd_msg, sizeof(struct cvpd_cmd_msg));
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
 	if (err != 0)
-		pr_err("%s: cvp_dsp_send_cmd failed with err=%d\n",
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
 
 	return err;
@@ -217,7 +263,7 @@
 int cvp_dsp_resume(uint32_t session_flag)
 {
 	int err;
-	struct cvpd_cmd_msg local_cmd_msg;
+	struct cvp_dsp_cmd_msg local_cmd_msg;
 	struct cvp_dsp_apps *me = &gfa_cv;
 	uint32_t cdsp_state;
 
@@ -230,9 +276,10 @@
 
 	local_cmd_msg.cmd_msg_type = CVP_DSP_RESUME;
 	err = cvp_dsp_send_cmd
-			 (&local_cmd_msg, sizeof(struct cvpd_cmd_msg));
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
 	if (err != 0)
-		pr_err("%s: cvp_dsp_send_cmd failed with err=%d\n",
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
 
 	return err;
@@ -242,19 +289,20 @@
 {
 	struct cvp_dsp_apps *me = &gfa_cv;
 	int err, local_cmd_msg_rsp;
-	struct cvpd_cmd_msg local_cmd_msg;
+	struct cvp_dsp_cmd_msg local_cmd_msg;
 	int srcVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6};
 	int destVM[SRC_VM_NUM] = {VMID_HLOS};
 	int destVMperm[SRC_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC };
 
 	local_cmd_msg.cmd_msg_type = CVP_DSP_SHUTDOWN;
 	err = cvp_dsp_send_cmd
-			 (&local_cmd_msg, sizeof(struct cvpd_cmd_msg));
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
 	if (err != 0)
-		pr_err("%s: cvp_dsp_send_cmd failed with err=%d\n",
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
 
-	wait_for_completion(&work);
+	wait_for_completion(&me->shutdown_work);
 
 	mutex_lock(&me->smd_mutex);
 	me->cvp_shutdown = STATUS_SSR;
@@ -267,18 +315,106 @@
 			local_cmd_msg.msg_ptr_len, srcVM, DEST_VM_NUM,
 			destVM,	destVMperm, SRC_VM_NUM);
 		if (err) {
-			pr_err("%s: Failed to hyp_assign. err=%d\n",
+			dprintk(CVP_ERR,
+				"%s: Failed to hyp_assign. err=%d\n",
 				__func__, err);
 			return err;
 		}
 	} else {
-		pr_err("%s: Skipping hyp_assign as CDSP sent invalid response=%d\n",
+		dprintk(CVP_ERR,
+			"%s: Skipping hyp_assign as CDSP sent invalid response=%d\n",
 			__func__, local_cmd_msg_rsp);
 	}
 
 	return err;
 }
 
+int cvp_dsp_register_buffer(uint32_t iova_buff_addr,
+	uint32_t buff_index, uint32_t buff_size,
+	uint32_t session_id)
+{
+	struct cvp_dsp_cmd_msg local_cmd_msg;
+	int err;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	local_cmd_msg.cmd_msg_type = CVP_DSP_REGISTER_BUFFER;
+	local_cmd_msg.iova_buff_addr = iova_buff_addr;
+	local_cmd_msg.buff_index = buff_index;
+	local_cmd_msg.buff_size = buff_size;
+	local_cmd_msg.session_id = session_id;
+	dprintk(CVP_DBG,
+		"%s: cmd_msg_type=0x%x, iova_buff_addr=0x%x buff_index=0x%x\n",
+		__func__, local_cmd_msg.cmd_msg_type, iova_buff_addr,
+		local_cmd_msg.buff_index);
+	dprintk(CVP_DBG,
+		"%s: buff_size=0x%x session_id=0x%x\n",
+		__func__, local_cmd_msg.buff_size, local_cmd_msg.session_id);
+
+	mutex_lock(&me->reg_buffer_mutex);
+	err = cvp_dsp_send_cmd
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
+	if (err != 0) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
+			__func__, err);
+		mutex_unlock(&me->reg_buffer_mutex);
+		return err;
+	}
+
+	dprintk(CVP_DBG,
+		"%s: calling wait_for_completion work=%pK\n",
+		__func__, &me->reg_buffer_work);
+	wait_for_completion(&me->reg_buffer_work);
+	mutex_unlock(&me->reg_buffer_mutex);
+	dprintk(CVP_DBG,
+			"%s: done calling wait_for_completion\n", __func__);
+
+	return err;
+}
+
+int cvp_dsp_deregister_buffer(uint32_t iova_buff_addr,
+	uint32_t buff_index, uint32_t buff_size,
+	uint32_t session_id)
+{
+	struct cvp_dsp_cmd_msg local_cmd_msg;
+	int err;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	local_cmd_msg.cmd_msg_type = CVP_DSP_DEREGISTER_BUFFER;
+	local_cmd_msg.iova_buff_addr = iova_buff_addr;
+	local_cmd_msg.buff_index = buff_index;
+	local_cmd_msg.buff_size = buff_size;
+	local_cmd_msg.session_id = session_id;
+	dprintk(CVP_DBG,
+		"%s: cmd_msg_type=0x%x, iova_buff_addr=0x%x buff_index=0x%x\n",
+		__func__, local_cmd_msg.cmd_msg_type, iova_buff_addr,
+		local_cmd_msg.buff_index);
+	dprintk(CVP_DBG,
+			"%s: buff_size=0x%x session_id=0x%x\n",
+		__func__, local_cmd_msg.buff_size, local_cmd_msg.session_id);
+
+	mutex_lock(&me->dereg_buffer_mutex);
+	err = cvp_dsp_send_cmd
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
+	if (err != 0) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
+			__func__, err);
+		mutex_unlock(&me->dereg_buffer_mutex);
+		return err;
+	}
+
+	dprintk(CVP_DBG,
+		"%s: calling wait_for_completion work=%pK\n",
+		__func__, &me->dereg_buffer_work);
+	wait_for_completion(&me->dereg_buffer_work);
+	dprintk(CVP_DBG,
+		"%s: done calling wait_for_completion\n", __func__);
+	mutex_unlock(&me->dereg_buffer_mutex);
+
+	return err;
+}
+
 static const struct rpmsg_device_id cvp_dsp_rpmsg_match[] = {
 	{ CVP_APPS_DSP_GLINK_GUID },
 	{ },
@@ -299,13 +435,18 @@
 	struct cvp_dsp_apps *me = &gfa_cv;
 	int err;
 
-	init_completion(&work);
 	mutex_init(&me->smd_mutex);
+	mutex_init(&me->reg_buffer_mutex);
+	mutex_init(&me->dereg_buffer_mutex);
+	init_completion(&me->shutdown_work);
+	init_completion(&me->reg_buffer_work);
+	init_completion(&me->dereg_buffer_work);
 	me->cvp_shutdown = STATUS_INIT;
 	me->cdsp_state = STATUS_INIT;
 	err = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
 	if (err) {
-		pr_err("%s : register_rpmsg_driver failed with err %d\n",
+		dprintk(CVP_ERR,
+			"%s : register_rpmsg_driver failed with err %d\n",
 			__func__, err);
 		goto register_bail;
 	}
@@ -325,6 +466,8 @@
 	me->cvp_shutdown = STATUS_DEINIT;
 	me->cdsp_state = STATUS_DEINIT;
 	mutex_destroy(&me->smd_mutex);
+	mutex_destroy(&me->reg_buffer_mutex);
+	mutex_destroy(&me->dereg_buffer_mutex);
 	if (me->rpmsg_register == 1)
 		unregister_rpmsg_driver(&cvp_dsp_rpmsg_client);
 }
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
index 6d7a3fc..d200942 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
@@ -7,6 +7,7 @@
 #define MSM_CVP_DSP_H
 
 #include <linux/types.h>
+#include "msm_cvp_debug.h"
 
 #define CVP_APPS_DSP_GLINK_GUID "cvp-glink-apps-dsp"
 #define CVP_APPS_DSP_SMD_GUID "cvp-smd-apps-dsp"
@@ -51,5 +52,29 @@
  */
 int cvp_dsp_shutdown(uint32_t session_flag);
 
+/*
+ * API to register iova buffer address with CDSP
+ *
+ * @iova_buff_addr: IOVA buffer address
+ * @buff_index:     buffer index
+ * @buff_size:      size in bytes of cvp buffer
+ * @session_id:     cvp session id
+ */
+int cvp_dsp_register_buffer(uint32_t iova_buff_addr,
+	uint32_t buff_index, uint32_t buff_size,
+	uint32_t session_id);
+
+/*
+ * API to de-register iova buffer address from CDSP
+ *
+ * @iova_buff_addr: IOVA buffer address
+ * @buff_index:     buffer index
+ * @buff_size:      size in bytes of cvp buffer
+ * @session_id:     cvp session id
+ */
+int cvp_dsp_deregister_buffer(uint32_t iova_buff_addr,
+	uint32_t buff_index, uint32_t buff_size,
+	uint32_t session_id);
+
 #endif // MSM_CVP_DSP_H
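A minimal caller sketch for the two APIs declared above; the function name and the flow are illustrative only, the real callers live in the CVP session code:

static int example_share_buffer_with_dsp(uint32_t iova, uint32_t index,
	uint32_t size, uint32_t session_id)
{
	int rc;

	/* blocks until the CDSP acks the registration */
	rc = cvp_dsp_register_buffer(iova, index, size, session_id);
	if (rc)
		return rc;

	/* ... CDSP works on the buffer here ... */

	/* blocks until the CDSP acks the deregistration */
	return cvp_dsp_deregister_buffer(iova, index, size, session_id);
}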
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_internal.h b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
index c3deeba..28b31fc 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_internal.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
@@ -146,24 +146,6 @@
 	bool turbo;
 };
 
-struct cvp_input_cr_data {
-	struct list_head list;
-	u32 index;
-	u32 input_cr;
-};
-
-struct recon_buf {
-	struct list_head list;
-	u32 buffer_index;
-	u32 CR;
-	u32 CF;
-};
-
-struct eos_buf {
-	struct list_head list;
-	struct msm_smem smem;
-};
-
 struct internal_buf {
 	struct list_head list;
 	enum hal_buffer buffer_type;
@@ -172,55 +154,39 @@
 	bool mark_remove;
 };
 
-struct msm_cvp_csc_coeff {
-	u32 *vpe_csc_custom_matrix_coeff;
-	u32 *vpe_csc_custom_bias_coeff;
-	u32 *vpe_csc_custom_limit_coeff;
-};
-
-struct msm_cvp_buf_data {
-	struct list_head list;
-	u32 index;
-	u32 mark_data;
-	u32 mark_target;
-};
-
 struct msm_cvp_common_data {
 	char key[128];
 	int value;
 };
 
-struct msm_cvp_codec_data {
-	u32 fourcc;
-	enum session_type session_type;
-	int vpp_cycles;
-	int vsp_cycles;
-	int low_power_cycles;
-};
-
-enum efuse_purpose {
-	SKU_VERSION = 0,
-};
-
 enum sku_version {
 	SKU_VERSION_0 = 0,
 	SKU_VERSION_1,
 	SKU_VERSION_2,
 };
 
-struct msm_cvp_efuse_data {
-	u32 start_address;
-	u32 size;
-	u32 mask;
-	u32 shift;
-	enum efuse_purpose purpose;
-};
-
 enum vpu_version {
 	VPU_VERSION_4 = 1,
 	VPU_VERSION_5,
 };
 
+struct msm_cvp_ubwc_config_data {
+	struct {
+		u32 max_channel_override : 1;
+		u32 mal_length_override : 1;
+		u32 hb_override : 1;
+		u32 bank_swzl_level_override : 1;
+		u32 bank_spreading_override : 1;
+		u32 reserved : 27;
+	} override_bit_info;
+
+	u32 max_channels;
+	u32 mal_length;
+	u32 highest_bank_bit;
+	u32 bank_swzl_level;
+	u32 bank_spreading;
+};
+
 #define IS_VPU_4(ver) \
 	(ver == VPU_VERSION_4)
 
@@ -230,15 +196,11 @@
 struct msm_cvp_platform_data {
 	struct msm_cvp_common_data *common_data;
 	unsigned int common_data_length;
-	struct msm_cvp_codec_data *codec_data;
-	unsigned int codec_data_length;
-	struct msm_cvp_csc_coeff csc_data;
-	struct msm_cvp_efuse_data *efuse_data;
-	unsigned int efuse_data_length;
 	unsigned int sku_version;
 	phys_addr_t gcc_register_base;
 	uint32_t gcc_register_size;
 	uint32_t vpu_ver;
+	struct msm_cvp_ubwc_config_data *ubwc_config;
 };
 
 struct msm_cvp_format {
@@ -252,19 +214,6 @@
 	u32 output_min_count;
 };
 
-struct msm_cvp_format_constraint {
-	u32 fourcc;
-	u32 num_planes;
-	u32 y_stride_multiples;
-	u32 y_max_stride;
-	u32 y_min_plane_buffer_height_multiple;
-	u32 y_buffer_alignment;
-	u32 uv_stride_multiples;
-	u32 uv_max_stride;
-	u32 uv_min_plane_buffer_height_multiple;
-	u32 uv_buffer_alignment;
-};
-
 struct msm_cvp_drv {
 	struct mutex lock;
 	struct list_head cores;
@@ -385,6 +334,22 @@
 	int (*decide_work_mode)(struct msm_cvp_inst *inst);
 };
 
+#define MAX_NUM_MSGS_PER_SESSION	128
+#define CVP_MAX_WAIT_TIME	2000
+
+struct session_msg {
+	struct list_head node;
+	struct hfi_msg_session_hdr pkt;
+};
+
+struct cvp_session_queue {
+	spinlock_t lock;
+	unsigned int msg_count;
+	struct list_head msgs;
+	wait_queue_head_t wq;
+	struct kmem_cache *msg_cache;
+};
+
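The consumer side of this queue, as a minimal sketch assuming the helper name cvp_wait_session_msg() and that CVP_MAX_WAIT_TIME is in milliseconds (neither is spelled out by this patch):

static int cvp_wait_session_msg(struct cvp_session_queue *sq,
		struct hfi_msg_session_hdr *out)
{
	struct session_msg *msg = NULL;

	/* unlocked peek at msg_count; re-checked under the lock below */
	if (!wait_event_timeout(sq->wq, sq->msg_count > 0,
			msecs_to_jiffies(CVP_MAX_WAIT_TIME)))
		return -ETIMEDOUT;

	spin_lock(&sq->lock);
	if (!list_empty(&sq->msgs)) {
		msg = list_first_entry(&sq->msgs, struct session_msg, node);
		list_del_init(&msg->node);
		sq->msg_count--;
	}
	spin_unlock(&sq->lock);

	if (!msg)
		return -EAGAIN;

	memcpy(out, &msg->pkt, sizeof(*out));
	kmem_cache_free(sq->msg_cache, msg);
	return 0;
}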
 struct msm_cvp_core {
 	struct list_head list;
 	struct mutex lock;
@@ -418,33 +383,21 @@
 	struct mutex sync_lock, lock, flush_lock;
 	struct msm_cvp_core *core;
 	enum session_type session_type;
+	struct cvp_session_queue session_queue;
 	void *session;
 	struct session_prop prop;
 	enum instance_state state;
 	struct msm_cvp_format fmts[MAX_PORT_NUM];
 	struct buf_queue bufq[MAX_PORT_NUM];
 	struct msm_cvp_list freqs;
-	struct msm_cvp_list input_crs;
-	struct msm_cvp_list scratchbufs;
 	struct msm_cvp_list persistbufs;
-	struct msm_cvp_list pending_getpropq;
-	struct msm_cvp_list outputbufs;
-	struct msm_cvp_list reconbufs;
-	struct msm_cvp_list eosbufs;
 	struct msm_cvp_list registeredbufs;
 	struct msm_cvp_list cvpbufs;
-	struct msm_cvp_list etb_data;
-	struct msm_cvp_list fbd_data;
-	struct msm_cvp_list dfs_config;
 	struct buffer_requirements buff_req;
 	struct v4l2_ctrl_handler ctrl_handler;
 	struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
-	struct v4l2_ctrl **cluster;
 	struct v4l2_fh event_handler;
 	struct msm_smem *extradata_handle;
-	bool in_reconfig;
-	u32 reconfig_width;
-	u32 reconfig_height;
 	struct dentry *debugfs_root;
 	void *priv;
 	struct msm_cvp_debug debug;
@@ -456,48 +409,17 @@
 	enum buffer_mode_type buffer_mode_set[MAX_PORT_NUM];
 	enum multi_stream stream_output_mode;
 	struct v4l2_ctrl **ctrls;
-	int bit_depth;
 	struct kref kref;
-	bool in_flush;
-	u32 pic_struct;
-	u32 colour_space;
-	u32 profile;
-	u32 level;
-	u32 entropy_mode;
-	u32 grid_enable;
-	u32 frame_quality;
 	struct msm_cvp_codec_data *codec_data;
-	struct hal_hdr10_pq_sei hdr10_sei_params;
 	struct batch_mode batch;
 };
 
 extern struct msm_cvp_drv *cvp_driver;
 
-struct msm_cvp_ctrl_cluster {
-	struct v4l2_ctrl **cluster;
-	struct list_head list;
-};
-
-struct msm_cvp_ctrl {
-	u32 id;
-	char name[MAX_NAME_LENGTH];
-	enum v4l2_ctrl_type type;
-	s64 minimum;
-	s64 maximum;
-	s64 default_value;
-	u32 step;
-	u32 menu_skip_mask;
-	u32 flags;
-	const char * const *qmenu;
-};
-
 void cvp_handle_cmd_response(enum hal_command_response cmd, void *data);
 int msm_cvp_trigger_ssr(struct msm_cvp_core *core,
 	enum hal_ssr_trigger_type type);
 int msm_cvp_noc_error_info(struct msm_cvp_core *core);
-bool heic_encode_session_supported(struct msm_cvp_inst *inst);
-int msm_cvp_check_session_supported(struct msm_cvp_inst *inst);
-int msm_cvp_check_scaling_supported(struct msm_cvp_inst *inst);
 void msm_cvp_queue_v4l2_event(struct msm_cvp_inst *inst, int event_type);
 
 enum msm_cvp_flags {
@@ -517,19 +439,7 @@
 struct msm_cvp_internal_buffer {
 	struct list_head list;
 	struct msm_smem smem;
-	struct msm_cvp_buffer buf;
-};
-
-struct msm_cvp_internal_send_cmd {
-	struct list_head list;
-	struct msm_smem smem;
-	struct msm_cvp_send_cmd send_cmd;
-};
-
-struct msm_cvp_internal_dfsconfig {
-	struct list_head list;
-	struct msm_smem smem;
-	struct msm_cvp_dfsconfig dfsconfig;
+	struct cvp_kmd_buffer buf;
 };
 
 void msm_cvp_comm_handle_thermal_event(void);
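The cvp_session_queue added above pairs a spinlock-protected message list with a wait queue: the HFI response path allocates a session_msg from msg_cache, appends it to msgs, bumps msg_count, and wakes wq, while a reader blocks for up to CVP_MAX_WAIT_TIME (presumably milliseconds) waiting for a packet. A minimal consumer-side sketch assuming that queueing discipline; the helper name is hypothetical and not part of this patch:

static int cvp_session_queue_wait(struct cvp_session_queue *sq,
		struct hfi_msg_session_hdr *out)
{
	struct session_msg *m;

	/* wait_event_timeout() returns 0 on timeout */
	if (!wait_event_timeout(sq->wq, sq->msg_count > 0,
			msecs_to_jiffies(CVP_MAX_WAIT_TIME)))
		return -ETIMEDOUT;

	spin_lock(&sq->lock);
	if (list_empty(&sq->msgs)) {
		/* another reader may have raced us; recheck under the lock */
		spin_unlock(&sq->lock);
		return -EAGAIN;
	}
	m = list_first_entry(&sq->msgs, struct session_msg, node);
	list_del(&m->node);
	sq->msg_count--;
	spin_unlock(&sq->lock);

	*out = m->pkt;
	kmem_cache_free(sq->msg_cache, m);
	return 0;
}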
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_platform.c b/drivers/media/platform/msm/cvp/msm_cvp_platform.c
index dfc2855..cea328c 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_platform.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_platform.c
@@ -15,71 +15,30 @@
 #include <linux/types.h>
 #include <linux/version.h>
 #include <linux/io.h>
+#include <linux/of_fdt.h>
 #include "msm_cvp_internal.h"
 #include "msm_cvp_debug.h"
 
+#define DDR_TYPE_LPDDR4 0x6
+#define DDR_TYPE_LPDDR4X 0x7
+#define DDR_TYPE_LPDDR4Y 0x8
+#define DDR_TYPE_LPDDR5 0x9
 
-#define CODEC_ENTRY(n, p, vsp, vpp, lp) \
+#define UBWC_CONFIG(mco, mlo, hbo, bslo, bso, rs, mc, ml, hbb, bsl, bsp) \
 {	\
-	.fourcc = n,		\
-	.session_type = p,	\
-	.vsp_cycles = vsp,	\
-	.vpp_cycles = vpp,	\
-	.low_power_cycles = lp	\
+	.override_bit_info.max_channel_override = mco,	\
+	.override_bit_info.mal_length_override = mlo,	\
+	.override_bit_info.hb_override = hbo,	\
+	.override_bit_info.bank_swzl_level_override = bslo,	\
+	.override_bit_info.bank_spreading_override = bso,	\
+	.override_bit_info.reserved = rs,	\
+	.max_channels = mc,	\
+	.mal_length = ml,	\
+	.highest_bank_bit = hbb,	\
+	.bank_swzl_level = bsl,	\
+	.bank_spreading = bsp,	\
 }
 
-#define EFUSE_ENTRY(sa, s, m, sh, p) \
-{	\
-	.start_address = sa,		\
-	.size = s,	\
-	.mask = m,	\
-	.shift = sh,	\
-	.purpose = p	\
-}
-
-/*FIXME: hard coded AXI_REG_START_ADDR???*/
-#define GCC_VIDEO_AXI_REG_START_ADDR	0x10B024
-#define GCC_VIDEO_AXI_REG_SIZE		0xC
-
-static struct msm_cvp_codec_data default_codec_data[] =  {
-	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_CVP_ENCODER, 125, 675, 320),
-	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_CVP_DECODER, 125, 675, 320),
-};
-
-/* Update with 855 data */
-static struct msm_cvp_codec_data sm8150_codec_data[] =  {
-	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_CVP_ENCODER, 10, 675, 320),
-	CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_CVP_ENCODER, 10, 675, 320),
-	CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_CVP_ENCODER, 10, 675, 320),
-	CODEC_ENTRY(V4L2_PIX_FMT_TME, MSM_CVP_ENCODER, 0, 540, 540),
-	CODEC_ENTRY(V4L2_PIX_FMT_MPEG2, MSM_CVP_DECODER, 10, 200, 200),
-	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_CVP_DECODER, 10, 200, 200),
-	CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_CVP_DECODER, 10, 200, 200),
-	CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_CVP_DECODER, 10, 200, 200),
-	CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_CVP_DECODER, 10, 200, 200),
-};
-
-
-/*
- * Custom conversion coefficients for resolution: 176x144 negative
- * coeffs are converted to s4.9 format
- * (e.g. -22 converted to ((1 << 13) - 22)
- * 3x3 transformation matrix coefficients in s4.9 fixed point format
- */
-static u32 vpe_csc_custom_matrix_coeff[HAL_MAX_MATRIX_COEFFS] = {
-	470, 8170, 8148, 0, 490, 50, 0, 34, 483
-};
-
-/* offset coefficients in s9 fixed point format */
-static u32 vpe_csc_custom_bias_coeff[HAL_MAX_BIAS_COEFFS] = {
-	34, 0, 4
-};
-
-/* clamping value for Y/U/V([min,max] for Y/U/V) */
-static u32 vpe_csc_custom_limit_coeff[HAL_MAX_LIMIT_COEFFS] = {
-	16, 235, 16, 240, 16, 240
-};
-
 static struct msm_cvp_common_data default_common_data[] = {
 	{
 		.key = "qcom,never-unload-fw",
@@ -94,7 +53,7 @@
 	},
 	{
 		.key = "qcom,sw-power-collapse",
-		.value = 1,
+		.value = 0,	/* SW power collapse disabled by default */
 	},
 	{
 		.key = "qcom,domain-attr-non-fatal-faults",
@@ -170,35 +129,28 @@
 	},
 };
 
+/* Default UBWC config for LPDDR5 */
+static struct msm_cvp_ubwc_config_data kona_ubwc_data[] = {
+	UBWC_CONFIG(1, 1, 1, 0, 0, 0, 8, 32, 16, 0, 0),
+};
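For reference, the single kona_ubwc_data entry above expands, via the UBWC_CONFIG macro defined earlier in this file, to the following designated initializer: channel, MAL-length, and highest-bank-bit overrides enabled, with max_channels 8, mal_length 32, and highest_bank_bit 16:

	{
		.override_bit_info.max_channel_override = 1,
		.override_bit_info.mal_length_override = 1,
		.override_bit_info.hb_override = 1,
		.override_bit_info.bank_swzl_level_override = 0,
		.override_bit_info.bank_spreading_override = 0,
		.override_bit_info.reserved = 0,
		.max_channels = 8,
		.mal_length = 32,
		.highest_bank_bit = 16,
		.bank_swzl_level = 0,
		.bank_spreading = 0,
	}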
+
 
 static struct msm_cvp_platform_data default_data = {
-	.codec_data = default_codec_data,
-	.codec_data_length =  ARRAY_SIZE(default_codec_data),
 	.common_data = default_common_data,
 	.common_data_length =  ARRAY_SIZE(default_common_data),
-	.csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff,
-	.csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff,
-	.csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff,
-	.efuse_data = NULL,
-	.efuse_data_length = 0,
 	.sku_version = 0,
 	.gcc_register_base = 0,
 	.gcc_register_size = 0,
 	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = NULL,
 };
 
 static struct msm_cvp_platform_data sm8250_data = {
-	.codec_data = sm8150_codec_data,
-	.codec_data_length =  ARRAY_SIZE(sm8150_codec_data),
 	.common_data = sm8250_common_data,
 	.common_data_length =  ARRAY_SIZE(sm8250_common_data),
-	.csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff,
-	.csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff,
-	.csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff,
-	.efuse_data = NULL,
-	.efuse_data_length = 0,
 	.sku_version = 0,
 	.vpu_ver = VPU_VERSION_5,
+	.ubwc_config = kona_ubwc_data,
 };
 
 static const struct of_device_id msm_cvp_dt_match[] = {
@@ -213,19 +165,33 @@
 
 void *cvp_get_drv_data(struct device *dev)
 {
-	struct msm_cvp_platform_data *driver_data = NULL;
+	struct msm_cvp_platform_data *driver_data;
 	const struct of_device_id *match;
+	uint32_t ddr_type = DDR_TYPE_LPDDR5;
 
-	if (!IS_ENABLED(CONFIG_OF) || !dev->of_node) {
-		driver_data = &default_data;
+	driver_data = &default_data;
+
+	if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
 		goto exit;
-	}
 
 	match = of_match_node(msm_cvp_dt_match, dev->of_node);
 
 	if (match)
 		driver_data = (struct msm_cvp_platform_data *)match->data;
 
+	if (match && !strcmp(match->compatible, "qcom,kona-cvp")) {
+		ddr_type = of_fdt_get_ddrtype();
+		if (ddr_type == -ENOENT) {
+			dprintk(CVP_ERR,
+				"Failed to get DDR type, using LPDDR5 default\n");
+		}
+
+		if (driver_data->ubwc_config &&
+			(ddr_type == DDR_TYPE_LPDDR4 ||
+			ddr_type == DDR_TYPE_LPDDR4X ||
+			ddr_type == DDR_TYPE_LPDDR4Y))
+			driver_data->ubwc_config->highest_bank_bit = 15;
+	}
 exit:
 	return driver_data;
 }
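cvp_get_drv_data() therefore leaves the UBWC highest_bank_bit at the LPDDR5 default of 16 (from kona_ubwc_data) and lowers it to 15 only when the flattened device tree reports an LPDDR4-class part. The decision condenses to the following restatement of the logic above, not a separate API:

static u32 cvp_ubwc_hbb_for_ddr(u32 ddr_type)
{
	switch (ddr_type) {
	case DDR_TYPE_LPDDR4:
	case DDR_TYPE_LPDDR4X:
	case DDR_TYPE_LPDDR4Y:
		return 15;	/* LPDDR4 family */
	default:
		return 16;	/* LPDDR5 default from kona_ubwc_data */
	}
}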
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
index ca47d00..4387061a 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
@@ -19,7 +19,6 @@
 };
 
 #define PERF_GOV "performance"
-#define DEFAULT_CVP_CLK_SVS2
 
 static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
 {
@@ -329,14 +328,8 @@
 /* A comparator to compare loads (needed later on) */
 static int cmp(const void *a, const void *b)
 {
-#ifdef DEFAULT_CVP_CLK_SVS2
 	return ((struct allowed_clock_rates_table *)a)->clock_rate -
 		((struct allowed_clock_rates_table *)b)->clock_rate;
-#else
-	/* want to sort in reverse so flip the comparison */
-	return ((struct allowed_clock_rates_table *)b)->clock_rate -
-		((struct allowed_clock_rates_table *)a)->clock_rate;
-#endif
 }
 
 static int msm_cvp_load_allowed_clocks_table(
@@ -708,9 +701,6 @@
 	platform_data = core->platform_data;
 	res = &core->resources;
 
-	res->codec_data_count = platform_data->codec_data_length;
-	res->codec_data = platform_data->codec_data;
-
 	res->sku_version = platform_data->sku_version;
 
 	res->fw_name = "cvpss";
@@ -766,12 +756,11 @@
 	res->bus_devfreq_on = find_key_value(platform_data,
 			"qcom,use-devfreq-scale-bus");
 
-	res->csc_coeff_data = &platform_data->csc_data;
-
 	res->gcc_register_base = platform_data->gcc_register_base;
 	res->gcc_register_size = platform_data->gcc_register_size;
 
 	res->vpu_ver = platform_data->vpu_ver;
+	res->ubwc_config = platform_data->ubwc_config;
 	return rc;
 
 }
@@ -907,8 +896,17 @@
 		goto remove_cb;
 	}
 
-	if (cb->is_secure)
+	if (cb->is_secure) {
 		secure_vmid = get_secure_vmid(cb);
+		rc = iommu_domain_set_attr(cb->mapping->domain,
+			DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s - failed to set DOMAIN_ATTR_SECURE_VMID\n",
+				__func__);
+			goto release_mapping;
+		}
+	}
 
 	if (res->cache_pagetables) {
 		int cache_pagetables = 1;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_resources.h b/drivers/media/platform/msm/cvp/msm_cvp_resources.h
index 4b4e149..14f1eda 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_resources.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_resources.h
@@ -186,13 +186,11 @@
 	bool cache_pagetables;
 	bool decode_batching;
 	bool dcvs;
-	struct msm_cvp_codec_data *codec_data;
-	int codec_data_count;
-	struct msm_cvp_csc_coeff *csc_coeff_data;
 	struct msm_cvp_mem_cdsp mem_cdsp;
 	uint32_t vpu_ver;
 	uint32_t fw_cycles;
 	uint32_t bus_devfreq_on;
+	struct msm_cvp_ubwc_config_data *ubwc_config;
 };
 
 static inline bool is_iommu_present(struct msm_cvp_platform_resources *res)
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c b/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
index 79a84aa..3abea69 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
@@ -72,31 +72,25 @@
 	rc = msm_cvp_close(cvp_inst);
 	filp->private_data = NULL;
 	trace_msm_v4l2_cvp_close_end("msm v4l2_close end");
-	return rc;
+	return 0;
 }
 
 static int msm_cvp_v4l2_querycap(struct file *filp, void *fh,
 			struct v4l2_capability *cap)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(filp, fh);
-
-	return msm_cvp_querycap((void *)cvp_inst, cap);
+	return -EINVAL;
 }
 
 int msm_cvp_v4l2_enum_fmt(struct file *file, void *fh,
 					struct v4l2_fmtdesc *f)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_enum_fmt((void *)cvp_inst, f);
+	return -EINVAL;
 }
 
 int msm_cvp_v4l2_s_fmt(struct file *file, void *fh,
 					struct v4l2_format *f)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_s_fmt((void *)cvp_inst, f);
+	return 0;
 }
 
 int msm_cvp_v4l2_g_fmt(struct file *file, void *fh,
@@ -112,7 +106,7 @@
 {
 	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
 
-	return msm_cvp_s_ctrl((void *)cvp_inst, a);
+	return v4l2_s_ctrl(NULL, &cvp_inst->ctrl_handler, a);
 }
 
 int msm_cvp_v4l2_g_ctrl(struct file *file, void *fh,
@@ -120,23 +114,19 @@
 {
 	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
 
-	return msm_cvp_g_ctrl((void *)cvp_inst, a);
+	return v4l2_g_ctrl(&cvp_inst->ctrl_handler, a);
 }
 
 int msm_cvp_v4l2_s_ext_ctrl(struct file *file, void *fh,
 					struct v4l2_ext_controls *a)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_s_ext_ctrl((void *)cvp_inst, a);
+	return -EINVAL;
 }
 
 int msm_cvp_v4l2_g_ext_ctrl(struct file *file, void *fh,
 					struct v4l2_ext_controls *a)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_g_ext_ctrl((void *)cvp_inst, a);
+	return 0;
 }
 
 int msm_cvp_v4l2_reqbufs(struct file *file, void *fh,
@@ -150,29 +140,25 @@
 int msm_cvp_v4l2_qbuf(struct file *file, void *fh,
 				struct v4l2_buffer *b)
 {
-	return msm_cvp_qbuf(get_cvp_inst(file, fh), b);
+	return 0;
 }
 
 int msm_cvp_v4l2_dqbuf(struct file *file, void *fh,
 				struct v4l2_buffer *b)
 {
-	return msm_cvp_dqbuf(get_cvp_inst(file, fh), b);
+	return 0;
 }
 
 int msm_cvp_v4l2_streamon(struct file *file, void *fh,
 				enum v4l2_buf_type i)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_streamon((void *)cvp_inst, i);
+	return 0;
 }
 
 int msm_cvp_v4l2_streamoff(struct file *file, void *fh,
 				enum v4l2_buf_type i)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_streamoff((void *)cvp_inst, i);
+	return 0;
 }
 
 static int msm_cvp_v4l2_subscribe_event(struct v4l2_fh *fh,
@@ -196,24 +182,18 @@
 static int msm_cvp_v4l2_decoder_cmd(struct file *file, void *fh,
 				struct v4l2_decoder_cmd *dec)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_comm_cmd((void *)cvp_inst, (union msm_v4l2_cmd *)dec);
+	return 0;
 }
 
 static int msm_cvp_v4l2_encoder_cmd(struct file *file, void *fh,
 				struct v4l2_encoder_cmd *enc)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_comm_cmd((void *)cvp_inst, (union msm_v4l2_cmd *)enc);
+	return 0;
 }
 static int msm_cvp_v4l2_s_parm(struct file *file, void *fh,
 			struct v4l2_streamparm *a)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_comm_s_parm(cvp_inst, a);
+	return 0;
 }
 static int msm_cvp_v4l2_g_parm(struct file *file, void *fh,
 		struct v4l2_streamparm *a)
@@ -224,9 +204,7 @@
 static int msm_cvp_v4l2_g_crop(struct file *file, void *fh,
 			struct v4l2_crop *a)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_g_crop(cvp_inst, a);
+	return -EINVAL;
 }
 
 static int msm_cvp_v4l2_enum_framesizes(struct file *file, void *fh,
@@ -240,9 +218,7 @@
 static int msm_cvp_v4l2_queryctrl(struct file *file, void *fh,
 	struct v4l2_queryctrl *ctrl)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_query_ctrl((void *)cvp_inst, ctrl);
+	return -EINVAL;
 }
 
 static long msm_cvp_v4l2_default(struct file *file, void *fh,
@@ -282,9 +258,6 @@
 	.vidioc_default = msm_cvp_v4l2_default,
 };
 
-static const struct v4l2_ioctl_ops msm_v4l2_enc_ioctl_ops = {
-};
-
 static unsigned int msm_cvp_v4l2_poll(struct file *filp,
 	struct poll_table_struct *pt)
 {
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_private.c b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
index 530f18e..a578d8f55 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_private.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
@@ -4,12 +4,138 @@
  */
 
 #include "msm_v4l2_private.h"
+#include "cvp_hfi_api.h"
 
-static int convert_from_user(struct msm_cvp_arg *kp, unsigned long arg)
+static int _get_pkt_hdr_from_user(struct cvp_kmd_arg __user *up,
+		struct cvp_hal_session_cmd_pkt *pkt_hdr)
+{
+	struct cvp_kmd_hfi_packet *u;
+
+	u = &up->data.hfi_pkt;
+
+	if (get_user(pkt_hdr->size, &u->pkt_data[0]))
+		return -EFAULT;
+
+	if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+		return -EFAULT;
+
+	if (get_pkt_index(pkt_hdr) < 0) {
+		dprintk(CVP_DBG, "user mode provided an invalid HFI packet header\n");
+		goto set_default_pkt_hdr;
+	}
+
+	if (pkt_hdr->size > MAX_HFI_PKT_SIZE*sizeof(unsigned int)) {
+		dprintk(CVP_ERR, "user HFI packet too large %x\n",
+				pkt_hdr->size);
+		return -EINVAL;
+	}
+
+	return 0;
+
+set_default_pkt_hdr:
+	pkt_hdr->size = sizeof(struct hfi_msg_session_hdr);
+	return 0;
+}
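_get_pkt_hdr_from_user() relies on the HFI convention that the first two u32 words of a cvp_kmd_hfi_packet carry the packet size in bytes and the packet type; it bounds-checks the size against MAX_HFI_PKT_SIZE u32 words before any bulk copy happens, and falls back to a bare hfi_msg_session_hdr size when the packet type is not recognized. Viewed from the kernel, a conforming packet is laid out like this (field names are illustrative only):

struct example_hfi_pkt_layout {
	u32 size;		/* pkt_data[0]: total packet size in bytes */
	u32 packet_type;	/* pkt_data[1]: HFI command id */
	u32 payload[];		/* command-specific words follow */
};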
+
+static int _get_fence_pkt_hdr_from_user(struct cvp_kmd_arg __user *up,
+		struct cvp_hal_session_cmd_pkt *pkt_hdr)
+{
+	struct cvp_kmd_hfi_fence_packet *u;
+
+	u = &up->data.hfi_fence_pkt;
+
+	if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+		return -EFAULT;
+
+	pkt_hdr->size = (MAX_HFI_FENCE_OFFSET + MAX_HFI_FENCE_SIZE)
+			* sizeof(unsigned int);
+
+	if (pkt_hdr->size > (MAX_HFI_PKT_SIZE*sizeof(unsigned int)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Size is in units of u32 */
+static int _copy_pkt_from_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct cvp_kmd_hfi_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_pkt;
+	u = &up->data.hfi_pkt;
+	for (i = 0; i < size; i++)
+		if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+
+	return 0;
+}
+
+/* Size is in units of u32 */
+static int _copy_fence_pkt_from_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct cvp_kmd_hfi_fence_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_fence_pkt;
+	u = &up->data.hfi_fence_pkt;
+	for (i = 0; i < MAX_HFI_FENCE_OFFSET; i++) {
+		if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+	}
+	for (i = 0; i < MAX_HFI_FENCE_SIZE; i++) {
+		if (get_user(k->fence_data[i], &u->fence_data[i]))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int _copy_pkt_to_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct cvp_kmd_hfi_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_pkt;
+	u = &up->data.hfi_pkt;
+	for (i = 0; i < size; i++)
+		if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+
+	return 0;
+}
+
+static int _copy_fence_pkt_to_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct cvp_kmd_hfi_fence_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_fence_pkt;
+	u = &up->data.hfi_fence_pkt;
+	for (i = 0; i < MAX_HFI_FENCE_OFFSET; i++) {
+		if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+	}
+	for (i = 0; i < MAX_HFI_FENCE_SIZE; i++) {
+		if (put_user(k->fence_data[i], &u->fence_data[i]))
+			return -EFAULT;
+	}
+	return 0;
+}
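All four copy helpers above move the packet word-by-word with get_user()/put_user(). Since pkt_data and fence_data are flat u32 arrays whose bounds are validated by the header helpers first, a single bulk copy would be equivalent; a sketch of that alternative for the non-fence case (not what this patch does):

static int _copy_pkt_from_user_bulk(struct cvp_kmd_arg *kp,
		struct cvp_kmd_arg __user *up,
		unsigned int size)
{
	/* size is in u32 words, already bounds-checked by the caller */
	if (copy_from_user(kp->data.hfi_pkt.pkt_data,
			up->data.hfi_pkt.pkt_data,
			size * sizeof(u32)))
		return -EFAULT;

	return 0;
}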
+
+static int convert_from_user(struct cvp_kmd_arg *kp, unsigned long arg)
 {
 	int rc = 0;
 	int i;
-	struct msm_cvp_arg __user *up = compat_ptr(arg);
+	struct cvp_kmd_arg __user *up = compat_ptr(arg);
+	struct cvp_hal_session_cmd_pkt pkt_hdr;
 
 	if (!kp || !up) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -20,9 +146,9 @@
 		return -EFAULT;
 
 	switch (kp->type) {
-	case MSM_CVP_GET_SESSION_INFO:
+	case CVP_KMD_GET_SESSION_INFO:
 	{
-		struct msm_cvp_session_info *k, *u;
+		struct cvp_kmd_session_info *k, *u;
 
 		k = &kp->data.session;
 		u = &up->data.session;
@@ -33,9 +159,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_REQUEST_POWER:
+	case CVP_KMD_REQUEST_POWER:
 	{
-		struct msm_cvp_request_power *k, *u;
+		struct cvp_kmd_request_power *k, *u;
 
 		k = &kp->data.req_power;
 		u = &up->data.req_power;
@@ -49,9 +175,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_REGISTER_BUFFER:
+	case CVP_KMD_REGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *k, *u;
+		struct cvp_kmd_buffer *k, *u;
 
 		k = &kp->data.regbuf;
 		u = &up->data.regbuf;
@@ -68,9 +194,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_UNREGISTER_BUFFER:
+	case CVP_KMD_UNREGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *k, *u;
+		struct cvp_kmd_buffer *k, *u;
 
 		k = &kp->data.unregbuf;
 		u = &up->data.unregbuf;
@@ -87,12 +213,10 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_HFI_SEND_CMD:
+	case CVP_KMD_HFI_SEND_CMD:
 	{
-		struct msm_cvp_send_cmd *k, *u;
+		struct cvp_kmd_send_cmd *k, *u;
 
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_SEND_CMD\n",
-				__func__);
 		k = &kp->data.send_cmd;
 		u = &up->data.send_cmd;
 		if (get_user(k->cmd_address_fd, &u->cmd_address_fd) ||
@@ -103,89 +227,42 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_SEND_CMD_PKT:
+	case CVP_KMD_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_HFI_DFS_FRAME_CMD:
+	case CVP_KMD_HFI_DME_CONFIG_CMD:
+	case CVP_KMD_HFI_DME_FRAME_CMD:
+	case CVP_KMD_HFI_PERSIST_CMD:
 	{
-		struct msm_cvp_dfsconfig *k, *u;
-
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n",
-				__func__);
-		k = &kp->data.dfsconfig;
-		u = &up->data.dfsconfig;
-		if (get_user(k->cmd_address, &u->cmd_address) ||
-			get_user(k->cmd_size, &u->cmd_size) ||
-			get_user(k->packet_type, &u->packet_type) ||
-			get_user(k->session_id, &u->session_id) ||
-			get_user(k->srcbuffer_format, &u->srcbuffer_format) ||
-			get_user(
-			k->left_plane_info.stride[HFI_MAX_PLANES - 1],
-			&u->left_plane_info.stride[HFI_MAX_PLANES - 1]) ||
-			get_user(
-			k->left_plane_info.buf_size[HFI_MAX_PLANES - 1],
-			&u->left_plane_info.buf_size[HFI_MAX_PLANES - 1]) ||
-			get_user(
-			k->right_plane_info.stride[HFI_MAX_PLANES - 1],
-			&u->right_plane_info.stride[HFI_MAX_PLANES - 1]) ||
-			get_user(
-			k->right_plane_info.buf_size[HFI_MAX_PLANES - 1],
-			&u->right_plane_info.buf_size[HFI_MAX_PLANES - 1]) ||
-			get_user(k->width, &u->width) ||
-			get_user(k->height, &u->height) ||
-			get_user(k->occlusionmask_enable,
-				&u->occlusionmask_enable) ||
-			get_user(k->occlusioncost, &u->occlusioncost) ||
-			get_user(k->occlusionshift, &u->occlusionshift) ||
-			get_user(k->maxdisparity, &u->maxdisparity) ||
-			get_user(k->disparityoffset, &u->disparityoffset) ||
-			get_user(k->medianfilter_enable,
-				&u->medianfilter_enable) ||
-			get_user(k->occlusionbound, &u->occlusionbound) ||
-			get_user(k->occlusionfilling_enable,
-				&u->occlusionfilling_enable) ||
-			get_user(k->occlusionmaskdump,
-				&u->occlusionmaskdump) ||
-			get_user(k->clientdata.transactionid,
-				&u->clientdata.transactionid) ||
-			get_user(k->clientdata.client_data1,
-				&u->clientdata.client_data1) ||
-			get_user(k->clientdata.client_data2,
-				&u->clientdata.client_data2))
+		if (_get_pkt_hdr_from_user(up, &pkt_hdr)) {
+			dprintk(CVP_ERR, "Invalid user cmd: type %x, size %x, pkt %x\n",
+				kp->type, pkt_hdr.size, pkt_hdr.packet_type);
 			return -EFAULT;
-		for (i = 0; i < MAX_DFS_HFI_PARAMS; i++)
-			if (get_user(k->reserved[i], &u->reserved[i]))
-				return -EFAULT;
+		}
+
+		dprintk(CVP_DBG, "system call cmd pkt: %d 0x%x\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+		rc = _copy_pkt_from_user(kp, up, (pkt_hdr.size >> 2));
 		break;
 	}
-	case MSM_CVP_HFI_DFS_FRAME_CMD:
-	case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
 	{
-		struct msm_cvp_dfsframe *k, *u;
-
-		dprintk(CVP_DBG, "%s: Type =%d\n",
-							__func__, kp->type);
-		k = &kp->data.dfsframe;
-		u = &up->data.dfsframe;
-		if (get_user(k->cmd_address, &u->cmd_address) ||
-			get_user(k->cmd_size, &u->cmd_size) ||
-			get_user(k->packet_type, &u->packet_type) ||
-			get_user(k->session_id, &u->session_id) ||
-			get_user(k->left_buffer_index,
-				&u->left_buffer_index) ||
-			get_user(k->right_buffer_index,
-				&u->right_buffer_index) ||
-			get_user(k->disparitymap_buffer_idx,
-				&u->disparitymap_buffer_idx) ||
-			get_user(k->occlusionmask_buffer_idx,
-				&u->occlusionmask_buffer_idx) ||
-			get_user(k->clientdata.transactionid,
-				&u->clientdata.transactionid) ||
-			get_user(k->clientdata.client_data1,
-				&u->clientdata.client_data1) ||
-			get_user(k->clientdata.client_data2,
-				&u->clientdata.client_data2))
+		if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr)) {
+			dprintk(CVP_ERR, "Invalid user cmd: type %x, size %x, pkt %x\n",
+				kp->type, pkt_hdr.size, pkt_hdr.packet_type);
 			return -EFAULT;
+		}
 
+		dprintk(CVP_DBG, "system call cmd pkt: %d 0x%x\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+		rc = _copy_fence_pkt_from_user(kp, up, (pkt_hdr.size >> 2));
 		break;
 	}
+	case CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_PERSIST_CMD_RESPONSE:
+	case CVP_KMD_RECEIVE_MSG_PKT:
+		break;
 	default:
 		dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
 			__func__, kp->type);
@@ -196,11 +273,12 @@
 	return rc;
 }
 
-static int convert_to_user(struct msm_cvp_arg *kp, unsigned long arg)
+static int convert_to_user(struct cvp_kmd_arg *kp, unsigned long arg)
 {
 	int rc = 0;
-	int i;
-	struct msm_cvp_arg __user *up = compat_ptr(arg);
+	int i, size = sizeof(struct hfi_msg_session_hdr) >> 2;
+	struct cvp_kmd_arg __user *up = compat_ptr(arg);
+	struct cvp_hal_session_cmd_pkt pkt_hdr;
 
 	if (!kp || !up) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -211,9 +289,20 @@
 		return -EFAULT;
 
 	switch (kp->type) {
-	case MSM_CVP_GET_SESSION_INFO:
+	case CVP_KMD_RECEIVE_MSG_PKT:
 	{
-		struct msm_cvp_session_info *k, *u;
+		struct cvp_kmd_hfi_packet *k, *u;
+
+		k = &kp->data.hfi_pkt;
+		u = &up->data.hfi_pkt;
+		for (i = 0; i < size; i++)
+			if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+				return -EFAULT;
+		break;
+	}
+	case CVP_KMD_GET_SESSION_INFO:
+	{
+		struct cvp_kmd_session_info *k, *u;
 
 		k = &kp->data.session;
 		u = &up->data.session;
@@ -224,9 +313,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_REQUEST_POWER:
+	case CVP_KMD_REQUEST_POWER:
 	{
-		struct msm_cvp_request_power *k, *u;
+		struct cvp_kmd_request_power *k, *u;
 
 		k = &kp->data.req_power;
 		u = &up->data.req_power;
@@ -240,9 +329,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_REGISTER_BUFFER:
+	case CVP_KMD_REGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *k, *u;
+		struct cvp_kmd_buffer *k, *u;
 
 		k = &kp->data.regbuf;
 		u = &up->data.regbuf;
@@ -259,9 +348,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_UNREGISTER_BUFFER:
+	case CVP_KMD_UNREGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *k, *u;
+		struct cvp_kmd_buffer *k, *u;
 
 		k = &kp->data.unregbuf;
 		u = &up->data.unregbuf;
@@ -278,11 +367,11 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_HFI_SEND_CMD:
+	case CVP_KMD_HFI_SEND_CMD:
 	{
-		struct msm_cvp_send_cmd *k, *u;
+		struct cvp_kmd_send_cmd *k, *u;
 
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_SEND_CMD\n",
+		dprintk(CVP_DBG, "%s: CVP_KMD_HFI_SEND_CMD\n",
 					__func__);
 
 		k = &kp->data.send_cmd;
@@ -295,89 +384,32 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_SEND_CMD_PKT:
+	case CVP_KMD_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_HFI_DFS_FRAME_CMD:
+	case CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_DME_CONFIG_CMD:
+	case CVP_KMD_HFI_DME_FRAME_CMD:
+	case CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_PERSIST_CMD:
+	case CVP_KMD_HFI_PERSIST_CMD_RESPONSE:
 	{
-		struct msm_cvp_dfsconfig *k, *u;
-
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n",
-					__func__);
-
-		k = &kp->data.dfsconfig;
-		u = &up->data.dfsconfig;
-		if (put_user(k->cmd_address, &u->cmd_address) ||
-			put_user(k->cmd_size, &u->cmd_size) ||
-			put_user(k->packet_type, &u->packet_type) ||
-			put_user(k->session_id, &u->session_id) ||
-			put_user(k->srcbuffer_format, &u->srcbuffer_format) ||
-			put_user(
-			k->left_plane_info.stride[HFI_MAX_PLANES - 1],
-			&u->left_plane_info.stride[HFI_MAX_PLANES - 1]) ||
-			put_user(
-			k->left_plane_info.buf_size[HFI_MAX_PLANES - 1],
-			&u->left_plane_info.buf_size[HFI_MAX_PLANES - 1]) ||
-			put_user(
-			k->right_plane_info.stride[HFI_MAX_PLANES - 1],
-			&u->right_plane_info.stride[HFI_MAX_PLANES - 1]) ||
-			put_user(
-			k->right_plane_info.buf_size[HFI_MAX_PLANES - 1],
-			&u->right_plane_info.buf_size[HFI_MAX_PLANES - 1])
-			|| put_user(k->width, &u->width) ||
-			put_user(k->height, &u->height) ||
-			put_user(k->occlusionmask_enable,
-				&u->occlusionmask_enable) ||
-			put_user(k->occlusioncost, &u->occlusioncost) ||
-			put_user(k->occlusionshift, &u->occlusionshift) ||
-			put_user(k->maxdisparity, &u->maxdisparity) ||
-			put_user(
-				k->disparityoffset, &u->disparityoffset) ||
-			put_user(k->medianfilter_enable,
-				&u->medianfilter_enable) ||
-			put_user(k->occlusionbound, &u->occlusionbound) ||
-			put_user(k->occlusionfilling_enable,
-				&u->occlusionfilling_enable) ||
-			put_user(k->occlusionmaskdump,
-				&u->occlusionmaskdump) ||
-			put_user(k->clientdata.transactionid,
-				&u->clientdata.transactionid) ||
-			put_user(k->clientdata.client_data1,
-				&u->clientdata.client_data1) ||
-			put_user(k->clientdata.client_data2,
-				&u->clientdata.client_data2))
+		if (_get_pkt_hdr_from_user(up, &pkt_hdr))
 			return -EFAULT;
-		for (i = 0; i < MAX_DFS_HFI_PARAMS; i++)
-			if (put_user(k->reserved[i], &u->reserved[i]))
-				return -EFAULT;
+
+		dprintk(CVP_DBG, "Send user cmd pkt: %d %d\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+		rc = _copy_pkt_to_user(kp, up, (pkt_hdr.size >> 2));
 		break;
 	}
-	case MSM_CVP_HFI_DFS_FRAME_CMD:
-	case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
 	{
-		struct msm_cvp_dfsframe *k, *u;
-
-		dprintk(CVP_DBG, "%s: type = %d\n",
-					__func__, kp->type);
-		k = &kp->data.dfsframe;
-		u = &up->data.dfsframe;
-
-		if (put_user(k->cmd_address, &u->cmd_address) ||
-			put_user(k->cmd_size, &u->cmd_size) ||
-			put_user(k->packet_type, &u->packet_type) ||
-			put_user(k->session_id, &u->session_id) ||
-			put_user(k->left_buffer_index,
-				&u->left_buffer_index) ||
-			put_user(k->right_buffer_index,
-				&u->right_buffer_index) ||
-			put_user(k->disparitymap_buffer_idx,
-				&u->disparitymap_buffer_idx) ||
-			put_user(k->occlusionmask_buffer_idx,
-				&u->occlusionmask_buffer_idx) ||
-			put_user(k->clientdata.transactionid,
-				&u->clientdata.transactionid) ||
-			put_user(k->clientdata.client_data1,
-				&u->clientdata.client_data1) ||
-			put_user(k->clientdata.client_data2,
-				&u->clientdata.client_data2))
+		if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr))
 			return -EFAULT;
+
+		dprintk(CVP_DBG, "Send user cmd pkt: %d %d\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+		rc = _copy_fence_pkt_to_user(kp, up, (pkt_hdr.size >> 2));
 		break;
 	}
 	default:
@@ -395,7 +427,7 @@
 {
 	int rc;
 	struct msm_cvp_inst *inst;
-	struct msm_cvp_arg karg;
+	struct cvp_kmd_arg karg;
 
 	if (!filp || !filp->private_data) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -404,15 +436,18 @@
 
 	inst = container_of(filp->private_data, struct msm_cvp_inst,
 			event_handler);
-	memset(&karg, 0, sizeof(struct msm_cvp_arg));
+	memset(&karg, 0, sizeof(struct cvp_kmd_arg));
 
 	/*
 	 * the arg points to user space memory and needs
 	 * to be converted to kernel space before using it.
 	 * Check do_video_ioctl() for more details.
 	 */
-	if (convert_from_user(&karg, arg))
+	if (convert_from_user(&karg, arg)) {
+		dprintk(CVP_ERR, "%s: failed to get from user cmd %x\n",
+			__func__, karg.type);
 		return -EFAULT;
+	}
 
 	rc = msm_cvp_private((void *)inst, cmd, &karg);
 	if (rc) {
@@ -421,8 +456,11 @@
 		return -EINVAL;
 	}
 
-	if (convert_to_user(&karg, arg))
+	if (convert_to_user(&karg, arg)) {
+		dprintk(CVP_ERR, "%s: failed to copy to user cmd %x\n",
+			__func__, karg.type);
 		return -EFAULT;
+	}
 
 	return rc;
 }
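The private ioctl is thus a three-step round trip: convert_from_user() validates and copies the cvp_kmd_arg in, msm_cvp_private() executes it, and convert_to_user() writes any results back. From user space the call looks roughly like the sketch below; the UAPI header, the ioctl request macro, and the session_id field name are assumptions, not taken from this patch:

#include <string.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int cvp_query_session(int cvp_fd, unsigned int *session_id)
{
	struct cvp_kmd_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.type = CVP_KMD_GET_SESSION_INFO;

	/* request macro name assumed, defined in the CVP UAPI header */
	if (ioctl(cvp_fd, CVP_IOCTL_KMD, &arg) < 0) {
		perror("cvp private ioctl");
		return -1;
	}

	*session_id = arg.data.session.session_id;
	return 0;
}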
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index ac122e1..672b207 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -55,6 +55,15 @@
 	NPU_PWRLEVEL_OFF = 0xFFFFFFFF,
 };
 
+#define NPU_ERR(fmt, args...)                            \
+	pr_err("NPU_ERR: %s: %d " fmt, __func__, __LINE__, ##args)
+#define NPU_WARN(fmt, args...)                           \
+	pr_warn("NPU_WARN: %s: %d " fmt, __func__, __LINE__, ##args)
+#define NPU_INFO(fmt, args...)                           \
+	pr_info("NPU_INFO: %s: %d " fmt, __func__, __LINE__, ##args)
+#define NPU_DBG(fmt, args...)                           \
+	pr_debug("NPU_DBG: %s: %d " fmt, __func__, __LINE__, ##args)
+
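These wrappers give every NPU log line a uniform severity/function/line prefix and drop the __func__ boilerplate from call sites, which the rest of this patch converts accordingly; format strings at call sites keep their own trailing newline, so the macros do not append one. For example:

	NPU_ERR("unable to get clk: %s\n", clock_name);
	/* expands to:
	 * pr_err("NPU_ERR: %s: %d " "unable to get clk: %s\n",
	 *	__func__, __LINE__, clock_name);
	 */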
 /* -------------------------------------------------------------------------
  * Data Structures
  * -------------------------------------------------------------------------
@@ -159,7 +168,6 @@
 	uint32_t num_pwrlevels;
 
 	struct device *devbw;
-	uint32_t bwmon_enabled;
 	uint32_t uc_pwrlevel;
 	uint32_t cdsprm_pwrlevel;
 	uint32_t fmax_pwrlevel;
@@ -206,7 +214,6 @@
 	struct npu_io_data tcm_io;
 	struct npu_io_data qdsp_io;
 	struct npu_io_data apss_shared_io;
-	struct npu_io_data bwmon_io;
 	struct npu_io_data qfprom_io;
 
 	uint32_t core_clk_num;
diff --git a/drivers/media/platform/msm/npu/npu_dbg.c b/drivers/media/platform/msm/npu/npu_dbg.c
index f1486a2..b0dfc85 100644
--- a/drivers/media/platform/msm/npu/npu_dbg.c
+++ b/drivers/media/platform/msm/npu/npu_dbg.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -24,9 +22,9 @@
 	uint32_t reg_val;
 
 	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_START);
-	pr_info("fw jobs execute started count = %d\n", reg_val);
+	NPU_INFO("fw jobs execute started count = %d\n", reg_val);
 	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_END);
-	pr_info("fw jobs execute finished count = %d\n", reg_val);
+	NPU_INFO("fw jobs execute finished count = %d\n", reg_val);
 	reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
-	pr_info("fw jobs aco parser debug = %d\n", reg_val);
+	NPU_INFO("fw jobs aco parser debug = %d\n", reg_val);
 }
diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c
index df83e2a..987e182 100644
--- a/drivers/media/platform/msm/npu/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu/npu_debugfs.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -120,7 +118,7 @@
 	buf[count] = 0;	/* end of string */
 
 	cnt = sscanf(buf, "%zx %x", &off, &data);
-	pr_debug("%s %s 0x%zx, 0x%08x\n", __func__, buf, off, data);
+	NPU_DBG("%s 0x%zx, 0x%08x\n", buf, off, data);
 
 	return count;
 	if (cnt < 2)
@@ -133,7 +131,7 @@
 
 	npu_disable_core_power(npu_dev);
 
-	pr_debug("write: addr=%zx data=%x\n", off, data);
+	NPU_DBG("write: addr=%zx data=%x\n", off, data);
 
 	return count;
 }
@@ -193,9 +191,9 @@
 		return 0; /* done reading */
 
 	len = min(count, debugfs->buf_len - (size_t) *ppos);
-	pr_debug("read %zi %zi\n", count, debugfs->buf_len - (size_t) *ppos);
+	NPU_DBG("read %zi %zi\n", count, debugfs->buf_len - (size_t) *ppos);
 	if (copy_to_user(user_buf, debugfs->buf + *ppos, len)) {
-		pr_err("failed to copy to user\n");
+		NPU_ERR("failed to copy to user\n");
 		return -EFAULT;
 	}
 
@@ -216,7 +214,7 @@
 	struct npu_device *npu_dev = file->private_data;
 	struct npu_debugfs_ctx *debugfs;
 
-	pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+	NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
 	npu_dev = g_npu_dev;
 	debugfs = &npu_dev->debugfs_ctx;
 
@@ -231,7 +229,7 @@
 	cnt = sscanf(buf, "%zx %x", &off, &reg_cnt);
 	if (cnt == 1)
 		reg_cnt = DEFAULT_REG_DUMP_NUM;
-	pr_debug("reg off = %zx, %d cnt=%d\n", off, reg_cnt, cnt);
+	NPU_DBG("reg off = %zx, %d cnt=%d\n", off, reg_cnt, cnt);
 	if (cnt >= 1) {
 		debugfs->reg_off = off;
 		debugfs->reg_cnt = reg_cnt;
@@ -248,7 +246,7 @@
 	struct npu_device *npu_dev = file->private_data;
 	struct npu_debugfs_ctx *debugfs;
 
-	pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+	NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
 	npu_dev = g_npu_dev;
 	debugfs = &npu_dev->debugfs_ctx;
 
@@ -259,7 +257,7 @@
 		debugfs->reg_off, debugfs->reg_cnt);
 
 	if (copy_to_user(user_buf, buf, len)) {
-		pr_err("failed to copy to user\n");
+		NPU_ERR("failed to copy to user\n");
 		return -EFAULT;
 	}
 
@@ -278,7 +276,7 @@
 	struct npu_device *npu_dev = file->private_data;
 	struct npu_debugfs_ctx *debugfs;
 
-	pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+	NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
 	npu_dev = g_npu_dev;
 	debugfs = &npu_dev->debugfs_ctx;
 
@@ -298,7 +296,7 @@
 
 			if (copy_to_user(dst_addr, src_addr,
 				remaining_to_end)) {
-				pr_err("%s failed to copy to user\n", __func__);
+				NPU_ERR("failed to copy to user\n");
 				mutex_unlock(&debugfs->log_lock);
 				return -EFAULT;
 			}
@@ -307,7 +305,7 @@
 			if (copy_to_user(dst_addr, src_addr,
 				debugfs->log_num_bytes_buffered -
 				remaining_to_end)) {
-				pr_err("%s failed to copy to user\n", __func__);
+				NPU_ERR("failed to copy to user\n");
 				mutex_unlock(&debugfs->log_lock);
 				return -EFAULT;
 			}
@@ -318,7 +316,7 @@
 			if (copy_to_user(user_buf, (debugfs->log_buf +
 				debugfs->log_read_index),
 				debugfs->log_num_bytes_buffered)) {
-				pr_err("%s failed to copy to user\n", __func__);
+				NPU_ERR("failed to copy to user\n");
 				mutex_unlock(&debugfs->log_lock);
 				return -EFAULT;
 			}
@@ -350,7 +348,7 @@
 	int32_t rc = 0;
 	uint32_t val;
 
-	pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+	NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
 	npu_dev = g_npu_dev;
 	debugfs = &npu_dev->debugfs_ctx;
 
@@ -366,14 +364,14 @@
 		buf[count-1] = 0;/* remove line feed */
 
 	if (strcmp(buf, "on") == 0) {
-		pr_info("triggering fw_init\n");
+		NPU_INFO("triggering fw_init\n");
 		if (fw_init(npu_dev) != 0)
-			pr_info("error in fw_init\n");
+			NPU_INFO("error in fw_init\n");
 	} else if (strcmp(buf, "off") == 0) {
-		pr_info("triggering fw_deinit\n");
+		NPU_INFO("triggering fw_deinit\n");
 		fw_deinit(npu_dev, false, true);
 	} else if (strcmp(buf, "ssr") == 0) {
-		pr_info("trigger error irq\n");
+		NPU_INFO("trigger error irq\n");
 		if (npu_enable_core_power(npu_dev))
 			return -EPERM;
 
@@ -381,20 +379,20 @@
 		REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(0), 2);
 		npu_disable_core_power(npu_dev);
 	} else if (strcmp(buf, "ssr_wdt") == 0) {
-		pr_info("trigger wdt irq\n");
+		NPU_INFO("trigger wdt irq\n");
 		npu_disable_post_pil_clocks(npu_dev);
 	} else if (strcmp(buf, "loopback") == 0) {
-		pr_debug("loopback test\n");
+		NPU_DBG("loopback test\n");
 		rc = npu_host_loopback_test(npu_dev);
-		pr_debug("loopback test end: %d\n", rc);
+		NPU_DBG("loopback test end: %d\n", rc);
 	} else {
 		rc = kstrtou32(buf, 10, &val);
 		if (rc) {
-			pr_err("Invalid input for power level settings\n");
+			NPU_ERR("Invalid input for power level settings\n");
 		} else {
 			val = min(val, npu_dev->pwrctrl.max_pwrlevel);
 			npu_dev->pwrctrl.active_pwrlevel = val;
-			pr_info("setting power state to %d\n", val);
+			NPU_INFO("setting power state to %d\n", val);
 		}
 	}
 
@@ -414,62 +412,62 @@
 
 	debugfs->root = debugfs_create_dir("npu", NULL);
 	if (IS_ERR_OR_NULL(debugfs->root)) {
-		pr_err("debugfs_create_dir for npu failed, error %ld\n",
+		NPU_ERR("debugfs_create_dir for npu failed, error %ld\n",
 			PTR_ERR(debugfs->root));
 		return -ENODEV;
 	}
 
 	if (!debugfs_create_file("reg", 0644, debugfs->root,
 		npu_dev, &npu_reg_fops)) {
-		pr_err("debugfs_create_file reg fail\n");
+		NPU_ERR("debugfs_create_file reg fail\n");
 		goto err;
 	}
 
 	if (!debugfs_create_file("off", 0644, debugfs->root,
 		npu_dev, &npu_off_fops)) {
-		pr_err("debugfs_create_file off fail\n");
+		NPU_ERR("debugfs_create_file off fail\n");
 		goto err;
 	}
 
 	if (!debugfs_create_file("log", 0644, debugfs->root,
 		npu_dev, &npu_log_fops)) {
-		pr_err("debugfs_create_file log fail\n");
+		NPU_ERR("debugfs_create_file log fail\n");
 		goto err;
 	}
 
 	if (!debugfs_create_file("ctrl", 0644, debugfs->root,
 		npu_dev, &npu_ctrl_fops)) {
-		pr_err("debugfs_create_file ctrl fail\n");
+		NPU_ERR("debugfs_create_file ctrl fail\n");
 		goto err;
 	}
 
 	if (!debugfs_create_bool("sys_cache_disable", 0644,
 		debugfs->root, &(host_ctx->sys_cache_disable))) {
-		pr_err("debugfs_creat_bool fail for sys cache\n");
+		NPU_ERR("debugfs_creat_bool fail for sys cache\n");
 		goto err;
 	}
 
 	if (!debugfs_create_u32("fw_dbg_mode", 0644,
 		debugfs->root, &(host_ctx->fw_dbg_mode))) {
-		pr_err("debugfs_create_u32 fail for fw_dbg_mode\n");
+		NPU_ERR("debugfs_create_u32 fail for fw_dbg_mode\n");
 		goto err;
 	}
 
 	if (!debugfs_create_u32("fw_state", 0444,
 		debugfs->root, &(host_ctx->fw_state))) {
-		pr_err("debugfs_create_u32 fail for fw_state\n");
+		NPU_ERR("debugfs_create_u32 fail for fw_state\n");
 		goto err;
 	}
 
 	if (!debugfs_create_u32("pwr_level", 0444,
 		debugfs->root, &(pwr->active_pwrlevel))) {
-		pr_err("debugfs_create_u32 fail for pwr_level\n");
+		NPU_ERR("debugfs_create_u32 fail for pwr_level\n");
 		goto err;
 	}
 
 	if (!debugfs_create_u32("exec_flags", 0644,
 		debugfs->root, &(host_ctx->exec_flags_override))) {
-		pr_err("debugfs_create_u32 fail for exec_flags\n");
+		NPU_ERR("debugfs_create_u32 fail for exec_flags\n");
 		goto err;
 	}
 
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index 79cd797..196b51a 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -106,23 +104,10 @@
  * -------------------------------------------------------------------------
  */
 static const char * const npu_post_clocks[] = {
-	"npu_cpc_clk",
-	"npu_cpc_timer_clk"
 };
 
 static const char * const npu_exclude_rate_clocks[] = {
-	"qdss_clk",
-	"at_clk",
-	"trig_clk",
-	"sleep_clk",
 	"xo_clk",
-	"conf_noc_ahb_clk",
-	"comp_noc_axi_clk",
-	"npu_core_cti_clk",
-	"npu_core_apb_clk",
-	"npu_core_atb_clk",
-	"npu_cpc_timer_clk",
-	"qtimer_core_clk",
 	"bwmon_clk",
 	"bto_core_clk",
 	"llm_xo_clk",
@@ -134,7 +119,14 @@
 	"dsp_bwmon_ahb_clk",
 	"cal_hm0_perf_cnt_clk",
 	"cal_hm1_perf_cnt_clk",
-	"dsp_ahbs_clk"
+	"dsp_ahbs_clk",
+	"axi_clk",
+	"ahb_clk",
+	"dma_clk",
+	"llm_temp_clk",
+	"llm_curr_clk",
+	"atb_clk",
+	"s2p_clk",
 };
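Clocks named in npu_exclude_rate_clocks are still prepared and enabled but never rate-set (see npu_enable_clocks()/npu_disable_clocks() below). The lookup helper is not shown in this hunk; it is presumably a linear name match along these lines:

static bool npu_is_exclude_rate_clock(const char *clk_name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(npu_exclude_rate_clocks); i++)
		if (!strcmp(clk_name, npu_exclude_rate_clocks[i]))
			return true;

	return false;
}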
 
 static const struct npu_irq npu_irq_info[NPU_MAX_IRQ] = {
@@ -281,13 +273,13 @@
 
 	rc = kstrtou32(buf, 10, &val);
 	if (rc) {
-		pr_err("Invalid input for perf mode setting\n");
+		NPU_ERR("Invalid input for perf mode setting\n");
 		return -EINVAL;
 	}
 
 	val = min(val, npu_dev->pwrctrl.num_pwrlevels);
 	npu_dev->pwrctrl.perf_mode_override = val;
-	pr_info("setting uc_pwrlevel_override to %d\n", val);
+	NPU_INFO("setting uc_pwrlevel_override to %d\n", val);
 	npu_set_power_level(npu_dev, true);
 
 	return count;
@@ -333,7 +325,7 @@
 		pwr->active_pwrlevel = thermalctrl->pwr_level;
 		pwr->uc_pwrlevel = pwr->max_pwrlevel;
 		pwr->cdsprm_pwrlevel = pwr->max_pwrlevel;
-		pr_debug("setting back to power level=%d\n",
+		NPU_DBG("setting back to power level=%d\n",
 			pwr->active_pwrlevel);
 	}
 }
@@ -404,8 +396,8 @@
 	 * settings
 	 */
 	ret_level = min(therm_pwr_level, uc_pwr_level);
-	pr_debug("%s therm=%d active=%d uc=%d set level=%d\n",
-		__func__, therm_pwr_level, active_pwr_level, uc_pwr_level,
+	NPU_DBG("therm=%d active=%d uc=%d set level=%d\n",
+		therm_pwr_level, active_pwr_level, uc_pwr_level,
 		ret_level);
 
 	return ret_level;
@@ -423,7 +415,7 @@
 	pwr_level_to_cdsprm = pwr_level_to_set;
 
 	if (!pwr->pwr_vote_num) {
-		pr_debug("power is not enabled during set request\n");
+		NPU_DBG("power is not enabled during set request\n");
 		pwr->active_pwrlevel = min(pwr_level_to_set,
 			npu_dev->pwrctrl.cdsprm_pwrlevel);
 		return 0;
@@ -434,11 +426,11 @@
 
 	/* if the same as current, dont do anything */
 	if (pwr_level_to_set == pwr->active_pwrlevel) {
-		pr_debug("power level %d doesn't change\n", pwr_level_to_set);
+		NPU_DBG("power level %d doesn't change\n", pwr_level_to_set);
 		return 0;
 	}
 
-	pr_debug("setting power level to [%d]\n", pwr_level_to_set);
+	NPU_DBG("setting power level to [%d]\n", pwr_level_to_set);
 	pwr_level_idx = npu_power_level_to_index(npu_dev, pwr_level_to_set);
 	pwrlevel = &npu_dev->pwrctrl.pwrlevels[pwr_level_idx];
 
@@ -453,13 +445,13 @@
 				continue;
 		}
 
-		pr_debug("requested rate of clock [%s] to [%ld]\n",
+		NPU_DBG("requested rate of clock [%s] to [%ld]\n",
 			npu_dev->core_clks[i].clk_name, pwrlevel->clk_freq[i]);
 
 		ret = clk_set_rate(npu_dev->core_clks[i].clk,
 			pwrlevel->clk_freq[i]);
 		if (ret) {
-			pr_debug("clk_set_rate %s to %ld failed with %d\n",
+			NPU_DBG("clk_set_rate %s to %ld failed with %d\n",
 				npu_dev->core_clks[i].clk_name,
 				pwrlevel->clk_freq[i], ret);
 			break;
@@ -541,11 +533,11 @@
 				continue;
 		}
 
-		pr_debug("enabling clock %s\n", core_clks[i].clk_name);
+		NPU_DBG("enabling clock %s\n", core_clks[i].clk_name);
 
 		rc = clk_prepare_enable(core_clks[i].clk);
 		if (rc) {
-			pr_err("%s enable failed\n",
+			NPU_ERR("%s enable failed\n",
 				core_clks[i].clk_name);
 			break;
 		}
@@ -553,14 +545,14 @@
 		if (npu_is_exclude_rate_clock(core_clks[i].clk_name))
 			continue;
 
-		pr_debug("setting rate of clock %s to %ld\n",
+		NPU_DBG("setting rate of clock %s to %ld\n",
 			core_clks[i].clk_name, pwrlevel->clk_freq[i]);
 
 		rc = clk_set_rate(core_clks[i].clk,
 			pwrlevel->clk_freq[i]);
 		/* not fatal error, keep using previous clk rate */
 		if (rc) {
-			pr_err("clk_set_rate %s to %ld failed\n",
+			NPU_ERR("clk_set_rate %s to %ld failed\n",
 				core_clks[i].clk_name,
 				pwrlevel->clk_freq[i]);
 			rc = 0;
@@ -576,7 +568,7 @@
 				if (npu_is_post_clock(core_clks[i].clk_name))
 					continue;
 			}
-			pr_debug("disabling clock %s\n", core_clks[i].clk_name);
+			NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 			clk_disable_unprepare(core_clks[i].clk);
 		}
 	}
@@ -586,7 +578,7 @@
 
 static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
 {
-	int i = 0;
+	int i, rc = 0;
 	struct npu_clk *core_clks = npu_dev->core_clks;
 
 	for (i = npu_dev->core_clk_num - 1; i >= 0 ; i--) {
@@ -598,7 +590,19 @@
 				continue;
 		}
 
-		pr_debug("disabling clock %s\n", core_clks[i].clk_name);
+		/* set clock rate to 0 before disabling it */
+		if (!npu_is_exclude_rate_clock(core_clks[i].clk_name)) {
+			NPU_DBG("setting rate of clock %s to 0\n",
+				core_clks[i].clk_name);
+
+			rc = clk_set_rate(core_clks[i].clk, 0);
+			if (rc) {
+				NPU_ERR("clk_set_rate %s to 0 failed\n",
+					core_clks[i].clk_name);
+			}
+		}
+
+		NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 		clk_disable_unprepare(core_clks[i].clk);
 	}
 }
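Setting each scalable clock back to 0 before clk_disable_unprepare() drops this driver's frequency vote in the clock framework, so shared or aggregated rates are not left pinned at the old NPU request once the clock is off. Per clock, the teardown order used above is, as a standalone restatement (the driver keeps the loop inline):

static void npu_clk_teardown_one(struct npu_clk *c)
{
	/* drop our rate vote first; excluded clocks are fixed-rate */
	if (!npu_is_exclude_rate_clock(c->clk_name))
		clk_set_rate(c->clk, 0);

	clk_disable_unprepare(c->clk);
}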
@@ -613,8 +617,7 @@
 	struct npu_device *npu_dev = cdev->devdata;
 	struct npu_thermalctrl *thermalctrl = &npu_dev->thermalctrl;
 
-	pr_debug("enter %s thermal max state=%lu\n", __func__,
-		thermalctrl->max_state);
+	NPU_DBG("thermal max state=%lu\n", thermalctrl->max_state);
 
 	*state = thermalctrl->max_state;
 
@@ -627,8 +630,7 @@
 	struct npu_device *npu_dev = cdev->devdata;
 	struct npu_thermalctrl *thermal = &npu_dev->thermalctrl;
 
-	pr_debug("enter %s thermal current state=%lu\n", __func__,
-		thermal->current_state);
+	NPU_DBG("thermal current state=%lu\n", thermal->current_state);
 
 	*state = thermal->current_state;
 
@@ -641,7 +643,7 @@
 	struct npu_device *npu_dev = cdev->devdata;
 	struct npu_thermalctrl *thermal = &npu_dev->thermalctrl;
 
-	pr_debug("enter %s request state=%lu\n", __func__, state);
+	NPU_DBG("request state=%lu\n", state);
 	if (state > thermal->max_state)
 		return -EINVAL;
 
@@ -667,11 +669,11 @@
 		for (i = 0; i < npu_dev->regulator_num; i++) {
 			rc = regulator_enable(regulators[i].regulator);
 			if (rc < 0) {
-				pr_err("%s enable failed\n",
+				NPU_ERR("%s enable failed\n",
 					regulators[i].regulator_name);
 				break;
 			}
-			pr_debug("regulator %s enabled\n",
+			NPU_DBG("regulator %s enabled\n",
 				regulators[i].regulator_name);
 		}
 	}
@@ -688,7 +690,7 @@
 	if (host_ctx->power_vote_num > 0) {
 		for (i = 0; i < npu_dev->regulator_num; i++) {
 			regulator_disable(regulators[i].regulator);
-			pr_debug("regulator %s disabled\n",
+			NPU_DBG("regulator %s disabled\n",
 				regulators[i].regulator_name);
 		}
 		host_ctx->power_vote_num--;
@@ -706,7 +708,7 @@
 	for (i = 0; i < NPU_MAX_IRQ; i++) {
 		if (npu_dev->irq[i].irq != 0) {
 			enable_irq(npu_dev->irq[i].irq);
-			pr_debug("enable irq %d\n", npu_dev->irq[i].irq);
+			NPU_DBG("enable irq %d\n", npu_dev->irq[i].irq);
 		}
 	}
 
@@ -720,7 +722,7 @@
 	for (i = 0; i < NPU_MAX_IRQ; i++) {
 		if (npu_dev->irq[i].irq != 0) {
 			disable_irq(npu_dev->irq[i].irq);
-			pr_debug("disable irq %d\n", npu_dev->irq[i].irq);
+			NPU_DBG("disable irq %d\n", npu_dev->irq[i].irq);
 		}
 	}
 }
@@ -737,27 +739,43 @@
 	if (!npu_dev->host_ctx.sys_cache_disable) {
 		npu_dev->sys_cache = llcc_slice_getd(LLCC_NPU);
 		if (IS_ERR_OR_NULL(npu_dev->sys_cache)) {
-			pr_warn("unable to init sys cache\n");
+			NPU_WARN("unable to init sys cache\n");
 			npu_dev->sys_cache = NULL;
 			npu_dev->host_ctx.sys_cache_disable = true;
 			return 0;
 		}
 
 		/* set npu side regs - program SCID */
-		reg_val = NPU_CACHE_ATTR_IDn___POR | SYS_CACHE_SCID;
+		reg_val = REGR(npu_dev, NPU_CACHEMAP0_ATTR_IDn(0));
+		reg_val = (reg_val & ~NPU_CACHEMAP_SCID_MASK) | SYS_CACHE_SCID;
 
-		REGW(npu_dev, NPU_CACHE_ATTR_IDn(0), reg_val);
-		REGW(npu_dev, NPU_CACHE_ATTR_IDn(1), reg_val);
-		REGW(npu_dev, NPU_CACHE_ATTR_IDn(2), reg_val);
-		REGW(npu_dev, NPU_CACHE_ATTR_IDn(3), reg_val);
-		REGW(npu_dev, NPU_CACHE_ATTR_IDn(4), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(0), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(1), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(2), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(3), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(4), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_METADATA_IDn(0), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_METADATA_IDn(1), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_METADATA_IDn(2), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_METADATA_IDn(3), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_METADATA_IDn(4), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_IDn(0), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_IDn(1), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_IDn(2), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_IDn(3), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_IDn(4), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(0), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(1), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(2), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(3), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(4), reg_val);
 
-		pr_debug("prior to activate sys cache\n");
+		NPU_DBG("prior to activate sys cache\n");
 		rc = llcc_slice_activate(npu_dev->sys_cache);
 		if (rc)
-			pr_err("failed to activate sys cache\n");
+			NPU_ERR("failed to activate sys cache\n");
 		else
-			pr_debug("sys cache activated\n");
+			NPU_DBG("sys cache activated\n");
 	}
 
 	return rc;
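The SCID programming is now a read-modify-write: the removed code wrote the power-on-reset value ORed with the SCID, clobbering any other attribute bits, whereas the new sequence preserves them and replaces only the SCID field. In isolation:

	u32 reg_val;

	reg_val = REGR(npu_dev, NPU_CACHEMAP0_ATTR_IDn(0));
	reg_val &= ~NPU_CACHEMAP_SCID_MASK;	/* clear the old SCID field */
	reg_val |= SYS_CACHE_SCID;		/* insert the NPU LLCC slice id */
	REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(0), reg_val);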
@@ -771,10 +789,10 @@
 		if (npu_dev->sys_cache) {
 			rc = llcc_slice_deactivate(npu_dev->sys_cache);
 			if (rc) {
-				pr_err("failed to deactivate sys cache\n");
+				NPU_ERR("failed to deactivate sys cache\n");
 				return;
 			}
-			pr_debug("sys cache deactivated\n");
+			NPU_DBG("sys cache deactivated\n");
 			llcc_slice_putd(npu_dev->sys_cache);
 			npu_dev->sys_cache = NULL;
 		}
@@ -838,21 +856,21 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	ret = npu_host_get_info(npu_dev, &req);
 
 	if (ret) {
-		pr_err("npu_host_get_info failed\n");
+		NPU_ERR("npu_host_get_info failed\n");
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -867,21 +885,21 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	ret = npu_host_map_buf(client, &req);
 
 	if (ret) {
-		pr_err("npu_host_map_buf failed\n");
+		NPU_ERR("npu_host_map_buf failed\n");
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -896,21 +914,21 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	ret = npu_host_unmap_buf(client, &req);
 
 	if (ret) {
-		pr_err("npu_host_unmap_buf failed\n");
+		NPU_ERR("npu_host_unmap_buf failed\n");
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -927,21 +945,21 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
-	pr_debug("network load with perf request %d\n", req.perf_mode);
+	NPU_DBG("network load with perf request %d\n", req.perf_mode);
 
 	ret = npu_host_load_network(client, &req);
 	if (ret) {
-		pr_err("npu_host_load_network failed %d\n", ret);
+		NPU_ERR("npu_host_load_network failed %d\n", ret);
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		ret = -EFAULT;
 		unload_req.network_hdl = req.network_hdl;
 		npu_host_unload_network(client, &unload_req);
@@ -960,12 +978,12 @@
 
 	ret = copy_from_user(&req, argp, sizeof(req));
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	if (req.patch_info_num > MSM_NPU_MAX_PATCH_LAYER_NUM) {
-		pr_err("Invalid patch info num %d[max:%d]\n",
+		NPU_ERR("Invalid patch info num %d[max:%d]\n",
 			req.patch_info_num, MSM_NPU_MAX_PATCH_LAYER_NUM);
 		return -EINVAL;
 	}
@@ -980,25 +998,25 @@
 			(void __user *)req.patch_info,
 			req.patch_info_num * sizeof(*patch_info));
 		if (ret) {
-			pr_err("fail to copy patch info\n");
+			NPU_ERR("fail to copy patch info\n");
 			kfree(patch_info);
 			return -EFAULT;
 		}
 	}
 
-	pr_debug("network load with perf request %d\n", req.perf_mode);
+	NPU_DBG("network load with perf request %d\n", req.perf_mode);
 
 	ret = npu_host_load_network_v2(client, &req, patch_info);
 
 	kfree(patch_info);
 	if (ret) {
-		pr_err("npu_host_load_network_v2 failed %d\n", ret);
+		NPU_ERR("npu_host_load_network_v2 failed %d\n", ret);
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		ret = -EFAULT;
 		unload_req.network_hdl = req.network_hdl;
 		npu_host_unload_network(client, &unload_req);
@@ -1017,21 +1035,21 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	ret = npu_host_unload_network(client, &req);
 
 	if (ret) {
-		pr_err("npu_host_unload_network failed %d\n", ret);
+		NPU_ERR("npu_host_unload_network failed %d\n", ret);
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -1047,13 +1065,13 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	if ((req.input_layer_num > MSM_NPU_MAX_INPUT_LAYER_NUM) ||
 		(req.output_layer_num > MSM_NPU_MAX_OUTPUT_LAYER_NUM)) {
-		pr_err("Invalid input/out layer num %d[max:%d] %d[max:%d]\n",
+		NPU_ERR("Invalid input/out layer num %d[max:%d] %d[max:%d]\n",
 			req.input_layer_num, MSM_NPU_MAX_INPUT_LAYER_NUM,
 			req.output_layer_num, MSM_NPU_MAX_OUTPUT_LAYER_NUM);
 		return -EINVAL;
@@ -1062,14 +1080,14 @@
 	ret = npu_host_exec_network(client, &req);
 
 	if (ret) {
-		pr_err("npu_host_exec_network failed %d\n", ret);
+		NPU_ERR("npu_host_exec_network failed %d\n", ret);
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -1085,18 +1103,18 @@
 
 	ret = copy_from_user(&req, argp, sizeof(req));
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	if (req.patch_buf_info_num > MSM_NPU_MAX_PATCH_LAYER_NUM) {
-		pr_err("Invalid patch buf info num %d[max:%d]\n",
+		NPU_ERR("Invalid patch buf info num %d[max:%d]\n",
 			req.patch_buf_info_num, MSM_NPU_MAX_PATCH_LAYER_NUM);
 		return -EINVAL;
 	}
 
 	if (req.stats_buf_size > NPU_MAX_STATS_BUF_SIZE) {
-		pr_err("Invalid stats buffer size %d max %d\n",
+		NPU_ERR("Invalid stats buffer size %d max %d\n",
 			req.stats_buf_size, NPU_MAX_STATS_BUF_SIZE);
 		return -EINVAL;
 	}
@@ -1111,7 +1129,7 @@
 			(void __user *)req.patch_buf_info,
 			req.patch_buf_info_num * sizeof(*patch_buf_info));
 		if (ret) {
-			pr_err("fail to copy patch buf info\n");
+			NPU_ERR("fail to copy patch buf info\n");
 			kfree(patch_buf_info);
 			return -EFAULT;
 		}
@@ -1121,13 +1139,13 @@
 
 	kfree(patch_buf_info);
 	if (ret) {
-		pr_err("npu_host_exec_network_v2 failed %d\n", ret);
+		NPU_ERR("npu_host_exec_network_v2 failed %d\n", ret);
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		ret = -EFAULT;
 	}
 
@@ -1144,7 +1162,7 @@
 			(void *)&kevt->reserved[0],
 			kevt->evt.u.exec_v2_done.stats_buf_size);
 		if (ret) {
-			pr_err("fail to copy to user\n");
+			NPU_ERR("fail to copy to user\n");
 			kevt->evt.u.exec_v2_done.stats_buf_size = 0;
 			ret = -EFAULT;
 		}
@@ -1165,7 +1183,7 @@
 
 	mutex_lock(&client->list_lock);
 	if (list_empty(&client->evt_list)) {
-		pr_err("event list is empty\n");
+		NPU_ERR("event list is empty\n");
 		ret = -EINVAL;
 	} else {
 		kevt = list_first_entry(&client->evt_list,
@@ -1175,7 +1193,7 @@
 		ret = copy_to_user(argp, &kevt->evt,
 			sizeof(struct msm_npu_event));
 		if (ret) {
-			pr_err("fail to copy to user\n");
+			NPU_ERR("fail to copy to user\n");
 			ret = -EFAULT;
 		}
 		kfree(kevt);
@@ -1220,7 +1238,7 @@
 		ret = npu_receive_event(client, arg);
 		break;
 	default:
-		pr_err("unexpected IOCTL %x\n", cmd);
+		NPU_ERR("unexpected IOCTL %x\n", cmd);
 	}
 
 	return ret;
@@ -1235,7 +1253,7 @@
 
 	mutex_lock(&client->list_lock);
 	if (!list_empty(&client->evt_list)) {
-		pr_debug("poll cmd done\n");
+		NPU_DBG("poll cmd done\n");
 		rc = POLLIN | POLLRDNORM;
 	}
 	mutex_unlock(&client->list_lock);
@@ -1259,11 +1277,11 @@
 	num_clk = of_property_count_strings(pdev->dev.of_node,
 			"clock-names");
 	if (num_clk <= 0) {
-		pr_err("clocks are not defined\n");
+		NPU_ERR("clocks are not defined\n");
 		rc = -EINVAL;
 		goto clk_err;
 	} else if (num_clk > NUM_MAX_CLK_NUM) {
-		pr_err("number of clocks %d exceeds limit\n", num_clk);
+		NPU_ERR("number of clocks %d exceeds limit\n", num_clk);
 		rc = -EINVAL;
 		goto clk_err;
 	}
@@ -1276,7 +1294,7 @@
 			sizeof(core_clks[i].clk_name));
 		core_clks[i].clk = devm_clk_get(&pdev->dev, clock_name);
 		if (IS_ERR(core_clks[i].clk)) {
-			pr_err("unable to get clk: %s\n", clock_name);
+			NPU_ERR("unable to get clk: %s\n", clock_name);
 			rc = -EINVAL;
 			break;
 		}
@@ -1299,12 +1317,12 @@
 			"qcom,proxy-reg-names");
 	if (num <= 0) {
 		rc = -EINVAL;
-		pr_err("regulator not defined\n");
+		NPU_ERR("regulator not defined\n");
 		goto regulator_err;
 	}
 	if (num > NPU_MAX_REGULATOR_NUM) {
 		rc = -EINVAL;
-		pr_err("regulator number %d is over the limit %d\n", num,
+		NPU_ERR("regulator number %d is over the limit %d\n", num,
 			NPU_MAX_REGULATOR_NUM);
 		num = NPU_MAX_REGULATOR_NUM;
 	}
@@ -1317,7 +1335,7 @@
 				sizeof(regulators[i].regulator_name));
 		regulators[i].regulator = devm_regulator_get(&pdev->dev, name);
 		if (IS_ERR(regulators[i].regulator)) {
-			pr_err("unable to get regulator: %s\n", name);
+			NPU_ERR("unable to get regulator: %s\n", name);
 			rc = -EINVAL;
 			break;
 		}
@@ -1348,17 +1366,17 @@
 		struct npu_pwrlevel *level;
 
 		if (of_property_read_u32(child, "reg", &index)) {
-			pr_err("Can't find reg property\n");
+			NPU_ERR("Can't find reg property\n");
 			return -EINVAL;
 		}
 
 		if (of_property_read_u32(child, "vreg", &pwr_level)) {
-			pr_err("Can't find vreg property\n");
+			NPU_ERR("Can't find vreg property\n");
 			return -EINVAL;
 		}
 
 		if (index >= NPU_MAX_PWRLEVELS) {
-			pr_err("pwrlevel index %d is out of range\n",
+			NPU_ERR("pwrlevel index %d is out of range\n",
 				index);
 			continue;
 		}
@@ -1368,7 +1386,7 @@
 
 		if (of_property_read_u32_array(child, "clk-freq",
 			clk_array_values, npu_dev->core_clk_num)) {
-			pr_err("pwrlevel index %d read clk-freq failed %d\n",
+			NPU_ERR("pwrlevel index %d read clk-freq failed %d\n",
 				index, npu_dev->core_clk_num);
 			return -EINVAL;
 		}
@@ -1387,7 +1405,7 @@
 
 			clk_rate = clk_round_rate(npu_dev->core_clks[i].clk,
 				clk_array_values[i]);
-			pr_debug("clk %s rate [%u]:[%u]\n",
+			NPU_DBG("clk %s rate [%u]:[%u]\n",
 				npu_dev->core_clks[i].clk_name,
 				clk_array_values[i], clk_rate);
 			level->clk_freq[i] = clk_rate;
@@ -1399,7 +1417,7 @@
 		fmax = (npu_qfprom_reg_read(npu_dev,
 			QFPROM_FMAX_REG_OFFSET) & QFPROM_FMAX_BITS_MASK) >>
 			QFPROM_FMAX_BITS_SHIFT;
-		pr_debug("fmax %x\n", fmax);
+		NPU_DBG("fmax %x\n", fmax);
 
 		switch (fmax) {
 		case 1:
@@ -1419,7 +1437,7 @@
 	}
 
 	of_property_read_u32(node, "initial-pwrlevel", &init_level_index);
-	pr_debug("initial-pwrlevel %d\n", init_level_index);
+	NPU_DBG("initial-pwrlevel %d\n", init_level_index);
 
 	if (init_level_index >= pwr->num_pwrlevels)
 		init_level_index = pwr->num_pwrlevels - 1;
@@ -1428,10 +1446,10 @@
 		init_level_index);
 	if (init_power_level > pwr->max_pwrlevel) {
 		init_power_level = pwr->max_pwrlevel;
-		pr_debug("Adjust init power level to %d\n", init_power_level);
+		NPU_DBG("Adjust init power level to %d\n", init_power_level);
 	}
 
-	pr_debug("init power level %d max %d min %d\n", init_power_level,
+	NPU_DBG("init power level %d max %d min %d\n", init_power_level,
 		pwr->max_pwrlevel, pwr->min_pwrlevel);
 	pwr->active_pwrlevel = pwr->default_pwrlevel = init_power_level;
 	pwr->uc_pwrlevel = pwr->max_pwrlevel;
@@ -1446,14 +1464,12 @@
 	struct platform_device *pdev = npu_dev->pdev;
 	struct device_node *node;
 	int ret = 0;
-	struct platform_device *p2dev;
-	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 
 	/* Power levels */
 	node = of_find_node_by_name(pdev->dev.of_node, "qcom,npu-pwrlevels");
 
 	if (!node) {
-		pr_err("unable to find 'qcom,npu-pwrlevels'\n");
+		NPU_ERR("unable to find 'qcom,npu-pwrlevels'\n");
 		return -EINVAL;
 	}
 
@@ -1461,26 +1477,6 @@
 	if (ret)
 		return ret;
 
-	/* Parse Bandwidth */
-	node = of_parse_phandle(pdev->dev.of_node,
-				"qcom,npubw-dev", 0);
-
-	if (node) {
-		/* Set to 1 initially - we assume bwmon is on */
-		pwr->bwmon_enabled = 1;
-		p2dev = of_find_device_by_node(node);
-		if (p2dev) {
-			pwr->devbw = &p2dev->dev;
-		} else {
-			pr_err("parser power level failed\n");
-			ret = -EINVAL;
-			return ret;
-		}
-	} else {
-		pr_warn("bwdev is not defined in dts\n");
-		pwr->devbw = NULL;
-	}
-
 	return ret;
 }
 
@@ -1505,13 +1501,13 @@
 		npu_dev->irq[i].irq = platform_get_irq_byname(
 			npu_dev->pdev, npu_dev->irq[i].name);
 		if (npu_dev->irq[i].irq < 0) {
-			pr_err("get_irq for %s failed\n\n",
+			NPU_ERR("get_irq for %s failed\n",
 				npu_dev->irq[i].name);
 			ret = -EINVAL;
 			break;
 		}
 
-		pr_debug("irq %s: %d\n", npu_dev->irq[i].name,
+		NPU_DBG("irq %s: %d\n", npu_dev->irq[i].name,
 			npu_dev->irq[i].irq);
 		irq_set_status_flags(npu_dev->irq[i].irq,
 						IRQ_NOAUTOEN);
@@ -1520,7 +1516,7 @@
 				irq_type, npu_dev->irq[i].name,
 				npu_dev);
 		if (ret) {
-			pr_err("devm_request_irq(%s:%d) failed\n",
+			NPU_ERR("devm_request_irq(%s:%d) failed\n",
 				npu_dev->irq[i].name,
 				npu_dev->irq[i].irq);
 			break;
@@ -1543,7 +1539,7 @@
 
 		mbox_aop->chan = mbox_request_channel(&mbox_aop->client, 0);
 		if (IS_ERR(mbox_aop->chan)) {
-			pr_warn("aop mailbox is not available\n");
+			NPU_WARN("aop mailbox is not available\n");
 			mbox_aop->chan = NULL;
 		}
 	}
@@ -1581,7 +1577,7 @@
 	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "core");
 	if (!res) {
-		pr_err("unable to get core resource\n");
+		NPU_ERR("unable to get core resource\n");
 		rc = -ENODEV;
 		goto error_get_dev_num;
 	}
@@ -1590,17 +1586,17 @@
 	npu_dev->core_io.base = devm_ioremap(&pdev->dev, res->start,
 					npu_dev->core_io.size);
 	if (unlikely(!npu_dev->core_io.base)) {
-		pr_err("unable to map core\n");
+		NPU_ERR("unable to map core\n");
 		rc = -ENOMEM;
 		goto error_get_dev_num;
 	}
-	pr_debug("core phy address=0x%llx virt=%pK\n",
+	NPU_DBG("core phy address=0x%llx virt=%pK\n",
 		res->start, npu_dev->core_io.base);
 
 	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "tcm");
 	if (!res) {
-		pr_err("unable to get tcm resource\n");
+		NPU_ERR("unable to get tcm resource\n");
 		rc = -ENODEV;
 		goto error_get_dev_num;
 	}
@@ -1609,17 +1605,17 @@
 	npu_dev->tcm_io.base = devm_ioremap(&pdev->dev, res->start,
 					npu_dev->tcm_io.size);
 	if (unlikely(!npu_dev->tcm_io.base)) {
-		pr_err("unable to map tcm\n");
+		NPU_ERR("unable to map tcm\n");
 		rc = -ENOMEM;
 		goto error_get_dev_num;
 	}
-	pr_debug("tcm phy address=0x%llx virt=%pK\n",
+	NPU_DBG("tcm phy address=0x%llx virt=%pK\n",
 		res->start, npu_dev->tcm_io.base);
 
 	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "qdsp");
 	if (!res) {
-		pr_err("unable to get qdsp resource\n");
+		NPU_ERR("unable to get qdsp resource\n");
 		rc = -ENODEV;
 		goto error_get_dev_num;
 	}
@@ -1628,17 +1624,17 @@
 	npu_dev->qdsp_io.base = devm_ioremap(&pdev->dev, res->start,
 					npu_dev->qdsp_io.size);
 	if (unlikely(!npu_dev->qdsp_io.base)) {
-		pr_err("unable to map qdsp\n");
+		NPU_ERR("unable to map qdsp\n");
 		rc = -ENOMEM;
 		goto error_get_dev_num;
 	}
-	pr_debug("qdsp phy address=0x%x virt=%pK\n",
+	NPU_DBG("qdsp phy address=0x%llx virt=%pK\n",
 		res->start, npu_dev->qdsp_io.base);
 
 	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "apss_shared");
 	if (!res) {
-		pr_err("unable to get apss_shared resource\n");
+		NPU_ERR("unable to get apss_shared resource\n");
 		rc = -ENODEV;
 		goto error_get_dev_num;
 	}
@@ -1647,46 +1643,28 @@
 	npu_dev->apss_shared_io.base = devm_ioremap(&pdev->dev, res->start,
 					npu_dev->apss_shared_io.size);
 	if (unlikely(!npu_dev->apss_shared_io.base)) {
-		pr_err("unable to map apss_shared\n");
+		NPU_ERR("unable to map apss_shared\n");
 		rc = -ENOMEM;
 		goto error_get_dev_num;
 	}
-	pr_debug("apss_shared phy address=0x%x virt=%pK\n",
+	NPU_DBG("apss_shared phy address=0x%llx virt=%pK\n",
 		res->start, npu_dev->apss_shared_io.base);
 
 	res = platform_get_resource_byname(pdev,
-		IORESOURCE_MEM, "bwmon");
-	if (!res) {
-		pr_info("unable to get bwmon resource\n");
-	} else {
-		npu_dev->bwmon_io.size = resource_size(res);
-		npu_dev->bwmon_io.phy_addr = res->start;
-		npu_dev->bwmon_io.base = devm_ioremap(&pdev->dev, res->start,
-						npu_dev->bwmon_io.size);
-		if (unlikely(!npu_dev->bwmon_io.base)) {
-			pr_err("unable to map bwmon\n");
-			rc = -ENOMEM;
-			goto error_get_dev_num;
-		}
-		pr_debug("bwmon phy address=0x%llx virt=%pK\n",
-			res->start, npu_dev->bwmon_io.base);
-	}
-
-	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "qfprom_physical");
 	if (!res) {
-		pr_info("unable to get qfprom_physical resource\n");
+		NPU_INFO("unable to get qfprom_physical resource\n");
 	} else {
 		npu_dev->qfprom_io.size = resource_size(res);
 		npu_dev->qfprom_io.phy_addr = res->start;
 		npu_dev->qfprom_io.base = devm_ioremap(&pdev->dev, res->start,
 					npu_dev->qfprom_io.size);
 		if (unlikely(!npu_dev->qfprom_io.base)) {
-			pr_err("unable to map qfprom_physical\n");
+			NPU_ERR("unable to map qfprom_physical\n");
 			rc = -ENOMEM;
 			goto error_get_dev_num;
 		}
-		pr_debug("qfprom_physical phy address=0x%llx virt=%pK\n",
+		NPU_DBG("qfprom_physical phy address=0x%llx virt=%pK\n",
 			res->start, npu_dev->qfprom_io.base);
 	}
 
@@ -1717,14 +1695,14 @@
 	/* character device might be optional */
 	rc = alloc_chrdev_region(&npu_dev->dev_num, 0, 1, DRIVER_NAME);
 	if (rc < 0) {
-		pr_err("alloc_chrdev_region failed: %d\n", rc);
+		NPU_ERR("alloc_chrdev_region failed: %d\n", rc);
 		goto error_get_dev_num;
 	}
 
 	npu_dev->class = class_create(THIS_MODULE, CLASS_NAME);
 	if (IS_ERR(npu_dev->class)) {
 		rc = PTR_ERR(npu_dev->class);
-		pr_err("class_create failed: %d\n", rc);
+		NPU_ERR("class_create failed: %d\n", rc);
 		goto error_class_create;
 	}
 
@@ -1732,7 +1710,7 @@
 		npu_dev->dev_num, NULL, DRIVER_NAME);
 	if (IS_ERR(npu_dev->device)) {
 		rc = PTR_ERR(npu_dev->device);
-		pr_err("device_create failed: %d\n", rc);
+		NPU_ERR("device_create failed: %d\n", rc);
 		goto error_class_device_create;
 	}
 
@@ -1740,15 +1718,15 @@
 	rc = cdev_add(&npu_dev->cdev,
 			MKDEV(MAJOR(npu_dev->dev_num), 0), 1);
 	if (rc < 0) {
-		pr_err("cdev_add failed %d\n", rc);
+		NPU_ERR("cdev_add failed %d\n", rc);
 		goto error_cdev_add;
 	}
 	dev_set_drvdata(npu_dev->device, npu_dev);
-	pr_debug("drvdata %pK %pK\n", dev_get_drvdata(&pdev->dev),
+	NPU_DBG("drvdata %pK %pK\n", dev_get_drvdata(&pdev->dev),
 		dev_get_drvdata(npu_dev->device));
 	rc = sysfs_create_group(&npu_dev->device->kobj, &npu_fs_attr_group);
 	if (rc) {
-		pr_err("unable to register npu sysfs nodes\n");
+		NPU_ERR("unable to register npu sysfs nodes\n");
 		goto error_res_init;
 	}
 
@@ -1774,7 +1752,7 @@
 
 	rc = npu_host_init(npu_dev);
 	if (rc) {
-		pr_err("unable to init host\n");
+		NPU_ERR("unable to init host\n");
 		goto error_driver_init;
 	}
 
@@ -1846,7 +1824,7 @@
 
 	rc = platform_driver_register(&npu_driver);
 	if (rc)
-		pr_err("register failed %d\n", rc);
+		NPU_ERR("register failed %d\n", rc);
 	return rc;
 }
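
Note on the conversion above (and in the files that follow): the driver migrates from bare pr_err/pr_debug/pr_warn calls to driver-scoped NPU_ERR/NPU_DBG/NPU_WARN/NPU_INFO macros, which is also why the per-file "#define pr_fmt(...)" lines are dropped below. The macro definitions are not part of these hunks; the sketch here is only an assumed shape for them, following the usual pattern of baking one prefix into one place (the real definitions may also embed __func__/__LINE__):

	#include <linux/printk.h>

	/*
	 * Assumed shape of the driver-scoped logging macros; illustrative
	 * only, not copied from this patch.
	 */
	#define NPU_DBG(fmt, args...)  pr_debug("NPU_DBG: " fmt, ##args)
	#define NPU_INFO(fmt, args...) pr_info("NPU_INFO: " fmt, ##args)
	#define NPU_WARN(fmt, args...) pr_warn("NPU_WARN: " fmt, ##args)
	#define NPU_ERR(fmt, args...)  pr_err("NPU_ERR: " fmt, ##args)
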
 
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index b26d221..85e8187 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -169,9 +167,9 @@
 	}
 
 	if (status == 0)
-		pr_debug("Cmd Msg put on Command Queue - SUCCESSS\n");
+		NPU_DBG("Cmd Msg put on Command Queue - SUCCESS\n");
 	else
-		pr_err("Cmd Msg put on Command Queue - FAILURE\n");
+		NPU_ERR("Cmd Msg put on Command Queue - FAILURE\n");
 
 	return status;
 }
@@ -232,7 +230,7 @@
 	MEMR(npu_dev, (void *)((size_t)read_ptr), packet, 4);
 	packet_size = *((uint32_t *)packet);
 
-	pr_debug("target_que: %d, packet_size: %d\n",
+	NPU_DBG("target_que: %d, packet_size: %d\n",
 			target_que,
 			packet_size);
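
For context on the hunk above: every IPC packet begins with a 32-bit size word, so the reader fetches those 4 bytes first and then knows how much to consume. A minimal user-space model of that size-prefixed framing, with hypothetical names (the driver itself goes through its MEMR() wrapper rather than memcpy):

	#include <stdint.h>
	#include <string.h>

	/* Hypothetical reader: not driver code, just the framing convention. */
	static int read_packet(const uint8_t *queue, uint8_t *dst, uint32_t max_len)
	{
		uint32_t packet_size;

		memcpy(&packet_size, queue, sizeof(packet_size)); /* first word = size */
		if (packet_size < sizeof(packet_size) || packet_size > max_len)
			return -1;	/* reject corrupt or oversized packets */
		memcpy(dst, queue, packet_size);
		return (int)packet_size;
	}
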
 
diff --git a/drivers/media/platform/msm/npu/npu_hw.h b/drivers/media/platform/msm/npu/npu_hw.h
index c328a8eb..7a884dc 100644
--- a/drivers/media/platform/msm/npu/npu_hw.h
+++ b/drivers/media/platform/msm/npu/npu_hw.h
@@ -12,8 +12,11 @@
  */
 #define NPU_HW_VERSION (0x00000000)
 #define NPU_MASTERn_IPC_IRQ_OUT(n) (0x00001004+0x1000*(n))
-#define NPU_CACHE_ATTR_IDn___POR 0x00011100
-#define NPU_CACHE_ATTR_IDn(n) (0x00000800+0x4*(n))
+#define NPU_CACHEMAP0_ATTR_IDn(n) (0x00000800+0x4*(n))
+#define NPU_CACHEMAP0_ATTR_METADATA_IDn(n) (0x00000814+0x4*(n))
+#define NPU_CACHEMAP1_ATTR_IDn(n) (0x00000830+0x4*(n))
+#define NPU_CACHEMAP1_ATTR_METADATA_IDn(n) (0x00000844+0x4*(n))
+#define NPU_CACHEMAP_SCID_MASK 0x0000001F
 #define NPU_MASTERn_IPC_IRQ_IN_CTRL(n) (0x00001008+0x1000*(n))
 #define NPU_MASTER0_IPC_IRQ_IN_CTRL__IRQ_SOURCE_SELECT___S 4
 #define NPU_MASTERn_IPC_IRQ_OUT_CTRL(n) (0x00001004+0x1000*(n))
@@ -44,13 +47,6 @@
 #define NPU_GPR14 (0x00000138)
 #define NPU_GPR15 (0x0000013C)
 
-#define BWMON2_SAMPLING_WINDOW (0x000003A8)
-#define BWMON2_BYTE_COUNT_THRESHOLD_HIGH (0x000003AC)
-#define BWMON2_BYTE_COUNT_THRESHOLD_MEDIUM (0x000003B0)
-#define BWMON2_BYTE_COUNT_THRESHOLD_LOW (0x000003B4)
-#define BWMON2_ZONE_ACTIONS (0x000003B8)
-#define BWMON2_ZONE_COUNT_THRESHOLD (0x000003BC)
-
 #define NPU_QDSP6SS_IPC 0x00088000
 #define NPU_QDSP6SS_IPC1 0x00088004
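
The new NPU_CACHEMAP* macros above compute per-index attribute register offsets, and NPU_CACHEMAP_SCID_MASK isolates a 5-bit system-cache ID field. How such a mask is typically applied in a read-modify-write (illustrative only; this patch does not show the call sites):

	#define NPU_CACHEMAP0_ATTR_IDn(n) (0x00000800 + 0x4 * (n))
	#define NPU_CACHEMAP_SCID_MASK    0x0000001F

	/* Replace only the SCID bits of a register value; sketch, not driver code. */
	static unsigned int npu_set_scid(unsigned int reg_val, unsigned int scid)
	{
		reg_val &= ~NPU_CACHEMAP_SCID_MASK;		/* clear the old SCID */
		reg_val |= scid & NPU_CACHEMAP_SCID_MASK;	/* install the new one */
		return reg_val;
	}
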
 
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c
index d876c47..f2862ab 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.c
+++ b/drivers/media/platform/msm/npu/npu_hw_access.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -26,8 +24,7 @@
 {
 	uint32_t ret = 0;
 
-	ret = readl_relaxed(npu_dev->core_io.base + off);
-	__iormb();
+	ret = readl(npu_dev->core_io.base + off);
 	return ret;
 }
 
@@ -41,8 +38,7 @@
 {
 	uint32_t ret = 0;
 
-	ret = readl_relaxed(npu_dev->qdsp_io.base + off);
-	__iormb();
+	ret = readl(npu_dev->qdsp_io.base + off);
 	return ret;
 }
 
@@ -56,8 +52,7 @@
 {
 	uint32_t ret = 0;
 
-	ret = readl_relaxed(npu_dev->apss_shared_io.base + off);
-	__iormb();
+	ret = readl(npu_dev->apss_shared_io.base + off);
 	return ret;
 }
 
@@ -68,30 +63,12 @@
 	__iowmb();
 }
 
-uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t off)
-{
-	uint32_t ret = 0;
-
-	ret = readl_relaxed(npu_dev->bwmon_io.base + off);
-	__iormb();
-	return ret;
-}
-
-void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off,
-	uint32_t val)
-{
-	writel_relaxed(val, npu_dev->bwmon_io.base + off);
-	__iowmb();
-}
-
 uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off)
 {
 	uint32_t ret = 0;
 
-	if (npu_dev->qfprom_io.base) {
-		ret = readl_relaxed(npu_dev->qfprom_io.base + off);
-		__iormb();
-	}
+	if (npu_dev->qfprom_io.base)
+		ret = readl(npu_dev->qfprom_io.base + off);
 
 	return ret;
 }
@@ -109,7 +86,7 @@
 	uint32_t i = 0;
 	uint32_t num = 0;
 
-	pr_debug("write dst_off %x size %x\n", dst_off, size);
+	NPU_DBG("write dst_off %zx size %x\n", dst_off, size);
 	num = size/4;
 	for (i = 0; i < num; i++) {
 		writel_relaxed(src_ptr32[i], npu_dev->tcm_io.base + dst_off);
@@ -136,7 +113,7 @@
 	uint32_t i = 0;
 	uint32_t num = 0;
 
-	pr_debug("read src_off %x size %x\n", src_off, size);
+	NPU_DBG("read src_off %zx size %x\n", src_off, size);
 
 	num = size/4;
 	for (i = 0; i < num; i++) {
@@ -201,7 +178,7 @@
 
 	if (ret_val) {
 		/* mapped already, treat as invalid request */
-		pr_err("ion buf has been mapped\n");
+		NPU_ERR("ion buf has been mapped\n");
 		ret_val = NULL;
 	} else {
 		ret_val = kzalloc(sizeof(*ret_val), GFP_KERNEL);
@@ -268,7 +245,7 @@
 
 	ion_buf = npu_alloc_npu_ion_buffer(client, buf_hdl, size);
 	if (!ion_buf) {
-		pr_err("%s fail to alloc npu_ion_buffer\n", __func__);
+		NPU_ERR("fail to alloc npu_ion_buffer\n");
 		ret = -ENOMEM;
 		return ret;
 	}
@@ -277,7 +254,7 @@
 
 	ion_buf->dma_buf = dma_buf_get(ion_buf->fd);
 	if (IS_ERR_OR_NULL(ion_buf->dma_buf)) {
-		pr_err("dma_buf_get failed %d\n", ion_buf->fd);
+		NPU_ERR("dma_buf_get failed %d\n", ion_buf->fd);
 		ret = -ENOMEM;
 		ion_buf->dma_buf = NULL;
 		goto map_end;
@@ -296,7 +273,7 @@
 	ion_buf->table = dma_buf_map_attachment(ion_buf->attachment,
 			DMA_BIDIRECTIONAL);
 	if (IS_ERR(ion_buf->table)) {
-		pr_err("npu dma_buf_map_attachment failed\n");
+		NPU_ERR("npu dma_buf_map_attachment failed\n");
 		ret = -ENOMEM;
 		ion_buf->table = NULL;
 		goto map_end;
@@ -307,9 +284,9 @@
 	ion_buf->iova = ion_buf->table->sgl->dma_address;
 	ion_buf->size = ion_buf->dma_buf->size;
 	*addr = ion_buf->iova;
-	pr_debug("mapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
+	NPU_DBG("mapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
 		ion_buf->size);
-	pr_debug("physical address 0x%llx\n", sg_phys(ion_buf->table->sgl));
+	NPU_DBG("physical address 0x%llx\n", sg_phys(ion_buf->table->sgl));
 map_end:
 	if (ret)
 		npu_mem_unmap(client, buf_hdl, 0);
@@ -324,7 +301,7 @@
 		buf_hdl);
 
 	if (!ion_buf)
-		pr_err("%s cant find ion buf\n", __func__);
+		NPU_ERR("can't find ion buf\n");
 	else
 		dma_sync_sg_for_cpu(&(npu_dev->pdev->dev), ion_buf->table->sgl,
 			ion_buf->table->nents, DMA_BIDIRECTIONAL);
@@ -357,12 +334,12 @@
 	/* clear entry and retrieve the corresponding buffer */
 	ion_buf = npu_get_npu_ion_buffer(client, buf_hdl);
 	if (!ion_buf) {
-		pr_err("%s could not find buffer\n", __func__);
+		NPU_ERR("could not find buffer\n");
 		return;
 	}
 
 	if (ion_buf->iova != addr)
-		pr_warn("unmap address %llu doesn't match %llu\n", addr,
+		NPU_WARN("unmap address %llu doesn't match %llu\n", addr,
 			ion_buf->iova);
 
 	if (ion_buf->table)
@@ -374,7 +351,7 @@
 		dma_buf_put(ion_buf->dma_buf);
 	npu_dev->smmu_ctx.attach_cnt--;
 
-	pr_debug("unmapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
+	NPU_DBG("unmapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
 		ion_buf->size);
 	npu_free_npu_ion_buffer(client, buf_hdl);
 }
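
The recurring change in this file replaces the readl_relaxed()-plus-__iormb() pair with plain readl(). On arm64 the two are equivalent, since readl() is the relaxed read followed by the read barrier; folding them means the barrier can no longer be forgotten at a call site. A side-by-side sketch (base/off stand in for the driver's mapped register windows):

	#include <linux/io.h>

	static u32 reg_read_old(void __iomem *base, u32 off)
	{
		u32 val = readl_relaxed(base + off);	/* unordered MMIO read */

		__iormb();				/* explicit read barrier */
		return val;
	}

	static u32 reg_read_new(void __iomem *base, u32 off)
	{
		return readl(base + off);	/* barrier built into readl() */
	}
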
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.h b/drivers/media/platform/msm/npu/npu_hw_access.h
index d893faa..24da853 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.h
+++ b/drivers/media/platform/msm/npu/npu_hw_access.h
@@ -56,9 +56,6 @@
 uint32_t npu_apss_shared_reg_read(struct npu_device *npu_dev, uint32_t off);
 void npu_apss_shared_reg_write(struct npu_device *npu_dev, uint32_t off,
 	uint32_t val);
-uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t off);
-void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off,
-	uint32_t val);
 void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
 	uint32_t size);
 int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index d1b766e..3c716da 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -88,20 +86,12 @@
 		goto enable_sys_cache_fail;
 	}
 
-	/* Boot the NPU subsystem */
-	host_ctx->subsystem_handle = subsystem_get_local("npu");
-	if (IS_ERR(host_ctx->subsystem_handle)) {
-		pr_err("pil load npu fw failed\n");
-		ret = -ENODEV;
-		goto subsystem_get_fail;
-	}
-
 	/* Clear control/status registers */
 	REGW(npu_dev, REG_NPU_FW_CTRL_STATUS, 0x0);
 	REGW(npu_dev, REG_NPU_HOST_CTRL_VALUE, 0x0);
 	REGW(npu_dev, REG_FW_TO_HOST_EVENT, 0x0);
 
-	pr_debug("fw_dbg_mode %x\n", host_ctx->fw_dbg_mode);
+	NPU_DBG("fw_dbg_mode %x\n", host_ctx->fw_dbg_mode);
 	reg_val = 0;
 	if (host_ctx->fw_dbg_mode & FW_DBG_MODE_PAUSE)
 		reg_val |= HOST_CTRL_STATUS_FW_PAUSE_VAL;
@@ -109,33 +99,33 @@
 	if (host_ctx->fw_dbg_mode & FW_DBG_DISABLE_WDOG)
 		reg_val |= HOST_CTRL_STATUS_DISABLE_WDOG_VAL;
 
+	/* Enable clock gating only if the HW access platform allows it */
+	if (npu_hw_clk_gating_enabled())
+		reg_val |= HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_VAL;
+
 	REGW(npu_dev, REG_NPU_HOST_CTRL_STATUS, reg_val);
 	/* Read back to flush all registers for fw to read */
 	REGR(npu_dev, REG_NPU_HOST_CTRL_STATUS);
 
+	/* Initialize the host side IPC before fw boots up */
+	npu_host_ipc_pre_init(npu_dev);
+
+	/* Boot the NPU subsystem */
+	host_ctx->subsystem_handle = subsystem_get_local("npu");
+	if (IS_ERR(host_ctx->subsystem_handle)) {
+		NPU_ERR("pil load npu fw failed\n");
+		ret = -ENODEV;
+		goto subsystem_get_fail;
+	}
+
 	/* Post PIL clocks */
 	if (npu_enable_post_pil_clocks(npu_dev)) {
 		ret = -EPERM;
 		goto enable_post_clk_fail;
 	}
 
-	/*
-	 * Set logging state and clock gating state
-	 * during FW bootup initialization
-	 */
-	reg_val = REGR(npu_dev, REG_NPU_HOST_CTRL_STATUS);
-
-	/* Enable clock gating only if the HW access platform allows it */
-	if (npu_hw_clk_gating_enabled())
-		reg_val |= HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_VAL;
-
-	REGW(npu_dev, REG_NPU_HOST_CTRL_STATUS, reg_val);
-
-	/* Initialize the host side IPC */
-	npu_host_ipc_pre_init(npu_dev);
-
 	/* Keep reading ctrl status until NPU is ready */
-	pr_debug("waiting for status ready from fw\n");
+	NPU_DBG("waiting for status ready from fw\n");
 
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
 		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
@@ -157,11 +147,11 @@
 	reinit_completion(&host_ctx->fw_deinit_done);
 
 	mutex_unlock(&host_ctx->lock);
-	pr_debug("firmware init complete\n");
+	NPU_DBG("firmware init complete\n");
 
 	/* Set logging state */
 	if (!npu_hw_log_enabled()) {
-		pr_debug("fw logging disabled\n");
+		NPU_DBG("fw logging disabled\n");
 		turn_off_fw_logging(npu_dev);
 	}
 
@@ -170,11 +160,12 @@
 wait_fw_ready_fail:
 	npu_disable_post_pil_clocks(npu_dev);
 enable_post_clk_fail:
-	subsystem_put_local(host_ctx->subsystem_handle);
 subsystem_get_fail:
-	npu_disable_sys_cache(npu_dev);
 enable_sys_cache_fail:
+	npu_disable_sys_cache(npu_dev);
 	npu_disable_core_power(npu_dev);
+	if (!IS_ERR(host_ctx->subsystem_handle))
+		subsystem_put_local(host_ctx->subsystem_handle);
 enable_pw_fail:
 	host_ctx->fw_state = FW_DISABLED;
 	mutex_unlock(&host_ctx->lock);
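
The label shuffle above fixes the unwind order now that PIL boot happens after the cache/power setup: the error path can be reached before subsystem_get_local() ever ran, so the handle must be checked before it is put. A reduced sketch of the pattern, with hypothetical helper names standing in for the driver's own (subsystem_get_local/subsystem_put_local are the QC subsystem-restart APIs the patch already uses):

	#include <linux/err.h>

	static int enable_power(void);		/* hypothetical setup helper */
	static void disable_power(void);	/* hypothetical teardown helper */
	static int wait_fw_ready(void);		/* hypothetical readiness poll */

	static int fw_init_sketch(void **handle)
	{
		int ret;

		*handle = ERR_PTR(-ENODEV);	/* "not acquired yet" sentinel */

		ret = enable_power();
		if (ret)
			goto fail;

		*handle = subsystem_get_local("npu");	/* PIL boot moved last */
		if (IS_ERR(*handle)) {
			ret = -ENODEV;
			goto fail;
		}

		ret = wait_fw_ready();
		if (ret)
			goto fail;
		return 0;

	fail:
		disable_power();
		if (!IS_ERR(*handle))		/* only put a handle we obtained */
			subsystem_put_local(*handle);
		return ret;
	}
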
@@ -192,10 +183,10 @@
 	if (!ssr && (host_ctx->fw_ref_cnt > 0))
 		host_ctx->fw_ref_cnt--;
 
-	pr_debug("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt);
+	NPU_DBG("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt);
 
 	if (host_ctx->fw_state != FW_ENABLED) {
-		pr_err("fw is not enabled\n");
+		NPU_ERR("fw is not enabled\n");
 		mutex_unlock(&host_ctx->lock);
 		return;
 	}
@@ -218,17 +209,17 @@
 		ret = npu_host_ipc_send_cmd(npu_dev,
 			IPC_QUEUE_CMD_HIGH_PRIORITY, &cmd_shutdown_pkt);
 
-		pr_debug("NPU_IPC_CMD_SHUTDOWN sent status: %d\n", ret);
+		NPU_DBG("NPU_IPC_CMD_SHUTDOWN sent status: %d\n", ret);
 
 		if (ret) {
-			pr_err("npu_host_ipc_send_cmd failed\n");
+			NPU_ERR("npu_host_ipc_send_cmd failed\n");
 		} else {
 			/* Keep reading ctrl status until NPU shuts down */
-			pr_debug("waiting for shutdown status from fw\n");
+			NPU_DBG("waiting for shutdown status from fw\n");
 			if (wait_for_status_ready(npu_dev,
 				REG_NPU_FW_CTRL_STATUS,
 				FW_CTRL_STATUS_SHUTDOWN_DONE_VAL)) {
-				pr_err("wait for fw shutdown timedout\n");
+				NPU_ERR("wait for fw shutdown timed out\n");
 				ret = -ETIMEDOUT;
 			}
 		}
@@ -236,8 +227,6 @@
 
 	npu_disable_post_pil_clocks(npu_dev);
 	npu_disable_sys_cache(npu_dev);
-	subsystem_put_local(host_ctx->subsystem_handle);
-	host_ctx->fw_state = FW_DISABLED;
 
 	/*
 	 * if fw is still alive, notify dsp before power off
@@ -251,6 +240,9 @@
 
 	npu_disable_core_power(npu_dev);
 
+	subsystem_put_local(host_ctx->subsystem_handle);
+	host_ctx->fw_state = FW_DISABLED;
+
 	if (ssr) {
 		/* mark all existing network to error state */
 		for (i = 0; i < MAX_LOADED_NETWORK; i++) {
@@ -262,7 +254,7 @@
 
 	complete(&host_ctx->fw_deinit_done);
 	mutex_unlock(&host_ctx->lock);
-	pr_debug("firmware deinit complete\n");
+	NPU_DBG("firmware deinit complete\n");
 	npu_notify_aop(npu_dev, false);
 }
 
@@ -277,7 +269,6 @@
 	mutex_init(&host_ctx->lock);
 	atomic_set(&host_ctx->ipc_trans_id, 1);
 
-	host_ctx->sys_cache_disable = true;
 	host_ctx->wq = npu_create_wq(host_ctx, "irq_hdl", host_irq_wq,
 		&host_ctx->irq_work);
 	if (!host_ctx->wq)
@@ -305,7 +296,7 @@
 	struct npu_device *npu_dev = (struct npu_device *)ptr;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
-	pr_debug("NPU irq %d\n", irq);
+	NPU_DBG("NPU irq %d\n", irq);
 	INTERRUPT_ACK(npu_dev, irq);
 
 	/* Check that the event thread currently is running */
@@ -331,7 +322,7 @@
 		return 0;
 
 	if (host_ctx->wdg_irq_sts)
-		pr_info("watchdog irq triggered\n");
+		NPU_INFO("watchdog irq triggered\n");
 
 	fw_deinit(npu_dev, true, force);
 	host_ctx->wdg_irq_sts = 0;
@@ -344,14 +335,14 @@
 		if (network->is_valid && network->cmd_pending &&
 			network->fw_error) {
 			if (network->cmd_async) {
-				pr_debug("async cmd, queue ssr event\n");
+				NPU_DBG("async cmd, queue ssr event\n");
 				kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR;
 				kevt.evt.u.ssr.network_hdl =
 					network->network_hdl;
 				if (npu_queue_event(network->client, &kevt))
-					pr_err("queue npu event failed\n");
+					NPU_ERR("queue npu event failed\n");
 			} else {
-				pr_debug("complete network %llx\n",
+				NPU_DBG("complete network %llx\n",
 					network->id);
 				complete(&network->cmd_done);
 			}
@@ -394,10 +385,10 @@
 	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_CMD_HIGH_PRIORITY,
 		&log_packet);
 
-	pr_debug("NPU_IPC_CMD_CONFIG_LOG sent status: %d\n", ret);
+	NPU_DBG("NPU_IPC_CMD_CONFIG_LOG sent status: %d\n", ret);
 
 	if (ret)
-		pr_err("npu_host_ipc_send_cmd failed\n");
+		NPU_ERR("npu_host_ipc_send_cmd failed\n");
 }
 
 static int wait_for_status_ready(struct npu_device *npu_dev,
@@ -416,12 +407,12 @@
 		msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
 		wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
 		if (wait_cnt >= max_wait_ms) {
-			pr_err("timeout wait for status %x[%x] in reg %x\n",
+			NPU_ERR("timed out waiting for status %x[%x] in reg %x\n",
 				status_bits, ctrl_sts, status_reg);
 			return -EPERM;
 		}
 	}
-	pr_debug("status %x[reg %x] ready received\n", status_bits, status_reg);
+	NPU_DBG("status %x[reg %x] ready received\n", status_bits, status_reg);
 	return 0;
 }
 
@@ -439,25 +430,25 @@
 	int buf_size, rc = 0;
 
 	if (!npu_dev->mbox_aop.chan) {
-		pr_warn("aop mailbox channel is not available\n");
+		NPU_WARN("aop mailbox channel is not available\n");
 		return 0;
 	}
 
 	buf_size = scnprintf(buf, MAX_LEN, "{class: bcm, res: npu_on, val: %d}",
 		on ? 1 : 0);
 	if (buf_size < 0) {
-		pr_err("prepare qmp notify buf failed\n");
+		NPU_ERR("prepare qmp notify buf failed\n");
 		return -EINVAL;
 	}
 
-	pr_debug("send msg %s to aop\n", buf);
+	NPU_DBG("send msg %s to aop\n", buf);
 	memset(&pkt, 0, sizeof(pkt));
 	pkt.size = (buf_size + 3) & ~0x3;
 	pkt.data = buf;
 
 	rc = mbox_send_message(npu_dev->mbox_aop.chan, &pkt);
 	if (rc < 0)
-		pr_err("qmp message send failed, ret=%d\n", rc);
+		NPU_ERR("qmp message send failed, ret=%d\n", rc);
 
 	return rc;
 }
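
One detail in the hunk above worth calling out: "pkt.size = (buf_size + 3) & ~0x3;" rounds the message length up to the next multiple of 4, because the QMP transport moves whole 32-bit words. A stand-alone arithmetic check of that expression:

	#include <stdio.h>

	int main(void)
	{
		int lens[] = { 29, 32, 33 };
		int i;

		for (i = 0; i < 3; i++)
			printf("%d -> %d\n", lens[i], (lens[i] + 3) & ~0x3);
		/* prints: 29 -> 32, 32 -> 32, 33 -> 36 */
		return 0;
	}
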
@@ -498,7 +489,7 @@
 	}
 
 	if (i == MAX_LOADED_NETWORK) {
-		pr_err("No free network\n");
+		NPU_ERR("No free network\n");
 		return NULL;
 	}
 
@@ -534,12 +525,12 @@
 	}
 
 	if ((i == MAX_LOADED_NETWORK) || !network->is_valid) {
-		pr_err("network hdl invalid %d\n", hdl);
+		NPU_ERR("network hdl invalid %d\n", hdl);
 		return NULL;
 	}
 
 	if (client && (client != network->client)) {
-		pr_err("network %lld doesn't belong to this client\n",
+		NPU_ERR("network %lld doesn't belong to this client\n",
 			network->id);
 		return NULL;
 	}
@@ -557,13 +548,13 @@
 
 	if (id < 1 || id > MAX_LOADED_NETWORK ||
 		!ctx->networks[id - 1].is_valid) {
-		pr_err("Invalid network id %d\n", (int32_t)id);
+		NPU_ERR("Invalid network id %d\n", (int32_t)id);
 		return NULL;
 	}
 
 	network = &ctx->networks[id - 1];
 	if (client && (client != network->client)) {
-		pr_err("network %lld doesn't belong to this client\n", id);
+		NPU_ERR("network %lld doesn't belong to this client\n", id);
 		return NULL;
 	}
 
@@ -586,7 +577,7 @@
 			memset(network, 0, sizeof(struct npu_network));
 			ctx->network_num--;
 		} else {
-			pr_warn("network %lld:%d is in use\n", network->id,
+			NPU_WARN("network %lld:%d is in use\n", network->id,
 				atomic_read(&network->ref_cnt));
 		}
 	}
@@ -626,28 +617,28 @@
 		struct ipc_msg_execute_pkt *exe_rsp_pkt =
 			(struct ipc_msg_execute_pkt *)msg;
 
-		pr_debug("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
+		NPU_DBG("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
 			exe_rsp_pkt->header.status);
-		pr_debug("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
-		pr_debug("e2e_IPC_time: %d (in tick count)\n",
+		NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
+		NPU_DBG("e2e_IPC_time: %d (in tick count)\n",
 			exe_rsp_pkt->stats.e2e_ipc_tick_count);
-		pr_debug("aco_load_time: %d (in tick count)\n",
+		NPU_DBG("aco_load_time: %d (in tick count)\n",
 			exe_rsp_pkt->stats.aco_load_tick_count);
-		pr_debug("aco_execute_time: %d (in tick count)\n",
+		NPU_DBG("aco_execute_time: %d (in tick count)\n",
 			exe_rsp_pkt->stats.aco_execution_tick_count);
-		pr_debug("total_num_layers: %d\n",
+		NPU_DBG("total_num_layers: %d\n",
 			exe_rsp_pkt->stats.exe_stats.total_num_layers);
 
 		network = get_network_by_hdl(host_ctx, NULL,
 			exe_rsp_pkt->network_hdl);
 		if (!network) {
-			pr_err("can't find network %x\n",
+			NPU_ERR("can't find network %x\n",
 				exe_rsp_pkt->network_hdl);
 			break;
 		}
 
 		if (network->trans_id != exe_rsp_pkt->header.trans_id) {
-			pr_err("execute_pkt trans_id is not match %d:%d\n",
+			NPU_ERR("execute_pkt trans_id does not match %d:%d\n",
 				network->trans_id,
 				exe_rsp_pkt->header.trans_id);
 			network_put(network);
@@ -660,14 +651,14 @@
 		if (!network->cmd_async) {
 			complete(&network->cmd_done);
 		} else {
-			pr_debug("async cmd, queue event\n");
+			NPU_DBG("async cmd, queue event\n");
 			kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_DONE;
 			kevt.evt.u.exec_done.network_hdl =
 				exe_rsp_pkt->network_hdl;
 			kevt.evt.u.exec_done.exec_result =
 				exe_rsp_pkt->header.status;
 			if (npu_queue_event(network->client, &kevt))
-				pr_err("queue npu event failed\n");
+				NPU_ERR("queue npu event failed\n");
 		}
 		network_put(network);
 
@@ -679,29 +670,29 @@
 			(struct ipc_msg_execute_pkt_v2 *)msg;
 		uint32_t stats_size = 0;
 
-		pr_debug("NPU_IPC_MSG_EXECUTE_V2_DONE status: %d\n",
+		NPU_DBG("NPU_IPC_MSG_EXECUTE_V2_DONE status: %d\n",
 			exe_rsp_pkt->header.status);
-		pr_debug("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
+		NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
 
 		network = get_network_by_hdl(host_ctx, NULL,
 			exe_rsp_pkt->network_hdl);
 		if (!network) {
-			pr_err("can't find network %x\n",
+			NPU_ERR("can't find network %x\n",
 				exe_rsp_pkt->network_hdl);
 			break;
 		}
 
 		if (network->trans_id != exe_rsp_pkt->header.trans_id) {
-			pr_err("execute_pkt_v2 trans_id is not match %d:%d\n",
+			NPU_ERR("execute_pkt_v2 trans_id does not match %d:%d\n",
 				network->trans_id,
 				exe_rsp_pkt->header.trans_id);
 			network_put(network);
 			break;
 		}
 
-		pr_debug("network id : %lld\n", network->id);
+		NPU_DBG("network id : %lld\n", network->id);
 		stats_size = exe_rsp_pkt->header.size - sizeof(*exe_rsp_pkt);
-		pr_debug("stats_size %d:%d\n", exe_rsp_pkt->header.size,
+		NPU_DBG("stats_size %d:%d\n", exe_rsp_pkt->header.size,
 			stats_size);
 		stats_size = stats_size < network->stats_buf_size ?
 			stats_size : network->stats_buf_size;
@@ -714,7 +705,7 @@
 		network->cmd_ret_status = exe_rsp_pkt->header.status;
 
 		if (network->cmd_async) {
-			pr_debug("async cmd, queue event\n");
+			NPU_DBG("async cmd, queue event\n");
 			kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_V2_DONE;
 			kevt.evt.u.exec_v2_done.network_hdl =
 				exe_rsp_pkt->network_hdl;
@@ -724,7 +715,7 @@
 			kevt.reserved[0] = (uint64_t)network->stats_buf;
 			kevt.reserved[1] = (uint64_t)network->stats_buf_u;
 			if (npu_queue_event(network->client, &kevt))
-				pr_err("queue npu event failed\n");
+				NPU_ERR("queue npu event failed\n");
 		} else {
 			complete(&network->cmd_done);
 		}
@@ -737,7 +728,7 @@
 		struct ipc_msg_load_pkt *load_rsp_pkt =
 			(struct ipc_msg_load_pkt *)msg;
 
-		pr_debug("NPU_IPC_MSG_LOAD_DONE status: %d, trans_id: %d\n",
+		NPU_DBG("NPU_IPC_MSG_LOAD_DONE status: %d, trans_id: %d\n",
 			load_rsp_pkt->header.status,
 			load_rsp_pkt->header.trans_id);
 
@@ -745,16 +736,16 @@
 		 * the upper 16 bits in returned network_hdl is
 		 * the network ID
 		 */
-		pr_debug("network_hdl: %x\n", load_rsp_pkt->network_hdl);
+		NPU_DBG("network_hdl: %x\n", load_rsp_pkt->network_hdl);
 		network_id = load_rsp_pkt->network_hdl >> 16;
 		network = get_network_by_id(host_ctx, NULL, network_id);
 		if (!network) {
-			pr_err("can't find network %d\n", network_id);
+			NPU_ERR("can't find network %d\n", network_id);
 			break;
 		}
 
 		if (network->trans_id != load_rsp_pkt->header.trans_id) {
-			pr_err("load_rsp_pkt trans_id is not match %d:%d\n",
+			NPU_ERR("load_rsp_pkt trans_id does not match %d:%d\n",
 				network->trans_id,
 				load_rsp_pkt->header.trans_id);
 			network_put(network);
@@ -774,20 +765,20 @@
 		struct ipc_msg_unload_pkt *unload_rsp_pkt =
 			(struct ipc_msg_unload_pkt *)msg;
 
-		pr_debug("NPU_IPC_MSG_UNLOAD_DONE status: %d, trans_id: %d\n",
+		NPU_DBG("NPU_IPC_MSG_UNLOAD_DONE status: %d, trans_id: %d\n",
 			unload_rsp_pkt->header.status,
 			unload_rsp_pkt->header.trans_id);
 
 		network = get_network_by_hdl(host_ctx, NULL,
 			unload_rsp_pkt->network_hdl);
 		if (!network) {
-			pr_err("can't find network %x\n",
+			NPU_ERR("can't find network %x\n",
 				unload_rsp_pkt->network_hdl);
 			break;
 		}
 
 		if (network->trans_id != unload_rsp_pkt->header.trans_id) {
-			pr_err("unload_rsp_pkt trans_id is not match %d:%d\n",
+			NPU_ERR("unload_rsp_pkt trans_id does not match %d:%d\n",
 				network->trans_id,
 				unload_rsp_pkt->header.trans_id);
 			network_put(network);
@@ -806,13 +797,13 @@
 		struct ipc_msg_loopback_pkt *lb_rsp_pkt =
 			(struct ipc_msg_loopback_pkt *)msg;
 
-		pr_debug("NPU_IPC_MSG_LOOPBACK_DONE loopbackParams: 0x%x\n",
+		NPU_DBG("NPU_IPC_MSG_LOOPBACK_DONE loopbackParams: 0x%x\n",
 			lb_rsp_pkt->loopbackParams);
 		complete_all(&host_ctx->loopback_done);
 		break;
 	}
 	default:
-		pr_err("Not supported apps response received %d\n",
+		NPU_ERR("Unsupported apps response received %d\n",
 			msg_id);
 		break;
 	}
@@ -829,12 +820,12 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (host_ctx->fw_state == FW_DISABLED) {
-		pr_warn("handle npu session msg when FW is disabled\n");
+		NPU_WARN("handle npu session msg when FW is disabled\n");
 		goto skip_read_msg;
 	}
 
 	while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_APPS_RSP, msg) == 0) {
-		pr_debug("received from msg queue\n");
+		NPU_DBG("received from msg queue\n");
 		app_msg_proc(host_ctx, msg);
 	}
 
@@ -859,7 +850,7 @@
 		npu_process_log_message(npu_dev, log_msg, size);
 		break;
 	default:
-		pr_err("unsupported log response received %d\n", msg_id);
+		NPU_ERR("unsupported log response received %d\n", msg_id);
 		break;
 	}
 }
@@ -876,12 +867,12 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (host_ctx->fw_state == FW_DISABLED) {
-		pr_warn("handle npu session msg when FW is disabled\n");
+		NPU_WARN("handle npu session msg when FW is disabled\n");
 		goto skip_read_msg;
 	}
 
 	while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_LOG, msg) == 0) {
-		pr_debug("received from log queue\n");
+		NPU_DBG("received from log queue\n");
 		log_msg_proc(npu_dev, msg);
 	}
 
@@ -922,7 +913,7 @@
 	if (host_ctx->fw_error && (host_ctx->fw_state == FW_ENABLED) &&
 		!wait_for_completion_interruptible_timeout(
 		&host_ctx->fw_deinit_done, NW_CMD_TIMEOUT))
-		pr_warn("npu: wait for fw_deinit_done time out\n");
+		NPU_WARN("wait for fw_deinit_done timed out\n");
 
 	npu_mem_unmap(client, unmap_ioctl->buf_ion_hdl,
 		unmap_ioctl->npu_phys_addr);
@@ -937,13 +928,13 @@
 
 	if (network->fw_error || host_ctx->fw_error ||
 		(host_ctx->fw_state == FW_DISABLED)) {
-		pr_err("fw is in error state or disabled, can't send network cmd\n");
+		NPU_ERR("fw is in error state or disabled\n");
 		ret = -EIO;
 	} else if (network->cmd_pending) {
-		pr_err("Another cmd is pending\n");
+		NPU_ERR("Another cmd is pending\n");
 		ret = -EBUSY;
 	} else {
-		pr_debug("Send cmd %d network id %lld\n",
+		NPU_DBG("Send cmd %d network id %lld\n",
 			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
 			network->id);
 		network->cmd_async = async;
@@ -967,10 +958,10 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (host_ctx->fw_error || (host_ctx->fw_state == FW_DISABLED)) {
-		pr_err("fw is in error state or disabled, can't send misc cmd\n");
+		NPU_ERR("fw is in error state or disabled\n");
 		ret = -EIO;
 	} else {
-		pr_debug("Send cmd %d\n",
+		NPU_DBG("Send cmd %d\n",
 			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type);
 		ret = npu_host_ipc_send_cmd(npu_dev, q_idx, cmd_ptr);
 	}
@@ -992,7 +983,7 @@
 	param->variable_size_in_bits =
 		layer_info->patch_info.variable_size_in_bits;
 
-	pr_debug("copy_patch_data: %x %d %x %x %x %x\n",
+	NPU_DBG("copy_patch_data: %x %d %x %x %x %x\n",
 		param->value,
 		param->chunk_id,
 		param->loc_offset,
@@ -1011,7 +1002,7 @@
 		patch_info->instruction_size_in_bytes;
 	param->shift_value_in_bits = patch_info->shift_value_in_bits;
 	param->variable_size_in_bits = patch_info->variable_size_in_bits;
-	pr_debug("copy_patch_data_v2: %x %d %x %x %x %x\n",
+	NPU_DBG("copy_patch_data_v2: %x %d %x %x %x %x\n",
 		param->value,
 		param->chunk_id,
 		param->loc_offset,
@@ -1035,7 +1026,7 @@
 			max_perf_mode = network->perf_mode;
 		network++;
 	}
-	pr_debug("max perf mode for networks: %d\n", max_perf_mode);
+	NPU_DBG("max perf mode for networks: %d\n", max_perf_mode);
 
 	return max_perf_mode;
 }
@@ -1079,7 +1070,7 @@
 
 	ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
 	if (ret) {
-		pr_err("network load failed due to power level set\n");
+		NPU_ERR("network load failed due to power level set\n");
 		goto error_free_network;
 	}
 
@@ -1098,7 +1089,7 @@
 	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, &load_packet, false);
 	if (ret) {
-		pr_err("NPU_IPC_CMD_LOAD sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_LOAD send failed: %d\n", ret);
 		goto error_free_network;
 	}
 
@@ -1111,17 +1102,17 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (!ret) {
-		pr_err_ratelimited("NPU_IPC_CMD_LOAD time out\n");
+		NPU_ERR("NPU_IPC_CMD_LOAD timed out\n");
 		ret = -ETIMEDOUT;
 		goto error_free_network;
 	} else if (ret < 0) {
-		pr_err("NPU_IPC_CMD_LOAD is interrupted by signal\n");
+		NPU_ERR("NPU_IPC_CMD_LOAD is interrupted by signal\n");
 		goto error_free_network;
 	}
 
 	if (network->fw_error) {
 		ret = -EIO;
-		pr_err("fw is in error state during load network\n");
+		NPU_ERR("fw is in error state during load network\n");
 		goto error_free_network;
 	}
 
@@ -1193,17 +1184,17 @@
 
 	/* verify mapped physical address */
 	if (!npu_mem_verify_addr(client, network->phy_add)) {
-		pr_err("Invalid network address %llx\n", network->phy_add);
+		NPU_ERR("Invalid network address %llx\n", network->phy_add);
 		ret = -EINVAL;
 		goto error_free_network;
 	}
 
-	pr_debug("network address %llx\n", network->phy_add);
+	NPU_DBG("network address %llx\n", network->phy_add);
 	networks_perf_mode = find_networks_perf_mode(host_ctx);
 
 	ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
 	if (ret) {
-		pr_err("network load failed due to power level set\n");
+		NPU_ERR("network load failed due to power level set\n");
 		goto error_free_network;
 	}
 
@@ -1224,7 +1215,7 @@
 	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, load_packet, false);
 	if (ret) {
-		pr_debug("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
+		NPU_DBG("NPU_IPC_CMD_LOAD_V2 send failed: %d\n", ret);
 		goto error_free_network;
 	}
 
@@ -1238,17 +1229,17 @@
 	mutex_lock(&host_ctx->lock);
 
 	if (!ret) {
-		pr_err_ratelimited("npu: NPU_IPC_CMD_LOAD time out\n");
+		NPU_ERR("NPU_IPC_CMD_LOAD_V2 timed out\n");
 		ret = -ETIMEDOUT;
 		goto error_free_network;
 	} else if (ret < 0) {
-		pr_err("NPU_IPC_CMD_LOAD_V2 is interrupted by signal\n");
+		NPU_ERR("NPU_IPC_CMD_LOAD_V2 is interrupted by signal\n");
 		goto error_free_network;
 	}
 
 	if (network->fw_error) {
 		ret = -EIO;
-		pr_err("fw is in error state during load_v2 network\n");
+		NPU_ERR("fw is in error state during load_v2 network\n");
 		goto error_free_network;
 	}
 
@@ -1294,18 +1285,18 @@
 	}
 
 	if (!network->is_active) {
-		pr_err("network is not active\n");
+		NPU_ERR("network is not active\n");
 		network_put(network);
 		mutex_unlock(&host_ctx->lock);
 		return -EINVAL;
 	}
 
 	if (network->fw_error) {
-		pr_err("fw in error state, skip unload network in fw\n");
+		NPU_ERR("fw in error state, skip unload network in fw\n");
 		goto free_network;
 	}
 
-	pr_debug("Unload network %lld\n", network->id);
+	NPU_DBG("Unload network %lld\n", network->id);
 	/* prepare IPC packet for UNLOAD */
 	unload_packet.header.cmd_type = NPU_IPC_CMD_UNLOAD;
 	unload_packet.header.size = sizeof(struct ipc_cmd_unload_pkt);
@@ -1319,13 +1310,13 @@
 	ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false);
 
 	if (ret) {
-		pr_err("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_UNLOAD send failed: %d\n", ret);
 		/*
 		 * If another command is running on this network,
 		 * don't free_network now.
 		 */
 		if (ret == -EBUSY) {
-			pr_err("Network is running, retry later\n");
+			NPU_ERR("Network is running, retry later\n");
 			network_put(network);
 			mutex_unlock(&host_ctx->lock);
 			return ret;
@@ -1343,22 +1334,22 @@
 	mutex_lock(&host_ctx->lock);
 
 	if (!ret) {
-		pr_err_ratelimited("npu: NPU_IPC_CMD_UNLOAD time out\n");
+		NPU_ERR("NPU_IPC_CMD_UNLOAD timed out\n");
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_network;
 	} else if (ret < 0) {
-		pr_err("Wait for unload done interrupted by signal\n");
+		NPU_ERR("Wait for unload done interrupted by signal\n");
 		network->cmd_pending = false;
 		goto free_network;
 	}
 
 	if (network->fw_error) {
 		ret = -EIO;
-		pr_err("fw is in error state during unload network\n");
+		NPU_ERR("fw is in error state during unload network\n");
 	} else {
 		ret = network->cmd_ret_status;
-		pr_debug("unload network status %d\n", ret);
+		NPU_DBG("unload network status %d\n", ret);
 	}
 
 free_network:
@@ -1373,7 +1364,7 @@
 	if (networks_perf_mode > 0) {
 		ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
 		if (ret)
-			pr_warn("restore uc power level failed\n");
+			NPU_WARN("restore uc power level failed\n");
 	}
 	mutex_unlock(&host_ctx->lock);
 	fw_deinit(npu_dev, false, true);
@@ -1402,23 +1393,23 @@
 	}
 
 	if (!network->is_active) {
-		pr_err("network is not active\n");
+		NPU_ERR("network is not active\n");
 		ret = -EINVAL;
 		goto exec_done;
 	}
 
 	if (network->fw_error) {
-		pr_err("fw is in error state\n");
+		NPU_ERR("fw is in error state\n");
 		ret = -EIO;
 		goto exec_done;
 	}
 
-	pr_debug("execute network %lld\n", network->id);
+	NPU_DBG("execute network %lld\n", network->id);
 	memset(&exec_packet, 0, sizeof(exec_packet));
 	if (exec_ioctl->patching_required) {
 		if ((exec_ioctl->input_layer_num != 1) ||
 			(exec_ioctl->output_layer_num != 1)) {
-			pr_err("Invalid input/output layer num\n");
+			NPU_ERR("Invalid input/output layer num\n");
 			ret = -EINVAL;
 			goto exec_done;
 		}
@@ -1428,7 +1419,7 @@
 		/* verify mapped physical address */
 		if (!npu_mem_verify_addr(client, input_off) ||
 			!npu_mem_verify_addr(client, output_off)) {
-			pr_err("Invalid patch buf address\n");
+			NPU_ERR("Invalid patch buf address\n");
 			ret = -EINVAL;
 			goto exec_done;
 		}
@@ -1454,12 +1445,12 @@
 	ret = npu_send_network_cmd(npu_dev, network, &exec_packet, async_ioctl);
 
 	if (ret) {
-		pr_err("NPU_IPC_CMD_EXECUTE sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_EXECUTE send failed: %d\n", ret);
 		goto exec_done;
 	}
 
 	if (async_ioctl) {
-		pr_debug("Async ioctl, return now\n");
+		NPU_DBG("Async ioctl, return now\n");
 		goto exec_done;
 	}
 
@@ -1472,24 +1463,24 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (!ret) {
-		pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE time out\n");
+		NPU_ERR("NPU_IPC_CMD_EXECUTE timed out\n");
 		/* dump debug stats */
 		npu_dump_debug_timeout_stats(npu_dev);
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto exec_done;
 	} else if (ret == -ERESTARTSYS) {
-		pr_err("Wait for execution done interrupted by signal\n");
+		NPU_ERR("Wait for execution done interrupted by signal\n");
 		network->cmd_pending = false;
 		goto exec_done;
 	}
 
 	if (network->fw_error) {
 		ret = -EIO;
-		pr_err("fw is in error state during execute network\n");
+		NPU_ERR("fw is in error state during execute network\n");
 	} else {
 		ret = network->cmd_ret_status;
-		pr_debug("execution status %d\n", ret);
+		NPU_DBG("execution status %d\n", ret);
 	}
 
 exec_done:
@@ -1501,7 +1492,7 @@
 	 * as error in order to force npu fw to stop execution
 	 */
 	if ((ret == -ETIMEDOUT) || (ret == -ERESTARTSYS)) {
-		pr_err("Error handling after execution failure\n");
+		NPU_ERR("Error handling after execution failure\n");
 		host_error_hdlr(npu_dev, true);
 	}
 
@@ -1531,18 +1522,18 @@
 	}
 
 	if (!network->is_active) {
-		pr_err("network is not active\n");
+		NPU_ERR("network is not active\n");
 		ret = -EINVAL;
 		goto exec_v2_done;
 	}
 
 	if (network->fw_error) {
-		pr_err("fw is in error state\n");
+		NPU_ERR("fw is in error state\n");
 		ret = -EIO;
 		goto exec_v2_done;
 	}
 
-	pr_debug("execute_v2 network %lld\n", network->id);
+	NPU_DBG("execute_v2 network %lld\n", network->id);
 	num_patch_params = exec_ioctl->patch_buf_info_num;
 	pkt_size = num_patch_params * sizeof(struct npu_patch_params_v2) +
 		sizeof(*exec_packet);
@@ -1555,17 +1546,17 @@
 
 	for (i = 0; i < num_patch_params; i++) {
 		exec_packet->patch_params[i].id = patch_buf_info[i].buf_id;
-		pr_debug("%d: patch_id: %x\n", i,
+		NPU_DBG("%d: patch_id: %x\n", i,
 			exec_packet->patch_params[i].id);
 		exec_packet->patch_params[i].value =
 			patch_buf_info[i].buf_phys_addr;
-		pr_debug("%d: patch value: %x\n", i,
+		NPU_DBG("%d: patch value: %x\n", i,
 			exec_packet->patch_params[i].value);
 
 		/* verify mapped physical address */
 		if (!npu_mem_verify_addr(client,
 			patch_buf_info[i].buf_phys_addr)) {
-			pr_err("Invalid patch value\n");
+			NPU_ERR("Invalid patch value\n");
 			ret = -EINVAL;
 			goto free_exec_packet;
 		}
@@ -1583,7 +1574,7 @@
 	network->stats_buf_u = (void __user *)exec_ioctl->stats_buf_addr;
 	network->stats_buf_size = exec_ioctl->stats_buf_size;
 
-	pr_debug("Execute_v2 flags %x stats_buf_size %d\n",
+	NPU_DBG("Execute_v2 flags %x stats_buf_size %d\n",
 		exec_packet->header.flags, exec_ioctl->stats_buf_size);
 
 	/* Send it on the high priority queue */
@@ -1591,12 +1582,12 @@
 	ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl);
 
 	if (ret) {
-		pr_err("NPU_IPC_CMD_EXECUTE_V2 sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_EXECUTE_V2 send failed: %d\n", ret);
 		goto free_exec_packet;
 	}
 
 	if (async_ioctl) {
-		pr_debug("Async ioctl, return now\n");
+		NPU_DBG("Async ioctl, return now\n");
 		goto free_exec_packet;
 	}
 
@@ -1609,21 +1600,21 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (!ret) {
-		pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE_V2 time out\n");
+		NPU_ERR("NPU_IPC_CMD_EXECUTE_V2 timed out\n");
 		/* dump debug stats */
 		npu_dump_debug_timeout_stats(npu_dev);
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_exec_packet;
 	} else if (ret == -ERESTARTSYS) {
-		pr_err("Wait for execution_v2 done interrupted by signal\n");
+		NPU_ERR("Wait for execution_v2 done interrupted by signal\n");
 		network->cmd_pending = false;
 		goto free_exec_packet;
 	}
 
 	if (network->fw_error) {
 		ret = -EIO;
-		pr_err("fw is in error state during execute_v2 network\n");
+		NPU_ERR("fw is in error state during execute_v2 network\n");
 		goto free_exec_packet;
 	}
 
@@ -1634,11 +1625,11 @@
 			(void __user *)exec_ioctl->stats_buf_addr,
 			network->stats_buf,
 			exec_ioctl->stats_buf_size)) {
-			pr_err("copy stats to user failed\n");
+			NPU_ERR("copy stats to user failed\n");
 			exec_ioctl->stats_buf_size = 0;
 		}
 	} else {
-		pr_err("execution failed %d\n", ret);
+		NPU_ERR("execution failed %d\n", ret);
 	}
 
 free_exec_packet:
@@ -1652,7 +1643,7 @@
 	 * as error in order to force npu fw to stop execution
 	 */
 	if ((ret == -ETIMEDOUT) || (ret == -ERESTARTSYS)) {
-		pr_err("Error handling after execution failure\n");
+		NPU_ERR("Error handling after execution failure\n");
 		host_error_hdlr(npu_dev, true);
 	}
 
@@ -1680,7 +1671,7 @@
 	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, &loopback_packet);
 
 	if (ret) {
-		pr_err("NPU_IPC_CMD_LOOPBACK sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_LOOPBACK send failed: %d\n", ret);
 		goto loopback_exit;
 	}
 
@@ -1690,10 +1681,10 @@
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
 	if (!ret) {
-		pr_err_ratelimited("npu: NPU_IPC_CMD_LOOPBACK time out\n");
+		NPU_ERR("NPU_IPC_CMD_LOOPBACK timed out\n");
 		ret = -ETIMEDOUT;
 	} else if (ret < 0) {
-		pr_err("Wait for loopback done interrupted by signal\n");
+		NPU_ERR("Wait for loopback done interrupted by signal\n");
 	}
 
 loopback_exit:
@@ -1715,7 +1706,7 @@
 	for (i = 0; i < MAX_LOADED_NETWORK; i++) {
 		network = &host_ctx->networks[i];
 		if (network->client == client) {
-			pr_warn("network %d is not unloaded before close\n",
+			NPU_WARN("network %d is not unloaded before close\n",
 				network->network_hdl);
 			unload_req.network_hdl = network->network_hdl;
 			npu_host_unload_network(client, &unload_req);
@@ -1726,7 +1717,7 @@
 	while (!list_empty(&client->mapped_buffer_list)) {
 		ion_buf = list_first_entry(&client->mapped_buffer_list,
 			struct npu_ion_buf, list);
-		pr_warn("unmap buffer %x:%llx\n", ion_buf->fd, ion_buf->iova);
+		NPU_WARN("unmap buffer %x:%llx\n", ion_buf->fd, ion_buf->iova);
 		unmap_req.buf_ion_hdl = ion_buf->fd;
 		unmap_req.npu_phys_addr = ion_buf->iova;
 		npu_host_unmap_buf(client, &unmap_req);
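
The close path above drains the client's per-process lists (loaded networks, then mapped buffers) so nothing leaks when user space exits uncleanly. Stripped to its essence, the drain loop looks like the sketch below; list_first_entry() stays safe only because each pass removes the entry it handles (in the driver, npu_host_unmap_buf() performs the unlink):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct buf_entry {		/* stand-in for struct npu_ion_buf */
		struct list_head list;
		int fd;
	};

	static void drain_buffers(struct list_head *head)
	{
		struct buf_entry *buf;

		while (!list_empty(head)) {
			buf = list_first_entry(head, struct buf_entry, list);
			list_del(&buf->list);	/* release resources, then unlink */
			kfree(buf);
		}
	}
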
diff --git a/drivers/media/platform/msm/synx/synx.c b/drivers/media/platform/msm/synx/synx.c
index 80e1a2d..2b05cc9 100644
--- a/drivers/media/platform/msm/synx/synx.c
+++ b/drivers/media/platform/msm/synx/synx.c
@@ -533,7 +533,7 @@
 		return -EINVAL;
 	}
 
-	if (is_valid_type(external_sync.type)) {
+	if (!is_valid_type(external_sync.type)) {
 		pr_err("invalid external sync object\n");
 		return -EINVAL;
 	}
@@ -656,7 +656,7 @@
 
 	pr_debug("Enter %s\n", __func__);
 
-	if (!synx_obj)
+	if (!new_synx_obj)
 		return -EINVAL;
 
 	row = synx_from_key(synx_obj, secure_key);
@@ -1389,6 +1389,9 @@
 	.flush = synx_close,
 	.poll  = synx_poll,
 	.unlocked_ioctl = synx_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = synx_ioctl,
+#endif
 };
 
 #ifdef CONFIG_SPECTRA_CAMERA
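
Two separate fixes land in synx.c above: the is_valid_type() test had its sense inverted (it previously rejected valid external sync types and accepted invalid ones), and a compat_ioctl hook is wired up, presumably because the synx ioctl ABI is identical for 32-bit and 64-bit callers, letting the native handler be reused directly. The wiring pattern, sketched:

	#include <linux/fs.h>

	static long synx_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg);	/* the existing handler */

	static const struct file_operations synx_fops_sketch = {
		.owner          = THIS_MODULE,
		.unlocked_ioctl = synx_ioctl,
	#ifdef CONFIG_COMPAT
		.compat_ioctl   = synx_ioctl,	/* reuse: ABI assumed 32/64-clean */
	#endif
	};
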
diff --git a/drivers/media/platform/msm/synx/synx_util.c b/drivers/media/platform/msm/synx/synx_util.c
index c72fac9..829298d 100644
--- a/drivers/media/platform/msm/synx/synx_util.c
+++ b/drivers/media/platform/msm/synx/synx_util.c
@@ -516,7 +516,9 @@
 	if (!row)
 		return -EINVAL;
 
-	get_random_bytes(&row->secure_key, sizeof(row->secure_key));
+	if (!row->secure_key)
+		get_random_bytes(&row->secure_key, sizeof(row->secure_key));
+
 	return row->secure_key;
 }
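
The guard added above makes key retrieval idempotent: previously every call regenerated the key, so a second query invalidated whatever the first caller was handed. A minimal model of the fixed behavior (it implicitly assumes an all-zero key means "not yet generated"; a randomly drawn zero would simply regenerate once, which is harmless here):

	#include <linux/random.h>
	#include <linux/types.h>

	static u32 get_secure_key(u32 *key)
	{
		if (!*key)				/* generate only once */
			get_random_bytes(key, sizeof(*key));
		return *key;				/* stable across calls */
	}
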
 
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index f193637..aded2b3 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -782,7 +782,7 @@
 	pkt->num_properties = 1;
 	pkt->size += size;
 	pkt->rg_property_data[0] = ptype;
-	if (size)
+	if (size && pdata)
 		memcpy(&pkt->rg_property_data[1], pdata, size);
 
 	dprintk(VIDC_DBG, "Setting HAL Property = 0x%x\n", ptype);
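
The "size && pdata" guard above avoids calling memcpy() with a NULL source when a property reports a payload size but the caller supplied no data, which would be undefined behavior. The generic form of the guard:

	#include <stddef.h>
	#include <string.h>

	static void copy_payload(void *dst, const void *src, size_t size)
	{
		if (size && src)	/* both length and source must be valid */
			memcpy(dst, src, size);
	}
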
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 90932c3..fd728af 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -15,11 +15,6 @@
 #include "msm_vidc_debug.h"
 #include "vidc_hfi.h"
 
-static enum vidc_status hfi_parse_init_done_properties(
-		struct msm_vidc_capability *capability,
-		u32 num_sessions, u8 *data_ptr, u32 num_properties,
-		u32 rem_bytes);
-
 static enum vidc_status hfi_map_err_status(u32 hfi_err)
 {
 	enum vidc_status vidc_err;
@@ -469,647 +464,6 @@
 	return 0;
 }
 
-enum hal_capability get_hal_cap_type(u32 capability_type)
-{
-	enum hal_capability hal_cap = 0;
-
-	switch (capability_type) {
-	case HFI_CAPABILITY_FRAME_WIDTH:
-		hal_cap = HAL_CAPABILITY_FRAME_WIDTH;
-		break;
-	case HFI_CAPABILITY_FRAME_HEIGHT:
-		hal_cap = HAL_CAPABILITY_FRAME_HEIGHT;
-		break;
-	case HFI_CAPABILITY_MBS_PER_FRAME:
-		hal_cap = HAL_CAPABILITY_MBS_PER_FRAME;
-		break;
-	case HFI_CAPABILITY_MBS_PER_SECOND:
-		hal_cap = HAL_CAPABILITY_MBS_PER_SECOND;
-		break;
-	case HFI_CAPABILITY_FRAMERATE:
-		hal_cap = HAL_CAPABILITY_FRAMERATE;
-		break;
-	case HFI_CAPABILITY_SCALE_X:
-		hal_cap = HAL_CAPABILITY_SCALE_X;
-		break;
-	case HFI_CAPABILITY_SCALE_Y:
-		hal_cap = HAL_CAPABILITY_SCALE_Y;
-		break;
-	case HFI_CAPABILITY_BITRATE:
-		hal_cap = HAL_CAPABILITY_BITRATE;
-		break;
-	case HFI_CAPABILITY_BFRAME:
-		hal_cap = HAL_CAPABILITY_BFRAME;
-		break;
-	case HFI_CAPABILITY_PEAKBITRATE:
-		hal_cap = HAL_CAPABILITY_PEAKBITRATE;
-		break;
-	case HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS:
-		hal_cap = HAL_CAPABILITY_HIER_P_NUM_ENH_LAYERS;
-		break;
-	case HFI_CAPABILITY_ENC_LTR_COUNT:
-		hal_cap = HAL_CAPABILITY_ENC_LTR_COUNT;
-		break;
-	case HFI_CAPABILITY_CP_OUTPUT2_THRESH:
-		hal_cap = HAL_CAPABILITY_SECURE_OUTPUT2_THRESHOLD;
-		break;
-	case HFI_CAPABILITY_HIER_B_NUM_ENH_LAYERS:
-		hal_cap = HAL_CAPABILITY_HIER_B_NUM_ENH_LAYERS;
-		break;
-	case HFI_CAPABILITY_LCU_SIZE:
-		hal_cap = HAL_CAPABILITY_LCU_SIZE;
-		break;
-	case HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS:
-		hal_cap = HAL_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS;
-		break;
-	case HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE:
-		hal_cap = HAL_CAPABILITY_MBS_PER_SECOND_POWER_SAVE;
-		break;
-	case HFI_CAPABILITY_EXTRADATA:
-		hal_cap = HAL_CAPABILITY_EXTRADATA;
-		break;
-	case HFI_CAPABILITY_PROFILE:
-		hal_cap = HAL_CAPABILITY_PROFILE;
-		break;
-	case HFI_CAPABILITY_LEVEL:
-		hal_cap = HAL_CAPABILITY_LEVEL;
-		break;
-	case HFI_CAPABILITY_I_FRAME_QP:
-		hal_cap = HAL_CAPABILITY_I_FRAME_QP;
-		break;
-	case HFI_CAPABILITY_P_FRAME_QP:
-		hal_cap = HAL_CAPABILITY_P_FRAME_QP;
-		break;
-	case HFI_CAPABILITY_B_FRAME_QP:
-		hal_cap = HAL_CAPABILITY_B_FRAME_QP;
-		break;
-	case HFI_CAPABILITY_RATE_CONTROL_MODES:
-		hal_cap = HAL_CAPABILITY_RATE_CONTROL_MODES;
-		break;
-	case HFI_CAPABILITY_BLUR_WIDTH:
-		hal_cap = HAL_CAPABILITY_BLUR_WIDTH;
-		break;
-	case HFI_CAPABILITY_BLUR_HEIGHT:
-		hal_cap = HAL_CAPABILITY_BLUR_HEIGHT;
-		break;
-	case HFI_CAPABILITY_SLICE_BYTE:
-		hal_cap = HAL_CAPABILITY_SLICE_BYTE;
-		break;
-	case HFI_CAPABILITY_SLICE_MB:
-		hal_cap = HAL_CAPABILITY_SLICE_MB;
-		break;
-	case HFI_CAPABILITY_SECURE:
-		hal_cap = HAL_CAPABILITY_SECURE;
-		break;
-	case HFI_CAPABILITY_MAX_NUM_B_FRAMES:
-		hal_cap = HAL_CAPABILITY_MAX_NUM_B_FRAMES;
-		break;
-	case HFI_CAPABILITY_MAX_VIDEOCORES:
-		hal_cap = HAL_CAPABILITY_MAX_VIDEOCORES;
-		break;
-	case HFI_CAPABILITY_MAX_WORKMODES:
-		hal_cap = HAL_CAPABILITY_MAX_WORKMODES;
-		break;
-	case HFI_CAPABILITY_UBWC_CR_STATS:
-		hal_cap = HAL_CAPABILITY_UBWC_CR_STATS;
-		break;
-	default:
-		dprintk(VIDC_DBG, "%s: unknown capablity %#x\n",
-			__func__, capability_type);
-		break;
-	}
-
-	return hal_cap;
-}
-
-static inline void copy_cap_prop(
-		struct hfi_capability_supported *in,
-		struct msm_vidc_capability *capability)
-{
-	struct hal_capability_supported *out = NULL;
-
-	if (!in || !capability) {
-		dprintk(VIDC_ERR, "%s Invalid input parameters\n",
-			__func__);
-		return;
-	}
-
-	switch (in->capability_type) {
-	case HFI_CAPABILITY_FRAME_WIDTH:
-		out = &capability->width;
-		break;
-	case HFI_CAPABILITY_FRAME_HEIGHT:
-		out = &capability->height;
-		break;
-	case HFI_CAPABILITY_MBS_PER_FRAME:
-		out = &capability->mbs_per_frame;
-		break;
-	case HFI_CAPABILITY_MBS_PER_SECOND:
-		out = &capability->mbs_per_sec;
-		break;
-	case HFI_CAPABILITY_FRAMERATE:
-		out = &capability->frame_rate;
-		break;
-	case HFI_CAPABILITY_SCALE_X:
-		out = &capability->scale_x;
-		break;
-	case HFI_CAPABILITY_SCALE_Y:
-		out = &capability->scale_y;
-		break;
-	case HFI_CAPABILITY_BITRATE:
-		out = &capability->bitrate;
-		break;
-	case HFI_CAPABILITY_BFRAME:
-		out = &capability->bframe;
-		break;
-	case HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS:
-		out = &capability->hier_p;
-		break;
-	case HFI_CAPABILITY_ENC_LTR_COUNT:
-		out = &capability->ltr_count;
-		break;
-	case HFI_CAPABILITY_CP_OUTPUT2_THRESH:
-		out = &capability->secure_output2_threshold;
-		break;
-	case HFI_CAPABILITY_HIER_B_NUM_ENH_LAYERS:
-		out = &capability->hier_b;
-		break;
-	case HFI_CAPABILITY_LCU_SIZE:
-		out = &capability->lcu_size;
-		break;
-	case HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS:
-		out = &capability->hier_p_hybrid;
-		break;
-	case HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE:
-		out = &capability->mbs_per_sec_power_save;
-		break;
-	case HFI_CAPABILITY_EXTRADATA:
-		out = &capability->extradata;
-		break;
-	case HFI_CAPABILITY_PROFILE:
-		out = &capability->profile;
-		break;
-	case HFI_CAPABILITY_LEVEL:
-		out = &capability->level;
-		break;
-	case HFI_CAPABILITY_I_FRAME_QP:
-		out = &capability->i_qp;
-		break;
-	case HFI_CAPABILITY_P_FRAME_QP:
-		out = &capability->p_qp;
-		break;
-	case HFI_CAPABILITY_B_FRAME_QP:
-		out = &capability->b_qp;
-		break;
-	case HFI_CAPABILITY_RATE_CONTROL_MODES:
-		out = &capability->rc_modes;
-		break;
-	case HFI_CAPABILITY_BLUR_WIDTH:
-		out = &capability->blur_width;
-		break;
-	case HFI_CAPABILITY_BLUR_HEIGHT:
-		out = &capability->blur_height;
-		break;
-	case HFI_CAPABILITY_SLICE_BYTE:
-		out = &capability->slice_bytes;
-		break;
-	case HFI_CAPABILITY_SLICE_MB:
-		out = &capability->slice_mbs;
-		break;
-	case HFI_CAPABILITY_SECURE:
-		out = &capability->secure;
-		break;
-	case HFI_CAPABILITY_MAX_NUM_B_FRAMES:
-		out = &capability->max_num_b_frames;
-		break;
-	case HFI_CAPABILITY_MAX_VIDEOCORES:
-		out = &capability->max_video_cores;
-		break;
-	case HFI_CAPABILITY_MAX_WORKMODES:
-		out = &capability->max_work_modes;
-		break;
-	case HFI_CAPABILITY_UBWC_CR_STATS:
-		out = &capability->ubwc_cr_stats;
-		break;
-	default:
-		dprintk(VIDC_DBG, "%s: unknown capablity %#x\n",
-			__func__, in->capability_type);
-		break;
-	}
-
-	if (out) {
-		out->capability_type = get_hal_cap_type(in->capability_type);
-		out->min = in->min;
-		out->max = in->max;
-		out->step_size = in->step_size;
-	}
-}
-
-static int hfi_fill_codec_info(u8 *data_ptr,
-		struct vidc_hal_sys_init_done *sys_init_done)
-{
-	u32 i;
-	u32 codecs = 0, codec_count = 0, size = 0;
-	struct msm_vidc_capability *capability;
-	u32 prop_id = *((u32 *)data_ptr);
-	u8 *orig_data_ptr = data_ptr;
-
-	if (prop_id ==  HFI_PROPERTY_PARAM_CODEC_SUPPORTED) {
-		struct hfi_codec_supported *prop;
-
-		data_ptr = data_ptr + sizeof(u32);
-		prop = (struct hfi_codec_supported *) data_ptr;
-		sys_init_done->dec_codec_supported =
-			prop->decoder_codec_supported;
-		sys_init_done->enc_codec_supported =
-			prop->encoder_codec_supported;
-		size = sizeof(struct hfi_codec_supported) + sizeof(u32);
-	} else {
-		dprintk(VIDC_WARN,
-			"%s: prop_id %#x, expected codec_supported property\n",
-			__func__, prop_id);
-	}
-
-	codecs = sys_init_done->dec_codec_supported;
-	for (i = 0; i < 8 * sizeof(codecs); i++) {
-		if ((1 << i) & codecs) {
-			capability =
-				&sys_init_done->capabilities[codec_count++];
-			capability->codec =
-				vidc_get_hal_codec((1 << i) & codecs);
-			capability->domain =
-				vidc_get_hal_domain(HFI_VIDEO_DOMAIN_DECODER);
-		}
-	}
-	codecs = sys_init_done->enc_codec_supported;
-	for (i = 0; i < 8 * sizeof(codecs); i++) {
-		if ((1 << i) & codecs) {
-			capability =
-				&sys_init_done->capabilities[codec_count++];
-			capability->codec =
-				vidc_get_hal_codec((1 << i) & codecs);
-			capability->domain =
-				vidc_get_hal_domain(HFI_VIDEO_DOMAIN_ENCODER);
-		}
-	}
-	sys_init_done->codec_count = codec_count;
-
-	prop_id = *((u32 *)(orig_data_ptr + size));
-	if (prop_id == HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED) {
-		struct hfi_max_sessions_supported *prop =
-			(struct hfi_max_sessions_supported *)
-			(orig_data_ptr + size + sizeof(u32));
-
-		sys_init_done->max_sessions_supported = prop->max_sessions;
-		size += sizeof(struct hfi_max_sessions_supported) + sizeof(u32);
-		dprintk(VIDC_DBG, "max_sessions_supported %d\n",
-				prop->max_sessions);
-	}
-	return size;
-}
-
-static int copy_profile_caps_to_sessions(struct hfi_profile_level *prof,
-		u32 profile_count, struct msm_vidc_capability *capabilities,
-		u32 num_sessions, u32 codecs, u32 domain)
-{
-	u32 i = 0, j = 0;
-	struct msm_vidc_capability *capability;
-	u32 sess_codec;
-	u32 sess_domain;
-
-	/*
-	 * iterate over num_sessions and copy all the profile capabilities
-	 * to matching sessions.
-	 */
-	for (i = 0; i < num_sessions; i++) {
-		sess_codec = 0;
-		sess_domain = 0;
-		capability = &capabilities[i];
-
-		if (capability->codec)
-			sess_codec =
-				vidc_get_hfi_codec(capability->codec);
-		if (capability->domain)
-			sess_domain =
-				vidc_get_hfi_domain(capability->domain);
-
-		if (!(sess_codec & codecs && sess_domain & domain))
-			continue;
-
-		capability->profile_level.profile_count = profile_count;
-		for (j = 0; j < profile_count; j++) {
-			/* HFI and HAL follow same enums, hence no conversion */
-			capability->profile_level.profile_level[j].profile =
-				prof[j].profile;
-			capability->profile_level.profile_level[j].level =
-				prof[j].level;
-		}
-	}
-
-	return 0;
-}
-
-static int copy_caps_to_sessions(struct hfi_capability_supported *cap,
-		u32 num_caps, struct msm_vidc_capability *capabilities,
-		u32 num_sessions, u32 codecs, u32 domain)
-{
-	u32 i = 0, j = 0;
-	struct msm_vidc_capability *capability;
-	u32 sess_codec;
-	u32 sess_domain;
-
-	/*
-	 * iterate over num_sessions and copy all the capabilities
-	 * to matching sessions.
-	 */
-	for (i = 0; i < num_sessions; i++) {
-		sess_codec = 0;
-		sess_domain = 0;
-		capability = &capabilities[i];
-
-		if (capability->codec)
-			sess_codec =
-				vidc_get_hfi_codec(capability->codec);
-		if (capability->domain)
-			sess_domain =
-				vidc_get_hfi_domain(capability->domain);
-
-		if (!(sess_codec & codecs && sess_domain & domain))
-			continue;
-
-		for (j = 0; j < num_caps; j++)
-			copy_cap_prop(&cap[j], capability);
-	}
-
-	return 0;
-}
-
-static int copy_nal_stream_format_caps_to_sessions(u32 nal_stream_format_value,
-		struct msm_vidc_capability *capabilities, u32 num_sessions,
-		u32 codecs, u32 domain)
-{
-	u32 i = 0;
-	struct msm_vidc_capability *capability;
-	u32 sess_codec;
-	u32 sess_domain;
-
-	for (i = 0; i < num_sessions; i++) {
-		sess_codec = 0;
-		sess_domain = 0;
-		capability = &capabilities[i];
-
-		if (capability->codec)
-			sess_codec =
-				vidc_get_hfi_codec(capability->codec);
-		if (capability->domain)
-			sess_domain =
-				vidc_get_hfi_domain(capability->domain);
-
-		if (!(sess_codec & codecs && sess_domain & domain))
-			continue;
-
-		capability->nal_stream_format.nal_stream_format_supported =
-				nal_stream_format_value;
-	}
-
-	return 0;
-}
-
-static enum vidc_status hfi_parse_init_done_properties(
-		struct msm_vidc_capability *capabilities,
-		u32 num_sessions, u8 *data_ptr, u32 num_properties,
-		u32 rem_bytes)
-{
-	enum vidc_status status = VIDC_ERR_NONE;
-	u32 prop_id, next_offset;
-	u32 codecs = 0, domain = 0;
-
-	while (status == VIDC_ERR_NONE && num_properties &&
-			rem_bytes >= sizeof(u32)) {
-
-		prop_id = *((u32 *)data_ptr);
-		next_offset = sizeof(u32);
-
-		switch (prop_id) {
-		case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED:
-		{
-			struct hfi_codec_mask_supported *prop =
-				(struct hfi_codec_mask_supported *)
-				(data_ptr + next_offset);
-
-			codecs = prop->codecs;
-			domain = prop->video_domains;
-			next_offset += sizeof(struct hfi_codec_mask_supported);
-			num_properties--;
-			break;
-		}
-		case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
-		{
-			struct hfi_capability_supported_info *prop =
-				(struct hfi_capability_supported_info *)
-				(data_ptr + next_offset);
-
-			if ((rem_bytes - next_offset) < prop->num_capabilities *
-				sizeof(struct hfi_capability_supported)) {
-				status = VIDC_ERR_BAD_PARAM;
-				break;
-			}
-			next_offset += sizeof(u32) +
-				prop->num_capabilities *
-				sizeof(struct hfi_capability_supported);
-
-			copy_caps_to_sessions(&prop->rg_data[0],
-					prop->num_capabilities,
-					capabilities, num_sessions,
-					codecs, domain);
-			num_properties--;
-			break;
-		}
-		case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
-		{
-			struct hfi_uncompressed_format_supported *prop =
-				(struct hfi_uncompressed_format_supported *)
-				(data_ptr + next_offset);
-			u32 num_format_entries;
-			char *fmt_ptr;
-			struct hfi_uncompressed_plane_info *plane_info;
-
-			if ((rem_bytes - next_offset) < sizeof(*prop)) {
-				status = VIDC_ERR_BAD_PARAM;
-				break;
-			}
-			num_format_entries = prop->format_entries;
-			next_offset = sizeof(*prop);
-			fmt_ptr = (char *)&prop->rg_format_info[0];
-
-			while (num_format_entries) {
-				u32 bytes_to_skip;
-
-				plane_info =
-				(struct hfi_uncompressed_plane_info *) fmt_ptr;
-
-				if ((rem_bytes - next_offset) <
-						sizeof(*plane_info)) {
-					status = VIDC_ERR_BAD_PARAM;
-					break;
-				}
-				bytes_to_skip = sizeof(*plane_info) -
-					sizeof(struct
-					hfi_uncompressed_plane_constraints) +
-					plane_info->num_planes *
-					sizeof(struct
-					hfi_uncompressed_plane_constraints);
-
-				fmt_ptr += bytes_to_skip;
-				next_offset += bytes_to_skip;
-				num_format_entries--;
-			}
-			num_properties--;
-			break;
-		}
-		case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED:
-		{
-			struct hfi_properties_supported *prop =
-				(struct hfi_properties_supported *)
-				(data_ptr + next_offset);
-			next_offset += sizeof(*prop) - sizeof(u32)
-				+ prop->num_properties * sizeof(u32);
-			num_properties--;
-			break;
-		}
-		case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
-		{
-			struct hfi_profile_level_supported *prop =
-				(struct hfi_profile_level_supported *)
-				(data_ptr + next_offset);
-
-			next_offset += sizeof(u32) +
-				prop->profile_count *
-				sizeof(struct hfi_profile_level);
-
-			if (prop->profile_count > MAX_PROFILE_COUNT) {
-				prop->profile_count = MAX_PROFILE_COUNT;
-				dprintk(VIDC_WARN,
-					"prop count exceeds max profile count\n");
-				break;
-			}
-
-			copy_profile_caps_to_sessions(
-					&prop->rg_profile_level[0],
-					prop->profile_count, capabilities,
-					num_sessions, codecs, domain);
-			num_properties--;
-			break;
-		}
-		case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
-		{
-			struct hfi_nal_stream_format_supported *prop =
-				(struct hfi_nal_stream_format_supported *)
-					(data_ptr + next_offset);
-
-			copy_nal_stream_format_caps_to_sessions(
-					prop->nal_stream_format_supported,
-					capabilities, num_sessions,
-					codecs, domain);
-
-			next_offset +=
-				sizeof(struct hfi_nal_stream_format_supported);
-			num_properties--;
-			break;
-		}
-		case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT:
-		{
-			next_offset += sizeof(u32);
-			num_properties--;
-			break;
-		}
-		case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH:
-		{
-			next_offset +=
-				sizeof(struct hfi_intra_refresh);
-			num_properties--;
-			break;
-		}
-		case HFI_PROPERTY_TME_VERSION_SUPPORTED:
-		{
-			capabilities->tme_version =
-				*((u32 *)(data_ptr + next_offset));
-			next_offset +=
-				sizeof(u32);
-			num_properties--;
-			break;
-		}
-		default:
-			dprintk(VIDC_DBG,
-				"%s: default case - data_ptr %pK, prop_id 0x%x\n",
-				__func__, data_ptr, prop_id);
-			break;
-		}
-		rem_bytes -= next_offset;
-		data_ptr += next_offset;
-	}
-
-	return status;
-}
-
-enum vidc_status hfi_process_sys_init_done_prop_read(
-	struct hfi_msg_sys_init_done_packet *pkt,
-	struct vidc_hal_sys_init_done *sys_init_done)
-{
-	enum vidc_status status = VIDC_ERR_NONE;
-	u32 rem_bytes, bytes_read, num_properties;
-	u8 *data_ptr;
-
-	if (!pkt || !sys_init_done) {
-		dprintk(VIDC_ERR,
-			"hfi_msg_sys_init_done: Invalid input\n");
-		return VIDC_ERR_FAIL;
-	}
-
-	rem_bytes = pkt->size - sizeof(struct
-			hfi_msg_sys_init_done_packet) + sizeof(u32);
-
-	if (!rem_bytes) {
-		dprintk(VIDC_ERR,
-			"hfi_msg_sys_init_done: missing_prop_info\n");
-		return VIDC_ERR_FAIL;
-	}
-
-	status = hfi_map_err_status(pkt->error_type);
-	if (status) {
-		dprintk(VIDC_ERR, "%s: status %#x\n", __func__, status);
-		return status;
-	}
-
-	data_ptr = (u8 *) &pkt->rg_property_data[0];
-	num_properties = pkt->num_properties;
-	dprintk(VIDC_DBG,
-		"%s: data_start %pK, num_properties %#x\n",
-		__func__, data_ptr, num_properties);
-	if (!num_properties) {
-		sys_init_done->capabilities = NULL;
-		dprintk(VIDC_DBG,
-			"Venus didn't set any properties in SYS_INIT_DONE");
-		return status;
-	}
-	bytes_read = hfi_fill_codec_info(data_ptr, sys_init_done);
-	data_ptr += bytes_read;
-	rem_bytes -= bytes_read;
-	num_properties--;
-
-	status = hfi_parse_init_done_properties(
-			sys_init_done->capabilities,
-			VIDC_MAX_SESSIONS, data_ptr, num_properties,
-			rem_bytes);
-	if (status) {
-		dprintk(VIDC_ERR, "%s: parse status %#x\n",
-			__func__, status);
-		return status;
-	}
-
-	return status;
-}
-
 static void hfi_process_sess_get_prop_buf_req(
 	struct hfi_msg_session_property_info_packet *prop,
 	struct buffer_requirements *buffreq)
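The large deletion above removes hfi_response_handler.c's copy of the SYS_INIT_DONE capability parser (get_hal_cap_type(), copy_cap_prop(), hfi_parse_init_done_properties(), and friends). The pattern worth noting in that code is the byte-budget check before each variable-length property is consumed. A simplified standalone sketch; the header layout is a hypothetical stand-in for the real HFI structures:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical TLV-style header; real HFI properties carry richer payloads. */
struct prop_hdr {
	uint32_t id;
	uint32_t payload_len;	/* bytes following this header */
};

/* Walk a packed property blob the way hfi_parse_init_done_properties() did:
 * verify the remaining byte budget before touching each header or payload,
 * and stop on the first malformed entry instead of reading past the end. */
static int parse_properties(const uint8_t *data, size_t rem_bytes)
{
	while (rem_bytes >= sizeof(struct prop_hdr)) {
		struct prop_hdr hdr;

		memcpy(&hdr, data, sizeof(hdr));
		if (hdr.payload_len > rem_bytes - sizeof(hdr))
			return -1;	/* truncated or corrupt entry */

		/* ... dispatch on hdr.id and consume the payload here ... */

		data += sizeof(hdr) + hdr.payload_len;
		rem_bytes -= sizeof(hdr) + hdr.payload_len;
	}
	return 0;
}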
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 30bd2a4..c71cf0c 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -67,11 +67,10 @@
 		.id = V4L2_CID_MPEG_VIDEO_UNKNOWN,
 		.name = "Invalid control",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = INT_MAX,
-		.maximum = INT_MAX,
-		.default_value = INT_MAX,
+		.minimum = 0,
+		.maximum = 0,
+		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -95,7 +94,6 @@
 				  V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_P |
 				  V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_B),
 		.step = 0,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -106,7 +104,7 @@
 		.maximum = V4L2_MPEG_MSM_VIDC_ENABLE,
 		.default_value = V4L2_MPEG_MSM_VIDC_DISABLE,
 		.step = 1,
-		},
+	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE,
 		.name = "Secure mode",
@@ -123,7 +121,6 @@
 		.minimum = EXTRADATA_NONE,
 		.maximum = EXTRADATA_DEFAULT | EXTRADATA_ADVANCED,
 		.default_value = EXTRADATA_DEFAULT,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -136,7 +133,6 @@
 			V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY,
 		.default_value =
 			V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY,
-		.menu_skip_mask = 0,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -154,9 +150,7 @@
 		(1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
 		(1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
@@ -185,12 +179,9 @@
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_0) |
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_1) |
-		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_2) |
-		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN)
+		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_2)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
@@ -204,16 +195,14 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
 		.name = "HEVC Level",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
-		.maximum = V4L2_MPEG_VIDEO_HEVC_LEVEL_UNKNOWN,
+		.maximum = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
 		.default_value = V4L2_MPEG_VIDEO_HEVC_LEVEL_5,
 		.menu_skip_mask = ~(
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_1) |
@@ -228,12 +217,9 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
-		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2) |
-		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_UNKNOWN)
+		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_TIER,
@@ -246,9 +232,7 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_TIER_HIGH)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
@@ -258,9 +242,7 @@
 		.maximum = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
 		.default_value = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
 		.menu_skip_mask = ~(1 << V4L2_MPEG_VIDEO_VP8_PROFILE_0),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL,
@@ -277,8 +259,6 @@
 		(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3)
 		),
 		.qmenu = vp8_profile_level,
-		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
@@ -290,12 +270,9 @@
 		.menu_skip_mask = ~(
 		(1 << V4L2_MPEG_VIDEO_VP9_PROFILE_0) |
 		(1 << V4L2_MPEG_VIDEO_VP9_PROFILE_1) |
-		(1 << V4L2_MPEG_VIDEO_VP9_PROFILE_2) |
-		(1 << V4L2_MPEG_VIDEO_VP9_PROFILE_3)
+		(1 << V4L2_MPEG_VIDEO_VP9_PROFILE_2)
 		),
 		.qmenu = NULL,
-		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL,
@@ -320,8 +297,6 @@
 		(1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61)
 		),
 		.qmenu = vp9_level,
-		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE,
@@ -335,8 +310,6 @@
 		(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_MAIN)
 		),
 		.qmenu = mpeg2_profile,
-		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL,
@@ -351,8 +324,6 @@
 			(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_2)
 		),
 		.qmenu = mpeg2_level,
-		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT,
@@ -380,7 +351,6 @@
 		.maximum = INT_MAX,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -391,9 +361,7 @@
 		.maximum = MAX_NUM_CAPTURE_BUFFERS,
 		.default_value = MIN_NUM_CAPTURE_BUFFERS,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 	},
 	{
 		.id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
@@ -403,9 +371,7 @@
 		.maximum = MAX_NUM_OUTPUT_BUFFERS,
 		.default_value = MIN_NUM_OUTPUT_BUFFERS,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE,
@@ -415,7 +381,6 @@
 		.maximum = (MAXIMUM_FPS << 16),
 		.default_value = (DEFAULT_FPS << 16),
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -435,7 +400,6 @@
 		.maximum = INT_MAX,
 		.default_value =  (DEFAULT_FPS << 16),
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -774,12 +738,7 @@
 	inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH;
 	inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT;
 	inst->prop.width[OUTPUT_PORT] = DEFAULT_WIDTH;
-	inst->capability.height.min = MIN_SUPPORTED_HEIGHT;
-	inst->capability.height.max = DEFAULT_HEIGHT;
-	inst->capability.width.min = MIN_SUPPORTED_WIDTH;
-	inst->capability.width.max = DEFAULT_WIDTH;
-	inst->capability.secure_output2_threshold.min = 0;
-	inst->capability.secure_output2_threshold.max = 0;
+	inst->prop.extradata_ctrls = EXTRADATA_DEFAULT;
 	inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_DYNAMIC;
 	inst->stream_output_mode = HAL_VIDEO_DECODER_PRIMARY;
@@ -854,7 +813,6 @@
 		return -EINVAL;
 	}
 
-	v4l2_ctrl_unlock(ctrl);
 	dprintk(VIDC_DBG,
 		"%s: %x : control name = %s, id = 0x%x value = %d\n",
 		__func__, hash32_ptr(inst->session), ctrl->name,
@@ -905,6 +863,10 @@
 		inst->clk_data.frame_rate = ctrl->val;
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+		if (ctrl->val == EXTRADATA_NONE)
+			inst->prop.extradata_ctrls = 0;
+		else
+			inst->prop.extradata_ctrls |= ctrl->val;
 		/*
 		 * nothing to do here as inst->bufq[CAPTURE_PORT].num_planes
 		 * and inst->bufq[CAPTURE_PORT].plane_sizes[1] are already
@@ -920,27 +882,7 @@
 			inst->flags |= VIDC_REALTIME;
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
-		if (((ctrl->val >> 16) < inst->capability.frame_rate.min ||
-			(ctrl->val >> 16) > inst->capability.frame_rate.max) &&
-			ctrl->val != INT_MAX) {
-			dprintk(VIDC_ERR, "Invalid operating rate %u\n",
-				(ctrl->val >> 16));
-			rc = -ENOTSUPP;
-		} else if (ctrl->val == INT_MAX) {
-			dprintk(VIDC_DBG,
-				"inst(%pK) Request for turbo mode\n", inst);
-			inst->clk_data.turbo_mode = true;
-		} else if (msm_vidc_validate_operating_rate(inst, ctrl->val)) {
-			dprintk(VIDC_ERR, "Failed to set operating rate\n");
-			rc = -ENOTSUPP;
-		} else {
-			dprintk(VIDC_DBG,
-				"inst(%pK) operating rate changed from %d to %d\n",
-				inst, inst->clk_data.operating_rate >> 16,
-					ctrl->val >> 16);
-			inst->clk_data.operating_rate = ctrl->val;
-			inst->clk_data.turbo_mode = false;
-		}
+		inst->clk_data.operating_rate = ctrl->val;
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE:
 		inst->clk_data.low_latency_mode = !!ctrl->val;
@@ -950,7 +892,6 @@
 			"Unknown control %#x\n", ctrl->id);
 		break;
 	}
-	v4l2_ctrl_lock(ctrl);
 
 	return rc;
 }
@@ -1074,6 +1015,22 @@
 			__func__, buffer_type);
 		return -EINVAL;
 	}
+	if (buffer_type == HAL_BUFFER_OUTPUT2) {
+		/*
+	 * For split mode, set the DPB count as well.
+	 * For DPB, the actual count is the same as the min output count.
+		 */
+		rc = msm_comm_set_buffer_count(inst,
+			bufreq->buffer_count_min,
+			bufreq->buffer_count_min,
+			HAL_BUFFER_OUTPUT);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: failed to set buffer count(%#x)\n",
+				__func__, buffer_type);
+			return -EINVAL;
+		}
+	}
 	rc = msm_comm_set_buffer_count(inst,
 			bufreq->buffer_count_min,
 			bufreq->buffer_count_actual,
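The block added above also programs the DPB pool whenever split mode (HAL_BUFFER_OUTPUT2) is active, pinning the DPB's actual count to its minimum. A condensed sketch of that rule, with hypothetical helper and type names:

struct inst;	/* opaque; stands in for struct msm_vidc_inst */

enum buf_type { BUF_OUTPUT, BUF_OUTPUT2 };

extern int set_buffer_count(struct inst *inst, unsigned int min_count,
			    unsigned int actual_count, enum buf_type type);

/* Split mode: the DPB (BUF_OUTPUT) is sized with actual == min, while the
 * client-visible OUTPUT2 port keeps its requested actual count. */
static int set_split_mode_counts(struct inst *inst, unsigned int min_count,
				 unsigned int actual_count)
{
	int rc;

	rc = set_buffer_count(inst, min_count, min_count, BUF_OUTPUT);
	if (rc)
		return rc;

	return set_buffer_count(inst, min_count, actual_count, BUF_OUTPUT2);
}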
@@ -1207,6 +1164,19 @@
 	hdev = inst->core->device;
 
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_SECURE);
+
+	if (ctrl->val) {
+		if (!(inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC ||
+			inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+			inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9 ||
+			inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_MPEG2)) {
+			dprintk(VIDC_ERR,
+				"%s: Secure allowed for HEVC/H264/VP9/MPEG2\n",
+				__func__);
+			return -EINVAL;
+		}
+	}
+
 	dprintk(VIDC_DBG, "%s: %#x\n", __func__, ctrl->val);
 	rc = call_hfi_op(hdev, session_set_property, inst->session,
 		HFI_PROPERTY_PARAM_SECURE_SESSION, &ctrl->val, sizeof(u32));
@@ -1391,10 +1361,8 @@
 int msm_vdec_set_extradata(struct msm_vidc_inst *inst)
 {
 	uint32_t display_info = HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA;
-	struct v4l2_ctrl *ctrl;
 	u32 value = 0x0;
 
-	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
 	switch (inst->fmts[OUTPUT_PORT].fourcc) {
 	case V4L2_PIX_FMT_H264:
 	case V4L2_PIX_FMT_HEVC:
@@ -1416,6 +1384,12 @@
 	msm_comm_set_extradata(inst,
 		HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA, 0x1);
 	msm_comm_set_extradata(inst, display_info, 0x1);
+	if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9 ||
+		inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC) {
+		msm_comm_set_extradata(inst,
+			HFI_PROPERTY_PARAM_HDR10_HIST_EXTRADATA, 0x1);
+	}
+
 	msm_comm_set_extradata(inst,
 		HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB, 0x1);
 	if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC) {
@@ -1427,7 +1401,7 @@
 	}
 
 	/* Enable / Disable Advanced Extradata */
-	if (ctrl->val == EXTRADATA_ADVANCED)
+	if (inst->prop.extradata_ctrls & EXTRADATA_ADVANCED)
 		value = 0x1;
 	msm_comm_set_extradata(inst,
 		HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA, value);
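Across the msm_vdec.c hunks above, per-call reads of the EXTRADATA control are replaced by a sticky mask, inst->prop.extradata_ctrls: each set ORs in the requested flags and EXTRADATA_NONE clears the mask, so later paths such as msm_vdec_set_extradata() consult the mask instead of the control. A standalone sketch of the accumulation; the flag values are illustrative, not the driver's:

#include <stdint.h>

#define EXTRADATA_NONE		0x0
#define EXTRADATA_DEFAULT	0x1	/* illustrative values only */
#define EXTRADATA_ADVANCED	0x2

/* Accumulate extradata selections: each set() ORs new flags into the
 * sticky mask, and EXTRADATA_NONE resets it. */
static void set_extradata(uint32_t *mask, uint32_t val)
{
	if (val == EXTRADATA_NONE)
		*mask = 0;
	else
		*mask |= val;
}

static int advanced_extradata_enabled(uint32_t mask)
{
	return !!(mask & EXTRADATA_ADVANCED);
}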
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 713457c..23b422e 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -16,7 +16,11 @@
 #define MIN_BIT_RATE 32000
 #define MAX_BIT_RATE 1200000000
 #define DEFAULT_BIT_RATE 64000
+#define MIN_BIT_RATE_RATIO 0
+#define MAX_BIT_RATE_RATIO 100
+#define MAX_HIER_CODING_LAYER 6
 #define BIT_RATE_STEP 1
+#define MAX_BASE_LAYER_PRIORITY_ID 63
 #define MAX_SLICE_BYTE_SIZE ((MAX_BIT_RATE)>>3)
 #define MIN_SLICE_BYTE_SIZE 512
 #define MAX_SLICE_MB_SIZE (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
@@ -24,14 +28,15 @@
 #define QP_ENABLE_P 0x2
 #define QP_ENABLE_B 0x4
 #define MIN_QP 0
-#define MAX_QP 0x33
-#define MAX_QP_PACKED 0x333333
-#define DEFAULT_MIN_QP 0xA
-#define DEFAULT_MIN_QP_PACKED 0xA0A0A
-#define DEFAULT_MAX_QP_PACKED 0x2C2C2C
+#define MAX_QP 0x7F
+#define MAX_QP_PACKED 0x7F7F7F
+#define DEFAULT_QP 0xA
+#define DEFAULT_QP_PACKED 0xA0A0A
 #define MAX_INTRA_REFRESH_MBS ((7680 * 4320) >> 8)
 #define MAX_LTR_FRAME_COUNT 10
 #define MAX_NUM_B_FRAMES 1
+#define MIN_CBRPLUS_W 1280
+#define MIN_CBRPLUS_H 720
 
 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
 #define MIN_NUM_ENC_OUTPUT_BUFFERS 4
@@ -70,9 +75,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_UNKNOWN,
 		.name = "Invalid control",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = INT_MAX,
-		.maximum = INT_MAX,
-		.default_value = INT_MAX,
+		.minimum = 0,
+		.maximum = 0,
+		.default_value = 0,
 		.step = 1,
 		.menu_skip_mask = 0,
 		.qmenu = NULL,
@@ -85,7 +90,6 @@
 		.maximum = INT_MAX,
 		.default_value = 2*DEFAULT_FPS-1,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -94,9 +98,8 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MIN_QP,
 		.maximum = MAX_QP,
-		.default_value = DEFAULT_MIN_QP,
+		.default_value = DEFAULT_QP,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -105,9 +108,8 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MIN_QP,
 		.maximum = MAX_QP,
-		.default_value = DEFAULT_MIN_QP,
+		.default_value = DEFAULT_QP,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -116,9 +118,8 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MIN_QP,
 		.maximum = MAX_QP,
-		.default_value = DEFAULT_MIN_QP,
+		.default_value = DEFAULT_QP,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -127,9 +128,8 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MIN_QP,
 		.maximum = MAX_QP_PACKED,
-		.default_value = DEFAULT_MIN_QP_PACKED,
+		.default_value = DEFAULT_QP_PACKED,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -138,9 +138,8 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MIN_QP,
 		.maximum = MAX_QP_PACKED,
-		.default_value = DEFAULT_MAX_QP_PACKED,
+		.default_value = DEFAULT_QP_PACKED,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -151,7 +150,6 @@
 		.maximum = MAX_NUM_B_FRAMES,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -162,9 +160,7 @@
 		.maximum = MAX_NUM_CAPTURE_BUFFERS,
 		.default_value = MIN_NUM_CAPTURE_BUFFERS,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 	},
 	{
 		.id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
@@ -174,9 +170,7 @@
 		.maximum = MAX_NUM_OUTPUT_BUFFERS,
 		.default_value = MIN_NUM_OUTPUT_BUFFERS,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 	},
 
 	{
@@ -187,7 +181,6 @@
 		.maximum = 0,
 		.default_value = 0,
 		.step = 0,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -197,7 +190,6 @@
 		.minimum = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
 		.maximum = V4L2_MPEG_VIDEO_BITRATE_MODE_CQ,
 		.default_value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
-		.step = 0,
 		.menu_skip_mask = ~(
 		(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
 		(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) |
@@ -216,7 +208,6 @@
 		.maximum = MAX_FRAME_QUALITY,
 		.default_value = DEFAULT_FRAME_QUALITY,
 		.step = FRAME_QUALITY_STEP,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -227,7 +218,6 @@
 		.maximum = 512,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -238,7 +228,6 @@
 		.maximum = (MAXIMUM_FPS << 16),
 		.default_value = (DEFAULT_FPS << 16),
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -249,7 +238,6 @@
 		.maximum = MAX_BIT_RATE,
 		.default_value = DEFAULT_BIT_RATE,
 		.step = BIT_RATE_STEP,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -278,17 +266,15 @@
 		(1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
 		(1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
 		.name = "H264 Level",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
-		.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN,
-		.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN,
+		.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_6_2,
+		.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_6_2,
 		.menu_skip_mask = ~(
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
@@ -309,12 +295,9 @@
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_0) |
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_1) |
-		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_2) |
-		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN)
+		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_2)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
@@ -324,9 +307,7 @@
 		.maximum = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
 		.default_value = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
 		.menu_skip_mask = ~(1 << V4L2_MPEG_VIDEO_VP8_PROFILE_0),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL,
@@ -343,8 +324,6 @@
 		(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3)
 		),
 		.qmenu = vp8_profile_level,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
@@ -358,18 +337,16 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
 		.name = "HEVC Level",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
-		.maximum = V4L2_MPEG_VIDEO_HEVC_LEVEL_UNKNOWN,
+		.maximum = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
 		.default_value =
-			V4L2_MPEG_VIDEO_HEVC_LEVEL_UNKNOWN,
+			V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
 		.menu_skip_mask = ~(
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_1) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_2) |
@@ -383,12 +360,9 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
-		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2) |
-		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_UNKNOWN)
+		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_TIER,
@@ -401,9 +375,7 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_TIER_HIGH)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_ROTATE,
@@ -436,7 +408,6 @@
 		.maximum = MAX_SLICE_BYTE_SIZE,
 		.default_value = MIN_SLICE_BYTE_SIZE,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -447,7 +418,6 @@
 		.maximum = MAX_SLICE_MB_SIZE,
 		.default_value = 1,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -469,7 +439,6 @@
 		.maximum = MAX_INTRA_REFRESH_MBS,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -480,7 +449,6 @@
 		.maximum = 6,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -491,7 +459,6 @@
 		.maximum = 6,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -590,7 +557,7 @@
 		.name = "Set Hier layers",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
-		.maximum = 6,
+		.maximum = MAX_HIER_CODING_LAYER,
 		.default_value = 0,
 		.step = 1,
 		.qmenu = NULL,
@@ -599,19 +566,12 @@
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER,
 		.name = "Set Hier max layers",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 0,
-		.maximum = 6,
-		.default_value = 0,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_0,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_6,
+		.default_value =
+			V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_0,
 		.step = 1,
-		.menu_skip_mask = ~(
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_0) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_1) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_2) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_3) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_4) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_5) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_6)
-		),
+		.menu_skip_mask = 0,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE,
@@ -620,7 +580,6 @@
 		.minimum = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P,
 		.maximum = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P,
 		.default_value = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P,
-		.step = 1,
 		.menu_skip_mask = ~(
 		(1 << V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P)
 		),
@@ -690,9 +649,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR,
 		.name = "Set layer0 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -700,9 +659,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR,
 		.name = "Set layer1 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -710,9 +669,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR,
 		.name = "Set layer2 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -720,9 +679,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR,
 		.name = "Set layer3 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -730,9 +689,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR,
 		.name = "Set layer4 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -740,9 +699,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR,
 		.name = "Set layer5 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -758,10 +717,10 @@
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID,
-		.name = "Set Base Layer ID for Hier-P",
+		.name = "Set Base Layer Priority ID for Hier-P",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
-		.maximum = 6,
+		.maximum = MAX_BASE_LAYER_PRIORITY_ID,
 		.default_value = 0,
 		.step = 1,
 		.qmenu = NULL,
@@ -891,6 +850,15 @@
 		.step = 1,
 	},
 	{
+		.id = V4L2_CID_MPEG_VIDC_VENC_RC_TIMESTAMP_DISABLE,
+		.name = "RC Timestamp disable",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_MSM_VIDC_DISABLE,
+		.maximum = V4L2_MPEG_MSM_VIDC_ENABLE,
+		.default_value = V4L2_MPEG_MSM_VIDC_DISABLE,
+		.step = 1,
+	},
+	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_VPE_CSC_CUSTOM_MATRIX,
 		.name = "Enable/Disable CSC Custom Matrix",
 		.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -919,10 +887,10 @@
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VENC_HDR_INFO,
-		.name = "Enable/Disable HDR INFO",
-		.type = V4L2_CTRL_TYPE_U32,
-		.minimum = 0,
-		.maximum = UINT_MAX,
+		.name = "HDR PQ information",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = INT_MIN,
+		.maximum = INT_MAX,
 		.default_value = 0,
 		.step = 1,
 	},
@@ -948,6 +916,24 @@
 		.default_value = V4L2_MPEG_MSM_VIDC_DISABLE,
 		.step = 1,
 	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER,
+		.name = "Enable/Disable Native Recorder",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_MSM_VIDC_DISABLE,
+		.maximum = V4L2_MPEG_MSM_VIDC_ENABLE,
+		.default_value = V4L2_MPEG_MSM_VIDC_DISABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS,
+		.name = "Enable/Disable bitrate savings",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_MSM_VIDC_DISABLE,
+		.maximum = V4L2_MPEG_MSM_VIDC_ENABLE,
+		.default_value = V4L2_MPEG_MSM_VIDC_ENABLE,
+		.step = 1,
+	},
 };
 
 #define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -1068,16 +1054,11 @@
 	inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH;
 	inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT;
 	inst->prop.width[OUTPUT_PORT] = DEFAULT_WIDTH;
-	inst->capability.height.min = MIN_SUPPORTED_HEIGHT;
-	inst->capability.height.max = DEFAULT_HEIGHT;
-	inst->capability.width.min = MIN_SUPPORTED_WIDTH;
-	inst->capability.width.max = DEFAULT_WIDTH;
-	inst->capability.secure_output2_threshold.min = 0;
-	inst->capability.secure_output2_threshold.max = 0;
+	inst->prop.bframe_changed = false;
+	inst->prop.extradata_ctrls = EXTRADATA_DEFAULT;
 	inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_DYNAMIC;
 	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->clk_data.frame_rate = (DEFAULT_FPS << 16);
-	inst->capability.pixelprocess_capabilities = 0;
 
 	inst->bufq[OUTPUT_PORT].num_planes = 2;
 	inst->bufq[CAPTURE_PORT].num_planes = 1;
@@ -1116,7 +1097,7 @@
 	}
 
 	buff_req_buffer->buffer_size =
-		msm_vidc_calculate_enc_input_extra_size(inst, 0);
+		msm_vidc_calculate_enc_input_extra_size(inst);
 	inst->bufq[OUTPUT_PORT].plane_sizes[1] =
 		buff_req_buffer->buffer_size;
 
@@ -1232,7 +1213,6 @@
 	int rc = 0;
 	int i = 0;
 	struct msm_vidc_format *fmt = NULL;
-	struct v4l2_ctrl *extradata_ctrl;
 
 	if (!inst || !f) {
 		dprintk(VIDC_ERR,
@@ -1315,11 +1295,8 @@
 		 */
 		inst->bufq[fmt->type].plane_sizes[0] =
 			msm_vidc_calculate_enc_input_frame_size(inst);
-		extradata_ctrl = get_ctrl(inst,
-			V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
 		inst->bufq[fmt->type].plane_sizes[1] =
-			msm_vidc_calculate_enc_input_extra_size(inst,
-				extradata_ctrl->val);
+			msm_vidc_calculate_enc_input_extra_size(inst);
 		f->fmt.pix_mp.num_planes = inst->bufq[fmt->type].num_planes;
 		for (i = 0; i < inst->bufq[fmt->type].num_planes; i++) {
 			f->fmt.pix_mp.plane_fmt[i].sizeimage =
@@ -1381,40 +1358,13 @@
 	return 0;
 }
 
-void msm_venc_adjust_gop_size(struct msm_vidc_inst *inst)
-{
-	struct v4l2_ctrl *hier_ctrl;
-
-	/*
-	 * Layer encoding needs GOP size to be multiple of subgop size
-	 * And subgop size is 2 ^ number of enhancement layers
-	 */
-	hier_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
-	if (hier_ctrl->val > 1) {
-		struct v4l2_ctrl *gop_size_ctrl;
-		u32 min_gop_size;
-		u32 num_subgops;
-
-		gop_size_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE);
-		min_gop_size = (1 << (hier_ctrl->val - 1));
-		num_subgops = (gop_size_ctrl->val + (min_gop_size >> 1)) /
-				min_gop_size;
-		if (num_subgops)
-			gop_size_ctrl->val = num_subgops * min_gop_size;
-		else
-			gop_size_ctrl->val = min_gop_size;
-	}
-}
-
 int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
 {
 	int rc = 0;
 	struct msm_vidc_mastering_display_colour_sei_payload *mdisp_sei = NULL;
 	struct msm_vidc_content_light_level_sei_payload *cll_sei = NULL;
 	struct hal_buffer_requirements *buff_req_buffer = NULL;
-	struct v4l2_ctrl *i_qp = NULL;
-	struct v4l2_ctrl *p_qp = NULL;
-	struct v4l2_ctrl *b_qp = NULL;
+	u32 i_qp_min, i_qp_max, p_qp_min, p_qp_max, b_qp_min, b_qp_max;
 
 	if (!inst || !inst->core || !inst->core->device || !ctrl) {
 		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -1424,13 +1374,6 @@
 	mdisp_sei = &(inst->hdr10_sei_params.disp_color_sei);
 	cll_sei = &(inst->hdr10_sei_params.cll_sei);
 
-	/*
-	 * Unlock the control prior to setting to the hardware. Otherwise
-	 * lower level code that attempts to do a get_ctrl() will end up
-	 * deadlocking.
-	 */
-	v4l2_ctrl_unlock(ctrl);
-
 	dprintk(VIDC_DBG,
 		"%s: %x : name %s, id 0x%x value %d\n",
 		__func__, hash32_ptr(inst->session), ctrl->name,
@@ -1522,27 +1465,7 @@
 		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
-		if (((ctrl->val >> 16) < inst->capability.frame_rate.min ||
-			(ctrl->val >> 16) > inst->capability.frame_rate.max) &&
-			ctrl->val != INT_MAX) {
-			dprintk(VIDC_ERR, "Invalid operating rate %u\n",
-				(ctrl->val >> 16));
-			rc = -ENOTSUPP;
-		} else if (ctrl->val == INT_MAX) {
-			dprintk(VIDC_DBG, "inst(%pK) Request for turbo mode\n",
-				inst);
-			inst->clk_data.turbo_mode = true;
-		} else if (msm_vidc_validate_operating_rate(inst, ctrl->val)) {
-			dprintk(VIDC_ERR, "Failed to set operating rate\n");
-			rc = -ENOTSUPP;
-		} else {
-			dprintk(VIDC_DBG,
-				"inst(%pK) operating rate changed from %d to %d\n",
-				inst, inst->clk_data.operating_rate >> 16,
-				ctrl->val >> 16);
-			inst->clk_data.operating_rate = ctrl->val;
-			inst->clk_data.turbo_mode = false;
-		}
+		inst->clk_data.operating_rate = ctrl->val;
 		if (inst->state == MSM_VIDC_START_DONE) {
 			rc = msm_venc_set_operating_rate(inst);
 			if (rc)
@@ -1603,8 +1526,13 @@
 		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
-		if ((ctrl->val & EXTRADATA_ENC_INPUT_ROI) ||
-			(ctrl->val & EXTRADATA_ENC_INPUT_HDR10PLUS)) {
+		if (ctrl->val == EXTRADATA_NONE)
+			inst->prop.extradata_ctrls = 0;
+		else
+			inst->prop.extradata_ctrls |= ctrl->val;
+
+		if ((inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_ROI) ||
+		(inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_HDR10PLUS)) {
 			buff_req_buffer = get_buff_req_buffer(inst,
 						HAL_BUFFER_EXTRADATA_INPUT);
 			if (!buff_req_buffer) {
@@ -1615,13 +1543,12 @@
 			}
 
 			buff_req_buffer->buffer_size =
-				msm_vidc_calculate_enc_input_extra_size(inst,
-					ctrl->val);
+				msm_vidc_calculate_enc_input_extra_size(inst);
 			inst->bufq[OUTPUT_PORT].plane_sizes[1] =
 					buff_req_buffer->buffer_size;
 		}
 
-		if (ctrl->val & EXTRADATA_ADVANCED) {
+		if (inst->prop.extradata_ctrls & EXTRADATA_ADVANCED) {
 			inst->bufq[CAPTURE_PORT].num_planes = 2;
 
 			buff_req_buffer = get_buff_req_buffer(inst,
@@ -1662,15 +1589,18 @@
 		break;
 	case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
-		i_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP);
-		p_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP);
-		b_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP);
-		if ((ctrl->val & 0xff) < i_qp->minimum ||
-			((ctrl->val >> 8) & 0xff) < p_qp->minimum ||
-			((ctrl->val >> 16) & 0xff) < b_qp->minimum ||
-			(ctrl->val & 0xff) > i_qp->maximum ||
-			((ctrl->val >> 8) & 0xff) > p_qp->maximum ||
-			((ctrl->val >> 16) & 0xff) > b_qp->maximum) {
+		i_qp_min = inst->capability.cap[CAP_I_FRAME_QP].min;
+		i_qp_max = inst->capability.cap[CAP_I_FRAME_QP].max;
+		p_qp_min = inst->capability.cap[CAP_P_FRAME_QP].min;
+		p_qp_max = inst->capability.cap[CAP_P_FRAME_QP].max;
+		b_qp_min = inst->capability.cap[CAP_B_FRAME_QP].min;
+		b_qp_max = inst->capability.cap[CAP_B_FRAME_QP].max;
+		if ((ctrl->val & 0xff) < i_qp_min ||
+			((ctrl->val >> 8) & 0xff) < p_qp_min ||
+			((ctrl->val >> 16) & 0xff) < b_qp_min ||
+			(ctrl->val & 0xff) > i_qp_max ||
+			((ctrl->val >> 8) & 0xff) > p_qp_max ||
+			((ctrl->val >> 16) & 0xff) > b_qp_max) {
 			dprintk(VIDC_ERR, "Invalid QP %#x\n", ctrl->val);
 			return -EINVAL;
 		}
@@ -1680,6 +1610,12 @@
 			inst->client_set_ctrls |= CLIENT_SET_MAX_QP;
 		break;
 	case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
+		i_qp_min = inst->capability.cap[CAP_I_FRAME_QP].min;
+		i_qp_max = inst->capability.cap[CAP_I_FRAME_QP].max;
+		if (ctrl->val < i_qp_min || ctrl->val > i_qp_max) {
+			dprintk(VIDC_ERR, "Invalid I QP %#x\n", ctrl->val);
+			return -EINVAL;
+		}
 		inst->client_set_ctrls |= CLIENT_SET_I_QP;
 		if (inst->state == MSM_VIDC_START_DONE) {
 			rc = msm_venc_set_dyn_qp(inst, ctrl);
@@ -1690,9 +1626,21 @@
 		}
 		break;
 	case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
+		p_qp_min = inst->capability.cap[CAP_P_FRAME_QP].min;
+		p_qp_max = inst->capability.cap[CAP_P_FRAME_QP].max;
+		if (ctrl->val < p_qp_min || ctrl->val > p_qp_max) {
+			dprintk(VIDC_ERR, "Invalid P QP %#x\n", ctrl->val);
+			return -EINVAL;
+		}
 		inst->client_set_ctrls |= CLIENT_SET_P_QP;
 		break;
 	case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
+		b_qp_min = inst->capability.cap[CAP_B_FRAME_QP].min;
+		b_qp_max = inst->capability.cap[CAP_B_FRAME_QP].max;
+		if (ctrl->val < b_qp_min || ctrl->val > b_qp_max) {
+			dprintk(VIDC_ERR, "Invalid B QP %#x\n", ctrl->val);
+			return -EINVAL;
+		}
 		inst->client_set_ctrls |= CLIENT_SET_B_QP;
 		break;
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
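The QP validation above treats the packed control value as three bytes: the I-frame QP in bits 0-7, P in bits 8-15, and B in bits 16-23 (hence defaults like DEFAULT_QP_PACKED = 0xA0A0A). A sketch of the unpack-and-check step; for brevity it validates all three fields against a single range, where the driver checks per-frame-type capability ranges:

#include <stdint.h>

/* Packed QP layout used by the checks above: I QP in bits 0-7,
 * P QP in bits 8-15, B QP in bits 16-23. */
static inline uint32_t qp_i(uint32_t packed) { return packed & 0xff; }
static inline uint32_t qp_p(uint32_t packed) { return (packed >> 8) & 0xff; }
static inline uint32_t qp_b(uint32_t packed) { return (packed >> 16) & 0xff; }

/* Reject the whole packed value if any field falls outside [min, max]. */
static int qp_packed_valid(uint32_t packed, uint32_t min, uint32_t max)
{
	return qp_i(packed) >= min && qp_i(packed) <= max &&
	       qp_p(packed) >= min && qp_p(packed) <= max &&
	       qp_b(packed) >= min && qp_b(packed) <= max;
}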
@@ -1704,9 +1652,39 @@
 					__func__);
 		}
 		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID:
+		if (inst->state == MSM_VIDC_START_DONE) {
+			rc = msm_venc_set_base_layer_priority_id(inst);
+			if (rc)
+				dprintk(VIDC_ERR,
+					"%s: set baselayer id failed.\n",
+					__func__);
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR:
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR:
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR:
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
+		if (inst->state == MSM_VIDC_START_DONE) {
+			rc = msm_venc_set_layer_bitrate(inst);
+			if (rc)
+				dprintk(VIDC_ERR,
+				"%s: set layer bitrate failed\n",
+				__func__);
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+		if (inst->state == MSM_VIDC_START_DONE) {
+			dprintk(VIDC_ERR,
+			"%s: Dynamic setting of Bframe is not supported\n",
+			__func__);
+			return -EINVAL;
+		}
+		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
-	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
 	case V4L2_CID_ROTATE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT:
 	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
@@ -1718,19 +1696,12 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER:
 	case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR:
 	case V4L2_CID_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE:
-	case V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
 	case V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS:
@@ -1747,6 +1718,8 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM:
 	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
 	case V4L2_CID_MPEG_VIDC_VENC_CVP_DISABLE:
+	case V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER:
+	case V4L2_CID_MPEG_VIDC_VENC_RC_TIMESTAMP_DISABLE:
 		dprintk(VIDC_DBG, "Control set: ID : %x Val : %d\n",
 			ctrl->id, ctrl->val);
 		break;
@@ -1756,7 +1729,6 @@
 		break;
 	}
 
-	v4l2_ctrl_lock(ctrl);
 	return rc;
 }
 
@@ -1921,6 +1893,16 @@
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_SECURE);
 	enable.enable = !!ctrl->val;
 
+	if (enable.enable) {
+		if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+			inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC)) {
+			dprintk(VIDC_ERR,
+				"%s: Secure mode only allowed for HEVC/H264\n",
+				__func__);
+			return -EINVAL;
+		}
+	}
+
 	dprintk(VIDC_DBG, "%s: %d\n", __func__, enable.enable);
 	rc = call_hfi_op(hdev, session_set_property, inst->session,
 		HFI_PROPERTY_PARAM_SECURE_SESSION, &enable, sizeof(enable));
@@ -1994,6 +1976,12 @@
 	}
 	hdev = inst->core->device;
 
+	if (!inst->profile) {
+		dprintk(VIDC_ERR,
+			"%s: skip as client did not set profile\n",
+			__func__);
+		return -EINVAL;
+	}
 	profile_level.profile = inst->profile;
 	profile_level.level = inst->level;
 
@@ -2038,59 +2026,83 @@
 	return rc;
 }
 
-int msm_venc_set_intra_period(struct msm_vidc_inst *inst)
+void msm_venc_decide_bframe(struct msm_vidc_inst *inst)
 {
-	int rc = 0;
-	struct hfi_device *hdev;
+	u32 width = inst->prop.width[OUTPUT_PORT];
+	u32 height = inst->prop.height[OUTPUT_PORT];
+	u32 num_mbs_per_frame, num_mbs_per_sec;
 	struct v4l2_ctrl *ctrl;
-	struct hfi_intra_period intra_period;
+	struct v4l2_ctrl *bframe_ctrl;
+	struct msm_vidc_platform_resources *res;
 
-	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
+	res = &inst->core->resources;
+	bframe_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
+	num_mbs_per_frame = NUM_MBS_PER_FRAME(width, height);
+	if (num_mbs_per_frame > res->max_bframe_mbs_per_frame)
+		goto disable_bframe;
+
+	num_mbs_per_sec = num_mbs_per_frame *
+		(inst->clk_data.frame_rate >> 16);
+	if (num_mbs_per_sec > res->max_bframe_mbs_per_sec)
+		goto disable_bframe;
+
+	ctrl = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
+	if (ctrl->val > 1)
+		goto disable_bframe;
+
+	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT);
+	if (ctrl->val)
+		goto disable_bframe;
+
+	if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+		goto disable_bframe;
+
+	if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) {
+		ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_PROFILE);
+		if ((ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) &&
+			(ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_HIGH))
+			goto disable_bframe;
+	} else if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC)
+		goto disable_bframe;
+
+	if (inst->clk_data.low_latency_mode)
+		goto disable_bframe;
+
+	if (!bframe_ctrl->val) {
+		ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER);
+		if (ctrl->val) {
+			/*
+			 * Native recorder is enabled but bframe is not;
+			 * hence, forcefully enable bframe.
+			 */
+			inst->prop.bframe_changed = true;
+			bframe_ctrl->val = MAX_NUM_B_FRAMES;
+			dprintk(VIDC_DBG, "Bframe is forcefully enabled\n");
+		} else {
+			/*
+			 * Native recorder is not enabled and the client
+			 * did not enable bframe either.
+			 */
+			goto disable_bframe;
+		}
 	}
-	hdev = inst->core->device;
+	dprintk(VIDC_DBG, "Bframe can be enabled!\n");
 
-	msm_venc_adjust_gop_size(inst);
-	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE);
-	intra_period.pframes = ctrl->val;
-
-	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
-	intra_period.bframes = ctrl->val;
-
-	dprintk(VIDC_DBG, "%s: %d %d\n", __func__, intra_period.pframes,
-		intra_period.bframes);
-	rc = call_hfi_op(hdev, session_set_property, inst->session,
-		HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD, &intra_period,
-		sizeof(intra_period));
-	if (rc) {
-		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
-		return rc;
+	return;
+disable_bframe:
+	if (bframe_ctrl->val) {
+		/*
+		 * Client wanted to enable bframe, but the
+		 * conditions to enable it are not met.
+		 * Hence, forcefully disable bframe.
+		 */
+		inst->prop.bframe_changed = true;
+		bframe_ctrl->val = 0;
+		dprintk(VIDC_DBG, "Bframe is forcefully disabled!\n");
+	} else {
+		dprintk(VIDC_DBG, "Bframe is disabled\n");
 	}
-
-	return rc;
-}
-
-int msm_venc_set_request_keyframe(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	dprintk(VIDC_DBG, "%s\n", __func__);
-	rc = call_hfi_op(hdev, session_set_property, inst->session,
-		HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME, NULL, 0);
-	if (rc) {
-		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
-		return rc;
-	}
-
-	return rc;
 }
 
 int msm_venc_set_adaptive_bframes(struct msm_vidc_inst *inst)
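msm_venc_decide_bframe() above gates B-frame usage on macroblock throughput, hierarchical layers, LTR count, rate-control mode, codec, and low-latency mode before force-enabling or force-disabling the control. The numeric part of that decision condenses to a predicate like the following, where the limit parameters stand in for the values read from inst->core->resources:

#include <stdbool.h>
#include <stdint.h>

#define NUM_MBS(w, h)	((((w) + 15) >> 4) * (((h) + 15) >> 4))

/* Condensed B-frame gate; the codec check (H.264 main/high profile or
 * HEVC only) is omitted to keep the sketch focused on the numeric limits. */
static bool bframe_allowed(uint32_t width, uint32_t height, uint32_t fps,
			   uint32_t max_mbs_per_frame,
			   uint32_t max_mbs_per_sec,
			   uint32_t max_hier_layers, uint32_t ltr_count,
			   bool vbr_mode, bool low_latency)
{
	uint32_t mbpf = NUM_MBS(width, height);

	if (mbpf > max_mbs_per_frame || mbpf * fps > max_mbs_per_sec)
		return false;	/* resolution/rate beyond the B-frame budget */
	if (max_hier_layers > 1 || ltr_count)
		return false;	/* layered or LTR encoding excludes B-frames */
	if (!vbr_mode || low_latency)
		return false;	/* only plain VBR, and never low-latency */
	return true;
}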
@@ -2116,11 +2128,54 @@
 	return rc;
 }
 
-int msm_venc_set_rate_control(struct msm_vidc_inst *inst)
+void msm_venc_adjust_gop_size(struct msm_vidc_inst *inst)
+{
+	struct v4l2_ctrl *hier_ctrl;
+	struct v4l2_ctrl *bframe_ctrl;
+	struct v4l2_ctrl *gop_size_ctrl;
+
+	gop_size_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE);
+	if (inst->prop.bframe_changed) {
+		/*
+		 * The bframe setting was forcefully changed above.
+		 * Hence, adjust the GOP size accordingly.
+		 */
+		bframe_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
+		if (!bframe_ctrl->val)
+			/* Forcefully disabled */
+			gop_size_ctrl->val = gop_size_ctrl->val *
+					(1 + MAX_NUM_B_FRAMES);
+		else
+			/* Forcefully enabled */
+			gop_size_ctrl->val = gop_size_ctrl->val /
+					(1 + MAX_NUM_B_FRAMES);
+	}
+
+	/*
+	 * Layer encoding needs GOP size to be multiple of subgop size
+	 * And subgop size is 2 ^ number of enhancement layers
+	 */
+	hier_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
+	if (hier_ctrl->val > 1) {
+		u32 min_gop_size;
+		u32 num_subgops;
+
+		min_gop_size = (1 << (hier_ctrl->val - 1));
+		num_subgops = (gop_size_ctrl->val + (min_gop_size >> 1)) /
+				min_gop_size;
+		if (num_subgops)
+			gop_size_ctrl->val = num_subgops * min_gop_size;
+		else
+			gop_size_ctrl->val = min_gop_size;
+	}
+}
+
+int msm_venc_set_intra_period(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	u32 hfi_rc;
+	struct v4l2_ctrl *ctrl;
+	struct hfi_intra_period intra_period;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
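The reworked msm_venc_adjust_gop_size() above first compensates the GOP size when the B-frame setting was force-toggled, then rounds it to the nearest multiple of the subgop size, 1 << (layers - 1). The rounding step in isolation: with three layers the subgop is 4, so a GOP of 30 rounds to 32 and a GOP of 29 to 28.

#include <stdint.h>

/* Round the GOP size to the nearest multiple of the subgop size, never
 * below one full subgop, mirroring msm_venc_adjust_gop_size(). */
static uint32_t adjust_gop(uint32_t gop, uint32_t hier_layers)
{
	uint32_t subgop, num_subgops;

	if (hier_layers <= 1)
		return gop;

	subgop = 1u << (hier_layers - 1);
	num_subgops = (gop + (subgop >> 1)) / subgop;	/* nearest multiple */
	return num_subgops ? num_subgops * subgop : subgop;
}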
@@ -2128,6 +2183,116 @@
 	}
 	hdev = inst->core->device;
 
+	msm_venc_adjust_gop_size(inst);
+
+	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE);
+	intra_period.pframes = ctrl->val;
+
+	/*
+	 * At this point the bframe decision has already been made;
+	 * the control value now holds the updated bframe count.
+	 */
+	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
+	intra_period.bframes = ctrl->val;
+
+	dprintk(VIDC_DBG, "%s: %d %d\n", __func__, intra_period.pframes,
+		intra_period.bframes);
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD, &intra_period,
+		sizeof(intra_period));
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+		return rc;
+	}
+
+	if (intra_period.bframes) {
+		/* Enable adaptive bframes as num bframes != 0 */
+		rc = msm_venc_set_adaptive_bframes(inst);
+		if (rc) {
+			dprintk(VIDC_ERR, "%s: set property failed\n",
+				__func__);
+			return rc;
+		}
+	}
+	return rc;
+}
+
+int msm_venc_set_request_keyframe(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	dprintk(VIDC_DBG, "%s\n", __func__);
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME, NULL, 0);
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+		return rc;
+	}
+
+	return rc;
+}
+
+int msm_venc_set_rate_control(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	u32 hfi_rc, codec;
+	u32 height, width, mbpf;
+	struct hfi_vbv_hrd_buf_size hrd_buf_size;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+	inst->clk_data.is_cbr_plus = false;
+	codec = inst->fmts[CAPTURE_PORT].fourcc;
+	height = inst->prop.height[OUTPUT_PORT];
+	width = inst->prop.width[OUTPUT_PORT];
+	mbpf = NUM_MBS_PER_FRAME(height, width);
+
+	if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR)
+		inst->rc_type = V4L2_MPEG_VIDEO_BITRATE_MODE_MBR;
+	else if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR &&
+			   inst->clk_data.low_latency_mode)
+		inst->rc_type = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
+
+	if ((inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR ||
+		inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) &&
+		(codec != V4L2_PIX_FMT_VP8)) {
+		hrd_buf_size.vbv_hrd_buf_size = 500;
+		inst->clk_data.low_latency_mode = true;
+
+		if ((width > MIN_CBRPLUS_W && height > MIN_CBRPLUS_H) ||
+			(width > MIN_CBRPLUS_H && height > MIN_CBRPLUS_W) ||
+			mbpf > NUM_MBS_PER_FRAME(720, 1280)) {
+			hrd_buf_size.vbv_hrd_buf_size = 1000;
+			inst->clk_data.is_cbr_plus = true;
+		}
+
+		dprintk(VIDC_DBG, "Set hrd_buf_size %d",
+				hrd_buf_size.vbv_hrd_buf_size);
+
+		rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session,
+			HFI_PROPERTY_CONFIG_VENC_VBV_HRD_BUF_SIZE,
+			(void *)&hrd_buf_size, sizeof(hrd_buf_size));
+		if (rc) {
+			dprintk(VIDC_ERR, "%s: set HRD_BUF_SIZE %u failed\n",
+					__func__,
+					hrd_buf_size.vbv_hrd_buf_size);
+			inst->clk_data.is_cbr_plus = false;
+		}
+	}
+
 	switch (inst->rc_type) {
 	case RATE_CONTROL_OFF:
 		hfi_rc = HFI_RATE_CONTROL_OFF;
@@ -2144,9 +2309,6 @@
 	case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR:
 		hfi_rc = HFI_RATE_CONTROL_CBR_VFR;
 		break;
-	case V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR:
-		hfi_rc = HFI_RATE_CONTROL_MBR_VFR;
-		break;
 	case V4L2_MPEG_VIDEO_BITRATE_MODE_CQ:
 		hfi_rc = HFI_RATE_CONTROL_CQ;
 		break;
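The HRD sizing block earlier in this function picks between a 500 ms and a 1000 ms VBV/HRD buffer: anything beyond 720p in either orientation (or by macroblock count) is treated as CBR-plus. A sketch under stated assumptions — 1280x720 stands in for the driver's MIN_CBRPLUS_W/MIN_CBRPLUS_H thresholds, whose actual values are not shown here:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed to match the driver's macro: 16x16 macroblocks per frame. */
    #define NUM_MBS(h, w) ((((h) + 15) >> 4) * (((w) + 15) >> 4))

    static uint32_t pick_hrd_buf_size(uint32_t w, uint32_t h)
    {
        const uint32_t min_w = 1280, min_h = 720;   /* assumed thresholds */

        if ((w > min_w && h > min_h) || (w > min_h && h > min_w) ||
            NUM_MBS(h, w) > NUM_MBS(720, 1280))
            return 1000;    /* CBR-plus */
        return 500;
    }

    int main(void)
    {
        /* 1080p trips the CBR-plus path; 720p stays at 500 ms */
        printf("%u %u\n", pick_hrd_buf_size(1920, 1080),
               pick_hrd_buf_size(1280, 720));
        return 0;
    }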
@@ -2180,11 +2342,12 @@
 	}
 	hdev = inst->core->device;
 
-	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE);
+	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_RC_TIMESTAMP_DISABLE);
 	/*
-	 * 0 - rate control considers buffer timestamps
-	 * 1 - rate control igonres buffer timestamp and
-	 *     calculates timedelta based on frame rate
+	 * HFI values:
+	 * 0 - time delta is calculated based on buffer timestamp
+	 * 1 - ignores buffer timestamp and fw derives time delta based
+	 *     on input frame rate.
 	 */
 	enable.enable = !!ctrl->val;
 
@@ -2204,6 +2367,7 @@
 	struct hfi_device *hdev;
 	struct v4l2_ctrl *ctrl;
 	struct hfi_bitrate bitrate;
+	struct hfi_enable enable;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
@@ -2211,6 +2375,22 @@
 	}
 	hdev = inst->core->device;
 
+	if (inst->layer_bitrate) {
+		dprintk(VIDC_DBG, "%s: Layer bitrate is enabled\n", __func__);
+		return 0;
+	}
+
+	enable.enable = 0;
+	dprintk(VIDC_DBG, "%s: bitrate type: %d\n",
+		__func__, enable.enable);
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE, &enable,
+		sizeof(enable));
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+		return rc;
+	}
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE);
 	bitrate.bit_rate = ctrl->val;
 	bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
@@ -2224,6 +2404,112 @@
 	return rc;
 }
 
+int msm_venc_set_layer_bitrate(struct msm_vidc_inst *inst)
+{
+	int rc = 0, i = 0;
+	struct hfi_device *hdev;
+	struct v4l2_ctrl *bitrate = NULL;
+	struct v4l2_ctrl *layer = NULL;
+	struct v4l2_ctrl *max_layer = NULL;
+	struct v4l2_ctrl *layer_br_ratios[MAX_HIER_CODING_LAYER] = {NULL};
+	struct hfi_bitrate layer_br;
+	struct hfi_enable enable;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	max_layer = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
+	layer = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
+
+	if (!max_layer->val || !layer->val) {
+		dprintk(VIDC_DBG,
+			"%s: Hierp layer not set. Ignore layer bitrate\n",
+			__func__);
+		goto error;
+	}
+
+	if (max_layer->val < layer->val) {
+		dprintk(VIDC_DBG,
+			"%s: Hierp layer greater than max isn't allowed\n",
+			__func__);
+		goto error;
+	}
+
+	layer_br_ratios[0] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR);
+	layer_br_ratios[1] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR);
+	layer_br_ratios[2] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR);
+	layer_br_ratios[3] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR);
+	layer_br_ratios[4] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR);
+	layer_br_ratios[5] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR);
+
+	/*
+	 * Set layer bitrates only when the highest layer's cumulative
+	 * ratio is 100 and the L0 ratio is non-zero.
+	 */
+	if (layer_br_ratios[layer->val-1]->val != MAX_BIT_RATE_RATIO ||
+		layer_br_ratios[0]->val == 0) {
+		dprintk(VIDC_DBG,
+			"%s: Improper layer bitrate ratio\n",
+			__func__);
+		goto error;
+	}
+
+	for (i = layer->val - 1; i > 0; --i) {
+		if (layer_br_ratios[i]->val == 0) {
+			dprintk(VIDC_DBG,
+				"%s: Layer ratio must be non-zero\n",
+				__func__);
+			goto error;
+		}
+		layer_br_ratios[i]->val -= layer_br_ratios[i-1]->val;
+	}
+
+	enable.enable = 1;
+	dprintk(VIDC_DBG, "%s: %d\n", __func__, enable.enable);
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE, &enable,
+		sizeof(enable));
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+		goto error;
+	}
+
+	bitrate = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE);
+	for (i = 0; i < layer->val; ++i) {
+		layer_br.bit_rate =
+			bitrate->val * layer_br_ratios[i]->val / 100;
+		layer_br.layer_id = i;
+		dprintk(VIDC_DBG,
+			"%s: Bitrate for Layer[%u]: [%u]\n",
+			__func__, layer_br.layer_id, layer_br.bit_rate);
+
+		rc = call_hfi_op(hdev, session_set_property, inst->session,
+			HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE, &layer_br,
+			sizeof(layer_br));
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: set property failed for layer: %u\n",
+				__func__, layer_br.layer_id);
+			goto error;
+		}
+	}
+
+	inst->layer_bitrate = true;
+	return rc;
+
+error:
+	inst->layer_bitrate = false;
+	return rc;
+}
+
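msm_venc_set_layer_bitrate() above expects *cumulative* ratios from the client (the highest layer must be 100) and converts them in place into per-layer shares before scaling the total bitrate. A worked sketch with illustrative numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t total = 6000000;               /* 6 Mbps, illustrative */
        uint32_t ratio[3] = { 25, 50, 100 };    /* cumulative; top == 100 */
        int i;

        for (i = 2; i > 0; --i)                 /* cumulative -> per-layer */
            ratio[i] -= ratio[i - 1];           /* becomes { 25, 25, 50 } */

        for (i = 0; i < 3; ++i)                 /* 1.5, 1.5, 3.0 Mbps */
            printf("L%d: %u bps\n", i, total * ratio[i] / 100);
        return 0;
    }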
 int msm_venc_set_frame_qp(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -2439,35 +2725,92 @@
 	struct v4l2_ctrl *ctrl_t;
 	struct hfi_multi_slice_control multi_slice_control;
 	int temp = 0;
+	u32 mb_per_frame, fps, mbps, bitrate, max_slices;
+	u32 slice_val, slice_mode, max_avg_slicesize;
+	u32 rc_mode, output_width, output_height;
+	struct v4l2_ctrl *rc_enable;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
-	hdev = inst->core->device;
 
 	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC &&
 		inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264)
 		return 0;
 
+	slice_mode = HFI_MULTI_SLICE_OFF;
+	slice_val = 0;
+
+	bitrate = inst->clk_data.bitrate;
+	fps = inst->clk_data.frame_rate;
+	rc_mode = inst->rc_type;
+	rc_enable = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE);
+	if (fps > 60 ||
+		(rc_enable->val &&
+		 rc_mode != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR &&
+		 rc_mode != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)) {
+		goto set_and_exit;
+	}
+
+	output_width = inst->prop.width[OUTPUT_PORT];
+	output_height = inst->prop.height[OUTPUT_PORT];
+
+	if (output_height < 128 ||
+		(inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC &&
+		 output_width < 384) ||
+		(inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 &&
+		 output_width < 192)) {
+		goto set_and_exit;
+	}
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
-	multi_slice_control.multi_slice = HFI_MULTI_SLICE_OFF;
-	temp = 0;
 	if (ctrl->val == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
 		temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB;
-		multi_slice_control.multi_slice = HFI_MULTI_SLICE_BY_MB_COUNT;
+		slice_mode = HFI_MULTI_SLICE_BY_MB_COUNT;
 	} else if (ctrl->val == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
 		temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES;
-		multi_slice_control.multi_slice =
-			HFI_MULTI_SLICE_BY_BYTE_COUNT;
+		slice_mode = HFI_MULTI_SLICE_BY_BYTE_COUNT;
+	} else {
+		goto set_and_exit;
 	}
 
-	multi_slice_control.slice_size = 0;
-	if (temp) {
-		ctrl_t = get_ctrl(inst, temp);
-		multi_slice_control.slice_size = ctrl_t->val;
+	ctrl_t = get_ctrl(inst, temp);
+	slice_val = ctrl_t->val;
+
+	/* Update Slice Config */
+	mb_per_frame = NUM_MBS_PER_FRAME(output_height, output_width);
+	mbps = NUM_MBS_PER_SEC(output_height, output_width, fps);
+
+	if (slice_mode == HFI_MULTI_SLICE_BY_MB_COUNT) {
+		if (output_width <= 4096 || output_height <= 4096 ||
+			mb_per_frame <= NUM_MBS_PER_FRAME(4096, 2160) ||
+			mbps <= NUM_MBS_PER_SEC(4096, 2160, 60)) {
+			max_slices = inst->capability.cap[CAP_SLICE_MB].max ?
+				inst->capability.cap[CAP_SLICE_MB].max : 1;
+			slice_val = max(slice_val, mb_per_frame / max_slices);
+		}
+	} else {
+		if (output_width <= 1920 || output_height <= 1920 ||
+			mb_per_frame <= NUM_MBS_PER_FRAME(1088, 1920) ||
+			mbps <= NUM_MBS_PER_SEC(1088, 1920, 60)) {
+			max_slices = inst->capability.cap[CAP_SLICE_BYTE].max ?
+				inst->capability.cap[CAP_SLICE_BYTE].max : 1;
+			max_avg_slicesize = ((bitrate / fps) / 8) / max_slices;
+			slice_val = max(slice_val, max_avg_slicesize);
+		}
 	}
 
+	if (slice_mode == HFI_MULTI_SLICE_OFF) {
+		ctrl->val = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE;
+		ctrl_t->val = 0;
+	}
+
+set_and_exit:
+	multi_slice_control.multi_slice = slice_mode;
+	multi_slice_control.slice_size = slice_val;
+
+	hdev = inst->core->device;
 	dprintk(VIDC_DBG, "%s: %d %d\n", __func__,
 			multi_slice_control.multi_slice,
 			multi_slice_control.slice_size);
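For byte-count slicing above, the requested slice size is raised to at least the average encoded frame size divided by the maximum slice count the hardware reports. A sketch with illustrative numbers (max_slices = 8 stands in for CAP_SLICE_BYTE's max, which is not shown here):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        uint32_t bitrate = 8000000, fps = 30;   /* illustrative */
        uint32_t max_slices = 8;                /* assumed hw cap */
        uint32_t requested = 2000;              /* bytes, from the ctrl */
        uint32_t avg = ((bitrate / fps) / 8) / max_slices;

        /* avg = (8000000 / 30) / 8 / 8 = 4166: the request is raised */
        printf("slice size: %u bytes\n", MAX(requested, avg));
        return 0;
    }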
@@ -2484,7 +2827,8 @@
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	struct v4l2_ctrl *ctrl;
+	struct v4l2_ctrl *ctrl = NULL;
+	struct v4l2_ctrl *rc_mode = NULL;
 	struct hfi_intra_refresh intra_refresh;
 
 	if (!inst || !inst->core) {
@@ -2493,16 +2837,29 @@
 	}
 	hdev = inst->core->device;
 
+	rc_mode = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE_MODE);
+	if (!(rc_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR ||
+		rc_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR))
+		return 0;
+
+	/* Firmware supports only random mode */
+	intra_refresh.mode = HFI_INTRA_REFRESH_RANDOM;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM);
 	intra_refresh.mbs = 0;
 	if (ctrl->val) {
-		/* ignore cyclic mode if random mode is set */
-		intra_refresh.mode = HFI_INTRA_REFRESH_RANDOM;
-		intra_refresh.mbs = ctrl->val;
+		u32 num_mbs_per_frame = 0;
+		u32 width = inst->prop.width[CAPTURE_PORT];
+		u32 height = inst->prop.height[CAPTURE_PORT];
+
+		num_mbs_per_frame = NUM_MBS_PER_FRAME(height, width);
+		intra_refresh.mbs = num_mbs_per_frame / ctrl->val;
+		if (num_mbs_per_frame % ctrl->val) {
+			intra_refresh.mbs++;
+		}
 	} else {
 		ctrl = get_ctrl(inst,
 			V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB);
-		intra_refresh.mode = HFI_INTRA_REFRESH_CYCLIC;
 		intra_refresh.mbs = ctrl->val;
 	}
 	if (!intra_refresh.mbs) {
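The random intra-refresh math above converts a refresh period in frames into a per-frame MB count, rounding up so the whole frame is covered within the period. The increment-on-remainder is equivalent to the usual ceiling division, sketched here:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t ir_mbs_per_frame(uint32_t num_mbs, uint32_t period)
    {
        /* ceiling division; same result as the mbs++ on remainder above */
        return (num_mbs + period - 1) / period;
    }

    int main(void)
    {
        /* 1080p has 8160 MBs; refreshing over 50 frames needs 164/frame
         * (163 would leave part of the frame uncovered) */
        printf("%u\n", ir_mbs_per_frame(8160, 50));
        return 0;
    }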
@@ -2521,6 +2878,37 @@
 	return rc;
 }
 
+int msm_venc_set_bitrate_savings_mode(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct v4l2_ctrl *ctrl = NULL;
+	struct hfi_enable enable;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS);
+	enable.enable = !!ctrl->val;
+	if (!ctrl->val && inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) {
+		dprintk(VIDC_DBG,
+			"Can't disable bitrate savings for non-VBR_CFR\n");
+		enable.enable = 1;
+	}
+
+	dprintk(VIDC_DBG, "%s: %d\n", __func__, enable.enable);
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HFI_PROPERTY_PARAM_VENC_BITRATE_SAVINGS, &enable,
+		sizeof(enable));
+	if (rc)
+		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+
+	return rc;
+}
+
 int msm_venc_set_loop_filter_mode(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -2536,6 +2924,9 @@
 	}
 	hdev = inst->core->device;
 
+	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264)
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE);
 	ctrl_a = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA);
 	ctrl_b = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA);
@@ -2570,6 +2961,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR);
 	if (ctrl->val)
 		enable.enable = true;
@@ -2599,6 +2994,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER);
 	enable.enable = !!ctrl->val;
 
@@ -2628,10 +3027,6 @@
 	if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
 		return 0;
 
-	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
-	if (ctrl->val)
-		return 0;
-
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT);
 	if (ctrl->val)
 		return 0;
@@ -2645,6 +3040,8 @@
 	/*
 	 * Hybrid HP is enabled only for H264 when
 	 * LTR and B-frame are both disabled,
+	 * Layer encoding takes priority over B-frames;
+	 * hence, there is no need to check for B-frames here.
 	 * Rate control type is VBR and
 	 * Max layer equals layer count.
 	 */
@@ -2654,11 +3051,12 @@
 	return 0;
 }
 
-int msm_venc_set_base_layer_id(struct msm_vidc_inst *inst)
+int msm_venc_set_base_layer_priority_id(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	struct v4l2_ctrl *ctrl;
+	struct v4l2_ctrl *ctrl = NULL;
+	struct v4l2_ctrl *max_layer = NULL;
 	u32 baselayerid;
 
 	if (!inst || !inst->core) {
@@ -2667,9 +3065,13 @@
 	}
 	hdev = inst->core->device;
 
-	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 &&
-		inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC)
+	max_layer = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
+	if (max_layer->val <= 0) {
+		dprintk(VIDC_DBG, "%s: Layer id can only be set with Hierp\n",
+			__func__);
 		return 0;
+	}
 
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID);
 	baselayerid = ctrl->val;
@@ -2719,10 +3121,14 @@
 		hp_layer = ctrl->val - 1;
 
 	if (inst->hybrid_hp) {
+		dprintk(VIDC_DBG, "%s: Hybrid hierp layer: %d\n",
+			__func__, hp_layer);
 		rc = call_hfi_op(hdev, session_set_property, inst->session,
 			HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE,
 			&hp_layer, sizeof(hp_layer));
 	} else {
+		dprintk(VIDC_DBG, "%s: Hierp max layer: %d\n",
+			__func__, hp_layer);
 		rc = call_hfi_op(hdev, session_set_property, inst->session,
 			HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER,
 			&hp_layer, sizeof(hp_layer));
@@ -2738,6 +3144,7 @@
 	int rc = 0;
 	struct hfi_device *hdev;
 	struct v4l2_ctrl *ctrl = NULL;
+	struct v4l2_ctrl *max_layer = NULL;
 	u32 hp_layer = 0;
 
 	if (!inst || !inst->core) {
@@ -2757,8 +3164,18 @@
 		return 0;
 	}
 
+	max_layer = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
 	ctrl = get_ctrl(inst,
 		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
+
+	if (max_layer->val < ctrl->val) {
+		dprintk(VIDC_WARN,
+			"%s: HP layer count greater than max isn't allowed\n",
+			__func__);
+		return 0;
+	}
+
 	/*
 	 * We send enhancement layer count to FW,
 	 * hence, input 0/1 indicates absence of layer encoding.
@@ -2766,7 +3183,8 @@
 	if (ctrl->val)
 		hp_layer = ctrl->val - 1;
 
-	dprintk(VIDC_DBG, "%s: HP layer: %d\n", __func__, hp_layer);
+	dprintk(VIDC_DBG, "%s: Hierp enhancement layer: %d\n",
+		__func__, hp_layer);
 	rc = call_hfi_op(hdev, session_set_property, inst->session,
 		HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER,
 		&hp_layer, sizeof(hp_layer));
@@ -2822,7 +3240,8 @@
 	}
 	hdev = inst->core->device;
 
-	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264)
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC))
 		return 0;
 
 	ctrl_cs = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
@@ -2853,6 +3272,71 @@
 	return rc;
 }
 
+int msm_venc_set_rotation(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct v4l2_ctrl *rotation = NULL;
+	struct v4l2_ctrl *hflip = NULL;
+	struct v4l2_ctrl *vflip = NULL;
+	struct hfi_device *hdev;
+	struct hfi_vpe_rotation_type vpe_rotation;
+	struct hfi_frame_size frame_sz;
+
+	hdev = inst->core->device;
+
+	rotation = get_ctrl(inst, V4L2_CID_ROTATE);
+
+	vpe_rotation.rotation = HFI_ROTATE_NONE;
+	if (rotation->val == 90)
+		vpe_rotation.rotation = HFI_ROTATE_90;
+	else if (rotation->val == 180)
+		vpe_rotation.rotation = HFI_ROTATE_180;
+	else if (rotation->val ==  270)
+		vpe_rotation.rotation = HFI_ROTATE_270;
+
+	hflip = get_ctrl(inst, V4L2_CID_HFLIP);
+	vflip = get_ctrl(inst, V4L2_CID_VFLIP);
+
+	vpe_rotation.flip = HFI_FLIP_NONE;
+	if ((hflip->val == V4L2_MPEG_MSM_VIDC_ENABLE) &&
+		(vflip->val == V4L2_MPEG_MSM_VIDC_ENABLE))
+		vpe_rotation.flip = HFI_FLIP_HORIZONTAL | HFI_FLIP_VERTICAL;
+	else if (hflip->val == V4L2_MPEG_MSM_VIDC_ENABLE)
+		vpe_rotation.flip = HFI_FLIP_HORIZONTAL;
+	else if (vflip->val == V4L2_MPEG_MSM_VIDC_ENABLE)
+		vpe_rotation.flip = HFI_FLIP_VERTICAL;
+
+	dprintk(VIDC_DBG, "Set rotation = %d, flip = %d\n",
+			vpe_rotation.rotation, vpe_rotation.flip);
+	rc = call_hfi_op(hdev, session_set_property,
+				(void *)inst->session,
+				HFI_PROPERTY_PARAM_VPE_ROTATION,
+				&vpe_rotation, sizeof(vpe_rotation));
+	if (rc) {
+		dprintk(VIDC_ERR, "Set rotation/flip failed\n");
+		return rc;
+	}
+
+	/* flip the output resolution if required */
+	if (vpe_rotation.rotation == HFI_ROTATE_90 ||
+		vpe_rotation.rotation == HFI_ROTATE_270) {
+		frame_sz.buffer_type = HFI_BUFFER_OUTPUT;
+		frame_sz.width = inst->prop.height[CAPTURE_PORT];
+		frame_sz.height = inst->prop.width[CAPTURE_PORT];
+		dprintk(VIDC_DBG, "CAPTURE port width = %d, height = %d\n",
+			frame_sz.width, frame_sz.height);
+		rc = call_hfi_op(hdev, session_set_property, (void *)
+			inst->session, HFI_PROPERTY_PARAM_FRAME_SIZE,
+			&frame_sz, sizeof(frame_sz));
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to set framesize\n");
+			return rc;
+		}
+	}
+	return rc;
+}
+
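msm_venc_set_rotation() above must also resize the output: a 90- or 270-degree rotation swaps the encoded width and height before the frame-size property is sent. A minimal sketch of that swap:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t rotation = 90;             /* from V4L2_CID_ROTATE */
        uint32_t w = 1920, h = 1080;

        if (rotation == 90 || rotation == 270) {
            uint32_t tmp = w;               /* output is rotated: swap */

            w = h;
            h = tmp;
        }
        printf("output %ux%u\n", w, h);     /* 1080x1920 */
        return 0;
    }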
 int msm_venc_set_video_csc(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -2892,8 +3376,7 @@
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	struct v4l2_ctrl *ctrl;
-	struct v4l2_ctrl *profile;
+	struct v4l2_ctrl *ctrl = NULL;
 	struct hfi_enable enable;
 
 	if (!inst || !inst->core) {
@@ -2902,16 +3385,17 @@
 	}
 	hdev = inst->core->device;
 
-	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 &&
-		inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC)
+	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264) {
+		dprintk(VIDC_DBG, "%s: skip due to %#x\n",
+			__func__, inst->fmts[CAPTURE_PORT].fourcc);
 		return 0;
+	}
 
-	if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) {
-		profile = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_PROFILE);
-		if (profile->val == V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE ||
-			profile->val ==
-			V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE)
-			return 0;
+	if (inst->profile != HFI_H264_PROFILE_HIGH &&
+		inst->profile != HFI_H264_PROFILE_CONSTRAINED_HIGH) {
+		dprintk(VIDC_DBG, "%s: skip due to %#x\n",
+			__func__, inst->profile);
+		return 0;
 	}
 
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM);
@@ -3036,12 +3520,17 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT);
 	if (!ctrl->val)
 		return 0;
-	if (ctrl->val > inst->capability.ltr_count.max) {
+	if (ctrl->val > inst->capability.cap[CAP_LTR_COUNT].max) {
 		dprintk(VIDC_ERR, "%s: invalid ltr count %d, max %d\n",
-			__func__, ctrl->val, inst->capability.ltr_count.max);
+			__func__, ctrl->val,
+			inst->capability.cap[CAP_LTR_COUNT].max);
 		return -EINVAL;
 	}
 	ltr.ltr_count =  ctrl->val;
@@ -3070,6 +3559,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME);
 	use_ltr.ref_ltr = ctrl->val;
 	use_ltr.use_constrnt = false;
@@ -3097,6 +3590,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME);
 	mark_ltr.mark_frame = ctrl->val;
 
@@ -3161,6 +3658,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH);
 	if (!ctrl->val)
 		return 0;
@@ -3211,6 +3712,7 @@
 int msm_venc_set_hdr_info(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
+	struct v4l2_ctrl *profile = NULL;
 	struct hfi_device *hdev;
 
 	if (!inst || !inst->core) {
@@ -3219,6 +3721,13 @@
 	}
 	hdev = inst->core->device;
 
+	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC)
+		return 0;
+
+	profile = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_PROFILE);
+	if (profile->val != V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)
+		return 0;
+
 	/* No conversion to HFI needed as both structures are same */
 	dprintk(VIDC_DBG, "%s: setting hdr info\n", __func__);
 	rc = call_hfi_op(hdev, session_set_property, inst->session,
@@ -3233,11 +3742,10 @@
 int msm_venc_set_extradata(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
-	struct v4l2_ctrl *ctrl;
 	struct v4l2_ctrl *cvp_ctrl;
+	u32 value = 0x0;
 
-	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
-	if (ctrl->val == EXTRADATA_NONE) {
+	if (inst->prop.extradata_ctrls == EXTRADATA_NONE) {
 		// Disable all Extradata
 		msm_comm_set_index_extradata(inst,
 			MSM_VIDC_EXTRADATA_ASPECT_RATIO, 0x0);
@@ -3252,17 +3760,17 @@
 		}
 	}
 
-	if (ctrl->val & EXTRADATA_ADVANCED)
+	if (inst->prop.extradata_ctrls & EXTRADATA_ADVANCED)
 		// Enable Advanced Extradata - LTR Info
 		msm_comm_set_extradata(inst,
 			HFI_PROPERTY_PARAM_VENC_LTR_INFO, 0x1);
 
-	if (ctrl->val & EXTRADATA_ENC_INPUT_ROI)
+	if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_ROI)
 		// Enable ROIQP Extradata
 		msm_comm_set_extradata(inst,
 			HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA, 0x1);
 
-	if (ctrl->val & EXTRADATA_ENC_INPUT_HDR10PLUS) {
+	if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_HDR10PLUS) {
 		// Enable HDR10+ Extradata
 		if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC) {
 			msm_comm_set_extradata(inst,
@@ -3273,18 +3781,23 @@
 
 	cvp_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_CVP_DISABLE);
 	if (cvp_ctrl->val == V4L2_MPEG_MSM_VIDC_ENABLE) {
-		if (ctrl->val & EXTRADATA_ENC_INPUT_CVP) {
+		if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_CVP) {
 			dprintk(VIDC_ERR,
 				"%s: invalid params\n", __func__);
 			return -EINVAL;
 		}
-
-		rc = msm_comm_set_extradata(inst,
-			HFI_PROPERTY_PARAM_VENC_CVP_METADATA_EXTRADATA, 0x0);
 	} else {
-		rc = msm_comm_set_extradata(inst,
-			HFI_PROPERTY_PARAM_VENC_CVP_METADATA_EXTRADATA, 0x1);
+		/*
+		 * For now, enable CVP metadata only if client provides it.
+		 * Once the kernel-mode CVP metadata implementation
+		 * is completed, this condition should be removed.
+		 */
+		if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_CVP)
+			value = 0x1;
 	}
+	rc = msm_comm_set_extradata(inst,
+		HFI_PROPERTY_PARAM_VENC_CVP_METADATA_EXTRADATA, value);
 
 	return rc;
 }
@@ -3299,46 +3812,37 @@
 	rc = msm_venc_set_frame_rate(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_color_format(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_buffer_counts(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_operating_rate(inst);
-	if (rc)
-		goto exit;
 	rc = msm_venc_set_secure_mode(inst);
 	if (rc)
 		goto exit;
 	rc = msm_venc_set_priority(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_color_format(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_sequence_header_mode(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_profile_level(inst);
 	if (rc)
 		goto exit;
-	/*
-	 * set adaptive bframes before intra period as
-	 * intra period setting may enable adaptive bframes
-	 * if bframes are present (even though client might not
-	 * have enabled adaptive bframes setting)
-	 */
-	rc = msm_venc_set_adaptive_bframes(inst);
+	rc = msm_venc_set_8x8_transform(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_intra_period(inst);
+	rc = msm_venc_set_bitrate(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_idr_period(inst);
+	rc = msm_venc_set_entropy_mode(inst);
 	if (rc)
 		goto exit;
 	rc = msm_venc_set_rate_control(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_input_timestamp_rc(inst);
+	rc = msm_venc_set_bitrate_savings_mode(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_bitrate(inst);
+	rc = msm_venc_set_input_timestamp_rc(inst);
 	if (rc)
 		goto exit;
 	rc = msm_venc_set_frame_qp(inst);
@@ -3353,45 +3857,30 @@
 	rc = msm_venc_set_grid(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_entropy_mode(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_slice_control_mode(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_intra_refresh_mode(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_loop_filter_mode(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_sequence_header_mode(inst);
-	if (rc)
-		goto exit;
 	rc = msm_venc_set_au_delimiter_mode(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_base_layer_id(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_vpx_error_resilience(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_video_signal_info(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_video_csc(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_8x8_transform(inst);
-	if (rc)
-		goto exit;
 	rc = msm_venc_set_vui_timing_info(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_hdr_info(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_vpx_error_resilience(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_nal_stream_format(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_slice_control_mode(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_loop_filter_mode(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_intra_refresh_mode(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_ltr_mode(inst);
 	if (rc)
 		goto exit;
@@ -3401,16 +3890,48 @@
 	rc = msm_venc_set_hp_layer(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_base_layer_priority_id(inst);
+	if (rc)
+		goto exit;
+	msm_venc_decide_bframe(inst);
+	rc = msm_venc_set_idr_period(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_intra_period(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_aspect_ratio(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_video_signal_info(inst);
+	if (rc)
+		goto exit;
+	/*
+	 * Layer bitrate is preferred over cumulative bitrate.
+	 * Cumulative bitrate is set only when we fall back.
+	 */
+	rc = msm_venc_set_layer_bitrate(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_bitrate(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_video_csc(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_blur_resolution(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_hdr_info(inst);
+	rc = msm_venc_set_extradata(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_extradata(inst);
+	rc = msm_venc_set_operating_rate(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_buffer_counts(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_rotation(inst);
 	if (rc)
 		goto exit;
 
diff --git a/drivers/media/platform/msm/vidc/msm_venc.h b/drivers/media/platform/msm/vidc/msm_venc.h
index 227cd5b..9b45320 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.h
+++ b/drivers/media/platform/msm/vidc/msm_venc.h
@@ -22,6 +22,7 @@
 int msm_venc_set_extradata(struct msm_vidc_inst *inst);
 int msm_venc_set_frame_rate(struct msm_vidc_inst *inst);
 int msm_venc_set_bitrate(struct msm_vidc_inst *inst);
+int msm_venc_set_layer_bitrate(struct msm_vidc_inst *inst);
 int msm_venc_set_operating_rate(struct msm_vidc_inst *inst);
 int msm_venc_set_idr_period(struct msm_vidc_inst *inst);
 int msm_venc_set_intra_period(struct msm_vidc_inst *inst);
@@ -32,4 +33,5 @@
 int msm_venc_set_intra_refresh_mode(struct msm_vidc_inst *inst);
 int msm_venc_set_hp_max_layer(struct msm_vidc_inst *inst);
 int msm_venc_set_hp_layer(struct msm_vidc_inst *inst);
+int msm_venc_set_base_layer_priority_id(struct msm_vidc_inst *inst);
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 39abb87..32feffd 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -122,91 +122,37 @@
 }
 EXPORT_SYMBOL(msm_vidc_enum_fmt);
 
-static void msm_vidc_ctrl_get_range(struct v4l2_queryctrl *ctrl,
-	struct hal_capability_supported *capability)
-
+int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *q_ctrl)
 {
-	ctrl->maximum = capability->max;
-	ctrl->minimum = capability->min;
-}
-
-int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl)
-{
+	int rc = 0;
 	struct msm_vidc_inst *inst = instance;
-	struct hal_profile_level_supported *prof_lev_supp;
-	struct hal_profile_level *prof_lev;
-	int rc = 0, profile_mask = 0, v4l2_prof_value = 0;
-	unsigned int i = 0, max_level = 0;
+	struct v4l2_ctrl *ctrl;
 
-	if (!inst || !ctrl)
+	if (!inst || !q_ctrl) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
-
-	switch (ctrl->id) {
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
-	case  V4L2_CID_MPEG_VIDEO_BITRATE:
-		msm_vidc_ctrl_get_range(ctrl, &inst->capability.bitrate);
-		break;
-	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
-		msm_vidc_ctrl_get_range(ctrl, &inst->capability.bframe);
-		break;
-	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
-		msm_vidc_ctrl_get_range(ctrl, &inst->capability.slice_mbs);
-		break;
-	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
-		msm_vidc_ctrl_get_range(ctrl, &inst->capability.slice_bytes);
-		break;
-	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
-		msm_vidc_ctrl_get_range(ctrl, &inst->capability.frame_rate);
-		break;
-	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
-	case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
-	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
-	case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
-	{
-		prof_lev_supp = &inst->capability.profile_level;
-		for (i = 0; i < prof_lev_supp->profile_count; i++) {
-			v4l2_prof_value = msm_comm_hfi_to_v4l2(ctrl->id,
-				prof_lev_supp->profile_level[i].profile);
-			if (v4l2_prof_value == -EINVAL) {
-				dprintk(VIDC_WARN, "Invalid profile");
-				rc = -EINVAL;
-			}
-			profile_mask |= (1 << v4l2_prof_value);
-		}
-		ctrl->flags = profile_mask;
-		break;
 	}
-	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
-	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
-	case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
-	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
-	case V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL:
-	{
-		prof_lev_supp = &inst->capability.profile_level;
-		for (i = 0; i < prof_lev_supp->profile_count; i++) {
-			prof_lev = &prof_lev_supp->profile_level[i];
-			if (max_level < prof_lev->level)
-				max_level = prof_lev->level;
-		}
 
-		if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_LEVEL)
-			max_level &= ~(0xF << 28);
+	ctrl = v4l2_ctrl_find(&inst->ctrl_handler, q_ctrl->id);
+	if (!ctrl) {
+		dprintk(VIDC_ERR, "%s: get_ctrl failed for id %d\n",
+			__func__, q_ctrl->id);
+		return -EINVAL;
+	}
+	q_ctrl->minimum = ctrl->minimum;
+	q_ctrl->maximum = ctrl->maximum;
+	/* remove tier info for HEVC level */
+	if (q_ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_LEVEL) {
+		q_ctrl->minimum &= ~(0xF << 28);
+		q_ctrl->maximum &= ~(0xF << 28);
+	}
+	if (ctrl->type == V4L2_CTRL_TYPE_MENU)
+		q_ctrl->flags = ~(ctrl->menu_skip_mask);
+	else
+		q_ctrl->flags = 0;
 
-		ctrl->maximum = msm_comm_hfi_to_v4l2(ctrl->id, max_level);
-		if (ctrl->maximum == -EINVAL) {
-			dprintk(VIDC_WARN, "Invalid max level");
-			rc = -EINVAL;
-		}
-		break;
-	}
-	default:
-		rc = -EINVAL;
-	}
+	dprintk(VIDC_DBG, "query ctrl: %s: min %d, max %d, flags %#x\n",
+		ctrl->name, q_ctrl->minimum, q_ctrl->maximum, q_ctrl->flags);
 	return rc;
 }
 EXPORT_SYMBOL(msm_vidc_query_ctrl);
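The `~(0xF << 28)` masking above strips tier information packed into the top nibble of the HEVC level value before the plain level range is reported to userspace. A sketch; the packed value below is illustrative, not a real HFI constant:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* tier in bits 28..31, level in the low bits (illustrative) */
        uint32_t packed = (0x1u << 28) | 0x100;
        uint32_t level = packed & ~(0xFu << 28);

        printf("level without tier: %#x\n", level);     /* 0x100 */
        return 0;
    }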
@@ -596,12 +542,14 @@
 
 	capability = &inst->capability;
 	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
-	fsize->stepwise.min_width = capability->width.min;
-	fsize->stepwise.max_width = capability->width.max;
-	fsize->stepwise.step_width = capability->width.step_size;
-	fsize->stepwise.min_height = capability->height.min;
-	fsize->stepwise.max_height = capability->height.max;
-	fsize->stepwise.step_height = capability->height.step_size;
+	fsize->stepwise.min_width = capability->cap[CAP_FRAME_WIDTH].min;
+	fsize->stepwise.max_width = capability->cap[CAP_FRAME_WIDTH].max;
+	fsize->stepwise.step_width =
+		capability->cap[CAP_FRAME_WIDTH].step_size;
+	fsize->stepwise.min_height = capability->cap[CAP_FRAME_HEIGHT].min;
+	fsize->stepwise.max_height = capability->cap[CAP_FRAME_HEIGHT].max;
+	fsize->stepwise.step_height =
+		capability->cap[CAP_FRAME_HEIGHT].step_size;
 	return 0;
 }
 EXPORT_SYMBOL(msm_vidc_enum_framesizes);
@@ -794,141 +742,6 @@
 	return rc;
 }
 
-int msm_vidc_set_internal_config(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-	u32 rc_mode = RATE_CONTROL_OFF;
-	struct hfi_vbv_hdr_buf_size hrd_buf_size;
-	struct hfi_enable latency;
-	struct hfi_device *hdev;
-	u32 codec;
-	u32 mbps, fps;
-	u32 output_width, output_height;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->session_type != MSM_VIDC_ENCODER)
-		return rc;
-
-	hdev = inst->core->device;
-
-	codec = inst->fmts[CAPTURE_PORT].fourcc;
-	latency.enable =  msm_comm_g_ctrl_for_id(inst,
-			V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE);
-
-	if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR)
-		rc_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_MBR;
-	else if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR &&
-			   latency.enable == V4L2_MPEG_MSM_VIDC_ENABLE &&
-			   codec != V4L2_PIX_FMT_VP8)
-		rc_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
-
-	output_height = inst->prop.height[CAPTURE_PORT];
-	output_width = inst->prop.width[CAPTURE_PORT];
-	fps = inst->clk_data.frame_rate >> 16;
-	mbps = NUM_MBS_PER_SEC(output_height, output_width, fps);
-	if ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR ||
-		 rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) &&
-		(codec != V4L2_PIX_FMT_VP8)) {
-		if ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR &&
-		    mbps <= CBR_MB_LIMIT) ||
-		   (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR &&
-		    mbps <= CBR_VFR_MB_LIMIT))
-			hrd_buf_size.vbv_hdr_buf_size = 500;
-		else
-			hrd_buf_size.vbv_hdr_buf_size = 1000;
-		dprintk(VIDC_DBG, "Enable hdr_buf_size %d :\n",
-				hrd_buf_size.vbv_hdr_buf_size);
-		rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session,
-			HFI_PROPERTY_CONFIG_VENC_VBV_HRD_BUF_SIZE,
-			(void *)&hrd_buf_size, sizeof(hrd_buf_size));
-		inst->clk_data.low_latency_mode = true;
-	}
-
-	return rc;
-}
-
-static int msm_vidc_set_rotation(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-	int value = 0, hflip = 0, vflip = 0;
-	struct hfi_device *hdev;
-	struct hfi_vpe_rotation_type vpe_rotation;
-	struct hfi_frame_size frame_sz;
-
-	hdev = inst->core->device;
-
-	/* Set rotation and flip first */
-	value = msm_comm_g_ctrl_for_id(inst, V4L2_CID_ROTATE);
-	if (value < 0) {
-		dprintk(VIDC_ERR, "Get control for rotation failed\n");
-		return value;
-	}
-
-	vpe_rotation.rotation = HFI_ROTATE_NONE;
-	if (value == 90)
-		vpe_rotation.rotation = HFI_ROTATE_90;
-	else if (value == 180)
-		vpe_rotation.rotation = HFI_ROTATE_180;
-	else if (value ==  270)
-		vpe_rotation.rotation = HFI_ROTATE_270;
-
-	hflip = msm_comm_g_ctrl_for_id(inst, V4L2_CID_HFLIP);
-	if (hflip < 0) {
-		dprintk(VIDC_ERR, "Get control for hflip failed\n");
-		return value;
-	}
-
-	vflip = msm_comm_g_ctrl_for_id(inst, V4L2_CID_VFLIP);
-	if (vflip < 0) {
-		dprintk(VIDC_ERR, "Get control for vflip failed\n");
-		return value;
-	}
-
-	vpe_rotation.flip = HFI_FLIP_NONE;
-	if ((hflip == V4L2_MPEG_MSM_VIDC_ENABLE) &&
-		(vflip == V4L2_MPEG_MSM_VIDC_ENABLE))
-		vpe_rotation.flip = HFI_FLIP_HORIZONTAL | HFI_FLIP_VERTICAL;
-	else if (hflip == V4L2_MPEG_MSM_VIDC_ENABLE)
-		vpe_rotation.flip = HFI_FLIP_HORIZONTAL;
-	else if (vflip == V4L2_MPEG_MSM_VIDC_ENABLE)
-		vpe_rotation.flip = HFI_FLIP_VERTICAL;
-
-	dprintk(VIDC_DBG, "Set rotation = %d, flip = %d for capture port.\n",
-			vpe_rotation.rotation, vpe_rotation.flip);
-	rc = call_hfi_op(hdev, session_set_property,
-				(void *)inst->session,
-				HFI_PROPERTY_PARAM_VPE_ROTATION,
-				&vpe_rotation, sizeof(vpe_rotation));
-	if (rc) {
-		dprintk(VIDC_ERR, "Set rotation/flip at start stream failed\n");
-		return rc;
-	}
-
-	/* flip the output resolution if required */
-	if (vpe_rotation.rotation == HFI_ROTATE_90 ||
-		vpe_rotation.rotation == HFI_ROTATE_270) {
-		frame_sz.buffer_type = HFI_BUFFER_OUTPUT;
-		frame_sz.width = inst->prop.height[CAPTURE_PORT];
-		frame_sz.height = inst->prop.width[CAPTURE_PORT];
-		dprintk(VIDC_DBG, "CAPTURE port width = %d, height = %d\n",
-			frame_sz.width, frame_sz.height);
-		rc = call_hfi_op(hdev, session_set_property, (void *)
-			inst->session, HFI_PROPERTY_PARAM_FRAME_SIZE,
-			&frame_sz, sizeof(frame_sz));
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to set framesize for CAPTURE port\n");
-			return rc;
-		}
-	}
-	return rc;
-}
-
 static int msm_vidc_set_properties(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -959,15 +772,8 @@
 	}
 
 	b.buffer_type = HFI_BUFFER_OUTPUT;
-	if (inst->session_type == MSM_VIDC_ENCODER) {
-		rc = msm_vidc_set_rotation(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Set rotation for encoder failed %pK\n", inst);
-			goto fail_start;
-		}
-	} else if ((inst->session_type == MSM_VIDC_DECODER) &&
-			(is_secondary_output_mode(inst)))
+	if (inst->session_type == MSM_VIDC_DECODER &&
+		is_secondary_output_mode(inst))
 		b.buffer_type = HFI_BUFFER_OUTPUT2;
 
 	/* HEIC HW/FWK tiling encode is supported only for CQ RC mode */
@@ -994,14 +800,6 @@
 		goto fail_start;
 	}
 
-	rc = msm_vidc_set_internal_config(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Set internal config failed %pK\n", inst);
-		goto fail_start;
-	}
-
-
 	/* Decide work mode for current session */
 	rc = call_core_op(inst->core, decide_work_mode, inst);
 	if (rc) {
@@ -1019,7 +817,7 @@
 	}
 
 	/* Assign Core and LP mode for current session */
-	rc = msm_vidc_decide_core_and_power_mode(inst);
+	rc = call_core_op(inst->core, decide_core_and_power_mode, inst);
 	if (rc) {
 		dprintk(VIDC_ERR,
 			"This session can't be submitted to HW %pK\n", inst);
@@ -1550,6 +1348,7 @@
 				inst, v4l2_ctrl_get_name(ctrl->id));
 	return rc;
 }
+
 static int try_get_ctrl_for_instance(struct msm_vidc_inst *inst,
 	struct v4l2_ctrl *ctrl)
 {
@@ -1557,7 +1356,6 @@
 	struct hal_buffer_requirements *bufreq = NULL;
 
 	switch (ctrl->id) {
-
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
 		ctrl->val = msm_comm_hfi_to_v4l2(
 			V4L2_CID_MPEG_VIDEO_H264_PROFILE,
@@ -1586,8 +1384,6 @@
 			V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
 			inst->level);
 		break;
-
-
 	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
 		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
 		if (!bufreq) {
@@ -1609,72 +1405,23 @@
 					HAL_BUFFER_INPUT);
 			return -EINVAL;
 		}
-
 		ctrl->val = bufreq->buffer_count_min_host;
 		dprintk(VIDC_DBG, "g_min: %x : hal_buffer %d min buffers %d\n",
 			hash32_ptr(inst->session), HAL_BUFFER_INPUT, ctrl->val);
 		break;
-	case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
-		ctrl->val =
-		inst->capability.nal_stream_format.nal_stream_format_supported;
+	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+		ctrl->val = inst->prop.extradata_ctrls;
 		break;
 	default:
-		/*
-		 * Other controls aren't really volatile, shouldn't need to
-		 * modify ctrl->value
-		 */
 		break;
 	}
 
 	return rc;
 }
 
-static int msm_vidc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
-	int rc = 0;
-	unsigned int c = 0;
-	struct msm_vidc_inst *inst;
-	struct v4l2_ctrl *master;
-
-	if (!ctrl) {
-		dprintk(VIDC_ERR, "%s invalid parameters for ctrl\n", __func__);
-		return -EINVAL;
-	}
-
-	inst = container_of(ctrl->handler,
-		struct msm_vidc_inst, ctrl_handler);
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s invalid parameters for inst\n", __func__);
-		return -EINVAL;
-	}
-	master = ctrl->cluster[0];
-	if (!master) {
-		dprintk(VIDC_ERR, "%s invalid parameters for master\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	for (c = 0; c < master->ncontrols; ++c) {
-		if (master->cluster[c]->flags & V4L2_CTRL_FLAG_VOLATILE) {
-			rc = try_get_ctrl_for_instance(inst,
-				master->cluster[c]);
-			if (rc) {
-				dprintk(VIDC_ERR, "Failed getting %x\n",
-					master->cluster[c]->id);
-				return rc;
-			}
-		}
-	}
-	if (rc)
-		dprintk(VIDC_ERR, "Failed getting control: Inst = %pK (%s)\n",
-				inst, v4l2_ctrl_get_name(ctrl->id));
-	return rc;
-}
-
 static const struct v4l2_ctrl_ops msm_vidc_ctrl_ops = {
 
 	.s_ctrl = msm_vidc_op_s_ctrl,
-	.g_volatile_ctrl = msm_vidc_op_g_volatile_ctrl,
 };
 
 static struct msm_vidc_inst_smem_ops  msm_vidc_smem_ops = {
@@ -1741,8 +1488,6 @@
 	inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
 	inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
 	inst->colour_space = MSM_VIDC_BT601_6_525;
-	inst->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
-	inst->level = V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN;
 	inst->smem_ops = &msm_vidc_smem_ops;
 	inst->rc_type = RATE_CONTROL_OFF;
 	inst->dpb_extra_binfo = NULL;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
index f073a48..e862a23 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
@@ -238,6 +238,7 @@
 #define CCE_TILE_OFFSET_SIZE ALIGN(32 * 4 * 4, BUFFER_ALIGNMENT_SIZE(32))
 
 #define QMATRIX_SIZE (sizeof(u32) * 128 + 256)
+#define MP2D_QPDUMP_SIZE 115200
 
 #define HFI_IRIS2_ENC_PERSIST_SIZE 102400
 
@@ -252,6 +253,9 @@
 #define SYSTEM_LAL_TILE10 192
 #define NUM_MBS_720P (((1280 + 15) >> 4) * ((720 + 15) >> 4))
 #define NUM_MBS_4k (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
+#define MB_SIZE_IN_PIXEL (16 * 16)
+#define HDR10PLUS_PAYLOAD_SIZE 1024
+#define HDR10_HIST_EXTRADATA_SIZE 4096
 
 static inline u32 calculate_h264d_scratch_size(struct msm_vidc_inst *inst,
 	u32 width, u32 height, bool is_interlaced);
@@ -283,14 +287,11 @@
 	u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled);
 
 static inline u32 calculate_h264e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes);
+	u32 width, u32 height, u32 num_ref, bool ten_bit);
 static inline u32 calculate_h265e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes);
+	u32 width, u32 height, u32 num_ref, bool ten_bit);
 static inline u32 calculate_vp8e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes);
+	u32 width, u32 height, u32 num_ref, bool ten_bit);
 
 static inline u32 calculate_enc_scratch2_size(struct msm_vidc_inst *inst,
 	u32 width, u32 height, u32 num_ref, bool ten_bit);
@@ -533,8 +534,7 @@
 			curr_req->buffer_size =
 				enc_calculators->calculate_scratch1_size(
 					inst, width, height, num_ref,
-					is_tenbit,
-					inst->clk_data.work_route);
+					is_tenbit);
 			valid_buffer_type = true;
 		} else if (curr_req->buffer_type ==
 			HAL_BUFFER_INTERNAL_SCRATCH_2) {
@@ -584,7 +584,7 @@
 	inst->buffer_size_calculators = NULL;
 	core = inst->core;
 
-	/* Change this to IRIS2 once firmware is ready with changes */
+	/* Change this to IRIS2 when ready */
 	if (core->platform_data->vpu_ver == VPU_VERSION_AR50)
 		inst->buffer_size_calculators =
 			msm_vidc_calculate_internal_buffer_sizes;
@@ -756,7 +756,7 @@
 	num_mbs = ((width + 15) >> 4) * ((height + 15) >> 4);
 	if (num_mbs > NUM_MBS_4k) {
 		div_factor = 4;
-		base_res_mbs = inst->capability.mbs_per_frame.max;
+		base_res_mbs = inst->capability.cap[CAP_MBS_PER_FRAME].max;
 	} else {
 		base_res_mbs = NUM_MBS_4k;
 		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9)
@@ -765,7 +765,19 @@
 			div_factor = 2;
 	}
 
-	frame_size = base_res_mbs * 3 / 2 / div_factor;
+	frame_size = base_res_mbs * MB_SIZE_IN_PIXEL * 3 / 2 / div_factor;
+
+	if (is_secure_session(inst)) {
+		u32 max_bitrate = inst->capability.cap[CAP_SECURE_BITRATE].max;
+
+		/*
+		 * For secure sessions, calculate frame_size from the max
+		 * bitrate: the peak bitrate can be up to 10x, and the
+		 * frame rate is assumed to be at least 30 fps.
+		 */
+		frame_size = (max_bitrate * 10 / 8) / 30;
+	}
+
 	 /* multiply by 10/8 (1.25) to get size for 10 bit case */
 	if ((inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9) ||
 		(inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC))
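The secure-session path above sizes the bitstream buffer from the bitrate cap rather than from resolution: bits to bytes (/8), a 10x peak allowance, and a 30 fps floor. A worked sketch; the 40 Mbps cap is an assumed example, not the driver's CAP_SECURE_BITRATE value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t max_bitrate = 40000000;    /* bps, assumed cap */
        uint32_t frame_size = (max_bitrate * 10 / 8) / 30;

        printf("%u bytes per frame\n", frame_size);     /* 1666666 */
        return 0;
    }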
@@ -815,21 +827,25 @@
 	u32 width, height;
 
 	/*
-	 * Encoder output size calculation:
+	 * Encoder output size calculation (width/height aligned to 32):
 	 * For resolution < 720p : YUVsize * 4
 	 * For resolution > 720p & <= 4K : YUVsize / 2
 	 * For resolution > 4k : YUVsize / 4
+	 * Initially frame_size = YUVsize * 2;
 	 */
-	width = inst->prop.width[CAPTURE_PORT];
-	height = inst->prop.height[CAPTURE_PORT];
-	mbs_per_frame = ((width + 15) >> 4) * ((height + 15) >> 4);
-	frame_size = (width * height * 3) >> 1;
+	width = ALIGN(inst->prop.width[CAPTURE_PORT],
+		BUFFER_ALIGNMENT_SIZE(32));
+	height = ALIGN(inst->prop.height[CAPTURE_PORT],
+		BUFFER_ALIGNMENT_SIZE(32));
+	mbs_per_frame = NUM_MBS_PER_FRAME(width, height);
+	frame_size = (width * height * 3);
+
 	if (mbs_per_frame < NUM_MBS_720P)
-		frame_size = frame_size << 2;
+		frame_size = frame_size << 1;
 	else if (mbs_per_frame <= NUM_MBS_4k)
-		frame_size = frame_size >> 1;
-	else
 		frame_size = frame_size >> 2;
+	else
+		frame_size = frame_size >> 3;
 
 	if ((inst->rc_type == RATE_CONTROL_OFF) ||
 		(inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ))
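Put together, the encoder output sizing above starts from twice the YUV420 frame size (width * height * 3) and scales per resolution tier, matching the comment: x4 YUV below 720p, /2 YUV up to 4K, /4 YUV beyond. A standalone sketch using the same NUM_MBS_720P/NUM_MBS_4k thresholds:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_MBS(w, h) ((((w) + 15) >> 4) * (((h) + 15) >> 4))

    int main(void)
    {
        uint32_t w = 1920, h = 1088;        /* 32-aligned 1080p */
        uint32_t mbs = NUM_MBS(w, h);
        uint32_t size = w * h * 3;          /* 2x the YUV420 size */

        if (mbs < NUM_MBS(1280, 720))
            size <<= 1;                     /* 4x YUV  (< 720p) */
        else if (mbs <= NUM_MBS(4096, 2304))
            size >>= 2;                     /* YUV / 2 (<= 4K)  */
        else
            size >>= 3;                     /* YUV / 4 (> 4K)   */

        printf("%u bytes\n", size);         /* 1566720 */
        return 0;
    }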
@@ -854,8 +870,7 @@
 	return (((lcu_width + 7) >> 3) << 3) * lcu_height * 2;
 }
 
-u32 msm_vidc_calculate_enc_input_extra_size(struct msm_vidc_inst *inst,
-	u32 extra_types)
+u32 msm_vidc_calculate_enc_input_extra_size(struct msm_vidc_inst *inst)
 {
 	u32 size = 0;
 	u32 width = inst->prop.width[OUTPUT_PORT];
@@ -866,7 +881,7 @@
 	size += sizeof(struct msm_vidc_enc_cvp_metadata_payload);
 	extradata_count++;
 
-	if (extra_types & EXTRADATA_ENC_INPUT_ROI) {
+	if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_ROI) {
 		u32 lcu_size = 16;
 
 		if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC)
@@ -876,8 +891,8 @@
 		extradata_count++;
 	}
 
-	if (extra_types & EXTRADATA_ENC_INPUT_HDR10PLUS) {
-		size += sizeof(struct hfi_hdr10_pq_sei);
+	if (inst->prop.extradata_ctrls & EXTRADATA_ENC_INPUT_HDR10PLUS) {
+		size += HDR10PLUS_PAYLOAD_SIZE;
 		extradata_count++;
 	}
 
@@ -892,13 +907,8 @@
 u32 msm_vidc_calculate_enc_output_extra_size(struct msm_vidc_inst *inst)
 {
 	u32 size = 0;
-	u32 extra_types;
-	struct v4l2_ctrl *extradata_ctrl;
 
-	extradata_ctrl = get_ctrl(inst,
-			V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
-	extra_types = extradata_ctrl->val;
-	if (extra_types & EXTRADATA_ADVANCED)
+	if (inst->prop.extradata_ctrls & EXTRADATA_ADVANCED)
 		size += sizeof(struct msm_vidc_metadata_ltr_payload);
 
 	/* Add size for extradata none */
@@ -1243,7 +1253,8 @@
 	size_singlePipe = sao_bin_buffer_size + padded_bin_size;
 	size_singlePipe = ALIGN(size_singlePipe, VENUS_DMA_ALIGNMENT);
 	bitbin_size = size_singlePipe * NUM_OF_VPP_PIPES;
-	size = ALIGN(bitbin_size, VENUS_DMA_ALIGNMENT) * total_bitbin_buffers;
+	size = ALIGN(bitbin_size, VENUS_DMA_ALIGNMENT) * total_bitbin_buffers
+			+ 512;
 
 	return size;
 }
@@ -1294,7 +1305,8 @@
 	if (split_mode_enabled)
 		vpss_lb_size = size_vpss_lb(width, height);
 
-	size = co_mv_size + nonco_mv_size + vpss_lb_size;
+	size = co_mv_size + nonco_mv_size + vpss_lb_size +
+			HDR10_HIST_EXTRADATA_SIZE;
 	return size;
 }
 
@@ -1359,7 +1371,7 @@
 	if (split_mode_enabled)
 		vpss_lb_size = size_vpss_lb(width, height);
 
-	size += vpss_lb_size;
+	size += vpss_lb_size + HDR10_HIST_EXTRADATA_SIZE;
 	return size;
 }
 
@@ -1502,13 +1514,13 @@
 	bse_slice_cmd_buffer_size = ((((8192 << 2) + 7) & (~7)) * 6);
 	bse_reg_buffer_size = ((((512 << 3) + 7) & (~7)) * 4);
 	vpp_reg_buffer_size = ((((HFI_VENUS_VPPSG_MAX_REGISTERS << 3) + 31) &
-		(~31)) * 8);
-	lambda_lut_size = ((((52 << 1) + 7) & (~7)) * 3);
+		(~31)) * 10);
+	lambda_lut_size = ((((52 << 1) + 7) & (~7)) * 11);
 	override_buffer_size = 16 * ((frame_num_lcu + 7) >> 3);
 	override_buffer_size = ALIGN(override_buffer_size,
 		VENUS_DMA_ALIGNMENT) * 2;
 	ir_buffer_size = (((frame_num_lcu << 1) + 7) & (~7)) * 3;
-	vpss_line_buf = ((16 * width_coded) + (16 * height_coded));
+	vpss_line_buf = ((((width_coded + 3) >> 2) << 5) + 256) * 16;
 	topline_bufsize_fe_1stg_sao = (16 * (width_coded >> 5));
 	topline_bufsize_fe_1stg_sao = ALIGN(topline_bufsize_fe_1stg_sao,
 		VENUS_DMA_ALIGNMENT);
@@ -1530,27 +1542,24 @@
 }
 
 static inline u32 calculate_h264e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes)
+	u32 width, u32 height, u32 num_ref, bool ten_bit)
 {
 	return calculate_enc_scratch1_size(inst, width, height, 16,
-		num_ref, ten_bit, num_vpp_pipes, false);
+		num_ref, ten_bit, NUM_OF_VPP_PIPES, false);
 }
 
 static inline u32 calculate_h265e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes)
+	u32 width, u32 height, u32 num_ref, bool ten_bit)
 {
 	return calculate_enc_scratch1_size(inst, width, height, 32,
-		num_ref, ten_bit, num_vpp_pipes, true);
+		num_ref, ten_bit, NUM_OF_VPP_PIPES, true);
 }
 
 static inline u32 calculate_vp8e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes)
+	u32 width, u32 height, u32 num_ref, bool ten_bit)
 {
 	return calculate_enc_scratch1_size(inst, width, height, 16,
-		num_ref, ten_bit, num_vpp_pipes, false);
+		num_ref, ten_bit, 1, false);
 }
 
 
@@ -1611,16 +1620,11 @@
 			16, HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_HEIGHT);
 		meta_size_y = hfi_ubwc_metadata_plane_buffer_size(
 			metadata_stride, meta_buf_height);
-		metadata_stride = hfi_ubwc_uv_metadata_plane_stride(width,
-			64, HFI_COLOR_FORMAT_YUV420_NV12_UBWC_UV_TILE_WIDTH);
-		meta_buf_height = hfi_ubwc_uv_metadata_plane_bufheight(
-			height, 16,
-			HFI_COLOR_FORMAT_YUV420_NV12_UBWC_UV_TILE_HEIGHT);
 		meta_size_c = hfi_ubwc_metadata_plane_buffer_size(
 			metadata_stride, meta_buf_height);
 		size = (aligned_height + chroma_height) * aligned_width +
 			meta_size_y + meta_size_c;
-		size = (size * ((num_ref)+1)) + 4096;
+		size = (size * ((num_ref)+2)) + 4096;
 	} else {
 		ref_buf_height = (height + (HFI_VENUS_HEIGHT_ALIGNMENT - 1))
 			& (~(HFI_VENUS_HEIGHT_ALIGNMENT - 1));
@@ -1643,13 +1647,6 @@
 		metadata_stride = hfi_ubwc_calc_metadata_plane_stride(
 			width,
 			VENUS_METADATA_STRIDE_MULTIPLE,
-			HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_WIDTH);
-		meta_buf_height = hfi_ubwc_metadata_plane_bufheight(height,
-			VENUS_METADATA_HEIGHT_MULTIPLE,
-			HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_HEIGHT);
-		metadata_stride = hfi_ubwc_calc_metadata_plane_stride(
-			width,
-			VENUS_METADATA_STRIDE_MULTIPLE,
 			HFI_COLOR_FORMAT_YUV420_TP10_UBWC_Y_TILE_WIDTH);
 		meta_buf_height = hfi_ubwc_metadata_plane_bufheight(
 			height,
@@ -1660,7 +1657,7 @@
 		meta_size_c = hfi_ubwc_metadata_plane_buffer_size(
 			metadata_stride, meta_buf_height);
 		size = ref_buf_size + meta_size_y + meta_size_c;
-		size = (size * ((num_ref)+1)) + 4096;
+		size = (size * ((num_ref)+2)) + 4096;
 	}
 	return size;
 }
@@ -1713,5 +1710,5 @@
 
 static inline u32 calculate_mpeg2d_persist1_size(void)
 {
-	return QMATRIX_SIZE;
+	return QMATRIX_SIZE + MP2D_QPDUMP_SIZE;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
index 74712f7..29fe98c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
@@ -18,8 +18,7 @@
 	u32 (*calculate_scratch_size)(struct msm_vidc_inst *inst, u32 width,
 		u32 height, u32 work_mode);
 	u32 (*calculate_scratch1_size)(struct msm_vidc_inst *inst,
-		u32 width, u32 height, u32 num_ref, bool ten_bit,
-		u32 num_vpp_pipes);
+		u32 width, u32 height, u32 num_ref, bool ten_bit);
 	u32 (*calculate_scratch2_size)(struct msm_vidc_inst *inst,
 		u32 width, u32 height, u32 num_ref, bool ten_bit);
 	u32 (*calculate_persist_size)(void);
@@ -34,8 +33,7 @@
 u32 msm_vidc_calculate_dec_output_extra_size(struct msm_vidc_inst *inst);
 u32 msm_vidc_calculate_enc_input_frame_size(struct msm_vidc_inst *inst);
 u32 msm_vidc_calculate_enc_output_frame_size(struct msm_vidc_inst *inst);
-u32 msm_vidc_calculate_enc_input_extra_size(struct msm_vidc_inst *inst,
-	u32 extra_types);
+u32 msm_vidc_calculate_enc_input_extra_size(struct msm_vidc_inst *inst);
 u32 msm_vidc_calculate_enc_output_extra_size(struct msm_vidc_inst *inst);
 u32 msm_vidc_set_buffer_count_for_thumbnail(struct msm_vidc_inst *inst);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 20670d7..3077152 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -27,18 +27,21 @@
 	.calc_freq = msm_vidc_calc_freq_ar50,
 	.decide_work_route = NULL,
 	.decide_work_mode = msm_vidc_decide_work_mode_ar50,
+	.decide_core_and_power_mode = NULL,
 };
 
 struct msm_vidc_core_ops core_ops_iris1 = {
 	.calc_freq = msm_vidc_calc_freq_iris1,
 	.decide_work_route = msm_vidc_decide_work_route_iris1,
 	.decide_work_mode = msm_vidc_decide_work_mode_iris1,
+	.decide_core_and_power_mode = msm_vidc_decide_core_and_power_mode_iris1,
 };
 
 struct msm_vidc_core_ops core_ops_iris2 = {
 	.calc_freq = msm_vidc_calc_freq_iris2,
 	.decide_work_route = msm_vidc_decide_work_route_iris2,
 	.decide_work_mode = msm_vidc_decide_work_mode_iris2,
+	.decide_core_and_power_mode = msm_vidc_decide_core_and_power_mode_iris2,
 };
 
 static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs)
@@ -58,7 +61,7 @@
 	struct ubwc_cr_stats_info_type ubwc_stats_info)
 {
 	unsigned long sum = 0, weighted_sum = 0;
-	unsigned long compression_ratio = 1 << 16;
+	unsigned long compression_ratio = 0;
 
 	weighted_sum =
 		32  * ubwc_stats_info.cr_stats_info0 +
@@ -84,6 +87,34 @@
 	return compression_ratio;
 }
 
+bool res_is_less_than(u32 width, u32 height,
+			u32 ref_width, u32 ref_height)
+{
+	u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
+	u32 max_side = max(ref_width, ref_height);
+
+	if (num_mbs < NUM_MBS_PER_FRAME(ref_height, ref_width) &&
+		width < max_side &&
+		height < max_side)
+		return true;
+	else
+		return false;
+}
+
+bool res_is_greater_than(u32 width, u32 height,
+				u32 ref_width, u32 ref_height)
+{
+	u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
+	u32 max_side = max(ref_width, ref_height);
+
+	if (num_mbs > NUM_MBS_PER_FRAME(ref_height, ref_width) ||
+		width > max_side ||
+		height > max_side)
+		return true;
+	else
+		return false;
+}
+
 int msm_vidc_get_mbs_per_frame(struct msm_vidc_inst *inst)
 {
 	int height, width;
@@ -287,17 +318,17 @@
 		vote_data[i].input_height = inst->prop.height[OUTPUT_PORT];
 		vote_data[i].output_width = inst->prop.width[CAPTURE_PORT];
 		vote_data[i].output_height = inst->prop.height[CAPTURE_PORT];
-		vote_data[i].rotation =
-			msm_comm_g_ctrl_for_id(inst, V4L2_CID_ROTATE);
 		vote_data[i].lcu_size = (codec == V4L2_PIX_FMT_HEVC ||
 				codec == V4L2_PIX_FMT_VP9) ? 32 : 16;
-		vote_data[i].b_frames_enabled =
-			msm_comm_g_ctrl_for_id(inst,
-				V4L2_CID_MPEG_VIDEO_B_FRAMES) != 0;
 
 		vote_data[i].fps = msm_vidc_get_fps(inst);
 		if (inst->session_type == MSM_VIDC_ENCODER) {
 			vote_data[i].bitrate = inst->clk_data.bitrate;
+			vote_data[i].rotation =
+				msm_comm_g_ctrl_for_id(inst, V4L2_CID_ROTATE);
+			vote_data[i].b_frames_enabled =
+				msm_comm_g_ctrl_for_id(inst,
+					V4L2_CID_MPEG_VIDEO_B_FRAMES) != 0;
 			/* scale bitrate if operating rate is larger than fps */
 			if (vote_data[i].fps > (inst->clk_data.frame_rate >> 16)
 				&& (inst->clk_data.frame_rate >> 16)) {
@@ -819,7 +850,9 @@
 	struct hfi_device *hdev;
 	unsigned long freq_core_1 = 0, freq_core_2 = 0, rate = 0;
 	unsigned long freq_core_max = 0;
-	struct msm_vidc_inst *temp = NULL;
+	struct msm_vidc_inst *inst = NULL;
+	struct msm_vidc_buffer *temp, *next;
+	u32 device_addr, filled_len;
 	int rc = 0, i = 0;
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	bool increment, decrement;
@@ -835,15 +868,34 @@
 	mutex_lock(&core->lock);
 	increment = false;
 	decrement = true;
-	list_for_each_entry(temp, &core->instances, list) {
+	list_for_each_entry(inst, &core->instances, list) {
+		device_addr = 0;
+		filled_len = 0;
+		mutex_lock(&inst->registeredbufs.lock);
+		list_for_each_entry_safe(temp, next,
+				&inst->registeredbufs.list, list) {
+			if (temp->vvb.vb2_buf.type ==
+				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+				filled_len = max(filled_len,
+					temp->vvb.vb2_buf.planes[0].bytesused);
+				device_addr = temp->smem[0].device_addr;
+			}
+		}
+		mutex_unlock(&inst->registeredbufs.lock);
 
-		if (temp->clk_data.core_id == VIDC_CORE_ID_1)
-			freq_core_1 += temp->clk_data.min_freq;
-		else if (temp->clk_data.core_id == VIDC_CORE_ID_2)
-			freq_core_2 += temp->clk_data.min_freq;
-		else if (temp->clk_data.core_id == VIDC_CORE_ID_3) {
-			freq_core_1 += temp->clk_data.min_freq;
-			freq_core_2 += temp->clk_data.min_freq;
+		if (!filled_len || !device_addr) {
+			dprintk(VIDC_DBG, "%s no input for session %x\n",
+				__func__, hash32_ptr(inst->session));
+			continue;
+		}
+
+		if (inst->clk_data.core_id == VIDC_CORE_ID_1)
+			freq_core_1 += inst->clk_data.min_freq;
+		else if (inst->clk_data.core_id == VIDC_CORE_ID_2)
+			freq_core_2 += inst->clk_data.min_freq;
+		else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+			freq_core_1 += inst->clk_data.min_freq;
+			freq_core_2 += inst->clk_data.min_freq;
 		}
 
 		freq_core_max = max_t(unsigned long, freq_core_1, freq_core_2);
@@ -857,18 +909,11 @@
 			break;
 		}
 
-		if (temp->clk_data.turbo_mode) {
-			dprintk(VIDC_PROF,
-				"Found an instance with Turbo request\n");
-			freq_core_max = msm_vidc_max_freq(core);
-			decrement = false;
-			break;
-		}
 		/* increment even if one session requested for it */
-		if (temp->clk_data.dcvs_flags & MSM_VIDC_DCVS_INCR)
+		if (inst->clk_data.dcvs_flags & MSM_VIDC_DCVS_INCR)
 			increment = true;
 		/* decrement only if all sessions requested for it */
-		if (!(temp->clk_data.dcvs_flags & MSM_VIDC_DCVS_DECR))
+		if (!(inst->clk_data.dcvs_flags & MSM_VIDC_DCVS_DECR))
 			decrement = false;
 	}
 
@@ -903,69 +948,6 @@
 	return rc;
 }
 
-int msm_vidc_validate_operating_rate(struct msm_vidc_inst *inst,
-	u32 operating_rate)
-{
-	struct msm_vidc_inst *temp;
-	struct msm_vidc_core *core;
-	unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
-	unsigned long mbs_per_second;
-	int rc = 0;
-	u32 curr_operating_rate = 0;
-
-	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
-		return -EINVAL;
-	}
-	core = inst->core;
-	curr_operating_rate = inst->clk_data.operating_rate >> 16;
-
-	mutex_lock(&core->lock);
-	max_freq = msm_vidc_max_freq(core);
-	list_for_each_entry(temp, &core->instances, list) {
-		if (temp == inst ||
-				temp->state < MSM_VIDC_START_DONE ||
-				temp->state >= MSM_VIDC_RELEASE_RESOURCES_DONE)
-			continue;
-
-		freq += temp->clk_data.min_freq;
-	}
-
-	freq_left = max_freq - freq;
-
-	mbs_per_second = msm_comm_get_inst_load_per_core(inst,
-		LOAD_CALC_NO_QUIRKS);
-
-	cycles = inst->clk_data.entry->vpp_cycles;
-	if (inst->session_type == MSM_VIDC_ENCODER)
-		cycles = inst->flags & VIDC_LOW_POWER ?
-			inst->clk_data.entry->low_power_cycles :
-			cycles;
-
-	load = cycles * mbs_per_second;
-
-	ops_left = load ? (freq_left / load) : 0;
-
-	operating_rate = operating_rate >> 16;
-
-	if ((curr_operating_rate * (1 + ops_left)) >= operating_rate ||
-			msm_vidc_clock_voting ||
-			inst->clk_data.buffer_counter < DCVS_FTB_WINDOW) {
-		dprintk(VIDC_DBG,
-			"Requestd operating rate is valid %u\n",
-			operating_rate);
-		rc = 0;
-	} else {
-		dprintk(VIDC_DBG,
-			"Current load is high for requested settings. Cannot set operating rate to %u\n",
-			operating_rate);
-		rc = -EINVAL;
-	}
-	mutex_unlock(&core->lock);
-
-	return rc;
-}
-
 int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
 {
 	struct msm_vidc_buffer *temp, *next;
@@ -999,7 +981,7 @@
 	if (!filled_len || !device_addr) {
 		dprintk(VIDC_DBG, "%s no input for session %x\n",
 			__func__, hash32_ptr(inst->session));
-		goto no_clock_change;
+		return 0;
 	}
 
 	freq = call_core_op(inst->core, calc_freq, inst, filled_len);
@@ -1017,7 +999,6 @@
 
 	msm_vidc_set_clocks(inst->core);
 
-no_clock_change:
 	return 0;
 }
 
@@ -1272,6 +1253,7 @@
 	int rc = 0;
 	struct hfi_device *hdev;
 	struct hfi_video_work_route pdata;
+	bool cbr_plus;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR,
@@ -1281,53 +1263,44 @@
 	}
 
 	hdev = inst->core->device;
-
+	cbr_plus = inst->clk_data.is_cbr_plus;
 	pdata.video_work_route = 4;
+
 	if (inst->session_type == MSM_VIDC_DECODER) {
 		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_MPEG2 ||
 			inst->pic_struct != MSM_VIDC_PIC_STRUCT_PROGRESSIVE)
 			pdata.video_work_route = 1;
 	} else if (inst->session_type == MSM_VIDC_ENCODER) {
-		u32 slice_mode, rc_mode;
-		u32 output_width, output_height, fps, mbps;
-		bool cbr_plus;
+		u32 slice_mode, width, height;
+		bool is_1080p_above;
 
-		if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_VP8) {
-			pdata.video_work_route = 1;
-			goto decision_done;
-		}
-
-		rc_mode = msm_comm_g_ctrl_for_id(inst,
-			V4L2_CID_MPEG_VIDEO_BITRATE_MODE);
 		slice_mode =  msm_comm_g_ctrl_for_id(inst,
 				V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
-		output_height = inst->prop.height[CAPTURE_PORT];
-		output_width = inst->prop.width[CAPTURE_PORT];
-		fps = inst->clk_data.frame_rate >> 16;
-		mbps = NUM_MBS_PER_SEC(output_height, output_width, fps);
-		cbr_plus = ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR &&
-			mbps > CBR_MB_LIMIT) ||
-			(rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR &&
-			mbps > CBR_VFR_MB_LIMIT));
+		height = inst->prop.height[OUTPUT_PORT];
+		width = inst->prop.width[OUTPUT_PORT];
+
+		is_1080p_above = res_is_greater_than(width, height, 1920, 1088);
+
 		if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES ||
-			((mbps <= NUM_MBS_PER_SEC(1920, 1088, 60)) && !cbr_plus)
-			) {
+			inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_VP8 ||
+			(!is_1080p_above && !cbr_plus)) {
 			pdata.video_work_route = 1;
-			dprintk(VIDC_DBG, "Configured work route = 1");
 		}
 	} else {
 		return -EINVAL;
 	}
 
-decision_done:
+	dprintk(VIDC_DBG, "Configurng work route = %u",
+			pdata.video_work_route);
 
-	inst->clk_data.work_route = pdata.video_work_route;
 	rc = call_hfi_op(hdev, session_set_property,
 			(void *)inst->session, HFI_PROPERTY_PARAM_WORK_ROUTE,
 			(void *)&pdata, sizeof(pdata));
 	if (rc)
 		dprintk(VIDC_WARN,
 			" Failed to configure work route %pK\n", inst);
+	else
+		inst->clk_data.work_route = pdata.video_work_route;
 
 	return rc;
 }
@@ -1485,7 +1458,8 @@
 	struct hfi_device *hdev;
 	struct hfi_video_work_mode pdata;
 	struct hfi_enable latency;
-	u32 num_mbs = 0;
+	u32 width, height;
+	bool res_ok = false;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR,
@@ -1496,44 +1470,56 @@
 
 	hdev = inst->core->device;
 	pdata.video_work_mode = HFI_WORKMODE_2;
+	latency.enable = inst->clk_data.low_latency_mode;
 
-	if (inst->clk_data.low_latency_mode) {
-		pdata.video_work_mode = HFI_WORKMODE_1;
-		dprintk(VIDC_DBG, "Configured work mode = 1");
-	} else if (inst->session_type == MSM_VIDC_DECODER) {
-		num_mbs = NUM_MBS_PER_FRAME(
-					inst->prop.height[OUTPUT_PORT],
-					inst->prop.width[OUTPUT_PORT]);
+	if (inst->session_type == MSM_VIDC_DECODER) {
+		height = inst->prop.height[CAPTURE_PORT];
+		width = inst->prop.width[CAPTURE_PORT];
+		res_ok = res_is_less_than(width, height, 1280, 720);
 		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_MPEG2 ||
-			(inst->pic_struct != MSM_VIDC_PIC_STRUCT_PROGRESSIVE) ||
-			(num_mbs < NUM_MBS_PER_FRAME(720, 1280)))
+			inst->pic_struct != MSM_VIDC_PIC_STRUCT_PROGRESSIVE ||
+			inst->clk_data.low_latency_mode || res_ok) {
 			pdata.video_work_mode = HFI_WORKMODE_1;
+		}
 	} else if (inst->session_type == MSM_VIDC_ENCODER) {
-		if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_VP8) {
+		height = inst->prop.height[OUTPUT_PORT];
+		width = inst->prop.width[OUTPUT_PORT];
+		res_ok = !res_is_greater_than(width, height, 4096, 2160);
+		if (res_ok &&
+			(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_VP8 ||
+			  inst->clk_data.low_latency_mode)) {
 			pdata.video_work_mode = HFI_WORKMODE_1;
 			/* For WORK_MODE_1, set Low Latency mode by default */
-			inst->clk_data.low_latency_mode = true;
+			latency.enable = true;
 		}
 	} else {
 		return -EINVAL;
 	}
 
-	inst->clk_data.work_mode = pdata.video_work_mode;
+	dprintk(VIDC_DBG, "Configuring work mode = %u low latency = %u",
+			pdata.video_work_mode,
+			latency.enable);
+
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session,
+			HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE,
+			(void *)&latency, sizeof(latency));
+		if (rc)
+			dprintk(VIDC_WARN,
+				" Failed to configure low latency %pK\n", inst);
+		else
+			inst->clk_data.low_latency_mode = latency.enable;
+	}
+
 	rc = call_hfi_op(hdev, session_set_property,
 			(void *)inst->session, HFI_PROPERTY_PARAM_WORK_MODE,
 			(void *)&pdata, sizeof(pdata));
 	if (rc)
 		dprintk(VIDC_WARN,
 			" Failed to configure Work Mode %pK\n", inst);
-
-	if (inst->clk_data.low_latency_mode &&
-		inst->session_type == MSM_VIDC_ENCODER){
-		latency.enable = true;
-		rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session,
-			HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE,
-			(void *)&latency, sizeof(latency));
-	}
+	else
+		inst->clk_data.work_mode = pdata.video_work_mode;
 
 	return rc;
 }
@@ -1541,7 +1527,7 @@
 static inline int msm_vidc_power_save_mode_enable(struct msm_vidc_inst *inst,
 	bool enable)
 {
-	u32 rc = 0, mbs_per_frame;
+	u32 rc = 0, mbs_per_frame, mbs_per_sec;
 	u32 prop_id = 0;
 	void *pdata = NULL;
 	struct hfi_device *hdev = NULL;
@@ -1554,15 +1540,17 @@
 				__func__);
 		return 0;
 	}
-	mbs_per_frame = msm_vidc_get_mbs_per_frame(inst);
-	if (mbs_per_frame > inst->core->resources.max_hq_mbs_per_frame ||
-		msm_vidc_get_fps(inst) >
-		(int) inst->core->resources.max_hq_fps) {
-		enable = true;
-	}
+
 	/* Power saving always disabled for CQ RC mode. */
-	if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
+	mbs_per_frame = msm_vidc_get_mbs_per_frame(inst);
+	mbs_per_sec = mbs_per_frame * msm_vidc_get_fps(inst);
+	if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ ||
+		(mbs_per_frame <=
+		 inst->core->resources.max_hq_mbs_per_frame &&
+		 mbs_per_sec <=
+		 inst->core->resources.max_hq_mbs_per_sec)) {
 		enable = false;
+	}
 
 	prop_id = HFI_PROPERTY_CONFIG_VENC_PERF_MODE;
 	hfi_perf_mode = enable ? HFI_VENC_PERFMODE_POWER_SAVE :
@@ -1640,7 +1628,7 @@
 	return load;
 }
 
-int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
+int msm_vidc_decide_core_and_power_mode_iris1(struct msm_vidc_inst *inst)
 {
 	int rc = 0, hier_mode = 0;
 	struct hfi_device *hdev;
@@ -1649,7 +1637,7 @@
 	struct hfi_videocores_usage_type core_info;
 	u32 core0_load = 0, core1_load = 0, core0_lp_load = 0,
 		core1_lp_load = 0;
-	u32 current_inst_load = 0, current_inst_lp_load = 0,
+	u32 current_inst_load = 0, cur_inst_lp_load = 0,
 		min_load = 0, min_lp_load = 0;
 	u32 min_core_id, min_lp_core_id;
 
@@ -1685,7 +1673,7 @@
 	 * with min load. This ensures that this core is selected and
 	 * video session is set to run on the enabled core.
 	 */
-	if (inst->capability.max_video_cores.max <= VIDC_CORE_ID_1) {
+	if (inst->capability.cap[CAP_MAX_VIDEOCORES].max <= VIDC_CORE_ID_1) {
 		min_core_id = min_lp_core_id = VIDC_CORE_ID_1;
 		min_load = core0_load;
 		min_lp_load = core0_lp_load;
@@ -1694,7 +1682,7 @@
 	current_inst_load = (msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS) *
 		inst->clk_data.entry->vpp_cycles)/inst->clk_data.work_route;
 
-	current_inst_lp_load = (msm_comm_get_inst_load(inst,
+	cur_inst_lp_load = (msm_comm_get_inst_load(inst,
 		LOAD_CALC_NO_QUIRKS) * lp_cycles)/inst->clk_data.work_route;
 
 	dprintk(VIDC_DBG, "Core 0 RT Load = %d Core 1 RT Load = %d\n",
@@ -1703,38 +1691,32 @@
 		core0_lp_load, core1_lp_load);
 	dprintk(VIDC_DBG, "Max Load = %lu\n", max_freq);
 	dprintk(VIDC_DBG, "Current Load = %d Current LP Load = %d\n",
-		current_inst_load, current_inst_lp_load);
+		current_inst_load, cur_inst_lp_load);
 
-	/* Hier mode can be normal HP or Hybrid HP. */
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		/* Hier mode can be normal HP or Hybrid HP. */
+		u32 max_cores, work_mode;
 
-	hier_mode = msm_comm_g_ctrl_for_id(inst,
-		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
-
-	/* Try for preferred core based on settings. */
-	if (inst->session_type == MSM_VIDC_ENCODER && hier_mode &&
-		inst->capability.max_video_cores.max >= VIDC_CORE_ID_3) {
-		if (current_inst_load / 2 + core0_load <= max_freq &&
+		hier_mode = msm_comm_g_ctrl_for_id(inst,
+			V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
+		max_cores = inst->capability.cap[CAP_MAX_VIDEOCORES].max;
+		work_mode = inst->clk_data.work_mode;
+		if (hier_mode && max_cores >= VIDC_CORE_ID_3 &&
+			work_mode == HFI_WORKMODE_2) {
+			if (current_inst_load / 2 + core0_load <= max_freq &&
 			current_inst_load / 2 + core1_load <= max_freq) {
-			if (inst->clk_data.work_mode == HFI_WORKMODE_2) {
 				inst->clk_data.core_id = VIDC_CORE_ID_3;
 				msm_vidc_power_save_mode_enable(inst, false);
 				goto decision_done;
 			}
-		}
-	}
-
-	if (inst->session_type == MSM_VIDC_ENCODER && hier_mode &&
-		inst->capability.max_video_cores.max >= VIDC_CORE_ID_3) {
-		if (current_inst_lp_load / 2 +
-				core0_lp_load <= max_freq &&
-			current_inst_lp_load / 2 +
-				core1_lp_load <= max_freq) {
-			if (inst->clk_data.work_mode == HFI_WORKMODE_2) {
+			if (cur_inst_lp_load / 2 + core0_lp_load <= max_freq &&
+			cur_inst_lp_load / 2 + core1_lp_load <= max_freq) {
 				inst->clk_data.core_id = VIDC_CORE_ID_3;
 				msm_vidc_power_save_mode_enable(inst, true);
 				goto decision_done;
 			}
 		}
+
 	}
 
 	if (current_inst_load + min_load < max_freq) {
@@ -1743,7 +1725,7 @@
 			"Selected normally : Core ID = %d\n",
 				inst->clk_data.core_id);
 		msm_vidc_power_save_mode_enable(inst, false);
-	} else if (current_inst_lp_load + min_load < max_freq) {
+	} else if (cur_inst_lp_load + min_load < max_freq) {
 		/* Move current instance to LP and return */
 		inst->clk_data.core_id = min_core_id;
 		dprintk(VIDC_DBG,
@@ -1751,7 +1733,7 @@
 				inst->clk_data.core_id);
 		msm_vidc_power_save_mode_enable(inst, true);
 
-	} else if (current_inst_lp_load + min_lp_load < max_freq) {
+	} else if (cur_inst_lp_load + min_lp_load < max_freq) {
 		/* Move all instances to LP mode and return */
 		inst->clk_data.core_id = min_lp_core_id;
 		dprintk(VIDC_DBG,
@@ -1786,6 +1768,14 @@
 	return rc;
 }
 
+int msm_vidc_decide_core_and_power_mode_iris2(struct msm_vidc_inst *inst)
+{
+	inst->clk_data.core_id = VIDC_CORE_ID_1;
+	msm_print_core_status(inst->core, VIDC_CORE_ID_1);
+
+	return msm_vidc_power_save_mode_enable(inst, true);
+}
+
 void msm_vidc_init_core_clk_ops(struct msm_vidc_core *core)
 {
 	if (!core)
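
The res_is_less_than()/res_is_greater_than() helpers added above compare both the total macroblock count and the longer edge, so a portrait 1088x1920 stream is classified the same as landscape 1920x1088. A self-contained sketch of that comparison; NUM_MBS_PER_FRAME is reproduced here for illustration only, its real definition lives in a header not shown in this diff:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative reproduction of the driver's macroblock-count macro. */
#define NUM_MBS_PER_FRAME(h, w) ((((h) + 15) / 16) * (((w) + 15) / 16))

static bool res_exceeds(uint32_t w, uint32_t h, uint32_t ref_w, uint32_t ref_h)
{
	uint32_t max_side = ref_w > ref_h ? ref_w : ref_h;

	return NUM_MBS_PER_FRAME(h, w) > NUM_MBS_PER_FRAME(ref_h, ref_w) ||
		w > max_side || h > max_side;
}

int main(void)
{
	/* portrait 1088x1920 does not exceed a 1920x1088 threshold */
	printf("%d\n", res_exceeds(1088, 1920, 1920, 1088)); /* 0 */
	printf("%d\n", res_exceeds(3840, 2160, 1920, 1088)); /* 1 */
	return 0;
}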
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index 4742d37..3882f5e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -8,8 +8,6 @@
 #include "msm_vidc_internal.h"
 
 void msm_clock_data_reset(struct msm_vidc_inst *inst);
-int msm_vidc_validate_operating_rate(struct msm_vidc_inst *inst,
-	u32 operating_rate);
 int msm_vidc_set_clocks(struct msm_vidc_core *core);
 int msm_comm_vote_bus(struct msm_vidc_core *core);
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
@@ -21,7 +19,8 @@
 int msm_vidc_decide_work_mode_iris1(struct msm_vidc_inst *inst);
 int msm_vidc_decide_work_route_iris2(struct msm_vidc_inst *inst);
 int msm_vidc_decide_work_mode_iris2(struct msm_vidc_inst *inst);
-int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst);
+int msm_vidc_decide_core_and_power_mode_iris1(struct msm_vidc_inst *inst);
+int msm_vidc_decide_core_and_power_mode_iris2(struct msm_vidc_inst *inst);
 void msm_print_core_status(struct msm_vidc_core *core, u32 core_id);
 void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
 	u32 device_addr);
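
The header now exposes per-VPU decide_core_and_power_mode variants, selected through the core_ops tables in msm_vidc_clocks.c; the AR50 table leaves the hook NULL. That is only safe if the dispatch site tolerates a NULL op. A hypothetical sketch of such a dispatcher, since the driver's real call_core_op plumbing is not part of this diff:

struct msm_vidc_inst;

struct msm_vidc_core_ops {
	int (*decide_core_and_power_mode)(struct msm_vidc_inst *inst);
};

/* Hypothetical NULL-tolerant dispatch: unsupported ops become no-ops. */
static int call_decide_core_and_power_mode(const struct msm_vidc_core_ops *ops,
					   struct msm_vidc_inst *inst)
{
	if (!ops || !ops->decide_core_and_power_mode)
		return 0;	/* e.g. AR50, which defines no such op */
	return ops->decide_core_and_power_mode(inst);
}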
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 1de6d23..7b0edfc 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -37,72 +37,12 @@
 #define V4L2_HEVC_LEVEL_UNKNOWN V4L2_MPEG_VIDEO_HEVC_LEVEL_UNKNOWN
 #define V4L2_VP9_LEVEL_61 V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61
 
-int h264_level_v4l2_to_hfi[] = {
-	HFI_H264_LEVEL_1,
-	HFI_H264_LEVEL_1b,
-	HFI_H264_LEVEL_11,
-	HFI_H264_LEVEL_12,
-	HFI_H264_LEVEL_13,
-	HFI_H264_LEVEL_2,
-	HFI_H264_LEVEL_21,
-	HFI_H264_LEVEL_22,
-	HFI_H264_LEVEL_3,
-	HFI_H264_LEVEL_31,
-	HFI_H264_LEVEL_32,
-	HFI_H264_LEVEL_4,
-	HFI_H264_LEVEL_41,
-	HFI_H264_LEVEL_42,
-	HFI_H264_LEVEL_5,
-	HFI_H264_LEVEL_51,
-	HFI_H264_LEVEL_52,
-	HFI_H264_LEVEL_6,
-	HFI_H264_LEVEL_61,
-	HFI_H264_LEVEL_62,
-	HFI_LEVEL_UNKNOWN,
-};
-
-int hevc_level_v4l2_to_hfi[] = {
-	HFI_HEVC_LEVEL_1,
-	HFI_HEVC_LEVEL_2,
-	HFI_HEVC_LEVEL_21,
-	HFI_HEVC_LEVEL_3,
-	HFI_HEVC_LEVEL_31,
-	HFI_HEVC_LEVEL_4,
-	HFI_HEVC_LEVEL_41,
-	HFI_HEVC_LEVEL_5,
-	HFI_HEVC_LEVEL_51,
-	HFI_HEVC_LEVEL_52,
-	HFI_HEVC_LEVEL_6,
-	HFI_HEVC_LEVEL_61,
-	HFI_HEVC_LEVEL_62,
-	HFI_LEVEL_UNKNOWN,
-};
-
-int vp9_level_v4l2_to_hfi[] = {
-	HFI_LEVEL_UNKNOWN,
-	HFI_VP9_LEVEL_1,
-	HFI_VP9_LEVEL_11,
-	HFI_VP9_LEVEL_2,
-	HFI_VP9_LEVEL_21,
-	HFI_VP9_LEVEL_3,
-	HFI_VP9_LEVEL_31,
-	HFI_VP9_LEVEL_4,
-	HFI_VP9_LEVEL_41,
-	HFI_VP9_LEVEL_5,
-	HFI_VP9_LEVEL_51,
-	HFI_VP9_LEVEL_6,
-	HFI_VP9_LEVEL_61,
-};
-
 int msm_comm_g_ctrl_for_id(struct msm_vidc_inst *inst, int id)
 {
-	int rc = 0;
-	struct v4l2_control ctrl = {
-		.id = id,
-	};
+	struct v4l2_ctrl *ctrl;
 
-	rc = msm_comm_g_ctrl(inst, &ctrl);
-	return rc ? rc : ctrl.value;
+	ctrl = get_ctrl(inst, id);
+	return ctrl->val;
 }
 
 static struct v4l2_ctrl **get_super_cluster(struct msm_vidc_inst *inst,
@@ -329,6 +269,138 @@
 	return -EINVAL;
 }
 
+static int h264_level_v4l2_to_hfi(int value)
+{
+	switch (value) {
+	case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+		return HFI_H264_LEVEL_1;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+		return HFI_H264_LEVEL_1b;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+		return HFI_H264_LEVEL_11;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+		return HFI_H264_LEVEL_12;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+		return HFI_H264_LEVEL_13;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+		return HFI_H264_LEVEL_2;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+		return HFI_H264_LEVEL_21;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+		return HFI_H264_LEVEL_22;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+		return HFI_H264_LEVEL_3;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+		return HFI_H264_LEVEL_31;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+		return HFI_H264_LEVEL_32;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+		return HFI_H264_LEVEL_4;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+		return HFI_H264_LEVEL_41;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+		return HFI_H264_LEVEL_42;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+		return HFI_H264_LEVEL_5;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+		return HFI_H264_LEVEL_51;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_5_2:
+		return HFI_H264_LEVEL_52;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_6_0:
+		return HFI_H264_LEVEL_6;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_6_1:
+		return HFI_H264_LEVEL_61;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_6_2:
+		return HFI_H264_LEVEL_62;
+	case V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN:
+		return HFI_LEVEL_UNKNOWN;
+	default:
+		goto unknown_value;
+	}
+
+unknown_value:
+	dprintk(VIDC_WARN, "Unknown level (%d)\n", value);
+	return -EINVAL;
+}
+
+static int hevc_level_v4l2_to_hfi(int value)
+{
+	switch (value) {
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+		return HFI_HEVC_LEVEL_1;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+		return HFI_HEVC_LEVEL_2;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+		return HFI_HEVC_LEVEL_21;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+		return HFI_HEVC_LEVEL_3;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+		return HFI_HEVC_LEVEL_31;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+		return HFI_HEVC_LEVEL_4;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+		return HFI_HEVC_LEVEL_41;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+		return HFI_HEVC_LEVEL_5;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+		return HFI_HEVC_LEVEL_51;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+		return HFI_HEVC_LEVEL_52;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
+		return HFI_HEVC_LEVEL_6;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
+		return HFI_HEVC_LEVEL_61;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
+		return HFI_HEVC_LEVEL_62;
+	case V4L2_MPEG_VIDEO_HEVC_LEVEL_UNKNOWN:
+		return HFI_LEVEL_UNKNOWN;
+	default:
+		goto unknown_value;
+	}
+
+unknown_value:
+	dprintk(VIDC_WARN, "Unknown level (%d)\n", value);
+	return -EINVAL;
+}
+
+static int vp9_level_v4l2_to_hfi(int value)
+{
+	switch (value) {
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_1:
+		return HFI_VP9_LEVEL_1;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_11:
+		return HFI_VP9_LEVEL_11;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_2:
+		return HFI_VP9_LEVEL_2;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_21:
+		return HFI_VP9_LEVEL_21;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_3:
+		return HFI_VP9_LEVEL_3;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_31:
+		return HFI_VP9_LEVEL_31;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_4:
+		return HFI_VP9_LEVEL_4;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_41:
+		return HFI_VP9_LEVEL_41;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_5:
+		return HFI_VP9_LEVEL_5;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51:
+		return HFI_VP9_LEVEL_51;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_6:
+		return HFI_VP9_LEVEL_6;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61:
+		return HFI_VP9_LEVEL_61;
+	case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_UNUSED:
+		return HFI_LEVEL_UNKNOWN;
+	default:
+		goto unknown_value;
+	}
+
+unknown_value:
+	dprintk(VIDC_WARN, "Unknown level (%d)\n", value);
+	return -EINVAL;
+}
+
 int msm_comm_v4l2_to_hfi(int id, int value)
 {
 	switch (id) {
@@ -353,11 +425,7 @@
 			return HFI_H264_PROFILE_HIGH;
 		}
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
-		if (value >= 0 && value <= V4L2_H264_LEVEL_UNKNOWN) {
-			return h264_level_v4l2_to_hfi[value];
-		} else {
-			return h264_level_v4l2_to_hfi[V4L2_H264_LEVEL_UNKNOWN];
-		}
+		return h264_level_v4l2_to_hfi(value);
 	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
 		switch (value) {
 		case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC:
@@ -399,11 +467,7 @@
 			return HFI_VP9_PROFILE_P0;
 		}
 	case V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL:
-		if (value >= 0 && value <= V4L2_VP9_LEVEL_61) {
-			return vp9_level_v4l2_to_hfi[value];
-		} else {
-			return vp9_level_v4l2_to_hfi[V4L2_VP9_LEVEL_61];
-		}
+		return vp9_level_v4l2_to_hfi(value);
 	case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
 		switch (value) {
 		case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
@@ -416,11 +480,7 @@
 			return HFI_HEVC_PROFILE_MAIN;
 		}
 	case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
-		if (value >= 0 && value <= V4L2_HEVC_LEVEL_UNKNOWN) {
-			return hevc_level_v4l2_to_hfi[value];
-		} else {
-			return hevc_level_v4l2_to_hfi[V4L2_HEVC_LEVEL_UNKNOWN];
-		}
+		return hevc_level_v4l2_to_hfi(value);
 	case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
 		switch (value) {
 		case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
@@ -998,20 +1058,69 @@
 	return NULL;
 }
 
+static void update_capability(struct msm_vidc_codec_capability *in,
+		struct msm_vidc_capability *capability)
+{
+	if (!in || !capability) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return;
+	}
+
+	if (in->capability_type < CAP_MAX) {
+		capability->cap[in->capability_type].capability_type =
+				in->capability_type;
+		capability->cap[in->capability_type].min = in->min;
+		capability->cap[in->capability_type].max = in->max;
+		capability->cap[in->capability_type].step_size = in->step_size;
+		capability->cap[in->capability_type].default_value =
+				in->default_value;
+	} else {
+		dprintk(VIDC_ERR, "%s: invalid capability_type %d\n",
+			__func__, in->capability_type);
+	}
+}
+
+static int msm_vidc_capabilities(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	struct msm_vidc_codec_capability *platform_caps;
+	int i, j, num_platform_caps;
+
+	if (!core || !core->capabilities) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	platform_caps = core->resources.codec_caps;
+	num_platform_caps = core->resources.codec_caps_count;
+
+	dprintk(VIDC_DBG, "%s: num caps %d\n", __func__, num_platform_caps);
+	/* loop over each platform capability */
+	for (i = 0; i < num_platform_caps; i++) {
+		/* select matching core codec and update it */
+		for (j = 0; j < core->resources.codecs_count; j++) {
+			if ((platform_caps[i].domains &
+				core->capabilities[j].domain) &&
+				(platform_caps[i].codecs &
+				core->capabilities[j].codec)) {
+				/* update core capability */
+				update_capability(&platform_caps[i],
+					&core->capabilities[j]);
+			}
+		}
+	}
+
+	return rc;
+}
+
 static void handle_sys_init_done(enum hal_command_response cmd, void *data)
 {
 	struct msm_vidc_cb_cmd_done *response = data;
 	struct msm_vidc_core *core;
-	struct vidc_hal_sys_init_done *sys_init_msg;
-	u32 index;
 
 	if (!IS_HAL_SYS_CMD(cmd)) {
 		dprintk(VIDC_ERR, "%s - invalid cmd\n", __func__);
 		return;
 	}
-
-	index = SYS_MSG_INDEX(cmd);
-
 	if (!response) {
 		dprintk(VIDC_ERR,
 			"Failed to get valid response for sys init\n");
@@ -1022,41 +1131,8 @@
 		dprintk(VIDC_ERR, "Wrong device_id received\n");
 		return;
 	}
-	sys_init_msg = &response->data.sys_init_done;
-	if (!sys_init_msg) {
-		dprintk(VIDC_ERR, "sys_init_done message not proper\n");
-		return;
-	}
-
-	core->enc_codec_supported = sys_init_msg->enc_codec_supported;
-	core->dec_codec_supported = sys_init_msg->dec_codec_supported;
-
-	/* This should come from sys_init_done */
-	core->resources.max_inst_count =
-		sys_init_msg->max_sessions_supported ?
-		min_t(u32, sys_init_msg->max_sessions_supported,
-		MAX_SUPPORTED_INSTANCES) : MAX_SUPPORTED_INSTANCES;
-
-	core->resources.max_secure_inst_count =
-		core->resources.max_secure_inst_count ?
-		core->resources.max_secure_inst_count :
-		core->resources.max_inst_count;
-
-	if (core->id == MSM_VIDC_CORE_VENUS &&
-		(core->dec_codec_supported & HAL_VIDEO_CODEC_H264))
-		core->dec_codec_supported |=
-			HAL_VIDEO_CODEC_MVC;
-
-	core->codec_count = sys_init_msg->codec_count;
-	memcpy(core->capabilities, sys_init_msg->capabilities,
-		sys_init_msg->codec_count * sizeof(struct msm_vidc_capability));
-
-	dprintk(VIDC_DBG,
-		"%s: supported_codecs[%d]: enc = %#x, dec = %#x\n",
-		__func__, core->codec_count, core->enc_codec_supported,
-		core->dec_codec_supported);
-
-	complete(&(core->completions[index]));
+	dprintk(VIDC_DBG, "%s: core %pK\n", __func__, core);
+	complete(&(core->completions[SYS_MSG_INDEX(cmd)]));
 }
 
 static void put_inst_helper(struct kref *kref)
@@ -1300,33 +1376,51 @@
 		struct hal_capability_supported *cap)
 {
 	dprintk(VIDC_DBG,
-		"%-24s: %-8d %-8d %-8d\n",
-		type, cap->min, cap->max, cap->step_size);
+		"%-24s: %-10d %-10d %-10d %-10d\n",
+		type, cap->min, cap->max, cap->step_size, cap->default_value);
 }
 
 static int msm_vidc_comm_update_ctrl(struct msm_vidc_inst *inst,
-	u32 id, struct hal_capability_supported *capability)
+	u32 id, struct hal_capability_supported *cap)
 {
 	struct v4l2_ctrl *ctrl = NULL;
 	int rc = 0;
 
 	ctrl = v4l2_ctrl_find(&inst->ctrl_handler, id);
-	if (ctrl) {
-		v4l2_ctrl_modify_range(ctrl, capability->min,
-				capability->max, ctrl->step,
-				ctrl->default_value);
-		dprintk(VIDC_DBG,
-			"%s: Updated Range = %lld --> %lld Def value = %lld\n",
-			ctrl->name, ctrl->minimum, ctrl->maximum,
-			ctrl->default_value);
-	} else {
+	if (!ctrl) {
 		dprintk(VIDC_ERR,
-			"Failed to find Conrol %d\n", id);
-		rc = -EINVAL;
+			"%s: Conrol id %d not found\n", __func__, id);
+		return -EINVAL;
 	}
 
-	return rc;
+	rc = v4l2_ctrl_modify_range(ctrl, cap->min, cap->max,
+			cap->step_size, cap->default_value);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s: failed: control name %s, min %d, max %d, step %d, default_value %d\n",
+			__func__, ctrl->name, cap->min, cap->max,
+			cap->step_size, cap->default_value);
+		goto error;
 	}
+	/*
+	 * v4l2_ctrl_modify_range() need not apply the new default to the
+	 * control's current value, so use v4l2_ctrl_s_ctrl() to set it
+	 * explicitly.
+	 */
+	rc = v4l2_ctrl_s_ctrl(ctrl, cap->default_value);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s: failed s_ctrl: %s with value %d\n",
+			__func__, ctrl->name, cap->default_value);
+		goto error;
+	}
+	dprintk(VIDC_DBG,
+		"Updated control: %s: min %lld, max %lld, step %lld, default value = %lld\n",
+		ctrl->name, ctrl->minimum, ctrl->maximum,
+		ctrl->step, ctrl->default_value);
+
+error:
+	return rc;
+}
 
 static void msm_vidc_comm_update_ctrl_limits(struct msm_vidc_inst *inst)
 {
@@ -1335,52 +1429,19 @@
 			HAL_VIDEO_CODEC_TME)
 			return;
 		msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE,
-				&inst->capability.bitrate);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR,
-				&inst->capability.bitrate);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR,
-				&inst->capability.bitrate);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR,
-				&inst->capability.bitrate);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR,
-				&inst->capability.bitrate);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR,
-				&inst->capability.bitrate);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR,
-				&inst->capability.bitrate);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
-				&inst->capability.i_qp);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP,
-				&inst->capability.p_qp);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP,
-				&inst->capability.b_qp);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
-				&inst->capability.i_qp);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP,
-				&inst->capability.i_qp);
+				&inst->capability.cap[CAP_BITRATE]);
 		msm_vidc_comm_update_ctrl(inst,
 				V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
-				&inst->capability.slice_bytes);
+				&inst->capability.cap[CAP_SLICE_BYTE]);
 		msm_vidc_comm_update_ctrl(inst,
 				V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
-				&inst->capability.slice_mbs);
+				&inst->capability.cap[CAP_SLICE_MB]);
 		msm_vidc_comm_update_ctrl(inst,
 				V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT,
-				&inst->capability.ltr_count);
+				&inst->capability.cap[CAP_LTR_COUNT]);
 		msm_vidc_comm_update_ctrl(inst,
 				V4L2_CID_MPEG_VIDEO_B_FRAMES,
-				&inst->capability.bframe);
+				&inst->capability.cap[CAP_BFRAME]);
 	}
 }
 
@@ -1389,9 +1450,7 @@
 	struct msm_vidc_cb_cmd_done *response = data;
 	struct msm_vidc_inst *inst = NULL;
 	struct msm_vidc_capability *capability = NULL;
-	struct hfi_device *hdev;
 	struct msm_vidc_core *core;
-	struct hal_profile_level *profile_level;
 	u32 i, codec;
 
 	if (!response) {
@@ -1412,14 +1471,7 @@
 		dprintk(VIDC_ERR,
 			"Session init response from FW : %#x\n",
 			response->status);
-		if (response->status == VIDC_ERR_MAX_CLIENTS)
-			msm_comm_generate_max_clients_error(inst);
-		else
-			msm_comm_generate_session_error(inst);
-
-		signal_session_msg_receipt(cmd, inst);
-		put_inst(inst);
-		return;
+		goto error;
 	}
 
 	if (inst->session_type == MSM_VIDC_CVP) {
@@ -1431,13 +1483,11 @@
 	}
 
 	core = inst->core;
-	hdev = inst->core->device;
 	codec = inst->session_type == MSM_VIDC_DECODER ?
 			inst->fmts[OUTPUT_PORT].fourcc :
 			inst->fmts[CAPTURE_PORT].fourcc;
 
-	/* check if capabilities are available for this session */
-	for (i = 0; i < VIDC_MAX_SESSIONS; i++) {
+	for (i = 0; i < core->resources.codecs_count; i++) {
 		if (core->capabilities[i].codec ==
 				get_hal_codec(codec) &&
 			core->capabilities[i].domain ==
@@ -1446,77 +1496,54 @@
 			break;
 		}
 	}
-
-	if (capability) {
-		dprintk(VIDC_DBG,
-			"%s: capabilities for codec 0x%x, domain %#x\n",
-			__func__, capability->codec, capability->domain);
-		memcpy(&inst->capability, capability,
-			sizeof(struct msm_vidc_capability));
-	} else {
+	if (!capability) {
 		dprintk(VIDC_ERR,
-			"Watch out : Some property may fail inst %pK\n", inst);
-		dprintk(VIDC_ERR,
-			"Caps N/A for codec 0x%x, domain %#x\n",
-			inst->capability.codec, inst->capability.domain);
+			"%s: capabilities not found for domain %#x codec %#x\n",
+			__func__, get_hal_domain(inst->session_type),
+			get_hal_codec(codec));
+		goto error;
 	}
-	inst->capability.pixelprocess_capabilities =
-		call_hfi_op(hdev, get_core_capabilities, hdev->hfi_device_data);
 
 	dprintk(VIDC_DBG,
-		"Capability type : min      max      step size\n");
-	print_cap("width", &inst->capability.width);
-	print_cap("height", &inst->capability.height);
-	print_cap("mbs_per_frame", &inst->capability.mbs_per_frame);
-	print_cap("mbs_per_sec", &inst->capability.mbs_per_sec);
-	print_cap("frame_rate", &inst->capability.frame_rate);
-	print_cap("bitrate", &inst->capability.bitrate);
-	print_cap("scale_x", &inst->capability.scale_x);
-	print_cap("scale_y", &inst->capability.scale_y);
-	print_cap("hier_p", &inst->capability.hier_p);
-	print_cap("ltr_count", &inst->capability.ltr_count);
-	print_cap("bframe", &inst->capability.bframe);
-	print_cap("secure_output2_threshold",
-		&inst->capability.secure_output2_threshold);
-	print_cap("hier_b", &inst->capability.hier_b);
-	print_cap("lcu_size", &inst->capability.lcu_size);
-	print_cap("hier_p_hybrid", &inst->capability.hier_p_hybrid);
+		"%s: capabilities for domain %#x codec %#x\n",
+		__func__, capability->domain, capability->codec);
+	memcpy(&inst->capability, capability,
+			sizeof(struct msm_vidc_capability));
+
+	dprintk(VIDC_DBG,
+		"Capability type :         min        max        step_size  default_value\n");
+	print_cap("width", &inst->capability.cap[CAP_FRAME_WIDTH]);
+	print_cap("height", &inst->capability.cap[CAP_FRAME_HEIGHT]);
+	print_cap("mbs_per_frame", &inst->capability.cap[CAP_MBS_PER_FRAME]);
+	print_cap("mbs_per_sec", &inst->capability.cap[CAP_MBS_PER_SECOND]);
+	print_cap("frame_rate", &inst->capability.cap[CAP_FRAMERATE]);
+	print_cap("bitrate", &inst->capability.cap[CAP_BITRATE]);
+	print_cap("scale_x", &inst->capability.cap[CAP_SCALE_X]);
+	print_cap("scale_y", &inst->capability.cap[CAP_SCALE_Y]);
+	print_cap("hier_p", &inst->capability.cap[CAP_HIER_P_NUM_ENH_LAYERS]);
+	print_cap("ltr_count", &inst->capability.cap[CAP_LTR_COUNT]);
+	print_cap("bframe", &inst->capability.cap[CAP_BFRAME]);
 	print_cap("mbs_per_sec_low_power",
-		&inst->capability.mbs_per_sec_power_save);
-	print_cap("extradata", &inst->capability.extradata);
-	print_cap("profile", &inst->capability.profile);
-	print_cap("level", &inst->capability.level);
-	print_cap("i_qp", &inst->capability.i_qp);
-	print_cap("p_qp", &inst->capability.p_qp);
-	print_cap("b_qp", &inst->capability.b_qp);
-	print_cap("rc_modes", &inst->capability.rc_modes);
-	print_cap("blur_width", &inst->capability.blur_width);
-	print_cap("blur_height", &inst->capability.blur_height);
-	print_cap("slice_bytes", &inst->capability.slice_bytes);
-	print_cap("slice_mbs", &inst->capability.slice_mbs);
-	print_cap("secure", &inst->capability.secure);
-	print_cap("max_num_b_frames", &inst->capability.max_num_b_frames);
-	print_cap("max_video_cores", &inst->capability.max_video_cores);
-	print_cap("max_work_modes", &inst->capability.max_work_modes);
-	print_cap("ubwc_cr_stats", &inst->capability.ubwc_cr_stats);
-
-	dprintk(VIDC_DBG, "profile count : %u\n",
-		inst->capability.profile_level.profile_count);
-	for (i = 0; i < inst->capability.profile_level.profile_count; i++) {
-		profile_level =
-			&inst->capability.profile_level.profile_level[i];
-		dprintk(VIDC_DBG, "profile : %u\n", profile_level->profile);
-		dprintk(VIDC_DBG, "level   : %u\n", profile_level->level);
-	}
-
-	signal_session_msg_receipt(cmd, inst);
-
-	/*
-	 * Update controls after informing session_init_done to avoid
-	 * timeouts.
-	 */
+		&inst->capability.cap[CAP_MBS_PER_SECOND_POWER_SAVE]);
+	print_cap("i_qp", &inst->capability.cap[CAP_I_FRAME_QP]);
+	print_cap("p_qp", &inst->capability.cap[CAP_P_FRAME_QP]);
+	print_cap("b_qp", &inst->capability.cap[CAP_B_FRAME_QP]);
+	print_cap("slice_bytes", &inst->capability.cap[CAP_SLICE_BYTE]);
+	print_cap("slice_mbs", &inst->capability.cap[CAP_SLICE_MB]);
 
 	msm_vidc_comm_update_ctrl_limits(inst);
+
+	signal_session_msg_receipt(cmd, inst);
+	put_inst(inst);
+	return;
+
+error:
+	if (response->status == VIDC_ERR_MAX_CLIENTS)
+		msm_comm_generate_max_clients_error(inst);
+	else
+		msm_comm_generate_session_error(inst);
+
+	signal_session_msg_receipt(cmd, inst);
 	put_inst(inst);
 }
 
@@ -2546,6 +2573,10 @@
 		msm_comm_store_mark_data(&inst->fbd_data, vb->index,
 			fill_buf_done->mark_data, fill_buf_done->mark_target);
 	}
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		msm_comm_store_filled_length(&inst->fbd_data, vb->index,
+			fill_buf_done->filled_len1);
+	}
 
 	extra_idx = EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
 	if (extra_idx && extra_idx < VIDEO_MAX_PLANES)
@@ -2893,7 +2924,7 @@
 
 static int msm_comm_init_core(struct msm_vidc_inst *inst)
 {
-	int rc = 0;
+	int rc, i;
 	struct hfi_device *hdev;
 	struct msm_vidc_core *core;
 
@@ -2908,21 +2939,6 @@
 				core->id, core->state);
 		goto core_already_inited;
 	}
-	if (!core->capabilities) {
-		core->capabilities = kcalloc(VIDC_MAX_SESSIONS,
-				sizeof(struct msm_vidc_capability), GFP_KERNEL);
-		if (!core->capabilities) {
-			dprintk(VIDC_ERR,
-				"%s: failed to allocate capabilities\n",
-				__func__);
-			rc = -ENOMEM;
-			goto fail_cap_alloc;
-		}
-	} else {
-		dprintk(VIDC_WARN,
-			"%s: capabilities memory is expected to be freed\n",
-			__func__);
-	}
 	dprintk(VIDC_DBG, "%s: core %pK\n", __func__, core);
 	rc = call_hfi_op(hdev, core_init, hdev->hfi_device_data);
 	if (rc) {
@@ -2930,10 +2946,54 @@
 				core->id);
 		goto fail_core_init;
 	}
+
+	/* initialize core while firmware processing SYS_INIT cmd */
 	core->state = VIDC_CORE_INIT;
 	core->smmu_fault_handled = false;
 	core->trigger_ssr = false;
-
+	core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
+	core->resources.max_secure_inst_count =
+		core->resources.max_secure_inst_count ?
+		core->resources.max_secure_inst_count :
+		core->resources.max_inst_count;
+	dprintk(VIDC_DBG, "%s: codecs count %d, max inst count %d\n",
+		__func__, core->resources.codecs_count,
+		core->resources.max_inst_count);
+	if (!core->resources.codecs || !core->resources.codecs_count) {
+		dprintk(VIDC_ERR, "%s: invalid codecs\n", __func__);
+		rc = -EINVAL;
+		goto fail_core_init;
+	}
+	if (!core->capabilities) {
+		core->capabilities = kcalloc(core->resources.codecs_count,
+			sizeof(struct msm_vidc_capability), GFP_KERNEL);
+		if (!core->capabilities) {
+			dprintk(VIDC_ERR,
+				"%s: failed to allocate capabilities\n",
+				__func__);
+			rc = -ENOMEM;
+			goto fail_core_init;
+		}
+	} else {
+		dprintk(VIDC_WARN,
+			"%s: capabilities memory is expected to be freed\n",
+			__func__);
+	}
+	for (i = 0; i < core->resources.codecs_count; i++) {
+		core->capabilities[i].domain =
+				core->resources.codecs[i].domain;
+		core->capabilities[i].codec =
+				core->resources.codecs[i].codec;
+	}
+	rc = msm_vidc_capabilities(core);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			 "%s: default capabilities failed\n", __func__);
+		kfree(core->capabilities);
+		core->capabilities = NULL;
+		goto fail_core_init;
+	}
+	dprintk(VIDC_DBG, "%s: done\n", __func__);
 core_already_inited:
 	change_inst_state(inst, MSM_VIDC_CORE_INIT);
 	mutex_unlock(&core->lock);
@@ -2942,9 +3002,6 @@
 	return rc;
 
 fail_core_init:
-	kfree(core->capabilities);
-fail_cap_alloc:
-	core->capabilities = NULL;
 	core->state = VIDC_CORE_UNINIT;
 	mutex_unlock(&core->lock);
 	return rc;
@@ -3157,7 +3214,7 @@
 		msm_comm_get_load(core, MSM_VIDC_ENCODER, quirks);
 
 	max_load_adj = core->resources.max_load +
-		inst->capability.mbs_per_frame.max;
+		inst->capability.cap[CAP_MBS_PER_FRAME].max;
 
 	if (num_mbs_per_sec > max_load_adj) {
 		dprintk(VIDC_ERR, "HW is overloaded, needed: %d max: %d\n",
@@ -3494,7 +3551,7 @@
 	int rc = 0;
 	struct internal_buf *binfo = NULL;
 	u32 smem_flags = SMEM_UNCACHED, buffer_size, num_buffers, hfi_fmt;
-	struct hal_buffer_requirements *output_buf, *extradata_buf;
+	struct hal_buffer_requirements *output_buf;
 	unsigned int i;
 	struct hfi_device *hdev;
 	struct hfi_buffer_size_minimum b;
@@ -3509,6 +3566,17 @@
 		return 0;
 	}
 
+	/* Set DPB buffer count to firmware */
+	rc = msm_comm_set_buffer_count(inst,
+			output_buf->buffer_count_min,
+			output_buf->buffer_count_min,
+			HAL_BUFFER_OUTPUT);
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: failed to set bufreqs(%#x)\n",
+			__func__, buffer_type);
+		return -EINVAL;
+	}
+
 	/* For DPB buffers, Always use FW count */
 	num_buffers = output_buf->buffer_count_min;
 	hfi_fmt = msm_comm_convert_color_fmt(inst->clk_data.dpb_fourcc);
@@ -3528,12 +3596,15 @@
 		inst->session, HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM,
 		&b, sizeof(b));
 
-	extradata_buf = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
-	if (extradata_buf) {
+	if (inst->bufq[CAPTURE_PORT].num_planes == 1 ||
+		!inst->bufq[CAPTURE_PORT].plane_sizes[1]) {
 		dprintk(VIDC_DBG,
-			"extradata: num = %d, size = %d\n",
-			extradata_buf->buffer_count_actual,
-			extradata_buf->buffer_size);
+			"This extradata buffer not required, buffer_type: %x\n",
+			buffer_type);
+	} else {
+		dprintk(VIDC_DBG,
+			"extradata: num = 1, size = %d\n",
+			inst->bufq[CAPTURE_PORT].plane_sizes[1]);
 		inst->dpb_extra_binfo = NULL;
 		inst->dpb_extra_binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
 		if (!inst->dpb_extra_binfo) {
@@ -3542,17 +3613,13 @@
 			goto fail_kzalloc;
 		}
 		rc = msm_comm_smem_alloc(inst,
-			extradata_buf->buffer_size, 1, smem_flags,
+			inst->bufq[CAPTURE_PORT].plane_sizes[1], 1, smem_flags,
 			buffer_type, 0, &inst->dpb_extra_binfo->smem);
 		if (rc) {
 			dprintk(VIDC_ERR,
 				"Failed to allocate output memory\n");
 			goto err_no_mem;
 		}
-	} else {
-		dprintk(VIDC_DBG,
-			"This extradata buffer not required, buffer_type: %x\n",
-			buffer_type);
 	}
 
 	if (inst->flags & VIDC_SECURE)
@@ -4443,7 +4510,7 @@
 	}
 
 	dprintk(VIDC_DBG, "Buffer requirements :\n");
-	dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n",
+	dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s %8s\n",
 		"buffer type", "count", "mincount_host", "mincount_fw", "size",
 		"alignment");
 	for (i = 0; i < HAL_BUFFER_MAX; i++) {
@@ -5250,10 +5317,10 @@
 		return -ENOTSUPP;
 	}
 
-	if (!inst->capability.scale_x.min ||
-		!inst->capability.scale_x.max ||
-		!inst->capability.scale_y.min ||
-		!inst->capability.scale_y.max) {
+	if (!inst->capability.cap[CAP_SCALE_X].min ||
+		!inst->capability.cap[CAP_SCALE_X].max ||
+		!inst->capability.cap[CAP_SCALE_Y].min ||
+		!inst->capability.cap[CAP_SCALE_Y].max) {
 
 		if (input_width * input_height !=
 			output_width * output_height) {
@@ -5269,10 +5336,10 @@
 		return 0;
 	}
 
-	x_min = (1<<16)/inst->capability.scale_x.min;
-	y_min = (1<<16)/inst->capability.scale_y.min;
-	x_max = inst->capability.scale_x.max >> 16;
-	y_max = inst->capability.scale_y.max >> 16;
+	x_min = (1<<16)/inst->capability.cap[CAP_SCALE_X].min;
+	y_min = (1<<16)/inst->capability.cap[CAP_SCALE_Y].min;
+	x_max = inst->capability.cap[CAP_SCALE_X].max >> 16;
+	y_max = inst->capability.cap[CAP_SCALE_Y].max >> 16;
 
 	if (input_height > output_height) {
 		if (input_height > x_min * output_height) {
@@ -5314,6 +5381,8 @@
 	struct hfi_device *hdev;
 	struct msm_vidc_core *core;
 	u32 output_height, output_width, input_height, input_width;
+	u32 width_min, width_max, height_min, height_max;
+	u32 mbpf_max;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
@@ -5335,6 +5404,20 @@
 		return -ENOTSUPP;
 	}
 
+	if (is_secure_session(inst)) {
+		width_min = capability->cap[CAP_SECURE_FRAME_WIDTH].min;
+		width_max = capability->cap[CAP_SECURE_FRAME_WIDTH].max;
+		height_min = capability->cap[CAP_SECURE_FRAME_HEIGHT].min;
+		height_max = capability->cap[CAP_SECURE_FRAME_HEIGHT].max;
+		mbpf_max = capability->cap[CAP_SECURE_MBS_PER_FRAME].max;
+	} else {
+		width_min = capability->cap[CAP_FRAME_WIDTH].min;
+		width_max = capability->cap[CAP_FRAME_WIDTH].max;
+		height_min = capability->cap[CAP_FRAME_HEIGHT].min;
+		height_max = capability->cap[CAP_FRAME_HEIGHT].max;
+		mbpf_max = capability->cap[CAP_MBS_PER_FRAME].max;
+	}
+
 	output_height = inst->prop.height[CAPTURE_PORT];
 	output_width = inst->prop.width[CAPTURE_PORT];
 	input_height = inst->prop.height[OUTPUT_PORT];
@@ -5356,30 +5439,34 @@
 	output_width = ALIGN(inst->prop.width[CAPTURE_PORT], 16);
 
 	if (!rc) {
-		if (output_width < capability->width.min ||
-			output_height < capability->height.min) {
+		if (output_width < width_min ||
+			output_height < height_min) {
 			dprintk(VIDC_ERR,
 				"Unsupported WxH = (%u)x(%u), min supported is - (%u)x(%u)\n",
-				output_width,
-				output_height,
-				capability->width.min,
-				capability->height.min);
+				output_width, output_height,
+				width_min, height_min);
 			rc = -ENOTSUPP;
 		}
-		if (!rc && output_width > capability->width.max) {
+		if (!rc && output_width > width_max) {
 			dprintk(VIDC_ERR,
 				"Unsupported width = %u supported max width = %u\n",
-				output_width,
-				capability->width.max);
+				output_width, width_max);
 				rc = -ENOTSUPP;
 		}
 
 		if (!rc && output_height * output_width >
-			capability->width.max * capability->height.max) {
+			width_max * height_max) {
 			dprintk(VIDC_ERR,
 			"Unsupported WxH = (%u)x(%u), max supported is - (%u)x(%u)\n",
 			output_width, output_height,
-			capability->width.max, capability->height.max);
+			width_max, height_max);
+			rc = -ENOTSUPP;
+		}
+		if (!rc && NUM_MBS_PER_FRAME(input_width, input_height) >
+			mbpf_max) {
+			dprintk(VIDC_ERR, "Unsupported mbpf %d, max %d\n",
+				NUM_MBS_PER_FRAME(input_width, input_height),
+				mbpf_max);
 			rc = -ENOTSUPP;
 		}
 	}
@@ -5991,9 +6078,14 @@
 			} else if (vb->type ==
 					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 				if (!i) { /* bitstream */
+					u32 size_u32;
 					skip = false;
 					offset = 0;
-					size = vb->planes[i].length;
+					size_u32 = vb->planes[i].length;
+					msm_comm_fetch_filled_length(
+						&inst->fbd_data, vb->index,
+						&size_u32);
+					size = size_u32;
 					cache_op = SMEM_CACHE_INVALIDATE;
 				}
 			}
@@ -6455,6 +6547,63 @@
 	return ret;
 }
 
+void msm_comm_store_filled_length(struct msm_vidc_list *data_list,
+		u32 index, u32 filled_length)
+{
+	struct msm_vidc_buf_data *pdata = NULL;
+	bool found = false;
+
+	if (!data_list) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK\n",
+			__func__, data_list);
+		return;
+	}
+
+	mutex_lock(&data_list->lock);
+	list_for_each_entry(pdata, &data_list->list, list) {
+		if (pdata->index == index) {
+			pdata->filled_length = filled_length;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+		if (!pdata)  {
+			dprintk(VIDC_WARN, "%s: malloc failure.\n", __func__);
+			goto exit;
+		}
+		pdata->index = index;
+		pdata->filled_length = filled_length;
+		list_add_tail(&pdata->list, &data_list->list);
+	}
+
+exit:
+	mutex_unlock(&data_list->lock);
+}
+
+void msm_comm_fetch_filled_length(struct msm_vidc_list *data_list,
+		u32 index, u32 *filled_length)
+{
+	struct msm_vidc_buf_data *pdata = NULL;
+
+	if (!data_list || !filled_length) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+			__func__, data_list, filled_length);
+		return;
+	}
+
+	mutex_lock(&data_list->lock);
+	list_for_each_entry(pdata, &data_list->list, list) {
+		if (pdata->index == index) {
+			*filled_length = pdata->filled_length;
+			break;
+		}
+	}
+	mutex_unlock(&data_list->lock);
+}
+
 void msm_comm_store_mark_data(struct msm_vidc_list *data_list,
 		u32 index, u32 mark_data, u32 mark_target)
 {
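
The level-mapping rewrite above replaces the global h264/hevc/vp9 lookup arrays with switch statements, so call sites no longer need to range-check the V4L2 enum before indexing. A minimal sketch of the hazard the switch form removes; the table and values are illustrative, not the driver's:

#include <errno.h>

static const int level_tbl[] = { 10, 20, 30 };

static int level_to_hfi_unsafe(int v4l2_level)
{
	/* out-of-bounds read for any v4l2_level outside 0..2 */
	return level_tbl[v4l2_level];
}

static int level_to_hfi_safe(int v4l2_level)
{
	switch (v4l2_level) {
	case 0: return 10;
	case 1: return 20;
	case 2: return 30;
	default: return -EINVAL;	/* unknown values rejected */
	}
}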
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index f79a6b7..6b0e882 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -80,6 +80,11 @@
 	return !!(inst->flags & VIDC_REALTIME);
 }
 
+static inline bool is_secure_session(struct msm_vidc_inst *inst)
+{
+	return !!(inst->flags & VIDC_SECURE);
+}
+
 static inline bool is_decode_session(struct msm_vidc_inst *inst)
 {
 	return inst->session_type == MSM_VIDC_DECODER;
@@ -252,6 +257,10 @@
 		struct v4l2_buffer *v4l2);
 void kref_put_mbuf(struct msm_vidc_buffer *mbuf);
 bool kref_get_mbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
+void msm_comm_store_filled_length(struct msm_vidc_list *data_list,
+		u32 index, u32 filled_length);
+void msm_comm_fetch_filled_length(struct msm_vidc_list *data_list,
+		u32 index, u32 *filled_length);
 void msm_comm_store_mark_data(struct msm_vidc_list *data_list,
 		u32 index, u32 mark_data, u32 mark_target);
 void msm_comm_fetch_mark_data(struct msm_vidc_list *data_list,
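
msm_comm_fetch_filled_length(), declared above, leaves *filled_length untouched when no entry matches the index, so callers pre-load a fallback (the full plane length) before the call. A self-contained sketch of that contract, with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

struct buf_data { uint32_t index, filled_length; };

static void fetch_filled_length(const struct buf_data *tbl, int n,
				uint32_t index, uint32_t *filled_length)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].index == index) {
			*filled_length = tbl[i].filled_length;
			return;
		}
	}
	/* not found: *filled_length keeps the caller's fallback value */
}

int main(void)
{
	struct buf_data tbl[] = { { 3, 512 } };
	uint32_t len = 4096;	/* full plane length as fallback */

	fetch_filled_length(tbl, 1, 3, &len);
	printf("%u\n", len);	/* 512 */
	return 0;
}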
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 8bf2272..51e382ac 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -7,6 +7,7 @@
 #define MAX_SSR_STRING_LEN 10
 #include "msm_vidc_debug.h"
 #include "vidc_hfi_api.h"
+#include <linux/of_fdt.h>
 
 int msm_vidc_debug = VIDC_ERR | VIDC_WARN;
 EXPORT_SYMBOL(msm_vidc_debug);
@@ -87,6 +88,8 @@
 	cur += write_str(cur, end - cur,
 		"register_size: %u\n", fw_info.register_size);
 	cur += write_str(cur, end - cur, "irq: %u\n", fw_info.irq);
+	cur += write_str(cur, end - cur,
+		"ddr_type: %d\n", of_fdt_get_ddrtype());
 
 err_fw_info:
 	for (i = SYS_MSG_START; i < SYS_MSG_END; i++) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
index f158c35..f144070 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
@@ -3,28 +3,21 @@
  * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
-
-#include <linux/module.h>
-#include "governor.h"
+#include "msm_vidc_debug.h"
 #include "fixedpoint.h"
 #include "msm_vidc_internal.h"
-#include "msm_vidc_debug.h"
 #include "vidc_hfi_api.h"
 #define COMPRESSION_RATIO_MAX 5
 
-enum governor_mode {
-	GOVERNOR_DDR,
-	GOVERNOR_LLCC,
-};
-
-struct governor {
-	enum governor_mode mode;
-	struct devfreq_governor devfreq_gov;
+enum vidc_bus_type {
+	PERF,
+	DDR,
+	LLCC,
 };
 
 /*
- * Minimum dimensions that the governor is willing to calculate
- * bandwidth for.  This means that anything bandwidth(0, 0) ==
+ * Minimum dimensions for which to calculate bandwidth.
+ * This means that bandwidth(0, 0) ==
  * bandwidth(BASELINE_DIMENSIONS.width, BASELINE_DIMENSIONS.height)
  */
 static const struct {
@@ -34,15 +27,6 @@
 	.height = 720,
 };
 
-/*
- * These are hardcoded AB values that the governor votes for in certain
- * situations, where a certain bus frequency is desired.  It isn't exactly
- * scalable since different platforms have different bus widths, but we'll
- * deal with that in the future.
- */
-const unsigned long NOMINAL_BW_MBPS = 6000 /* ideally 320 Mhz */,
-	SVS_BW_MBPS = 2000 /* ideally 100 Mhz */;
-
 /* converts Mbps to bps (the "b" part can be bits or bytes based on context) */
 #define kbps(__mbps) ((__mbps) * 1000)
 #define bps(__mbps) (kbps(__mbps) * 1000)
@@ -207,6 +191,16 @@
 	},
 };
 
+static u32 get_type_frm_name(char *name)
+{
+	if (!strcmp(name, "venus-llcc"))
+		return LLCC;
+	else if (!strcmp(name, "venus-ddr"))
+		return DDR;
+	else
+		return PERF;
+}
+
 static struct lut const *__lut(int width, int height, int fps)
 {
 	int frame_size = height * width, c = 0;
@@ -273,27 +267,25 @@
 
 			}
 		}
-
-		dprintk(VIDC_PROF, "%s", formatted_line);
 	}
 }
 
 static unsigned long __calculate_vpe(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	return 0;
 }
 
 static unsigned long __calculate_cvp(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	unsigned long ret = 0;
 
-	switch (gm) {
-	case GOVERNOR_DDR:
+	switch (type) {
+	case DDR:
 		ret = d->ddr_bw;
 		break;
-	case GOVERNOR_LLCC:
+	case LLCC:
 		ret = d->sys_cache_bw;
 		break;
 	default:
@@ -334,7 +326,7 @@
 }
 
 static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	/*
 	 * XXX: Don't fool around with any of the hardcoded numbers unless you
@@ -575,11 +567,11 @@
 		__dump(dump, ARRAY_SIZE(dump));
 	}
 
-	switch (gm) {
-	case GOVERNOR_DDR:
+	switch (type) {
+	case DDR:
 		ret = kbps(fp_round(ddr.total));
 		break;
-	case GOVERNOR_LLCC:
+	case LLCC:
 		ret = kbps(fp_round(llc.total));
 		break;
 	default:
@@ -590,7 +582,7 @@
 }
 
 static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	/*
 	 * XXX: Don't fool around with any of the hardcoded numbers unless you
@@ -872,11 +864,11 @@
 		__dump(dump, ARRAY_SIZE(dump));
 	}
 
-	switch (gm) {
-	case GOVERNOR_DDR:
+	switch (type) {
+	case DDR:
 		ret = kbps(fp_round(ddr.total));
 		break;
-	case GOVERNOR_LLCC:
+	case LLCC:
 		ret = kbps(fp_round(llc.total));
 		break;
 	default:
@@ -887,41 +879,37 @@
 }
 
 static unsigned long __calculate(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
-	unsigned long (*calc[])(struct vidc_bus_vote_data *,
-			enum governor_mode) = {
-		[HAL_VIDEO_DOMAIN_VPE] = __calculate_vpe,
-		[HAL_VIDEO_DOMAIN_ENCODER] = __calculate_encoder,
-		[HAL_VIDEO_DOMAIN_DECODER] = __calculate_decoder,
-		[HAL_VIDEO_DOMAIN_CVP] = __calculate_cvp,
-	};
+	unsigned long value = 0;
 
-	if (d->domain >= ARRAY_SIZE(calc)) {
-		dprintk(VIDC_ERR, "%s: invalid domain %d\n",
-			__func__, d->domain);
-		return 0;
+	switch (d->domain) {
+	case HAL_VIDEO_DOMAIN_VPE:
+		value = __calculate_vpe(d, type);
+		break;
+	case HAL_VIDEO_DOMAIN_ENCODER:
+		value = __calculate_encoder(d, type);
+		break;
+	case HAL_VIDEO_DOMAIN_DECODER:
+		value = __calculate_decoder(d, type);
+		break;
+	case HAL_VIDEO_DOMAIN_CVP:
+		value = __calculate_cvp(d, type);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Unknown Domain");
 	}
-	return calc[d->domain](d, gm);
+
+	return value;
 }
 
-
-static int __get_target_freq(struct devfreq *dev, unsigned long *freq)
+unsigned long __calc_bw(struct bus_info *bus,
+				struct msm_vidc_gov_data *vidc_data)
 {
 	unsigned long ab_kbps = 0, c = 0;
-	struct devfreq_dev_status stats = {0};
-	struct msm_vidc_gov_data *vidc_data = NULL;
-	struct governor *gov = NULL;
+	enum vidc_bus_type type;
 
-	if (!dev || !freq)
-		return -EINVAL;
-
-	gov = container_of(dev->governor,
-			struct governor, devfreq_gov);
-	dev->profile->get_dev_status(dev->dev.parent, &stats);
-	vidc_data = (struct msm_vidc_gov_data *)stats.private_data;
-
-	if (!vidc_data || !vidc_data->data_count)
+	if (!vidc_data || !vidc_data->data_count || !vidc_data->data)
 		goto exit;
 
 	for (c = 0; c < vidc_data->data_count; ++c) {
@@ -931,85 +919,12 @@
 		}
 	}
 
+	type = get_type_frm_name(bus->name);
+
 	for (c = 0; c < vidc_data->data_count; ++c)
-		ab_kbps += __calculate(&vidc_data->data[c], gov->mode);
+		ab_kbps += __calculate(&vidc_data->data[c], type);
 
 exit:
-	*freq = clamp(ab_kbps, dev->min_freq, dev->max_freq ?
-		dev->max_freq : UINT_MAX);
-	trace_msm_vidc_perf_bus_vote(gov->devfreq_gov.name, *freq);
-	return 0;
+	trace_msm_vidc_perf_bus_vote(bus->name, ab_kbps);
+	return ab_kbps;
 }
-
-static int __event_handler(struct devfreq *devfreq, unsigned int event,
-		void *data)
-{
-	int rc = 0;
-
-	if (!devfreq)
-		return -EINVAL;
-
-	switch (event) {
-	case DEVFREQ_GOV_START:
-	case DEVFREQ_GOV_RESUME:
-	case DEVFREQ_GOV_SUSPEND:
-		mutex_lock(&devfreq->lock);
-		rc = update_devfreq(devfreq);
-		mutex_unlock(&devfreq->lock);
-		break;
-	}
-
-	return rc;
-}
-
-static struct governor governors[] = {
-	{
-		.mode = GOVERNOR_DDR,
-		.devfreq_gov = {
-			.name = "msm-vidc-ddr",
-			.get_target_freq = __get_target_freq,
-			.event_handler = __event_handler,
-		},
-	},
-	{
-		.mode = GOVERNOR_LLCC,
-		.devfreq_gov = {
-			.name = "msm-vidc-llcc",
-			.get_target_freq = __get_target_freq,
-			.event_handler = __event_handler,
-		},
-	},
-};
-
-static int __init msm_vidc_bw_gov_init(void)
-{
-	int c = 0, rc = 0;
-
-	for (c = 0; c < ARRAY_SIZE(governors); ++c) {
-		dprintk(VIDC_DBG, "Adding governor %s\n",
-				governors[c].devfreq_gov.name);
-
-		rc = devfreq_add_governor(&governors[c].devfreq_gov);
-		if (rc) {
-			dprintk(VIDC_ERR, "Error adding governor %s: %d\n",
-				governors[c].devfreq_gov.name, rc);
-			break;
-		}
-	}
-
-	return rc;
-}
-module_init(msm_vidc_bw_gov_init);
-
-static void __exit msm_vidc_bw_gov_exit(void)
-{
-	int c = 0;
-
-	for (c = 0; c < ARRAY_SIZE(governors); ++c) {
-		dprintk(VIDC_DBG, "Removing governor %s\n",
-				governors[c].devfreq_gov.name);
-		devfreq_remove_governor(&governors[c].devfreq_gov);
-	}
-}
-module_exit(msm_vidc_bw_gov_exit);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index f96c57d..27c9ceb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -187,6 +187,7 @@
 	u32 index;
 	u32 mark_data;
 	u32 mark_target;
+	u32 filled_length;
 };
 
 struct msm_vidc_common_data {
@@ -202,6 +203,21 @@
 	int low_power_cycles;
 };
 
+struct msm_vidc_codec_capability {
+	enum hal_capability capability_type;
+	enum hal_domain domains;
+	enum hal_video_codec codecs;
+	u32 min;
+	u32 max;
+	u32 step_size;
+	u32 default_value;
+};
+
+struct msm_vidc_codec {
+	enum hal_domain domain;
+	enum hal_video_codec codec;
+};
+
 enum efuse_purpose {
 	SKU_VERSION = 0,
 };
@@ -248,6 +264,10 @@
 	unsigned int common_data_length;
 	struct msm_vidc_codec_data *codec_data;
 	unsigned int codec_data_length;
+	struct msm_vidc_codec *codecs;
+	uint32_t codecs_count;
+	struct msm_vidc_codec_capability *codec_caps;
+	uint32_t codec_caps_count;
 	struct msm_vidc_csc_coeff csc_data;
 	struct msm_vidc_efuse_data *efuse_data;
 	unsigned int efuse_data_length;
@@ -302,6 +322,8 @@
 	struct session_crop crop_info;
 	u32 fps;
 	u32 bitrate;
+	bool bframe_changed;
+	u32 extradata_ctrls;
 };
 
 struct buf_queue {
@@ -361,7 +383,7 @@
 	u32 opb_fourcc;
 	u32 work_mode;
 	bool low_latency_mode;
-	bool turbo_mode;
+	bool is_cbr_plus;
 	u32 work_route;
 	u32 dcvs_flags;
 	u32 frame_rate;
@@ -394,6 +416,7 @@
 	unsigned long (*calc_freq)(struct msm_vidc_inst *inst, u32 filled_len);
 	int (*decide_work_route)(struct msm_vidc_inst *inst);
 	int (*decide_work_mode)(struct msm_vidc_inst *inst);
+	int (*decide_core_and_power_mode)(struct msm_vidc_inst *inst);
 };
 
 struct msm_vidc_core {
@@ -410,9 +433,6 @@
 	struct completion completions[SYS_MSG_END - SYS_MSG_START + 1];
 	enum msm_vidc_hfi_type hfi_type;
 	struct msm_vidc_platform_resources resources;
-	u32 enc_codec_supported;
-	u32 dec_codec_supported;
-	u32 codec_count;
 	struct msm_vidc_capability *capabilities;
 	struct delayed_work fw_unload_work;
 	struct work_struct ssr_work;
@@ -486,6 +506,7 @@
 	u32 frame_quality;
 	u32 rc_type;
 	u32 hybrid_hp;
+	u32 layer_bitrate;
 	u32 client_set_ctrls;
 	struct internal_buf *dpb_extra_binfo;
 	struct msm_vidc_codec_data *codec_data;
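
The new msm_vidc_codec_capability rows (populated just below in
msm_vidc_platform.c) carry domain and codec bitmasks, so one row can apply to
several session types at once. A hypothetical consumer, not part of this patch,
showing how such a table would be folded into the cap[CAP_MAX] array this patch
adds to struct msm_vidc_capability; applying rows in order lets the
codec-specific entries later in a platform table override the generic ones:

	static void apply_platform_caps(struct msm_vidc_capability *out,
			struct msm_vidc_codec_capability *rows, u32 count)
	{
		u32 i;

		for (i = 0; i < count; i++) {
			struct hal_capability_supported *cap;

			if (!(rows[i].domains & out->domain) ||
			    !(rows[i].codecs & out->codec))
				continue;

			/* later, more specific rows win over earlier generic ones */
			cap = &out->cap[rows[i].capability_type];
			cap->min = rows[i].min;
			cap->max = rows[i].max;
			cap->step_size = rows[i].step_size;
			cap->default_value = rows[i].default_value;
		}
	}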
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index cf3a2ac..c14348f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -126,6 +126,82 @@
 	CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 50, 200, 200),
 };
 
+#define ENC     HAL_VIDEO_DOMAIN_ENCODER
+#define DEC     HAL_VIDEO_DOMAIN_DECODER
+#define H264    HAL_VIDEO_CODEC_H264
+#define HEVC    HAL_VIDEO_CODEC_HEVC
+#define VP8     HAL_VIDEO_CODEC_VP8
+#define VP9     HAL_VIDEO_CODEC_VP9
+#define MPEG2   HAL_VIDEO_CODEC_MPEG2
+#define DOMAINS_ALL    (HAL_VIDEO_DOMAIN_ENCODER | HAL_VIDEO_DOMAIN_DECODER)
+#define CODECS_ALL     (HAL_VIDEO_CODEC_H264 | HAL_VIDEO_CODEC_HEVC | \
+			HAL_VIDEO_CODEC_VP8 | HAL_VIDEO_CODEC_VP9 | \
+			HAL_VIDEO_CODEC_MPEG2)
+
+static struct msm_vidc_codec default_codecs[] = {
+	/* {domain, codec} */
+	{DEC, H264}, {DEC, HEVC}, {DEC, VP8}, {DEC, VP9}, {DEC, MPEG2},
+	{ENC, H264}, {ENC, HEVC}, {ENC, VP8},
+};
+
+static struct msm_vidc_codec_capability kona_capabilities[] = {
+	/* {cap_type, domains, codecs, min, max, step_size, default_value} */
+	{CAP_FRAME_WIDTH, DOMAINS_ALL, CODECS_ALL, 128, 8192, 1, 1920},
+	{CAP_FRAME_HEIGHT, DOMAINS_ALL, CODECS_ALL, 128, 8192, 1, 1080},
+	/* (8192 * 4320) / 256 */
+	{CAP_MBS_PER_FRAME, DOMAINS_ALL, CODECS_ALL, 1, 138240, 1, 138240},
+	/* ((1920 * 1088) / 256) * 960 fps */
+	{CAP_MBS_PER_SECOND, DOMAINS_ALL, CODECS_ALL, 1, 7833600, 1, 7833600},
+	{CAP_FRAMERATE, DOMAINS_ALL, CODECS_ALL, 1, 960, 1, 30},
+	{CAP_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 220000000, 1, 20000000},
+	{CAP_SCALE_X, DOMAINS_ALL, CODECS_ALL, 4096, 65536, 1, 4096},
+	{CAP_SCALE_Y, DOMAINS_ALL, CODECS_ALL, 4096, 65536, 1, 4096},
+	{CAP_BFRAME, ENC, H264|HEVC, 0, 1, 1, 0},
+	{CAP_HIER_P_NUM_ENH_LAYERS, ENC, H264|HEVC, 0, 6, 1, 0},
+	{CAP_LTR_COUNT, ENC, H264|HEVC, 0, 6, 1, 0},
+	/* ((4096 * 2304) / 256) * 60 fps */
+	{CAP_MBS_PER_SECOND_POWER_SAVE, ENC, CODECS_ALL,
+		0, 2211840, 1, 2211840},
+	{CAP_I_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 10},
+	{CAP_P_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20},
+	{CAP_B_FRAME_QP, ENC, H264|HEVC, 0, 51, 1, 20},
+	{CAP_I_FRAME_QP, ENC, VP8|VP9, 0, 127, 1, 20},
+	{CAP_P_FRAME_QP, ENC, VP8|VP9, 0, 127, 1, 40},
+	{CAP_B_FRAME_QP, ENC, VP8|VP9, 0, 127, 1, 40},
+	/* 10 slices */
+	{CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10},
+	{CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10},
+	{CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1},
+
+	/* VP8 specific */
+	{CAP_FRAME_WIDTH, ENC|DEC, VP8, 128, 4096, 1, 1920},
+	{CAP_FRAME_HEIGHT, ENC|DEC, VP8, 128, 4096, 1, 1080},
+	/* (4096 * 2304) / 256 */
+	{CAP_MBS_PER_FRAME, ENC|DEC, VP8, 1, 36864, 1, 8160},
+	/* ((4096 * 2304) / 256) * 120 */
+	{CAP_MBS_PER_SECOND, ENC|DEC, VP8, 1, 4423680, 1, 244800},
+	{CAP_FRAMERATE, ENC|DEC, VP8, 1, 120, 1, 30},
+	{CAP_BITRATE, ENC, VP8, 1, 74000000, 1, 20000000},
+	{CAP_BITRATE, DEC, VP8, 1, 220000000, 1, 20000000},
+
+	/* Mpeg2 decoder specific */
+	{CAP_FRAME_WIDTH, DEC, MPEG2, 128, 1920, 1, 1920},
+	{CAP_FRAME_HEIGHT, DEC, MPEG2, 128, 1920, 1, 1080},
+	/* (1920 * 1088) / 256 */
+	{CAP_MBS_PER_FRAME, DEC, MPEG2, 1, 8160, 1, 8160},
+	/* ((1920 * 1088) / 256) * 30 */
+	{CAP_MBS_PER_SECOND, DEC, MPEG2, 1, 244800, 1, 244800},
+	{CAP_FRAMERATE, DEC, MPEG2, 1, 30, 1, 30},
+	{CAP_BITRATE, DEC, MPEG2, 1, 40000000, 1, 20000000},
+
+	/* Secure usecase specific */
+	{CAP_SECURE_FRAME_WIDTH, DOMAINS_ALL, CODECS_ALL, 128, 4096, 1, 1920},
+	{CAP_SECURE_FRAME_HEIGHT, DOMAINS_ALL, CODECS_ALL, 128, 4096, 1, 1080},
+	/* (4096 * 2304) / 256 */
+	{CAP_SECURE_MBS_PER_FRAME, DOMAINS_ALL, CODECS_ALL, 1, 36864, 1, 36864},
+	{CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 40000000, 1, 20000000},
+};
+
 /*
  * Custom conversion coefficients for resolution: 176x144 negative
  * coeffs are converted to s4.9 format
@@ -153,7 +229,6 @@
 	},
 };
 
-/* Update with kona */
 static struct msm_vidc_common_data kona_common_data[] = {
 	{
 		.key = "qcom,never-unload-fw",
@@ -181,27 +256,27 @@
 	{
 		.key = "qcom,max-hw-load",
 		.value = 3916800,       /*
-					 * 1920x1088/256 MBs@480fps. It is less
-					 * any other usecases (ex:
+					 * 1920x1088/256 MBs@480fps. It is more
+					 * than any other use case (ex:
 					 * 3840x2160@120fps, 4096x2160@96fps,
 					 * 7680x4320@30fps)
 					 */
 	},
 	{
 		.key = "qcom,max-hq-mbs-per-frame",
-		.value = 8160,
+		.value = 34560,		/* 4096x2160 */
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 1036800,	/* 4096x2160@30fps */
 	},
 	{
-		.key = "qcom,max-b-frame-size",
-		.value = 8160,
+		.key = "qcom,max-b-frame-mbs-per-frame",
+		.value = 32400, /* 3840x2160/256 */
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 1944000, /* 3840x2160/256 MBs@60fps */
 	},
 	{
 		.key = "qcom,power-collapse-delay",
@@ -209,7 +284,7 @@
 	},
 	{
 		.key = "qcom,hw-resp-timeout",
-		.value = 10000,
+		.value = 1000,
 	},
 	{
 		.key = "qcom,debug-timeout",
@@ -263,16 +338,16 @@
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 244800,  /* 1920 x 1088 @ 30 fps */
 	},
 	{
-		.key = "qcom,max-b-frame-size",
+		.key = "qcom,max-b-frame-mbs-per-frame",
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 489600,
 	},
 	{
 		.key = "qcom,power-collapse-delay",
@@ -334,16 +409,16 @@
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 244800,  /* 1920 x 1088 @ 30 fps */
 	},
 	{
-		.key = "qcom,max-b-frame-size",
+		.key = "qcom,max-b-frame-mbs-per-frame",
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 489600,
 	},
 	{
 		.key = "qcom,power-collapse-delay",
@@ -409,16 +484,16 @@
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 244800,  /* 1920 x 1088 @ 30 fps */
 	},
 	{
-		.key = "qcom,max-b-frame-size",
+		.key = "qcom,max-b-frame-mbs-per-frame",
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 489600,
 	},
 	{
 		.key = "qcom,power-collapse-delay",
@@ -464,16 +539,16 @@
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 244800,  /* 1920 x 1088 @ 30 fps */
 	},
 	{
-		.key = "qcom,max-b-frame-size",
+		.key = "qcom,max-b-frame-mbs-per-frame",
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 489600,
 	},
 	{
 		.key = "qcom,power-collapse-delay",
@@ -515,16 +590,16 @@
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 244800,  /* 1920 x 1088 @ 30 fps */
 	},
 	{
-		.key = "qcom,max-b-frame-size",
+		.key = "qcom,max-b-frame-mbs-per-frame",
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 489600,
 	},
 	{
 		.key = "qcom,power-collapse-delay",
@@ -577,6 +652,10 @@
 	.sku_version = 0,
 	.vpu_ver = VPU_VERSION_IRIS2,
 	.ubwc_config = kona_ubwc_data,
+	.codecs = default_codecs,
+	.codecs_count = ARRAY_SIZE(default_codecs),
+	.codec_caps = kona_capabilities,
+	.codec_caps_count = ARRAY_SIZE(kona_capabilities),
 };
 
 static struct msm_vidc_platform_data sm6150_data = {
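
All of the macroblock constants in the kona tables above derive from
16x16-pixel macroblocks, i.e. MBs per frame = (width * height) / 256 and MBs
per second = MBs per frame * fps. A quick sanity check of the values (plain
arithmetic, not driver code; the macro name is illustrative):

	#define NUM_MBS_PER_FRAME(w, h)	(((w) * (h)) / 256)	/* 16x16 macroblocks */

	/*
	 * NUM_MBS_PER_FRAME(8192, 4320)       == 138240  -> CAP_MBS_PER_FRAME max
	 * NUM_MBS_PER_FRAME(1920, 1088) * 480 == 3916800 -> qcom,max-hw-load
	 * NUM_MBS_PER_FRAME(4096, 2160) * 30  == 1036800 -> qcom,max-hq-mbs-per-sec
	 * NUM_MBS_PER_FRAME(3840, 2160) * 60  == 1944000 -> qcom,max-b-frame-mbs-per-sec
	 */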
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 8411bc0..e738949 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -420,23 +420,12 @@
 		goto err_bus;
 	}
 
-	rc = of_property_read_string(dev->of_node, "qcom,bus-governor",
-			&bus->governor);
-	if (rc) {
-		rc = 0;
-		dprintk(VIDC_DBG,
-				"'qcom,bus-governor' not found, default to performance governor\n");
-		bus->governor = PERF_GOV;
-	}
+	rc = of_property_read_string(dev->of_node, "qcom,mode",
+			&bus->mode);
 
-	if (!strcmp(bus->governor, PERF_GOV))
+	if (!rc && !strcmp(bus->mode, PERF_GOV))
 		bus->is_prfm_gov_used = true;
 
-	if (of_find_property(dev->of_node, "operating-points-v2", NULL))
-		bus->has_freq_table = true;
-	else
-		bus->has_freq_table = false;
-
 	rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
 			range, ARRAY_SIZE(range));
 	if (rc) {
@@ -452,8 +441,8 @@
 
 	buses->count++;
 	bus->dev = dev;
-	dprintk(VIDC_DBG, "Found bus %s [%d->%d] with governor %s\n",
-			bus->name, bus->master, bus->slave, bus->governor);
+	dprintk(VIDC_DBG, "Found bus %s [%d->%d] with mode %s\n",
+			bus->name, bus->master, bus->slave, bus->mode);
 err_bus:
 	return rc;
 }
@@ -766,6 +755,10 @@
 	platform_data = core->platform_data;
 	res = &core->resources;
 
+	res->codecs = platform_data->codecs;
+	res->codecs_count = platform_data->codecs_count;
+	res->codec_caps = platform_data->codec_caps;
+	res->codec_caps_count = platform_data->codec_caps_count;
 	res->codec_data_count = platform_data->codec_data_length;
 	res->codec_data = platform_data->codec_data;
 
@@ -781,8 +774,14 @@
 	res->max_hq_mbs_per_frame = find_key_value(platform_data,
 			"qcom,max-hq-mbs-per-frame");
 
-	res->max_hq_fps = find_key_value(platform_data,
-			"qcom,max-hq-frames-per-sec");
+	res->max_hq_mbs_per_sec = find_key_value(platform_data,
+			"qcom,max-hq-mbs-per-sec");
+
+	res->max_bframe_mbs_per_frame = find_key_value(platform_data,
+			"qcom,max-b-frame-mbs-per-frame");
+
+	res->max_bframe_mbs_per_sec = find_key_value(platform_data,
+			"qcom,max-b-frame-mbs-per-sec");
 
 	res->sw_power_collapsible = find_key_value(platform_data,
 			"qcom,sw-power-collapse");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 431ff9d..234ee9d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -6,7 +6,6 @@
 #ifndef __MSM_VIDC_RESOURCES_H__
 #define __MSM_VIDC_RESOURCES_H__
 
-#include <linux/devfreq.h>
 #include <linux/platform_device.h>
 #include "msm_vidc.h"
 #include <linux/soc/qcom/llcc-qcom.h>
@@ -94,13 +93,10 @@
 	int master;
 	int slave;
 	unsigned int range[2];
-	const char *governor;
 	struct device *dev;
-	struct devfreq_dev_profile devfreq_prof;
-	struct devfreq *devfreq;
 	struct msm_bus_client_handle *client;
 	bool is_prfm_gov_used;
-	bool has_freq_table;
+	const char *mode;
 };
 
 struct bus_set {
@@ -170,7 +166,9 @@
 	struct buffer_usage_set buffer_usage_set;
 	uint32_t max_load;
 	uint32_t max_hq_mbs_per_frame;
-	uint32_t max_hq_fps;
+	uint32_t max_hq_mbs_per_sec;
+	uint32_t max_bframe_mbs_per_frame;
+	uint32_t max_bframe_mbs_per_sec;
 	struct platform_device *pdev;
 	struct regulator_set regulator_set;
 	struct clock_set clock_set;
@@ -197,6 +195,10 @@
 	bool dcvs;
 	struct msm_vidc_codec_data *codec_data;
 	int codec_data_count;
+	struct msm_vidc_codec *codecs;
+	uint32_t codecs_count;
+	struct msm_vidc_codec_capability *codec_caps;
+	uint32_t codec_caps_count;
 	struct msm_vidc_csc_coeff *csc_coeff_data;
 	struct msm_vidc_mem_cdsp mem_cdsp;
 	uint32_t vpu_ver;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 791e34f..95330d3 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -8,7 +8,6 @@
 #include <linux/clk/qcom.h>
 #include <linux/coresight-stm.h>
 #include <linux/delay.h>
-#include <linux/devfreq.h>
 #include <linux/hash.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -105,28 +104,38 @@
 static void interrupt_init_iris1(struct venus_hfi_device *device);
 static void setup_dsp_uc_memmap_iris1(struct venus_hfi_device *device);
 static void clock_config_on_enable_iris1(struct venus_hfi_device *device);
-static int prepare_ahb2axi_bridge(struct venus_hfi_device *device);
+static int reset_ahb2axi_bridge(struct venus_hfi_device *device);
 static int __set_ubwc_config(struct venus_hfi_device *device);
+static void power_off_common(struct venus_hfi_device *device);
+static void power_off_iris2(struct venus_hfi_device *device);
+static void noc_error_info_common(struct venus_hfi_device *device);
+static void noc_error_info_iris2(struct venus_hfi_device *device);
 
 struct venus_hfi_vpu_ops vpu4_ops = {
 	.interrupt_init = interrupt_init_vpu4,
 	.setup_dsp_uc_memmap = NULL,
 	.clock_config_on_enable = NULL,
-	.prepare_ahb2axi_bridge = NULL,
+	.reset_ahb2axi_bridge = NULL,
+	.power_off = power_off_common,
+	.noc_error_info = noc_error_info_common,
 };
 
 struct venus_hfi_vpu_ops iris1_ops = {
 	.interrupt_init = interrupt_init_iris1,
 	.setup_dsp_uc_memmap = setup_dsp_uc_memmap_iris1,
 	.clock_config_on_enable = clock_config_on_enable_iris1,
-	.prepare_ahb2axi_bridge = prepare_ahb2axi_bridge,
+	.reset_ahb2axi_bridge = reset_ahb2axi_bridge,
+	.power_off = power_off_common,
+	.noc_error_info = noc_error_info_common,
 };
 
 struct venus_hfi_vpu_ops iris2_ops = {
 	.interrupt_init = interrupt_init_iris1,
 	.setup_dsp_uc_memmap = NULL,
 	.clock_config_on_enable = NULL,
-	.prepare_ahb2axi_bridge = prepare_ahb2axi_bridge,
+	.reset_ahb2axi_bridge = reset_ahb2axi_bridge,
+	.power_off = power_off_iris2,
+	.noc_error_info = noc_error_info_iris2,
 };
 
 /**
@@ -570,7 +579,7 @@
 {
 	struct hfi_queue_header *queue;
 	u32 packet_size_in_words, new_write_idx;
-	u32 empty_space, read_idx;
+	u32 empty_space, read_idx, write_idx;
 	u32 *write_ptr;
 
 	if (!qinfo || !packet) {
@@ -593,16 +602,18 @@
 	}
 
 	packet_size_in_words = (*(u32 *)packet) >> 2;
-	if (!packet_size_in_words) {
-		dprintk(VIDC_ERR, "Zero packet size\n");
+	if (!packet_size_in_words || packet_size_in_words >
+		(qinfo->q_array.mem_size >> 2)) {
+		dprintk(VIDC_ERR, "Invalid packet size\n");
 		return -ENODATA;
 	}
 
 	read_idx = queue->qhdr_read_idx;
+	write_idx = queue->qhdr_write_idx;
 
-	empty_space = (queue->qhdr_write_idx >=  read_idx) ?
-		(queue->qhdr_q_size - (queue->qhdr_write_idx -  read_idx)) :
-		(read_idx - queue->qhdr_write_idx);
+	empty_space = (write_idx >= read_idx) ?
+		((qinfo->q_array.mem_size >> 2) - (write_idx - read_idx)) :
+		(read_idx - write_idx);
 	if (empty_space <= packet_size_in_words) {
 		queue->qhdr_tx_req =  1;
 		dprintk(VIDC_ERR, "Insufficient size (%d) to write (%d)\n",
@@ -612,13 +623,20 @@
 
 	queue->qhdr_tx_req =  0;
 
-	new_write_idx = (queue->qhdr_write_idx + packet_size_in_words);
+	new_write_idx = write_idx + packet_size_in_words;
 	write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
-		(queue->qhdr_write_idx << 2));
-	if (new_write_idx < queue->qhdr_q_size) {
+			(write_idx << 2));
+	if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+	    write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+	    qinfo->q_array.mem_size)) {
+		dprintk(VIDC_ERR, "Invalid write index");
+		return -ENODATA;
+	}
+
+	if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
 		memcpy(write_ptr, packet, packet_size_in_words << 2);
 	} else {
-		new_write_idx -= queue->qhdr_q_size;
+		new_write_idx -= qinfo->q_array.mem_size >> 2;
 		memcpy(write_ptr, packet, (packet_size_in_words -
 			new_write_idx) << 2);
 		memcpy((void *)qinfo->q_array.align_virtual_addr,
@@ -704,7 +722,8 @@
 	u32 packet_size_in_words, new_read_idx;
 	u32 *read_ptr;
 	u32 receive_request = 0;
-		int rc = 0;
+	u32 read_idx, write_idx;
+	int rc = 0;
 
 	if (!qinfo || !packet || !pb_tx_req_is_set) {
 		dprintk(VIDC_ERR, "Invalid Params\n");
@@ -737,7 +756,10 @@
 	if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
 		receive_request = 1;
 
-	if (queue->qhdr_read_idx == queue->qhdr_write_idx) {
+	read_idx = queue->qhdr_read_idx;
+	write_idx = queue->qhdr_write_idx;
+
+	if (read_idx == write_idx) {
 		queue->qhdr_rx_req = receive_request;
 		/*
 		 * mb() to ensure qhdr is updated in main memory
@@ -754,21 +776,28 @@
 	}
 
 	read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
-				(queue->qhdr_read_idx << 2));
+				(read_idx << 2));
+	if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+	    read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+	    qinfo->q_array.mem_size - sizeof(*read_ptr))) {
+		dprintk(VIDC_ERR, "Invalid read index\n");
+		return -ENODATA;
+	}
+
 	packet_size_in_words = (*read_ptr) >> 2;
 	if (!packet_size_in_words) {
 		dprintk(VIDC_ERR, "Zero packet size\n");
 		return -ENODATA;
 	}
 
-	new_read_idx = queue->qhdr_read_idx + packet_size_in_words;
-	if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE)
-			&& queue->qhdr_read_idx <= queue->qhdr_q_size) {
-		if (new_read_idx < queue->qhdr_q_size) {
+	new_read_idx = read_idx + packet_size_in_words;
+	if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) &&
+		read_idx <= (qinfo->q_array.mem_size >> 2)) {
+		if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
 			memcpy(packet, read_ptr,
 					packet_size_in_words << 2);
 		} else {
-			new_read_idx -= queue->qhdr_q_size;
+			new_read_idx -= (qinfo->q_array.mem_size >> 2);
 			memcpy(packet, read_ptr,
 			(packet_size_in_words - new_read_idx) << 2);
 			memcpy(packet + ((packet_size_in_words -
@@ -779,18 +808,18 @@
 	} else {
 		dprintk(VIDC_WARN,
 			"BAD packet received, read_idx: %#x, pkt_size: %d\n",
-			queue->qhdr_read_idx, packet_size_in_words << 2);
+			read_idx, packet_size_in_words << 2);
 		dprintk(VIDC_WARN, "Dropping this packet\n");
-		new_read_idx = queue->qhdr_write_idx;
+		new_read_idx = write_idx;
 		rc = -ENODATA;
 	}
 
-	queue->qhdr_read_idx = new_read_idx;
-
-	if (queue->qhdr_read_idx != queue->qhdr_write_idx)
+	if (new_read_idx != write_idx)
 		queue->qhdr_rx_req = 0;
 	else
 		queue->qhdr_rx_req = receive_request;
+
+	queue->qhdr_read_idx = new_read_idx;
 	/*
 	 * mb() to ensure qhdr is updated in main memory
 	 * so that venus reads the updated header values
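
Both queue hunks above follow the same hardening recipe: snapshot
qhdr_read_idx and qhdr_write_idx once (the header lives in memory shared with
firmware, so re-reading it mid-check is a TOCTOU hazard), and derive every
bound from the host-known q_array.mem_size in words rather than the
firmware-writable qhdr_q_size. A self-contained sketch of the resulting
bounds-checked circular write (hypothetical helper, simplified from the patch;
assumes the usual kernel headers for memcpy):

	static int ring_write(u32 *base, u32 q_size_words,
			u32 read_idx, u32 *write_idx_ptr,
			const u32 *pkt, u32 pkt_words)
	{
		u32 write_idx = *write_idx_ptr;	/* read the shared index once */
		u32 space = (write_idx >= read_idx) ?
			q_size_words - (write_idx - read_idx) :
			read_idx - write_idx;
		u32 new_write_idx;

		if (!pkt_words || pkt_words > q_size_words ||
				space <= pkt_words)
			return -ENODATA;

		new_write_idx = write_idx + pkt_words;
		if (new_write_idx < q_size_words) {
			memcpy(base + write_idx, pkt, pkt_words << 2);
		} else {
			/* wrap: the packet tail goes to the queue start */
			new_write_idx -= q_size_words;
			memcpy(base + write_idx, pkt,
				(pkt_words - new_write_idx) << 2);
			memcpy(base, pkt + (pkt_words - new_write_idx),
				new_write_idx << 2);
		}
		*write_idx_ptr = new_write_idx;
		return 0;
	}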
@@ -954,84 +983,24 @@
 		dprintk(VIDC_ERR, "Failed to restore threshold values\n");
 }
 
-static int __devfreq_target(struct device *devfreq_dev,
-		unsigned long *freq, u32 flags)
+static int __vote_bandwidth(struct bus_info *bus,
+		unsigned long *freq)
 {
 	int rc = 0;
 	uint64_t ab = 0;
-	struct bus_info *bus = NULL, *temp = NULL;
-	struct venus_hfi_device *device = dev_get_drvdata(devfreq_dev);
 
-	venus_hfi_for_each_bus(device, temp) {
-		if (temp->dev == devfreq_dev) {
-			bus = temp;
-			break;
-		}
-	}
-
-	if (!bus) {
-		rc = -EBADHANDLE;
-		goto err_unknown_device;
-	}
-
-	/*
-	 * Clamp for all non zero frequencies. This clamp is necessary to stop
-	 * devfreq driver from spamming - Couldn't update frequency - logs, if
-	 * the scaled ab value is not part of the frequency table.
-	 */
 	if (*freq)
 		*freq = clamp_t(typeof(*freq), *freq, bus->range[0],
 				bus->range[1]);
 
-	/* we expect governors to provide values in kBps form, convert to Bps */
+	/* Bus Driver expects values in Bps */
 	ab = *freq * 1000;
-	rc = msm_bus_scale_update_bw(bus->client, ab, 0);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed voting bus %s to ab %llu\n: %d",
-				bus->name, ab, rc);
-		goto err_unknown_device;
-	}
-
 	dprintk(VIDC_PROF, "Voting bus %s to ab %llu\n", bus->name, ab);
+	rc = msm_bus_scale_update_bw(bus->client, ab, 0);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed voting bus %s to ab %llu, rc=%d\n",
+				bus->name, ab, rc);
 
-	return 0;
-err_unknown_device:
-	return rc;
-}
-
-static int __devfreq_get_status(struct device *devfreq_dev,
-		struct devfreq_dev_status *stat)
-{
-	int rc = 0;
-	struct bus_info *bus = NULL, *temp = NULL;
-	struct venus_hfi_device *device = dev_get_drvdata(devfreq_dev);
-
-	venus_hfi_for_each_bus(device, temp) {
-		if (temp->dev == devfreq_dev) {
-			bus = temp;
-			break;
-		}
-	}
-
-	if (!bus) {
-		rc = -EBADHANDLE;
-		goto err_unknown_device;
-	}
-
-	*stat = (struct devfreq_dev_status) {
-		.private_data = &device->bus_vote,
-		/*
-		 * Put in dummy place holder values for upstream govs, our
-		 * custom gov only needs .private_data.  We should fill this in
-		 * properly if we can actually measure busy_time accurately
-		 * (which we can't at the moment)
-		 */
-		.total_time = 1,
-		.busy_time = 1,
-		.current_frequency = 0,
-	};
-
-err_unknown_device:
 	return rc;
 }
 
@@ -1039,18 +1008,19 @@
 {
 	int rc = 0;
 	struct bus_info *bus = NULL;
+	unsigned long freq = 0, zero = 0;
 
 	kfree(device->bus_vote.data);
 	device->bus_vote.data = NULL;
 	device->bus_vote.data_count = 0;
 
 	venus_hfi_for_each_bus(device, bus) {
-		unsigned long zero = 0;
-
-		if (!bus->is_prfm_gov_used)
-			rc = devfreq_suspend_device(bus->devfreq);
+		if (!bus->is_prfm_gov_used) {
+			freq = __calc_bw(bus, &device->bus_vote);
+			rc = __vote_bandwidth(bus, &freq);
+		}
 		else
-			rc = __devfreq_target(bus->dev, &zero, 0);
+			rc = __vote_bandwidth(bus, &zero);
 
 		if (rc)
 			goto err_unknown_device;
@@ -1066,6 +1036,7 @@
 	int rc = 0;
 	struct bus_info *bus = NULL;
 	struct vidc_bus_vote_data *new_data = NULL;
+	unsigned long freq = 0;
 
 	if (!num_data) {
 		dprintk(VIDC_DBG, "No vote data available\n");
@@ -1088,15 +1059,18 @@
 	device->bus_vote.data_count = num_data;
 
 	venus_hfi_for_each_bus(device, bus) {
-		if (bus && bus->devfreq) {
+		if (bus) {
 			if (!bus->is_prfm_gov_used) {
-				rc = devfreq_resume_device(bus->devfreq);
-				if (rc)
-					goto err_no_mem;
+				freq = __calc_bw(bus, &device->bus_vote);
 			} else {
-				bus->devfreq->nb.notifier_call(
-					&bus->devfreq->nb, 0, NULL);
+				freq = bus->range[1];
+				dprintk(VIDC_ERR, "%s %s perf Vote %u\n",
+						__func__, bus->name,
+						bus->range[1]);
 			}
+			rc = __vote_bandwidth(bus, &freq);
+		} else {
+			dprintk(VIDC_ERR, "No BUS to Vote\n");
 		}
 	}
 
@@ -2258,7 +2232,7 @@
 	struct venus_hfi_device *device;
 	int rc = 0;
 
-	if (!session || !session->device || !pdata) {
+	if (!session || !session->device) {
 		dprintk(VIDC_ERR, "Invalid Params\n");
 		return -EINVAL;
 	}
@@ -3366,7 +3340,6 @@
 	while (!__iface_msgq_read(device, raw_packet)) {
 		void **session_id = NULL;
 		struct msm_vidc_cb_info *info = &packets[packet_count++];
-		struct vidc_hal_sys_init_done sys_init_done = {0};
 		int rc = 0;
 
 		rc = hfi_process_msg_packet(device->device_id,
@@ -3388,13 +3361,6 @@
 			break;
 		case HAL_SYS_INIT_DONE:
 			dprintk(VIDC_DBG, "Received SYS_INIT_DONE\n");
-
-			sys_init_done.capabilities =
-				device->sys_init_capabilities;
-			hfi_process_sys_init_done_prop_read(
-				(struct hfi_msg_sys_init_done_packet *)
-					raw_packet, &sys_init_done);
-			info->response.cmd.data.sys_init_done = sys_init_done;
 			break;
 		case HAL_SESSION_LOAD_RESOURCE_DONE:
 			/*
@@ -3686,41 +3652,57 @@
 }
 
 static int __handle_reset_clk(struct msm_vidc_platform_resources *res,
-			enum reset_state state)
+			int reset_index, enum reset_state state)
 {
-	int rc = 0, i;
+	int rc = 0;
 	struct reset_control *rst;
 	struct reset_set *rst_set = &res->reset_set;
 
 	if (!rst_set->reset_tbl)
 		return 0;
 
-	dprintk(VIDC_DBG, "%s reset_state %d\n", __func__, state);
-	for (i = 0; i < rst_set->count; i++) {
-		rst = rst_set->reset_tbl[i].rst;
-		switch (state) {
-		case INIT:
-			rst = devm_reset_control_get(&res->pdev->dev,
-						rst_set->reset_tbl[i].name);
-			if (IS_ERR(rst))
-				rc = PTR_ERR(rst);
+	rst = rst_set->reset_tbl[reset_index].rst;
+	dprintk(VIDC_DBG, "reset_clk: name %s reset_state %d rst %pK\n",
+		rst_set->reset_tbl[reset_index].name, state, rst);
 
-			rst_set->reset_tbl[i].rst = rst;
-			break;
-		case ASSERT:
-			rc = reset_control_assert(rst);
-			break;
-		case DEASSERT:
-			rc = reset_control_deassert(rst);
-			break;
-		default:
-			dprintk(VIDC_ERR, "Invalid reset request\n");
+	switch (state) {
+	case INIT:
+		if (rst)
+			goto skip_reset_init;
+
+		rst = devm_reset_control_get(&res->pdev->dev,
+				rst_set->reset_tbl[reset_index].name);
+		if (IS_ERR(rst))
+			rc = PTR_ERR(rst);
+
+		rst_set->reset_tbl[reset_index].rst = rst;
+		break;
+	case ASSERT:
+		if (!rst) {
+			rc = -EINVAL; /* PTR_ERR(NULL) would be 0 */
+			goto failed_to_reset;
 		}
 
+		rc = reset_control_assert(rst);
+		break;
+	case DEASSERT:
+		if (!rst) {
+			rc = PTR_ERR(rst);
+			goto failed_to_reset;
+		}
+		rc = reset_control_deassert(rst);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid reset request\n");
 		if (rc)
-			return rc;
+			goto failed_to_reset;
 	}
+
 	return 0;
+
+skip_reset_init:
+failed_to_reset:
+	return rc;
 }
 
 static inline void __disable_unprepare_clks(struct venus_hfi_device *device)
@@ -3752,31 +3734,39 @@
 	}
 }
 
-static int prepare_ahb2axi_bridge(struct venus_hfi_device *device)
+static int reset_ahb2axi_bridge(struct venus_hfi_device *device)
 {
-	int rc;
+	int rc, i;
 
 	if (!device) {
 		dprintk(VIDC_ERR, "NULL device\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto failed_to_reset;
 	}
 
-	rc = __handle_reset_clk(device->res, ASSERT);
-	if (rc) {
-		dprintk(VIDC_ERR, "failed to assert reset clocks\n");
-		return rc;
-	}
+	for (i = 0; i < device->res->reset_set.count; i++) {
+		rc = __handle_reset_clk(device->res, i, ASSERT);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"failed to assert reset clocks\n");
+			goto failed_to_reset;
+		}
 
-	/* wait for deassert */
-	usleep_range(150, 250);
+		/* wait for deassert */
+		usleep_range(150, 250);
 
-	rc = __handle_reset_clk(device->res, DEASSERT);
-	if (rc) {
-		dprintk(VIDC_ERR, "failed to deassert reset clocks\n");
-		return rc;
+		rc = __handle_reset_clk(device->res, i, DEASSERT);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"failed to deassert reset clocks\n");
+			goto failed_to_reset;
+		}
 	}
 
 	return 0;
+
+failed_to_reset:
+	return rc;
 }
 
 static inline int __prepare_enable_clks(struct venus_hfi_device *device)
@@ -3845,10 +3835,6 @@
 	device->bus_vote = DEFAULT_BUS_VOTE;
 
 	venus_hfi_for_each_bus_reverse(device, bus) {
-		devfreq_remove_device(bus->devfreq);
-		bus->devfreq = NULL;
-		dev_set_drvdata(bus->dev, NULL);
-
 		msm_bus_scale_unregister(bus->client);
 		bus->client = NULL;
 	}
@@ -3863,41 +3849,14 @@
 		return -EINVAL;
 
 	venus_hfi_for_each_bus(device, bus) {
-		struct devfreq_dev_profile profile = {
-			.initial_freq = 0,
-			.polling_ms = INT_MAX,
-			.freq_table = NULL,
-			.max_state = 0,
-			.target = __devfreq_target,
-			.get_dev_status = __devfreq_get_status,
-			.exit = NULL,
-			/*.get_cur_greq = NULL,*/
-		};
-
-		if (!strcmp(bus->governor, "msm-vidc-llcc")) {
+		if (!strcmp(bus->mode, "msm-vidc-llcc")) {
 			if (msm_vidc_syscache_disable) {
 				dprintk(VIDC_DBG,
 					 "Skipping LLC bus init %s: %s\n",
-				bus->name, bus->governor);
+				bus->name, bus->mode);
 				continue;
 			}
 		}
-
-		/*
-		 * This is stupid, but there's no other easy way to get a hold
-		 * of struct bus_info in venus_hfi_devfreq_*()
-		 */
-		WARN(dev_get_drvdata(bus->dev), "%s's drvdata already set\n",
-				dev_name(bus->dev));
-		dev_set_drvdata(bus->dev, device);
-
-		if (bus->has_freq_table) {
-			rc = dev_pm_opp_of_add_table(bus->dev);
-			if (rc)
-				dprintk(VIDC_ERR, "Failed to add %s OPP table",
-						bus->name);
-		}
-
 		bus->client = msm_bus_scale_register(bus->master, bus->slave,
 				bus->name, false);
 		if (IS_ERR_OR_NULL(bus->client)) {
@@ -3908,25 +3867,6 @@
 			bus->client = NULL;
 			goto err_add_dev;
 		}
-
-		bus->devfreq_prof = profile;
-		bus->devfreq = devfreq_add_device(bus->dev,
-				&bus->devfreq_prof, bus->governor, NULL);
-		if (IS_ERR_OR_NULL(bus->devfreq)) {
-			rc = PTR_ERR(bus->devfreq) ?
-				PTR_ERR(bus->devfreq) : -EBADHANDLE;
-			dprintk(VIDC_ERR,
-					"Failed to add devfreq device for bus %s and governor %s: %d\n",
-					bus->name, bus->governor, rc);
-			bus->devfreq = NULL;
-			goto err_add_dev;
-		}
-
-		/*
-		 * Devfreq starts monitoring immediately, since we are just
-		 * initializing stuff at this point, force it to suspend
-		 */
-		devfreq_suspend_device(bus->devfreq);
 	}
 
 	return 0;
@@ -4047,7 +3987,7 @@
 static int __init_resources(struct venus_hfi_device *device,
 				struct msm_vidc_platform_resources *res)
 {
-	int rc = 0;
+	int i, rc = 0;
 
 	rc = __init_regulators(device);
 	if (rc) {
@@ -4062,11 +4002,13 @@
 		goto err_init_clocks;
 	}
 
-	rc = __handle_reset_clk(res, INIT);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to init reset clocks\n");
-		rc = -ENODEV;
-		goto err_init_reset_clk;
+	for (i = 0; i < device->res->reset_set.count; i++) {
+		rc = __handle_reset_clk(res, i, INIT);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to init reset clocks\n");
+			rc = -ENODEV;
+			goto err_init_reset_clk;
+		}
 	}
 
 	rc = __init_bus(device);
@@ -4079,10 +4021,6 @@
 	if (rc)
 		dprintk(VIDC_WARN, "Failed to init subcaches: %d\n", rc);
 
-	device->sys_init_capabilities =
-		kzalloc(sizeof(struct msm_vidc_capability)
-		* VIDC_MAX_SESSIONS, GFP_KERNEL);
-
 	return rc;
 
 err_init_reset_clk:
@@ -4099,8 +4037,6 @@
 	__deinit_bus(device);
 	__deinit_clocks(device);
 	__deinit_regulators(device);
-	kfree(device->sys_init_capabilities);
-	device->sys_init_capabilities = NULL;
 }
 
 static int __protect_cp_mem(struct venus_hfi_device *device)
@@ -4515,9 +4451,9 @@
 		goto fail_enable_gdsc;
 	}
 
-	rc = call_venus_op(device, prepare_ahb2axi_bridge, device);
+	rc = call_venus_op(device, reset_ahb2axi_bridge, device);
 	if (rc) {
-		dprintk(VIDC_ERR, "Failed to enable ahb2axi: %d\n", rc);
+		dprintk(VIDC_ERR, "Failed to reset ahb2axi: %d\n", rc);
 		goto fail_enable_clks;
 	}
 
@@ -4564,7 +4500,7 @@
 	return rc;
 }
 
-static void __venus_power_off(struct venus_hfi_device *device)
+static void power_off_common(struct venus_hfi_device *device)
 {
 	if (!device->power_enabled)
 		return;
@@ -4574,6 +4510,96 @@
 	device->intr_status = 0;
 
 	__disable_unprepare_clks(device);
+	if (call_venus_op(device, reset_ahb2axi_bridge, device))
+		dprintk(VIDC_ERR, "Failed to reset ahb2axi\n");
+
+	if (__disable_regulators(device))
+		dprintk(VIDC_WARN, "Failed to disable regulators\n");
+
+	if (__unvote_buses(device))
+		dprintk(VIDC_WARN, "Failed to unvote for buses\n");
+	device->power_enabled = false;
+}
+
+static void power_off_iris2(struct venus_hfi_device *device)
+{
+	u32 lpi_status, reg_status = 0, count = 0, max_count = 10;
+
+	if (!device->power_enabled)
+		return;
+
+	if (!(device->intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+		disable_irq_nosync(device->hal_data->irq);
+	device->intr_status = 0;
+
+	/* HPG 6.1.2 Step 1  */
+	__write_register(device, VIDC_CPU_CS_X2RPMh, 0x3);
+
+	/* HPG 6.1.2 Step 2, noc to low power */
+	__write_register(device, VIDC_AON_WRAPPER_MVP_NOC_LPI_CONTROL, 0x1);
+	while (!reg_status && count < max_count) {
+		lpi_status =
+			 __read_register(device,
+				VIDC_AON_WRAPPER_MVP_NOC_LPI_STATUS);
+		reg_status = lpi_status & BIT(0);
+		dprintk(VIDC_DBG,
+			"Noc: lpi_status %d noc_status %d (count %d)\n",
+			lpi_status, reg_status, count);
+		usleep_range(50, 100);
+		count++;
+	}
+	if (count == max_count) {
+		dprintk(VIDC_ERR,
+			"NOC not in qaccept status %d\n", reg_status);
+	}
+
+	/* HPG 6.1.2 Step 3, debug bridge to low power */
+	__write_register(device,
+		VIDC_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x7);
+	reg_status = 0;
+	count = 0;
+	while ((reg_status != 0x7) && count < max_count) {
+		lpi_status = __read_register(device,
+				 VIDC_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
+		reg_status = lpi_status & 0x7;
+		dprintk(VIDC_DBG,
+			"DBLP Set : lpi_status %d reg_status %d (count %d)\n",
+			lpi_status, reg_status, count);
+		usleep_range(50, 100);
+		count++;
+	}
+	if (count == max_count) {
+		dprintk(VIDC_ERR,
+			"DBLP Set: status %d\n", reg_status);
+	}
+
+	/* HPG 6.1.2 Step 4, debug bridge to lpi release */
+	__write_register(device,
+		VIDC_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x0);
+	lpi_status = 0x1;
+	count = 0;
+	while (lpi_status && count < max_count) {
+		lpi_status = __read_register(device,
+				 VIDC_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
+		dprintk(VIDC_DBG,
+			"DBLP Release: lpi_status %d(count %d)\n",
+			lpi_status, count);
+		usleep_range(50, 100);
+		count++;
+	}
+	if (count == max_count) {
+		dprintk(VIDC_ERR,
+			"DBLP Release: lpi_status %d\n", lpi_status);
+	}
+
+	/* HPG 6.1.2 Step 6 */
+	__disable_unprepare_clks(device);
+
+	/* HPG 6.1.2 Step 7 & 8 */
+	if (call_venus_op(device, reset_ahb2axi_bridge, device))
+		dprintk(VIDC_ERR, "Failed to reset ahb2axi\n");
+
+	/* HPG 6.1.2 Step 5 */
 	if (__disable_regulators(device))
 		dprintk(VIDC_WARN, "Failed to disable regulators\n");
 
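
power_off_iris2() above repeats one poll idiom three times (NOC LPI,
debug-bridge set, debug-bridge release). A hypothetical helper, not in the
patch, capturing the shared shape:

	static int poll_lpi_status(struct venus_hfi_device *device,
			u32 reg, u32 mask, u32 want)
	{
		u32 count = 0, max_count = 10;

		while (count < max_count) {
			if ((__read_register(device, reg) & mask) == want)
				return 0;
			usleep_range(50, 100);
			count++;
		}
		return -ETIMEDOUT;	/* caller logs the failure, as the patch does */
	}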
@@ -4608,7 +4634,7 @@
 
 	__disable_subcaches(device);
 
-	__venus_power_off(device);
+	call_venus_op(device, power_off, device);
 	dprintk(VIDC_PROF, "Venus power off\n");
 	return rc;
 
@@ -4683,7 +4709,7 @@
 err_reset_core:
 	__tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
 err_set_video_state:
-	__venus_power_off(device);
+	call_venus_op(device, power_off, device);
 err_venus_power_on:
 	dprintk(VIDC_ERR, "Failed to resume from power collapse\n");
 	return rc;
@@ -4743,7 +4769,7 @@
 		subsystem_put(device->resources.fw.cookie);
 	device->resources.fw.cookie = NULL;
 fail_load_fw:
-	__venus_power_off(device);
+	call_venus_op(device, power_off, device);
 fail_venus_power_on:
 fail_init_pkt:
 	__deinit_resources(device);
@@ -4764,7 +4790,7 @@
 	__vote_buses(device, NULL, 0);
 	subsystem_put(device->resources.fw.cookie);
 	__interface_queues_release(device);
-	__venus_power_off(device);
+	call_venus_op(device, power_off, device);
 	device->resources.fw.cookie = NULL;
 	__deinit_resources(device);
 
@@ -4897,10 +4923,54 @@
 	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG3_HIGH: %#x\n", core_num, val);
 }
 
+static void noc_error_info_common(struct venus_hfi_device *device)
+{
+	const u32 core0 = 0, core1 = 1;
+
+	if (__read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
+		__noc_error_info(device, core0);
+
+	if (__read_register(device, VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
+		__noc_error_info(device, core1);
+}
+
+static void noc_error_info_iris2(struct venus_hfi_device *device)
+{
+	u32 val = 0;
+
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_SWID_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_SWID_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_SWID_HIGH);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_SWID_HIGH:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_MAINCTL_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_MAINCTL_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRVLD_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRVLD_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRCLR_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRCLR_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH:     %#x\n", val);
+}
+
 static int venus_hfi_noc_error_info(void *dev)
 {
 	struct venus_hfi_device *device;
-	const u32 core0 = 0, core1 = 1;
 
 	if (!dev) {
 		dprintk(VIDC_ERR, "%s: null device\n", __func__);
@@ -4911,13 +4981,7 @@
 	mutex_lock(&device->lock);
 	dprintk(VIDC_ERR, "%s: non error information\n", __func__);
 
-	if (__read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
-		__noc_error_info(device, core0);
-
-	if (__read_register(device, VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
-		__noc_error_info(device, core1);
+	call_venus_op(device, noc_error_info, device);
 
 	mutex_unlock(&device->lock);
 
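
The power-off and NOC error paths are now virtualized through
venus_hfi_vpu_ops, selected once at probe (vpu4/iris1/iris2). call_venus_op()
is defined elsewhere in the driver; the guard-then-dispatch pattern it implies
looks roughly like this (an assumption, not the macro's actual definition):

	static void venus_power_off(struct venus_hfi_device *device)
	{
		if (device && device->vpu_ops && device->vpu_ops->power_off)
			device->vpu_ops->power_off(device); /* common or iris2 variant */
	}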
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index c771a30..a361a23 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -42,6 +42,9 @@
 #define VIDC_MAX_SUBCACHES 4
 #define VIDC_MAX_SUBCACHE_SIZE 52
 
+extern unsigned long __calc_bw(struct bus_info *bus,
+			struct msm_vidc_gov_data *vidc_data);
+
 struct hfi_queue_table_header {
 	u32 qtbl_version;
 	u32 qtbl_size;
@@ -236,7 +239,9 @@
 	void (*interrupt_init)(struct venus_hfi_device *ptr);
 	void (*setup_dsp_uc_memmap)(struct venus_hfi_device *device);
 	void (*clock_config_on_enable)(struct venus_hfi_device *device);
-	int (*prepare_ahb2axi_bridge)(struct venus_hfi_device *device);
+	int (*reset_ahb2axi_bridge)(struct venus_hfi_device *device);
+	void (*power_off)(struct venus_hfi_device *device);
+	void (*noc_error_info)(struct venus_hfi_device *device);
 };
 
 struct venus_hfi_device {
@@ -274,7 +279,6 @@
 	u8 *raw_packet;
 	struct pm_qos_request qos;
 	unsigned int skip_pc_count;
-	struct msm_vidc_capability *sys_init_capabilities;
 	struct venus_hfi_vpu_ops *vpu_ops;
 };
 
@@ -283,7 +287,5 @@
 int venus_hfi_initialize(struct hfi_device *hdev, u32 device_id,
 		struct msm_vidc_platform_resources *res,
 		hfi_cmd_response_callback callback);
-bool venus_hfi_is_session_supported(unsigned long sessions_supported,
-		enum vidc_vote_data_session session_type);
 
 #endif
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index e208f46..c72849c 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -186,6 +186,8 @@
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0021)
 #define HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA	\
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0022)
+#define HFI_PROPERTY_PARAM_HDR10_HIST_EXTRADATA \
+	(HFI_PROPERTY_PARAM_OX_START + 0x0023)
 
 #define HFI_PROPERTY_CONFIG_VDEC_OX_START				\
 	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000)
@@ -835,13 +837,5 @@
 int hfi_process_msg_packet(u32 device_id, struct vidc_hal_msg_pkt_hdr *msg_hdr,
 		struct msm_vidc_cb_info *info);
 
-enum vidc_status hfi_process_sys_init_done_prop_read(
-	struct hfi_msg_sys_init_done_packet *pkt,
-	struct vidc_hal_sys_init_done *sys_init_done);
-
-enum vidc_status hfi_process_session_init_done_prop_read(
-	struct hfi_msg_sys_session_init_done_packet *pkt,
-	struct vidc_hal_session_init_done *session_init_done);
-
 #endif
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index ff6bf34..4406c84 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -87,10 +87,10 @@
 };
 
 enum hal_domain {
-	HAL_VIDEO_DOMAIN_VPE,
-	HAL_VIDEO_DOMAIN_ENCODER,
-	HAL_VIDEO_DOMAIN_DECODER,
-	HAL_VIDEO_DOMAIN_CVP,
+	HAL_VIDEO_DOMAIN_VPE        = BIT(0),
+	HAL_VIDEO_DOMAIN_ENCODER    = BIT(1),
+	HAL_VIDEO_DOMAIN_DECODER    = BIT(2),
+	HAL_VIDEO_DOMAIN_CVP        = BIT(3),
 	HAL_UNUSED_DOMAIN = 0x10000000,
 };
 
@@ -238,46 +238,45 @@
 	HAL_UNUSED_CHROMA = 0x10000000,
 };
 
-struct hal_properties_supported {
-	u32 num_properties;
-	u32 rg_properties[1];
-};
-
 enum hal_capability {
-	HAL_CAPABILITY_FRAME_WIDTH = 0x1,
-	HAL_CAPABILITY_FRAME_HEIGHT,
-	HAL_CAPABILITY_MBS_PER_FRAME,
-	HAL_CAPABILITY_MBS_PER_SECOND,
-	HAL_CAPABILITY_FRAMERATE,
-	HAL_CAPABILITY_SCALE_X,
-	HAL_CAPABILITY_SCALE_Y,
-	HAL_CAPABILITY_BITRATE,
-	HAL_CAPABILITY_BFRAME,
-	HAL_CAPABILITY_PEAKBITRATE,
-	HAL_CAPABILITY_HIER_P_NUM_ENH_LAYERS,
-	HAL_CAPABILITY_ENC_LTR_COUNT,
-	HAL_CAPABILITY_SECURE_OUTPUT2_THRESHOLD,
-	HAL_CAPABILITY_HIER_B_NUM_ENH_LAYERS,
-	HAL_CAPABILITY_LCU_SIZE,
-	HAL_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS,
-	HAL_CAPABILITY_MBS_PER_SECOND_POWER_SAVE,
-	HAL_CAPABILITY_EXTRADATA,
-	HAL_CAPABILITY_PROFILE,
-	HAL_CAPABILITY_LEVEL,
-	HAL_CAPABILITY_I_FRAME_QP,
-	HAL_CAPABILITY_P_FRAME_QP,
-	HAL_CAPABILITY_B_FRAME_QP,
-	HAL_CAPABILITY_RATE_CONTROL_MODES,
-	HAL_CAPABILITY_BLUR_WIDTH,
-	HAL_CAPABILITY_BLUR_HEIGHT,
-	HAL_CAPABILITY_SLICE_BYTE,
-	HAL_CAPABILITY_SLICE_MB,
-	HAL_CAPABILITY_SECURE,
-	HAL_CAPABILITY_MAX_NUM_B_FRAMES,
-	HAL_CAPABILITY_MAX_VIDEOCORES,
-	HAL_CAPABILITY_MAX_WORKMODES,
-	HAL_CAPABILITY_UBWC_CR_STATS,
-	HAL_UNUSED_CAPABILITY = 0x10000000,
+	CAP_FRAME_WIDTH = 0x1,
+	CAP_FRAME_HEIGHT,
+	CAP_MBS_PER_FRAME,
+	CAP_MBS_PER_SECOND,
+	CAP_FRAMERATE,
+	CAP_SCALE_X,
+	CAP_SCALE_Y,
+	CAP_BITRATE,
+	CAP_BFRAME,
+	CAP_PEAKBITRATE,
+	CAP_HIER_P_NUM_ENH_LAYERS,
+	CAP_LTR_COUNT,
+	CAP_SECURE_OUTPUT2_THRESHOLD,
+	CAP_HIER_B_NUM_ENH_LAYERS,
+	CAP_LCU_SIZE,
+	CAP_HIER_P_HYBRID_NUM_ENH_LAYERS,
+	CAP_MBS_PER_SECOND_POWER_SAVE,
+	CAP_EXTRADATA,
+	CAP_PROFILE,
+	CAP_LEVEL,
+	CAP_I_FRAME_QP,
+	CAP_P_FRAME_QP,
+	CAP_B_FRAME_QP,
+	CAP_RATE_CONTROL_MODES,
+	CAP_BLUR_WIDTH,
+	CAP_BLUR_HEIGHT,
+	CAP_SLICE_BYTE,
+	CAP_SLICE_MB,
+	CAP_SECURE,
+	CAP_MAX_NUM_B_FRAMES,
+	CAP_MAX_VIDEOCORES,
+	CAP_MAX_WORKMODES,
+	CAP_UBWC_CR_STATS,
+	CAP_SECURE_FRAME_WIDTH,
+	CAP_SECURE_FRAME_HEIGHT,
+	CAP_SECURE_MBS_PER_FRAME,
+	CAP_SECURE_BITRATE,
+	CAP_MAX,
 };
 
 struct hal_capability_supported {
@@ -285,11 +284,7 @@
 	u32 min;
 	u32 max;
 	u32 step_size;
-};
-
-struct hal_capability_supported_info {
-	u32 num_capabilities;
-	struct hal_capability_supported rg_data[1];
+	u32 default_value;
 };
 
 struct hal_nal_stream_format_supported {
@@ -448,13 +443,9 @@
 	struct hal_batch_info batch_info;
 	struct hal_uncompressed_format_supported uncompressed_format_supported;
 	struct hal_interlace_format_supported interlace_format_supported;
-	struct hal_properties_supported properties_supported;
-	struct hal_capability_supported capability_supported;
-	struct hal_capability_supported_info capability_supported_info;
 	struct hal_nal_stream_format_supported nal_stream_format_supported;
 	struct hal_nal_stream_format_select nal_stream_format_select;
 	struct hal_multi_view_format multi_view_format;
-	struct hal_codec_supported codec_supported;
 	struct hal_buffer_info buffer_info;
 	struct hal_buffer_alloc_mode buffer_alloc_mode;
 	struct buffer_requirements buf_req;
@@ -570,54 +561,12 @@
 struct msm_vidc_capability {
 	enum hal_domain domain;
 	enum hal_video_codec codec;
-	struct hal_capability_supported width;
-	struct hal_capability_supported height;
-	struct hal_capability_supported mbs_per_frame;
-	struct hal_capability_supported mbs_per_sec;
-	struct hal_capability_supported frame_rate;
-	struct hal_capability_supported scale_x;
-	struct hal_capability_supported scale_y;
-	struct hal_capability_supported bitrate;
-	struct hal_capability_supported bframe;
-	struct hal_capability_supported hier_p;
-	struct hal_capability_supported ltr_count;
-	struct hal_capability_supported secure_output2_threshold;
-	struct hal_capability_supported hier_b;
-	struct hal_capability_supported lcu_size;
-	struct hal_capability_supported hier_p_hybrid;
-	struct hal_capability_supported mbs_per_sec_power_save;
-	struct hal_capability_supported extradata;
-	struct hal_capability_supported profile;
-	struct hal_capability_supported level;
-	struct hal_capability_supported i_qp;
-	struct hal_capability_supported p_qp;
-	struct hal_capability_supported b_qp;
-	struct hal_capability_supported rc_modes;
-	struct hal_capability_supported blur_width;
-	struct hal_capability_supported blur_height;
-	struct hal_capability_supported slice_bytes;
-	struct hal_capability_supported slice_mbs;
-	struct hal_capability_supported secure;
-	struct hal_capability_supported max_num_b_frames;
-	struct hal_capability_supported max_video_cores;
-	struct hal_capability_supported max_work_modes;
-	struct hal_capability_supported ubwc_cr_stats;
-	struct hal_profile_level_supported profile_level;
-	struct hal_uncompressed_format_supported uncomp_format;
-	struct hal_interlace_format_supported HAL_format;
-	struct hal_nal_stream_format_supported nal_stream_format;
-	struct hal_intra_refresh intra_refresh;
-	enum buffer_mode_type alloc_mode_out;
-	enum buffer_mode_type alloc_mode_in;
-	u32 pixelprocess_capabilities;
-	u32 tme_version;
+	struct hal_capability_supported cap[CAP_MAX];
 };
 
 struct vidc_hal_sys_init_done {
 	u32 dec_codec_supported;
 	u32 enc_codec_supported;
-	u32 codec_count;
-	struct msm_vidc_capability *capabilities;
 	u32 max_sessions_supported;
 };
 
@@ -709,32 +658,6 @@
 	VIDC_THERMAL_CRITICAL
 };
 
-enum vidc_vote_data_session {
-	VIDC_BUS_VOTE_DATA_SESSION_INVALID = 0,
-	/*
-	 * No declarations exist. Values generated by VIDC_VOTE_DATA_SESSION_VAL
-	 * describe the enumerations e.g.:
-	 *
-	 * enum vidc_bus_vote_data_session_type h264_decoder_session =
-	 *        VIDC_VOTE_DATA_SESSION_VAL(HAL_VIDEO_CODEC_H264,
-	 *                 HAL_VIDEO_DOMAIN_DECODER);
-	 */
-};
-
-/*
- * Careful modifying VIDC_VOTE_DATA_SESSION_VAL().
- *
- * This macro assigns two bits to each codec: the lower bit denoting the codec
- * type, and the higher bit denoting session type.
- */
-static inline enum vidc_vote_data_session VIDC_VOTE_DATA_SESSION_VAL(
-		enum hal_video_codec c, enum hal_domain d) {
-	if (d != HAL_VIDEO_DOMAIN_ENCODER && d != HAL_VIDEO_DOMAIN_DECODER)
-		return VIDC_BUS_VOTE_DATA_SESSION_INVALID;
-
-	return (1 << ilog2(c) * 2) | ((d - 1) << (ilog2(c) * 2 + 1));
-}
-
 struct msm_vidc_gov_data {
 	struct vidc_bus_vote_data *data;
 	u32 data_count;
@@ -768,13 +691,6 @@
 	bool b_frames_enabled;
 };
 
-struct vidc_clk_scale_data {
-	enum vidc_vote_data_session session[VIDC_MAX_SESSIONS];
-	enum msm_vidc_power_mode power_mode[VIDC_MAX_SESSIONS];
-	u32 load[VIDC_MAX_SESSIONS];
-	int num_sessions;
-};
-
 struct hal_cmd_sys_get_property_packet {
 	u32 size;
 	u32 packet_type;
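
With hal_domain now a bitmask, one capability row can serve encoder and
decoder sessions at once (as in the kona table earlier), and with the
per-field capability struct collapsed into cap[CAP_MAX], checks reduce to an
array index. An illustrative helper (hypothetical, not in the patch):

	static bool res_is_supported(struct msm_vidc_capability *capability,
			u32 width, u32 height)
	{
		return width <= capability->cap[CAP_FRAME_WIDTH].max &&
			height <= capability->cap[CAP_FRAME_HEIGHT].max &&
			((width * height) / 256) <=
				capability->cap[CAP_MBS_PER_FRAME].max;
	}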
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 438332d..49c0856 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -346,6 +346,8 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x036)
 #define  HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x037)
+#define  HFI_PROPERTY_PARAM_VENC_BITRATE_SAVINGS \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x038)
 
 #define HFI_PROPERTY_CONFIG_VENC_COMMON_START				\
 	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
@@ -444,17 +446,6 @@
 #define HFI_CAPABILITY_MAX_WORKROUTES			(HFI_COMMON_BASE + 0X31)
 #define HFI_CAPABILITY_CQ_QUALITY_LEVEL			(HFI_COMMON_BASE + 0X32)
 
-struct hfi_capability_supported {
-	u32 capability_type;
-	u32 min;
-	u32 max;
-	u32 step_size;
-};
-
-struct hfi_capability_supported_info {
-	u32 num_capabilities;
-	struct hfi_capability_supported rg_data[1];
-};
 
 #define HFI_DEBUG_MSG_LOW					0x00000001
 #define HFI_DEBUG_MSG_MEDIUM					0x00000002
@@ -732,20 +723,6 @@
 	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
 };
 
-struct hfi_codec_supported {
-	u32 decoder_codec_supported;
-	u32 encoder_codec_supported;
-};
-
-struct hfi_properties_supported {
-	u32 num_properties;
-	u32 rg_properties[1];
-};
-
-struct hfi_max_sessions_supported {
-	u32 max_sessions;
-};
-
 struct hfi_vpe_color_space_conversion {
 	u32 input_color_primaries;
 	u32 custom_matrix_enabled;
@@ -1097,8 +1074,8 @@
 	struct msm_vidc_content_light_level_sei_payload cll_info;
 };
 
-struct hfi_vbv_hdr_buf_size {
-	u32 vbv_hdr_buf_size;
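+/* "hrd" = Hypothetical Reference Decoder; the old "hdr" name was a typo */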
+struct hfi_vbv_hrd_buf_size {
+	u32 vbv_hrd_buf_size;
 };
 
 #endif
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_io.h b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
index eb47f68..847c75f 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_io.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
@@ -12,6 +12,7 @@
 
 #define VIDC_CPU_BASE_OFFS			0x000A0000
 #define VIDEO_CC_BASE_OFFS			0x000F0000
+#define VIDC_AON_BASE_OFFS			0x000E0000
 #define VIDC_CPU_CS_BASE_OFFS		(VIDC_CPU_BASE_OFFS)
 #define VIDC_CPU_IC_BASE_OFFS		(VIDC_CPU_BASE_OFFS)
 
@@ -107,6 +108,8 @@
 #define VIDC_WRAPPER_CPU_CGC_DIS	(VIDC_WRAPPER_BASE_OFFS + 0x2010)
 #define VIDC_WRAPPER_CPU_STATUS	(VIDC_WRAPPER_BASE_OFFS + 0x2014)
 
+#define VIDC_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL	(VIDC_WRAPPER_BASE_OFFS + 0x54)
+#define VIDC_WRAPPER_DEBUG_BRIDGE_LPI_STATUS	(VIDC_WRAPPER_BASE_OFFS + 0x58)
 /*
  * --------------------------------------------------------------------------
  * MODULE: vidc_tz_wrapper
@@ -172,7 +175,7 @@
 
 /*
  * --------------------------------------------------------------------------
- * MODULE: vcodec noc error log registers
+ * MODULE: vcodec noc error log registers (iris1)
  * --------------------------------------------------------------------------
  */
 #define VCODEC_CORE0_VIDEO_NOC_BASE_OFFS		0x00004000
@@ -191,4 +194,27 @@
 #define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_LOW_OFFS	0x0538
 #define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_HIGH_OFFS	0x053C
 
+#define VIDC_AON_WRAPPER_MVP_NOC_LPI_CONTROL	(VIDC_AON_BASE_OFFS)
+#define VIDC_AON_WRAPPER_MVP_NOC_LPI_STATUS	(VIDC_AON_BASE_OFFS + 0x4)
+
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: vcodec noc error log registers (iris2)
+ * --------------------------------------------------------------------------
+ */
+#define VCODEC_NOC_VIDEO_A_NOC_BASE_OFFS		0x00010000
+#define VCODEC_NOC_ERL_MAIN_SWID_LOW			0x00011200
+#define VCODEC_NOC_ERL_MAIN_SWID_HIGH			0x00011204
+#define VCODEC_NOC_ERL_MAIN_MAINCTL_LOW			0x00011208
+#define VCODEC_NOC_ERL_MAIN_ERRVLD_LOW			0x00011210
+#define VCODEC_NOC_ERL_MAIN_ERRCLR_LOW			0x00011218
+#define VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW			0x00011220
+#define VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH		0x00011224
+#define VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW			0x00011228
+#define VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH		0x0001122C
+#define VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW			0x00011230
+#define VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH		0x00011234
+#define VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW			0x00011238
+#define VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH		0x0001123C
 #endif
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
index 3e73e9d..7c02504 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
@@ -41,25 +41,27 @@
 	node = of_parse_phandle(dev->of_node, "mediatek,larb", 0);
 	if (!node) {
 		mtk_v4l2_err("no mediatek,larb found");
-		return -1;
+		return -ENODEV;
 	}
 	pdev = of_find_device_by_node(node);
+	of_node_put(node);
 	if (!pdev) {
 		mtk_v4l2_err("no mediatek,larb device found");
-		return -1;
+		return -ENODEV;
 	}
 	pm->larbvenc = &pdev->dev;
 
 	node = of_parse_phandle(dev->of_node, "mediatek,larb", 1);
 	if (!node) {
 		mtk_v4l2_err("no mediatek,larb found");
-		return -1;
+		return -ENODEV;
 	}
 
 	pdev = of_find_device_by_node(node);
+	of_node_put(node);
 	if (!pdev) {
 		mtk_v4l2_err("no mediatek,larb device found");
-		return -1;
+		return -ENODEV;
 	}
 
 	pm->larbvenclt = &pdev->dev;
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index bb6add9..5b8350e 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -264,6 +264,14 @@
 	if (ret)
 		return ret;
 
+	if (!dev->dma_parms) {
+		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+					      GFP_KERNEL);
+		if (!dev->dma_parms)
+			return -ENOMEM;
+	}
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
 	INIT_LIST_HEAD(&core->instances);
 	mutex_init(&core->lock);
 	INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
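
The venus hunk is the standard preparation for dma_set_max_seg_size(): the helper stores into dev->dma_parms, so that structure must be allocated first or the call fails with -EIO. A condensed sketch of the idiom, assuming a probe() context:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    static int example_probe_dma(struct device *dev)
    {
        /* dma_set_max_seg_size() writes into dev->dma_parms, so it must exist. */
        if (!dev->dma_parms) {
            dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
                                          GFP_KERNEL);
            if (!dev->dma_parms)
                return -ENOMEM;
        }
        /* Allow segments up to 4 GiB - 1; DMA_BIT_MASK(32) == 0xffffffff. */
        return dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
    }
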
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index f06003b..2a92e5a 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -865,8 +865,11 @@
 			"%s-vid-cap", dev->v4l2_dev.name);
 
 	if (IS_ERR(dev->kthread_vid_cap)) {
+		int err = PTR_ERR(dev->kthread_vid_cap);
+
+		dev->kthread_vid_cap = NULL;
 		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
-		return PTR_ERR(dev->kthread_vid_cap);
+		return err;
 	}
 	*pstreaming = true;
 	vivid_grab_controls(dev, true);
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index 9981e75..4885905 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -236,8 +236,11 @@
 			"%s-vid-out", dev->v4l2_dev.name);
 
 	if (IS_ERR(dev->kthread_vid_out)) {
+		int err = PTR_ERR(dev->kthread_vid_out);
+
+		dev->kthread_vid_out = NULL;
 		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
-		return PTR_ERR(dev->kthread_vid_out);
+		return err;
 	}
 	*pstreaming = true;
 	vivid_grab_controls(dev, true);
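
Both vivid hunks above are the same fix: kthread_run() signals failure with an ERR_PTR(), and if that poisoned value stays in the device struct, a later kthread_stop() on the teardown path dereferences it. Saving the errno and clearing the pointer before returning is the safe shape; a sketch with hypothetical names:

    #include <linux/err.h>
    #include <linux/kthread.h>

    struct my_dev {                      /* illustrative */
        struct task_struct *worker;
    };

    static int my_thread_fn(void *data)
    {
        return 0;                        /* real work loop omitted */
    }

    static int start_worker(struct my_dev *dev)
    {
        dev->worker = kthread_run(my_thread_fn, dev, "my-worker");
        if (IS_ERR(dev->worker)) {
            int err = PTR_ERR(dev->worker);

            dev->worker = NULL;          /* never leave an ERR_PTR behind */
            return err;
        }
        return 0;
    }
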
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 1599159..baa7c83 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -438,6 +438,8 @@
 		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
 		break;
 	}
+	vfree(dev->bitmap_cap);
+	dev->bitmap_cap = NULL;
 	vivid_update_quality(dev);
 	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
 	dev->crop_cap = dev->src_rect;
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index be531ca..2079861 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -21,7 +21,7 @@
 	.type = V4L2_DV_BT_656_1120,
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
-	V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
+	V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
 		V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
 		V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
 		V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-common.c b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
index a7e7dfa..bfb4c6a 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-common.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
@@ -120,64 +120,12 @@
 /* 0: 87.5 - 108 MHz (USA, Europe)*/
 /* 1: 76   - 108 MHz (Japan wide band) */
 /* 2: 76   -  90 MHz (Japan) */
-static unsigned short band;
 
 /* De-emphasis */
 /* 0: 75 us (USA) */
 /* 1: 50 us (Europe, Australia, Japan) */
 static unsigned short de;
 
-static const struct v4l2_frequency_band bands[] = {
-	{
-		.type = V4L2_TUNER_RADIO,
-		.index = 0,
-		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
-			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
-			    V4L2_TUNER_CAP_FREQ_BANDS |
-			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
-			    V4L2_TUNER_CAP_HWSEEK_WRAP,
-		.rangelow   =  87500,
-		.rangehigh  = 108000,
-		.modulation = V4L2_BAND_MODULATION_FM,
-	},
-	{
-		.type = V4L2_TUNER_RADIO,
-		.index = 1,
-		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
-			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
-			    V4L2_TUNER_CAP_FREQ_BANDS |
-			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
-			    V4L2_TUNER_CAP_HWSEEK_WRAP,
-		.rangelow   =  76000,
-		.rangehigh  = 108000,
-		.modulation = V4L2_BAND_MODULATION_FM,
-	},
-	{
-		.type = V4L2_TUNER_RADIO,
-		.index = 2,
-		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
-			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
-			    V4L2_TUNER_CAP_FREQ_BANDS |
-			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
-			    V4L2_TUNER_CAP_HWSEEK_WRAP,
-		.rangelow   =  76000,
-		.rangehigh  =  91000,
-		.modulation = V4L2_BAND_MODULATION_FM,
-	},
-	{
-		.type = V4L2_TUNER_RADIO,
-		.index = 3,
-		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
-			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
-			    V4L2_TUNER_CAP_FREQ_BANDS |
-			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
-			    V4L2_TUNER_CAP_HWSEEK_WRAP,
-		.rangelow   =  64000,
-		.rangehigh  =  76000,
-		.modulation = V4L2_BAND_MODULATION_FM,
-	},
-};
-
 wait_queue_head_t rtc6226_wq;
 int rtc6226_wq_flag = NO_WAIT;
 #ifdef New_VolumeControl
@@ -753,8 +701,8 @@
 	u32 band_high_limit;
 	u8 spacing = 0;
 
-	band_low_limit = bands[radio->band].rangelow;
-	band_high_limit = bands[radio->band].rangehigh;
+	band_low_limit = radio->recv_conf.band_low_limit * TUNE_STEP_SIZE;
+	band_high_limit = radio->recv_conf.band_high_limit * TUNE_STEP_SIZE;
 
 	if (radio->space == 0)
 		spacing = CH_SPACING_200;
@@ -2197,6 +2145,8 @@
 {
 	struct rtc6226_device *radio = video_drvdata(file);
 	int retval = 0;
+	u16 bottom_freq;
+	u16 top_freq;
 
 	pr_info("%s entry\n", __func__);
 
@@ -2214,33 +2164,34 @@
 		radio->registers[MPXCFG] &= ~MPXCFG_CSR0_MONO; /* try stereo */
 		break;
 	default:
-		goto done;
+		pr_debug("%s audmode is not set\n", __func__);
 	}
 
 	retval = rtc6226_set_register(radio, MPXCFG);
 
-	pr_info("%s low:%d high:%d\n", __func__,
-		tuner->rangelow, tuner->rangehigh);
+	/*  unit is 10kHz */
+	top_freq = (u16)((tuner->rangehigh / TUNE_PARAM) / TUNE_STEP_SIZE);
+	bottom_freq = (u16)((tuner->rangelow / TUNE_PARAM) / TUNE_STEP_SIZE);
 
-	/* set band */
-	if (tuner->rangelow || tuner->rangehigh) {
-		for (band = 0; band < ARRAY_SIZE(bands); band++) {
-			if (bands[band].rangelow  == tuner->rangelow &&
-				bands[band].rangehigh == tuner->rangehigh)
-				break;
-		}
-		if (band == ARRAY_SIZE(bands)) {
-			pr_err("%s err\n", __func__);
-			band = 0;
-		}
-	} else
-		band = 0; /* If nothing is specified seek 87.5 - 108 Mhz */
+	pr_debug("%s low:%d high:%d\n", __func__,
+		bottom_freq, top_freq);
 
-	if (radio->band != band) {
-		radio->registers[CHANNEL] |= (band  << 12);
-		rtc6226_set_register(radio, MPXCFG);
-		radio->band = band;
-	}
+	radio->registers[RADIOSEEKCFG1] = top_freq;
+	radio->registers[RADIOSEEKCFG2] = bottom_freq;
+
+	retval = rtc6226_set_register(radio, RADIOSEEKCFG1);
+	if (retval < 0)
+		pr_err("In %s, error %d setting higher limit freq\n",
+			__func__, retval);
+	else
+		radio->recv_conf.band_high_limit = top_freq;
+
+	retval = rtc6226_set_register(radio, RADIOSEEKCFG2);
+	if (retval < 0)
+		pr_err("In %s, error %d setting lower limit freq\n",
+			__func__, retval);
+	else
+		radio->recv_conf.band_low_limit = bottom_freq;
 done:
 	pr_info("%s exit %d\n", __func__, retval);
 	return retval;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index ca68e1d..0f218af 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -707,7 +707,8 @@
 			 (dev->last_toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0)
 	};
 
-	ir_lirc_scancode_event(dev, &sc);
+	if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
+		ir_lirc_scancode_event(dev, &sc);
 
 	spin_lock_irqsave(&dev->keylock, flags);
 
@@ -747,7 +748,8 @@
 		.keycode = keycode
 	};
 
-	ir_lirc_scancode_event(dev, &sc);
+	if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
+		ir_lirc_scancode_event(dev, &sc);
 
 	if (new_event && dev->keypressed)
 		ir_do_keyup(dev, false);
@@ -1954,6 +1956,8 @@
 	rc_free_rx_device(dev);
 
 	mutex_lock(&dev->lock);
+	if (dev->users && dev->close)
+		dev->close(dev);
 	dev->registered = false;
 	mutex_unlock(&dev->lock);
 
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index 024c751..2ad2dde 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -155,7 +155,6 @@
 				stream->props.u.bulk.buffersize,
 				usb_urb_complete, stream);
 
-		stream->urb_list[i]->transfer_flags = URB_FREE_BUFFER;
 		stream->urbs_initialized++;
 	}
 	return 0;
@@ -186,7 +185,7 @@
 		urb->complete = usb_urb_complete;
 		urb->pipe = usb_rcvisocpipe(stream->udev,
 				stream->props.endpoint);
-		urb->transfer_flags = URB_ISO_ASAP | URB_FREE_BUFFER;
+		urb->transfer_flags = URB_ISO_ASAP;
 		urb->interval = stream->props.u.isoc.interval;
 		urb->number_of_packets = stream->props.u.isoc.framesperurb;
 		urb->transfer_buffer_length = stream->props.u.isoc.framesize *
@@ -210,7 +209,7 @@
 	if (stream->state & USB_STATE_URB_BUF) {
 		while (stream->buf_num) {
 			stream->buf_num--;
-			stream->buf_list[stream->buf_num] = NULL;
+			kfree(stream->buf_list[stream->buf_num]);
 		}
 	}
 
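
The usb_urb hunks enforce single ownership of the transfer buffers: with URB_FREE_BUFFER set, usb_free_urb() frees urb->transfer_buffer, yet the driver tracks the same allocations in buf_list[], so the memory had two owners. The fix clears the flag and has the driver kfree() its own buffers on teardown. The rule, sketched with illustrative names:

    #include <linux/slab.h>
    #include <linux/usb.h>

    /*
     * One owner per allocation: a driver that keeps its own pointer table
     * must not also set URB_FREE_BUFFER, which makes the USB core free
     * urb->transfer_buffer when the URB is released.
     */
    static void free_stream_bufs(void **buf_list, int *buf_num)
    {
        while (*buf_num) {
            (*buf_num)--;
            kfree(buf_list[*buf_num]); /* driver frees what it kmalloc'ed */
            buf_list[*buf_num] = NULL;
        }
    }
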
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index d46dc43..6f1fd40 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1065,11 +1065,19 @@
 			return -EINVAL;
 		}
 
-		/* Make sure the terminal type MSB is not null, otherwise it
-		 * could be confused with a unit.
+		/*
+		 * Reject invalid terminal types that would cause issues:
+		 *
+		 * - The high byte must be non-zero, otherwise it would be
+		 *   confused with a unit.
+		 *
+		 * - Bit 15 must be 0, as we use it internally as a terminal
+		 *   direction flag.
+		 *
+		 * Other unknown types are accepted.
 		 */
 		type = get_unaligned_le16(&buffer[4]);
-		if ((type & 0xff00) == 0) {
+		if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
 			uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
 				"interface %d INPUT_TERMINAL %d has invalid "
 				"type 0x%04x, skipping\n", udev->devnum,
@@ -1824,11 +1832,7 @@
 	usb_put_intf(dev->intf);
 	usb_put_dev(dev->udev);
 
-	if (dev->vdev.dev)
-		v4l2_device_unregister(&dev->vdev);
 #ifdef CONFIG_MEDIA_CONTROLLER
-	if (media_devnode_is_registered(dev->mdev.devnode))
-		media_device_unregister(&dev->mdev);
 	media_device_cleanup(&dev->mdev);
 #endif
 
@@ -1885,6 +1889,15 @@
 
 		uvc_debugfs_cleanup_stream(stream);
 	}
+
+	uvc_status_unregister(dev);
+
+	if (dev->vdev.dev)
+		v4l2_device_unregister(&dev->vdev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+	if (media_devnode_is_registered(dev->mdev.devnode))
+		media_device_unregister(&dev->mdev);
+#endif
 }
 
 int uvc_register_video_device(struct uvc_device *dev,
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 0722dc6..883e4ca 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -54,7 +54,7 @@
 	return ret;
 }
 
-static void uvc_input_cleanup(struct uvc_device *dev)
+static void uvc_input_unregister(struct uvc_device *dev)
 {
 	if (dev->input)
 		input_unregister_device(dev->input);
@@ -71,7 +71,7 @@
 
 #else
 #define uvc_input_init(dev)
-#define uvc_input_cleanup(dev)
+#define uvc_input_unregister(dev)
 #define uvc_input_report_key(dev, code, value)
 #endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */
 
@@ -292,12 +292,16 @@
 	return 0;
 }
 
-void uvc_status_cleanup(struct uvc_device *dev)
+void uvc_status_unregister(struct uvc_device *dev)
 {
 	usb_kill_urb(dev->int_urb);
+	uvc_input_unregister(dev);
+}
+
+void uvc_status_cleanup(struct uvc_device *dev)
+{
 	usb_free_urb(dev->int_urb);
 	kfree(dev->status);
-	uvc_input_cleanup(dev);
 }
 
 int uvc_status_start(struct uvc_device *dev, gfp_t flags)
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index e5f5d84..a738486 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -750,6 +750,7 @@
 
 /* Status */
 int uvc_status_init(struct uvc_device *dev);
+void uvc_status_unregister(struct uvc_device *dev);
 void uvc_status_cleanup(struct uvc_device *dev);
 int uvc_status_start(struct uvc_device *dev, gfp_t flags);
 void uvc_status_stop(struct uvc_device *dev);
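
The uvc changes split teardown into two stages: an unregister stage that runs at disconnect (kill the status URB, remove the input device, then the V4L2 and media nodes) and a cleanup stage that runs only once the last reference is gone (free memory). A sketch of that two-stage shape, with an illustrative struct standing in for the real uvc_device:

    #include <linux/input.h>
    #include <linux/slab.h>
    #include <linux/usb.h>

    struct my_uvc_dev {                  /* illustrative subset */
        struct urb *int_urb;
        struct input_dev *input;
        void *status;
    };

    static void mydev_unregister(struct my_uvc_dev *dev)
    {
        /* Disconnect time: silence the hardware, remove user interfaces. */
        usb_kill_urb(dev->int_urb);
        if (dev->input)
            input_unregister_device(dev->input);
    }

    static void mydev_cleanup(struct my_uvc_dev *dev)
    {
        /* Last-reference time: now it is safe to free memory. */
        usb_free_urb(dev->int_urb);
        kfree(dev->status);
    }
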
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index d0ff403..931fff7 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -286,6 +286,7 @@
 	const struct v4l2_window *win;
 	const struct v4l2_sdr_format *sdr;
 	const struct v4l2_meta_format *meta;
+	u32 planes;
 	unsigned i;
 
 	pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -316,7 +317,8 @@
 			prt_names(mp->field, v4l2_field_names),
 			mp->colorspace, mp->num_planes, mp->flags,
 			mp->ycbcr_enc, mp->quantization, mp->xfer_func);
-		for (i = 0; i < mp->num_planes; i++)
+		planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
+		for (i = 0; i < planes; i++)
 			printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
 					mp->plane_fmt[i].bytesperline,
 					mp->plane_fmt[i].sizeimage);
@@ -1297,6 +1299,90 @@
 	case V4L2_META_FMT_VSP1_HGO:	descr = "R-Car VSP1 1-D Histogram"; break;
 	case V4L2_META_FMT_VSP1_HGT:	descr = "R-Car VSP1 2-D Histogram"; break;
 	case V4L2_META_FMT_UVC:		descr = "UVC payload header metadata"; break;
+	case V4L2_PIX_FMT_NV12_UBWC:
+					descr = "NV12 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS:
+					descr = "Y/CbCr 4:2:0 P10 Venus"; break;
+	case V4L2_PIX_FMT_NV12_TP10_UBWC:
+					descr = "Y/CbCr 4:2:0 TP10 UBWC"; break;
+	case V4L2_PIX_FMT_NV12_512:
+					descr = "Y/CbCr 4:2:0 (512 align)"; break;
+	case V4L2_PIX_FMT_NV12_P010_UBWC:
+					descr = "Y/CbCr 4:2:0 P010 UBWC"; break;
+	case V4L2_PIX_FMT_RGBA8888_UBWC:
+					descr = "RGBA8888 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_8888:
+					descr = "32-bit ABGR 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_8888:
+					descr = "32-bit RGBA 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_8888:
+					descr = "32-bit RGBX 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_8888:
+					descr = "32-bit XBGR 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_5551:
+					descr = "16-bit RGBA 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_1555:
+					descr = "16-bit ABGR 1-5-5-5"; break;
+	case V4L2_PIX_FMT_SDE_BGRA_5551:
+					descr = "16-bit BGRA 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_BGRX_5551:
+					descr = "16-bit BGRX 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_5551:
+					descr = "16-bit RGBX 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_1555:
+					descr = "16-bit XBGR 1-5-5-5"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_4444:
+					descr = "16-bit RGBA 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_BGRA_4444:
+					descr = "16-bit BGRA 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_4444:
+					descr = "16-bit ABGR 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_4444:
+					descr = "16-bit RGBX 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_BGRX_4444:
+					descr = "16-bit BGRX 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_4444:
+					descr = "16-bit XBGR 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_BGR_565:
+					descr = "16-bit BGR 5-6-5"; break;
+	case V4L2_PIX_FMT_SDE_Y_CR_CB_GH2V2:
+					descr = "Planar YVU 4:2:0 A16"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H1V2:
+					descr = "Y/CbCr 4:2:2"; break;
+	case V4L2_PIX_FMT_SDE_Y_CRCB_H1V2:
+					descr = "Y/CrCb 4:2:2"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_VENUS:
+					descr = "Y/CbCr 4:2:0 Venus"; break;
+	case V4L2_PIX_FMT_SDE_Y_CRCB_H2V2_VENUS:
+					descr = "Y/CrCb 4:2:0 Venus"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_8888_UBWC:
+					descr = "RGBX 8:8:8:8 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_RGB_565_UBWC:
+					descr = "RGB 5:6:5 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_1010102:
+					descr = "RGBA 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_1010102:
+					descr = "RGBX 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_ARGB_2101010:
+					descr = "ARGB 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_XRGB_2101010:
+					descr = "XRGB 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_BGRA_1010102:
+					descr = "BGRA 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_BGRX_1010102:
+					descr = "BGRX 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_2101010:
+					descr = "ABGR 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_2101010:
+					descr = "XBGR 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_1010102_UBWC:
+					descr = "RGBA 10:10:10:2 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_1010102_UBWC:
+					descr = "RGBX 10:10:10:2 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_TP10:
+					descr = "Y/CbCr 4:2:0 TP10"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010:
+					descr = "Y/CbCr 4:2:0 P10"; break;
 
 	default:
 		/* Compressed formats */
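
The first v4l2-ioctl hunk hardens a debug print: mp->num_planes comes from userspace and can exceed VIDEO_MAX_PLANES, so looping on it directly reads past the end of plane_fmt[]. Clamping with min_t() before indexing is the usual defensive idiom; a self-contained sketch with stand-in names:

    #include <linux/kernel.h>   /* min_t() */
    #include <linux/printk.h>
    #include <linux/types.h>

    #define MY_MAX_PLANES 8     /* stand-in for VIDEO_MAX_PLANES */

    struct my_plane_fmt {       /* illustrative */
        u32 bytesperline;
        u32 sizeimage;
    };

    static void print_planes(const struct my_plane_fmt *fmt, u32 num_planes)
    {
        /* num_planes arrives from userspace; clamp before indexing. */
        u32 n = min_t(u32, num_planes, MY_MAX_PLANES);
        u32 i;

        for (i = 0; i < n; i++)
            pr_debug("plane %u: bytesperline=%u sizeimage=%u\n",
                     i, fmt[i].bytesperline, fmt[i].sizeimage);
    }
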
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 76382c8..1246d69 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 
 #define DRIVER_NAME "memstick"
 
@@ -436,6 +437,7 @@
 	struct memstick_dev *card;
 
 	dev_dbg(&host->dev, "memstick_check started\n");
+	pm_runtime_get_noresume(host->dev.parent);
 	mutex_lock(&host->lock);
 	if (!host->card) {
 		if (memstick_power_on(host))
@@ -479,6 +481,7 @@
 		host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
 
 	mutex_unlock(&host->lock);
+	pm_runtime_put(host->dev.parent);
 	dev_dbg(&host->dev, "memstick_check finished\n");
 }
 
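
The memstick hunk brackets the card-detect worker with a runtime-PM reference so the parent controller cannot runtime-suspend while the scan powers the card. pm_runtime_get_noresume() only pins the usage count, with no synchronous resume, which suffices here because the worker powers the host itself. The bracket around a work handler, sketched:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>
    #include <linux/workqueue.h>

    struct my_host {                     /* illustrative */
        struct device dev;
        struct work_struct work;
    };

    static void my_check_work(struct work_struct *work)
    {
        struct my_host *host = container_of(work, struct my_host, work);

        /* Pin the parent's runtime-PM count for the whole scan. */
        pm_runtime_get_noresume(host->dev.parent);

        /* ... detect and power the card under host locking ... */

        pm_runtime_put(host->dev.parent);
    }
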
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 30d09d1..11ab17f 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -261,7 +261,7 @@
 	mutex_unlock(&ab8500->lock);
 	dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
 
-	return ret;
+	return (ret < 0) ? ret : 0;
 }
 
 static int ab8500_get_register(struct device *dev, u8 bank,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 0be511d..f8e0fa9 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -640,9 +640,9 @@
 
 static const struct mfd_cell axp223_cells[] = {
 	{
-		.name			= "axp221-pek",
-		.num_resources		= ARRAY_SIZE(axp22x_pek_resources),
-		.resources		= axp22x_pek_resources,
+		.name		= "axp221-pek",
+		.num_resources	= ARRAY_SIZE(axp22x_pek_resources),
+		.resources	= axp22x_pek_resources,
 	}, {
 		.name		= "axp22x-adc",
 		.of_compatible	= "x-powers,axp221-adc",
@@ -650,7 +650,7 @@
 		.name		= "axp20x-battery-power-supply",
 		.of_compatible	= "x-powers,axp221-battery-power-supply",
 	}, {
-		.name			= "axp20x-regulator",
+		.name		= "axp20x-regulator",
 	}, {
 		.name		= "axp20x-ac-power-supply",
 		.of_compatible	= "x-powers,axp221-ac-power-supply",
@@ -666,9 +666,9 @@
 
 static const struct mfd_cell axp152_cells[] = {
 	{
-		.name			= "axp20x-pek",
-		.num_resources		= ARRAY_SIZE(axp152_pek_resources),
-		.resources		= axp152_pek_resources,
+		.name		= "axp20x-pek",
+		.num_resources	= ARRAY_SIZE(axp152_pek_resources),
+		.resources	= axp152_pek_resources,
 	},
 };
 
@@ -697,87 +697,101 @@
 
 static const struct mfd_cell axp288_cells[] = {
 	{
-		.name = "axp288_adc",
-		.num_resources = ARRAY_SIZE(axp288_adc_resources),
-		.resources = axp288_adc_resources,
-	},
-	{
-		.name = "axp288_extcon",
-		.num_resources = ARRAY_SIZE(axp288_extcon_resources),
-		.resources = axp288_extcon_resources,
-	},
-	{
-		.name = "axp288_charger",
-		.num_resources = ARRAY_SIZE(axp288_charger_resources),
-		.resources = axp288_charger_resources,
-	},
-	{
-		.name = "axp288_fuel_gauge",
-		.num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
-		.resources = axp288_fuel_gauge_resources,
-	},
-	{
-		.name = "axp221-pek",
-		.num_resources = ARRAY_SIZE(axp288_power_button_resources),
-		.resources = axp288_power_button_resources,
-	},
-	{
-		.name = "axp288_pmic_acpi",
+		.name		= "axp288_adc",
+		.num_resources	= ARRAY_SIZE(axp288_adc_resources),
+		.resources	= axp288_adc_resources,
+	}, {
+		.name		= "axp288_extcon",
+		.num_resources	= ARRAY_SIZE(axp288_extcon_resources),
+		.resources	= axp288_extcon_resources,
+	}, {
+		.name		= "axp288_charger",
+		.num_resources	= ARRAY_SIZE(axp288_charger_resources),
+		.resources	= axp288_charger_resources,
+	}, {
+		.name		= "axp288_fuel_gauge",
+		.num_resources	= ARRAY_SIZE(axp288_fuel_gauge_resources),
+		.resources	= axp288_fuel_gauge_resources,
+	}, {
+		.name		= "axp221-pek",
+		.num_resources	= ARRAY_SIZE(axp288_power_button_resources),
+		.resources	= axp288_power_button_resources,
+	}, {
+		.name		= "axp288_pmic_acpi",
 	},
 };
 
 static const struct mfd_cell axp803_cells[] = {
 	{
-		.name			= "axp221-pek",
-		.num_resources		= ARRAY_SIZE(axp803_pek_resources),
-		.resources		= axp803_pek_resources,
+		.name		= "axp221-pek",
+		.num_resources	= ARRAY_SIZE(axp803_pek_resources),
+		.resources	= axp803_pek_resources,
+	}, {
+		.name		= "axp20x-gpio",
+		.of_compatible	= "x-powers,axp813-gpio",
+	}, {
+		.name		= "axp813-adc",
+		.of_compatible	= "x-powers,axp813-adc",
+	}, {
+		.name		= "axp20x-battery-power-supply",
+		.of_compatible	= "x-powers,axp813-battery-power-supply",
+	}, {
+		.name		= "axp20x-ac-power-supply",
+		.of_compatible	= "x-powers,axp813-ac-power-supply",
+		.num_resources	= ARRAY_SIZE(axp20x_ac_power_supply_resources),
+		.resources	= axp20x_ac_power_supply_resources,
 	},
-	{	.name			= "axp20x-regulator" },
+	{	.name		= "axp20x-regulator" },
 };
 
 static const struct mfd_cell axp806_self_working_cells[] = {
 	{
-		.name			= "axp221-pek",
-		.num_resources		= ARRAY_SIZE(axp806_pek_resources),
-		.resources		= axp806_pek_resources,
+		.name		= "axp221-pek",
+		.num_resources	= ARRAY_SIZE(axp806_pek_resources),
+		.resources	= axp806_pek_resources,
 	},
-	{	.name			= "axp20x-regulator" },
+	{	.name		= "axp20x-regulator" },
 };
 
 static const struct mfd_cell axp806_cells[] = {
 	{
-		.id			= 2,
-		.name			= "axp20x-regulator",
+		.id		= 2,
+		.name		= "axp20x-regulator",
 	},
 };
 
 static const struct mfd_cell axp809_cells[] = {
 	{
-		.name			= "axp221-pek",
-		.num_resources		= ARRAY_SIZE(axp809_pek_resources),
-		.resources		= axp809_pek_resources,
+		.name		= "axp221-pek",
+		.num_resources	= ARRAY_SIZE(axp809_pek_resources),
+		.resources	= axp809_pek_resources,
 	}, {
-		.id			= 1,
-		.name			= "axp20x-regulator",
+		.id		= 1,
+		.name		= "axp20x-regulator",
 	},
 };
 
 static const struct mfd_cell axp813_cells[] = {
 	{
-		.name			= "axp221-pek",
-		.num_resources		= ARRAY_SIZE(axp803_pek_resources),
-		.resources		= axp803_pek_resources,
+		.name		= "axp221-pek",
+		.num_resources	= ARRAY_SIZE(axp803_pek_resources),
+		.resources	= axp803_pek_resources,
 	}, {
-		.name			= "axp20x-regulator",
+		.name		= "axp20x-regulator",
 	}, {
-		.name			= "axp20x-gpio",
-		.of_compatible		= "x-powers,axp813-gpio",
+		.name		= "axp20x-gpio",
+		.of_compatible	= "x-powers,axp813-gpio",
 	}, {
-		.name			= "axp813-adc",
-		.of_compatible		= "x-powers,axp813-adc",
+		.name		= "axp813-adc",
+		.of_compatible	= "x-powers,axp813-adc",
 	}, {
 		.name		= "axp20x-battery-power-supply",
 		.of_compatible	= "x-powers,axp813-battery-power-supply",
+	}, {
+		.name		= "axp20x-ac-power-supply",
+		.of_compatible	= "x-powers,axp813-ac-power-supply",
+		.num_resources	= ARRAY_SIZE(axp20x_ac_power_supply_resources),
+		.resources	= axp20x_ac_power_supply_resources,
 	},
 };
 
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 503979c..fab3cdc 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -59,6 +59,7 @@
 };
 
 static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
+	regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
 	regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
 	regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
 	regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 6b22d54..bccde3e 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -499,6 +499,7 @@
 
 	cros_ec_debugfs_remove(ec);
 
+	mfd_remove_devices(ec->dev);
 	cdev_del(&ec->cdev);
 	device_unregister(&ec->class_dev);
 	return 0;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5970b8de..aec20e1 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2584,7 +2584,7 @@
 	.irq_unmask	= prcmu_irq_unmask,
 };
 
-static __init char *fw_project_name(u32 project)
+static char *fw_project_name(u32 project)
 {
 	switch (project) {
 	case PRCMU_FW_PROJECT_U8500:
@@ -2732,7 +2732,7 @@
 	INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
 }
 
-static void __init init_prcm_registers(void)
+static void init_prcm_registers(void)
 {
 	u32 val;
 
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index c63e331..234febf 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -274,7 +274,9 @@
 
 	mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
 
-	mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
+	ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
+	if (ret)
+		goto out;
 
 	adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
 	adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 77b64bd..ab24e17 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -329,8 +329,7 @@
 
 	default:
 		dev_err(&pdev->dev, "unsupported chip: %d\n", id);
-		ret = -ENODEV;
-		break;
+		return -ENODEV;
 	}
 
 	if (ret) {
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index f1da3aa..43af70a 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2014, 2017-2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2014-2015, 2017-2019, The Linux Foundation. All rights reserved. */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -150,7 +150,12 @@
 		.of_match_table = pmic_spmi_id_table,
 	},
 };
-module_spmi_driver(pmic_spmi_driver);
+
+static int __init pmic_spmi_init(void)
+{
+	return spmi_driver_register(&pmic_spmi_driver);
+}
+arch_initcall(pmic_spmi_init);
 
 MODULE_DESCRIPTION("Qualcomm SPMI PMIC driver");
 MODULE_ALIAS("spmi:spmi-pmic");
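
Replacing module_spmi_driver() with an explicit arch_initcall() moves registration earlier in boot, so the PMIC is available to consumers that probe at device initcall time. Roughly, the macro expands to a device-level module_init()/module_exit() pair; spelling out the init half lets the initcall level be chosen, and dropping the exit half is acceptable for a driver that is only ever built in. A generic sketch with an illustrative driver name:

    #include <linux/init.h>
    #include <linux/spmi.h>

    /*
     * module_spmi_driver(drv) boils down to:
     *   static int __init drv_init(void)
     *   { return spmi_driver_register(&drv); }
     *   module_init(drv_init);   plus a matching module_exit()
     */
    extern struct spmi_driver my_pmic_driver;   /* illustrative */

    static int __init my_pmic_init(void)
    {
        return spmi_driver_register(&my_pmic_driver);
    }
    arch_initcall(my_pmic_init);    /* runs before device initcalls */
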
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 52fafea..8d420c3 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -638,6 +638,10 @@
 		return -EFAULT;
 	}
 
+	writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
+	writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
+	writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
+
 	dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
 							fw_version[1],
 							fw_version[2]);
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 7a30546..fe8d335 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -264,8 +264,9 @@
 		cell->pdata_size = sizeof(tscadc);
 	}
 
-	err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
-			tscadc->used_cells, NULL, 0, NULL);
+	err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+			      tscadc->cells, tscadc->used_cells, NULL,
+			      0, NULL);
 	if (err < 0)
 		goto err_disable_clk;
 
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 910f569..8bcdecf 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -235,9 +235,9 @@
 
 	mutex_init(&tps->tps_lock);
 
-	ret = regmap_add_irq_chip(tps->regmap, tps->irq,
-			IRQF_ONESHOT, 0, &tps65218_irq_chip,
-			&tps->irq_data);
+	ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
+				       IRQF_ONESHOT, 0, &tps65218_irq_chip,
+				       &tps->irq_data);
 	if (ret < 0)
 		return ret;
 
@@ -253,26 +253,9 @@
 			      ARRAY_SIZE(tps65218_cells), NULL, 0,
 			      regmap_irq_get_domain(tps->irq_data));
 
-	if (ret < 0)
-		goto err_irq;
-
-	return 0;
-
-err_irq:
-	regmap_del_irq_chip(tps->irq, tps->irq_data);
-
 	return ret;
 }
 
-static int tps65218_remove(struct i2c_client *client)
-{
-	struct tps65218 *tps = i2c_get_clientdata(client);
-
-	regmap_del_irq_chip(tps->irq, tps->irq_data);
-
-	return 0;
-}
-
 static const struct i2c_device_id tps65218_id_table[] = {
 	{ "tps65218", TPS65218 },
 	{ },
@@ -285,7 +268,6 @@
 		.of_match_table = of_tps65218_match_table,
 	},
 	.probe		= tps65218_probe,
-	.remove		= tps65218_remove,
 	.id_table       = tps65218_id_table,
 };
 
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b893797..9c7925c 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -592,6 +592,29 @@
 	return 0;
 }
 
+static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
+{
+	struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+	if (tps6586x->client->irq)
+		disable_irq(tps6586x->client->irq);
+
+	return 0;
+}
+
+static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
+{
+	struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+	if (tps6586x->client->irq)
+		enable_irq(tps6586x->client->irq);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
+			 tps6586x_i2c_resume);
+
 static const struct i2c_device_id tps6586x_id_table[] = {
 	{ "tps6586x", 0 },
 	{ },
@@ -602,6 +625,7 @@
 	.driver	= {
 		.name	= "tps6586x",
 		.of_match_table = of_match_ptr(tps6586x_of_match),
+		.pm	= &tps6586x_pm_ops,
 	},
 	.probe		= tps6586x_i2c_probe,
 	.remove		= tps6586x_i2c_remove,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 4be3d23..299016b 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -979,7 +979,7 @@
  * letting it generate the right frequencies for USB, MADC, and
  * other purposes.
  */
-static inline int __init protect_pm_master(void)
+static inline int protect_pm_master(void)
 {
 	int e = 0;
 
@@ -988,7 +988,7 @@
 	return e;
 }
 
-static inline int __init unprotect_pm_master(void)
+static inline int unprotect_pm_master(void)
 {
 	int e = 0;
 
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 1ee68bd..16c6e2a 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -1618,6 +1618,7 @@
 	{ 0x00000ECD, 0x0000 },    /* R3789  - HPLPF4_2 */
 	{ 0x00000EE0, 0x0000 },    /* R3808  - ASRC_ENABLE */
 	{ 0x00000EE2, 0x0000 },    /* R3810  - ASRC_RATE1 */
+	{ 0x00000EE3, 0x4000 },    /* R3811  - ASRC_RATE2 */
 	{ 0x00000EF0, 0x0000 },    /* R3824  - ISRC 1 CTRL 1 */
 	{ 0x00000EF1, 0x0000 },    /* R3825  - ISRC 1 CTRL 2 */
 	{ 0x00000EF2, 0x0000 },    /* R3826  - ISRC 1 CTRL 3 */
@@ -2869,6 +2870,7 @@
 	case ARIZONA_ASRC_ENABLE:
 	case ARIZONA_ASRC_STATUS:
 	case ARIZONA_ASRC_RATE1:
+	case ARIZONA_ASRC_RATE2:
 	case ARIZONA_ISRC_1_CTRL_1:
 	case ARIZONA_ISRC_1_CTRL_2:
 	case ARIZONA_ISRC_1_CTRL_3:
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 68a1ac9..d382b13 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -13,7 +13,7 @@
 	  ones like at24c64, 24lc02 or fm24c04:
 
 	     24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
-	     24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
+	     24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
 
 	  Unless you like data loss puzzles, always be sure that any chip
 	  you configure as a 24c32 (32 kbit) or larger is NOT really a
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 7e50e1d..94836fc 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -173,6 +173,7 @@
 AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16);
 AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16);
 AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16);
+AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16);
 /* identical to 24c08 ? */
 AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0);
 
@@ -199,6 +200,7 @@
 	{ "24c256",	(kernel_ulong_t)&at24_data_24c256 },
 	{ "24c512",	(kernel_ulong_t)&at24_data_24c512 },
 	{ "24c1024",	(kernel_ulong_t)&at24_data_24c1024 },
+	{ "24c2048",    (kernel_ulong_t)&at24_data_24c2048 },
 	{ "at24",	0 },
 	{ /* END OF LIST */ }
 };
@@ -227,6 +229,7 @@
 	{ .compatible = "atmel,24c256",		.data = &at24_data_24c256 },
 	{ .compatible = "atmel,24c512",		.data = &at24_data_24c512 },
 	{ .compatible = "atmel,24c1024",	.data = &at24_data_24c1024 },
+	{ .compatible = "atmel,24c2048",	.data = &at24_data_24c2048 },
 	{ /* END OF LIST */ },
 };
 MODULE_DEVICE_TABLE(of, at24_of_match);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 8679e0b..f4f8ab6 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -217,7 +217,7 @@
 void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
 			       dma_addr_t *dma_handle)
 {
-	if (get_order(size) > MAX_ORDER)
+	if (get_order(size) >= MAX_ORDER)
 		return NULL;
 
 	return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
diff --git a/drivers/misc/hdcp_qseecom.c b/drivers/misc/hdcp_qseecom.c
index 53947b5..6de29bc 100644
--- a/drivers/misc/hdcp_qseecom.c
+++ b/drivers/misc/hdcp_qseecom.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[hdcp-qseecom] %s: " fmt, __func__
@@ -1044,11 +1044,7 @@
 	}
 
 	rc = handle->tx_init(handle);
-	if (rc)
-		goto error;
 
-	if (!handle->legacy_app)
-		rc = hdcp2_app_start_auth(handle);
 error:
 	return rc;
 }
@@ -1188,6 +1184,7 @@
 	pr_err("failed, rc=%d\n", rc);
 	return rc;
 }
+EXPORT_SYMBOL(hdcp2_force_encryption);
 
 static int hdcp2_app_query_stream(struct hdcp2_handle *handle)
 {
@@ -1236,6 +1233,9 @@
 	case HDCP2_CMD_START:
 		rc = hdcp2_app_start(handle);
 		break;
+	case HDCP2_CMD_START_AUTH:
+		rc = hdcp2_app_start_auth(handle);
+		break;
 	case HDCP2_CMD_PROCESS_MSG:
 		rc = hdcp2_app_process_msg(handle);
 		break;
@@ -1268,6 +1268,7 @@
 error:
 	return rc;
 }
+EXPORT_SYMBOL(hdcp2_app_comm);
 
 static int hdcp2_open_stream_helper(struct hdcp2_handle *handle,
 		uint8_t vc_payload_id,
@@ -1322,6 +1323,7 @@
 	return hdcp2_open_stream_helper(handle, vc_payload_id, stream_number,
 		stream_id);
 }
+EXPORT_SYMBOL(hdcp2_open_stream);
 
 static int hdcp2_close_stream_helper(struct hdcp2_handle *handle,
 		uint32_t stream_id)
@@ -1368,6 +1370,7 @@
 
 	return hdcp2_close_stream_helper(handle, stream_id);
 }
+EXPORT_SYMBOL(hdcp2_close_stream);
 
 void *hdcp2_init(u32 device_type)
 {
@@ -1382,11 +1385,13 @@
 error:
 	return handle;
 }
+EXPORT_SYMBOL(hdcp2_init);
 
 void hdcp2_deinit(void *ctx)
 {
 	kzfree(ctx);
 }
+EXPORT_SYMBOL(hdcp2_deinit);
 
 void *hdcp1_init(void)
 {
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index b8aaa68..2ed23c9 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -820,21 +820,24 @@
  *
  * Return:
  *	0 - Success
+ *	Non-zero - Failure
  */
 static int ibmvmc_open(struct inode *inode, struct file *file)
 {
 	struct ibmvmc_file_session *session;
-	int rc = 0;
 
 	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
 		 (unsigned long)inode, (unsigned long)file,
 		 ibmvmc.state);
 
 	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		return -ENOMEM;
+
 	session->file = file;
 	file->private_data = session;
 
-	return rc;
+	return 0;
 }
 
 /**
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index e4b10b2..bb1ee98 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -127,6 +127,8 @@
 #define MEI_DEV_ID_BXT_M      0x1A9A  /* Broxton M */
 #define MEI_DEV_ID_APL_I      0x5A9A  /* Apollo Lake I */
 
+#define MEI_DEV_ID_DNV_IE     0x19E5  /* Denverton IE */
+
 #define MEI_DEV_ID_GLK        0x319A  /* Gemini Lake */
 
 #define MEI_DEV_ID_KBP        0xA2BA  /* Kaby Point */
@@ -137,6 +139,8 @@
 #define MEI_DEV_ID_CNP_H      0xA360  /* Cannon Point H */
 #define MEI_DEV_ID_CNP_H_4    0xA364  /* Cannon Point H 4 (iTouch) */
 
+#define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
+
 /*
  * MEI HW Section
  */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index ea4e152..4299658 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -88,11 +88,13 @@
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
+
 	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
@@ -103,6 +105,8 @@
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+
 	/* required last entry */
 	{0, }
 };
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 3633202..de7f035 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -563,6 +563,8 @@
 	int ret = -1;
 
 	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
+		struct device *dev = get_device(&vdev->vdev.dev);
+
 		dev_dbg(&vpdev->dev,
 			"%s %d config_change %d type %d vdev %p\n",
 			__func__, __LINE__,
@@ -574,7 +576,7 @@
 		iowrite8(-1, &dc->h2c_vdev_db);
 		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
 			wait_for_completion(&vdev->reset_done);
-		put_device(&vdev->vdev.dev);
+		put_device(dev);
 		iowrite8(1, &dc->guest_ack);
 		dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
 			__func__, __LINE__, ioread8(&dc->guest_ack));
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index 57a6bb1..8f2c5d8 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -318,7 +318,7 @@
 		if (rc)
 			return rc;
 		ptr = (u32 *) &afu->name[i];
-		*ptr = val;
+		*ptr = le32_to_cpu((__force __le32) val);
 	}
 	afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */
 	return 0;
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index 31695a0..646d164 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -566,7 +566,7 @@
 
 	mutex_lock(&spa->spa_lock);
 
-	pe->tid = tid;
+	pe->tid = cpu_to_be32(tid);
 
 	/*
 	 * The barrier makes sure the PE is updated
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 9758701..5ae4fa7 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -420,6 +420,29 @@
 }
 __setup("androidboot.keymaster=", get_qseecom_keymaster_status);
 
+
+#define QSEECOM_SCM_EBUSY_WAIT_MS 30
+#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
+
+static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
+{
+	int ret = 0;
+	int retry_count = 0;
+
+	do {
+		ret = scm_call2_noretry(smc_id, desc);
+		if (ret == -EBUSY) {
+			mutex_unlock(&app_access_lock);
+			msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
+			mutex_lock(&app_access_lock);
+		}
+		if (retry_count == 33)
+			pr_warn("secure world has been busy for 1 second!\n");
+	} while (ret == -EBUSY &&
+			(retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
+	return ret;
+}
+
 static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			const void *req_buf, void *resp_buf)
 {
@@ -447,7 +470,7 @@
 				svc_id, tz_cmd_id);
 			return -EINVAL;
 		}
-		ret = scm_call2(smc_id, &desc);
+		ret = __qseecom_scm_call2_locked(smc_id, &desc);
 		break;
 	}
 	case SCM_SVC_ES: {
@@ -470,7 +493,7 @@
 			desc.args[0] = p_hash_req->partition_id;
 			desc.args[1] = virt_to_phys(tzbuf);
 			desc.args[2] = SHA256_DIGEST_LENGTH;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -505,7 +528,7 @@
 				desc.args[2] = req_64bit->phy_addr;
 			}
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_APP_SHUTDOWN_COMMAND: {
@@ -515,7 +538,7 @@
 			smc_id = TZ_OS_APP_SHUTDOWN_ID;
 			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
 			desc.args[0] = req->app_id;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_APP_LOOKUP_COMMAND: {
@@ -534,7 +557,7 @@
 			desc.args[0] = virt_to_phys(tzbuf);
 			desc.args[1] = strlen(req->app_name);
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -558,7 +581,7 @@
 				desc.args[1] = req_64bit->size;
 			}
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
@@ -582,14 +605,14 @@
 				desc.args[2] = req_64bit->phy_addr;
 			}
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
 			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
 			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_REGISTER_LISTENER: {
@@ -614,12 +637,12 @@
 			}
 			qseecom.smcinvoke_support = true;
 			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			if (ret == -EIO) {
 				/* smcinvoke is not supported */
 				qseecom.smcinvoke_support = false;
 				smc_id = TZ_OS_REGISTER_LISTENER_ID;
-				ret = scm_call2(smc_id, &desc);
+				ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			}
 			break;
 		}
@@ -631,7 +654,7 @@
 			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
 			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
 			desc.args[0] = req->listener_id;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
@@ -644,7 +667,7 @@
 				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
 			desc.args[0] = req->listener_id;
 			desc.args[1] = req->status;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
@@ -672,7 +695,7 @@
 				desc.args[2] = req_64->sglistinfo_ptr;
 				desc.args[3] = req_64->sglistinfo_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
@@ -694,14 +717,14 @@
 				desc.args[2] = req_64bit->phy_addr;
 			}
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
 			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
 			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 			}
 
@@ -729,7 +752,7 @@
 				desc.args[3] = req_64bit->rsp_ptr;
 				desc.args[4] = req_64bit->rsp_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
@@ -761,7 +784,7 @@
 				desc.args[5] = req_64bit->sglistinfo_ptr;
 				desc.args[6] = req_64bit->sglistinfo_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
@@ -773,21 +796,21 @@
 			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
 			desc.args[0] = req->key_type;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_RPMB_ERASE_COMMAND: {
 			smc_id = TZ_OS_RPMB_ERASE_ID;
 			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
 			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
 			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_GENERATE_KEY: {
@@ -808,7 +831,7 @@
 			desc.args[0] = virt_to_phys(tzbuf);
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -830,7 +853,7 @@
 			desc.args[0] = virt_to_phys(tzbuf);
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -852,7 +875,7 @@
 			desc.args[0] = virt_to_phys(tzbuf);
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -874,7 +897,7 @@
 			desc.args[0] = virt_to_phys(tzbuf);
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -900,7 +923,7 @@
 				desc.args[3] = req_64bit->resp_ptr;
 				desc.args[4] = req_64bit->resp_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
@@ -930,7 +953,7 @@
 				desc.args[5] = req_64bit->sglistinfo_ptr;
 				desc.args[6] = req_64bit->sglistinfo_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_TEE_INVOKE_COMMAND: {
@@ -955,7 +978,7 @@
 				desc.args[3] = req_64bit->resp_ptr;
 				desc.args[4] = req_64bit->resp_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
@@ -985,7 +1008,7 @@
 				desc.args[5] = req_64bit->sglistinfo_ptr;
 				desc.args[6] = req_64bit->sglistinfo_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_TEE_CLOSE_SESSION: {
@@ -1010,7 +1033,7 @@
 				desc.args[3] = req_64bit->resp_ptr;
 				desc.args[4] = req_64bit->resp_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_TEE_REQUEST_CANCELLATION: {
@@ -1036,7 +1059,7 @@
 				desc.args[3] = req_64bit->resp_ptr;
 				desc.args[4] = req_64bit->resp_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
@@ -1051,7 +1074,7 @@
 			desc.arginfo =
 				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
 			desc.args[0] = req->app_or_session_id;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		default: {
@@ -2084,7 +2107,7 @@
 	}
 
 	/* find app_id & img_name from list */
-	if (!ptr_app) {
+	if (!ptr_app && data->client.app_arch != ELFCLASSNONE) {
 		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
 		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
 							list) {
@@ -6475,7 +6498,7 @@
 		if (ret)
 			break;
 
-		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
+		ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
 
 		__qseecom_disable_clk(CLK_QSEE);
 
@@ -8668,8 +8691,10 @@
 
 	desc.args[0] = FEATURE_ID_WHITELIST;
 	desc.arginfo = SCM_ARGS(1);
-	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
+	mutex_lock(&app_access_lock);
+	ret = __qseecom_scm_call2_locked(SCM_SIP_FNID(SCM_SVC_INFO,
 		GET_FEAT_VERSION_CMD), &desc);
+	mutex_unlock(&app_access_lock);
 	if (!ret)
 		version = desc.ret[0];
 
@@ -8752,8 +8777,10 @@
 	qseecom.send_resp_flag = 0;
 
 	qseecom.qsee_version = QSEEE_VERSION_00;
+	mutex_lock(&app_access_lock);
 	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
 		&resp, sizeof(resp));
+	mutex_unlock(&app_access_lock);
 	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
 	if (rc) {
 		pr_err("Failed to get QSEE version info %d\n", rc);
@@ -8906,9 +8933,11 @@
 				rc = -EIO;
 				goto exit_deinit_clock;
 			}
+			mutex_lock(&app_access_lock);
 			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
 					cmd_buf, cmd_len,
 					&resp, sizeof(resp));
+			mutex_unlock(&app_access_lock);
 			__qseecom_disable_clk(CLK_QSEE);
 			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
 				pr_err("send secapp reg fail %d resp.res %d\n",
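
The qseecom rework funnels every SCM invocation through one helper that retries on -EBUSY, releasing app_access_lock while it sleeps so other callers are not starved; call sites that previously used scm_call2() outside the lock now take it around the helper. Dropping the lock mid-operation means shared state can change between attempts, a tradeoff this code accepts. The retry skeleton, isolated with a stand-in lock:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    #define EBUSY_WAIT_MS   30  /* matches QSEECOM_SCM_EBUSY_WAIT_MS */
    #define EBUSY_MAX_RETRY 67  /* ~2 s worst case at 30 ms per try */

    static DEFINE_MUTEX(big_lock);  /* stand-in for app_access_lock */

    /* Call with big_lock held; the lock is dropped only while sleeping. */
    static int call_locked_with_retry(int (*op)(void *), void *arg)
    {
        int ret, retry = 0;

        do {
            ret = op(arg);
            if (ret == -EBUSY) {
                mutex_unlock(&big_lock);
                msleep(EBUSY_WAIT_MS);
                mutex_lock(&big_lock);
            }
        } while (ret == -EBUSY && retry++ < EBUSY_MAX_RETRY);

        return ret;
    }
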
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 6c3591c..a3c6c77 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -61,7 +61,7 @@
 	int tries;
 	long timeout;
 
-	if (WARN_ON(index > func->num_templates))
+	if (WARN_ON(index >= func->num_templates))
 		return -EINVAL;
 
 	command = readl(syscfg->base + SYS_CFGCTRL);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index f37625f..bf4a3e2 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2158,7 +2158,7 @@
 		if (waiting)
 			wake_up(&mq->wait);
 		else
-			kblockd_schedule_work(&mq->complete_work);
+			queue_work(mq->card->complete_wq, &mq->complete_work);
 
 		return;
 	}
@@ -2972,6 +2972,13 @@
 
 	mmc_fixup_device(card, mmc_blk_fixups);
 
+	card->complete_wq = alloc_workqueue("mmc_complete",
+					WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+	if (unlikely(!card->complete_wq)) {
+		pr_err("Failed to create mmc completion workqueue\n");
+		return -ENOMEM;
+	}
+
 	md = mmc_blk_alloc(card);
 	if (IS_ERR(md))
 		return PTR_ERR(md);
@@ -3042,6 +3049,7 @@
 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 	mmc_set_bus_resume_policy(card->host, 0);
 #endif
+	destroy_workqueue(card->complete_wq);
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card)
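
Switching from kblockd_schedule_work() to a per-card workqueue matters twice over: kblockd is shared, so a stalled complete_work would block unrelated block devices, and the new queue is created with WQ_MEM_RECLAIM, which guarantees a rescuer thread so request completion can still make progress during memory reclaim (essential when a filesystem on the card is the reclaim target). Creation and teardown, sketched with an illustrative queue name:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *complete_wq;

    static int my_init(void)
    {
        /* WQ_MEM_RECLAIM: must make progress even while reclaiming memory. */
        complete_wq = alloc_workqueue("my_complete",
                                      WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!complete_wq)
            return -ENOMEM;
        return 0;
    }

    static void my_exit(void)
    {
        destroy_workqueue(complete_wq); /* drains pending work first */
    }
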
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f50e922..ea76572 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -97,7 +97,7 @@
 	if (!data)
 		return;
 
-	if (cmd->error || data->error ||
+	if ((cmd && cmd->error) || data->error ||
 	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
 		return;
 
@@ -543,6 +543,7 @@
 int mmc_init_clk_scaling(struct mmc_host *host)
 {
 	int err;
+	struct devfreq *devfreq;
 
 	if (!host || !host->card) {
 		pr_err("%s: unexpected host/card parameters\n",
@@ -593,22 +594,34 @@
 		return err;
 	}
 
+	dev_pm_opp_add(mmc_classdev(host),
+		host->clk_scaling.devfreq_profile.freq_table[0], 0);
+	dev_pm_opp_add(mmc_classdev(host),
+		host->clk_scaling.devfreq_profile.freq_table[1], 0);
+
 	pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
 		mmc_hostname(host),
 		host->clk_scaling.ondemand_gov_data.upthreshold,
 		host->clk_scaling.ondemand_gov_data.downdifferential,
 		host->clk_scaling.devfreq_profile.polling_ms);
-	host->clk_scaling.devfreq = devfreq_add_device(
+
+	devfreq = devfreq_add_device(
 		mmc_classdev(host),
 		&host->clk_scaling.devfreq_profile,
 		"simple_ondemand",
 		&host->clk_scaling.ondemand_gov_data);
-	if (!host->clk_scaling.devfreq) {
+
+	if (IS_ERR(devfreq)) {
 		pr_err("%s: unable to register with devfreq\n",
 			mmc_hostname(host));
-		return -EPERM;
+		dev_pm_opp_remove(mmc_classdev(host),
+			host->clk_scaling.devfreq_profile.freq_table[0]);
+		dev_pm_opp_remove(mmc_classdev(host),
+			host->clk_scaling.devfreq_profile.freq_table[1]);
+		return PTR_ERR(devfreq);
 	}
 
+	host->clk_scaling.devfreq = devfreq;
 	pr_debug("%s: clk scaling is enabled for device %s (%pK) with devfreq %pK (clock = %uHz)\n",
 		mmc_hostname(host),
 		dev_name(mmc_classdev(host)),
@@ -765,6 +778,11 @@
 		return err;
 	}
 
+	dev_pm_opp_remove(mmc_classdev(host),
+		host->clk_scaling.devfreq_profile.freq_table[0]);
+	dev_pm_opp_remove(mmc_classdev(host),
+		host->clk_scaling.devfreq_profile.freq_table[1]);
+
 	kfree(host->clk_scaling.devfreq_profile.freq_table);
 
 	host->clk_scaling.devfreq = NULL;
@@ -1787,12 +1805,13 @@
 	unsigned long flags;
 	int retry_cnt = delay_ms/10;
 	bool pm = false;
+	struct task_struct *task = current;
 
 	do {
 		spin_lock_irqsave(&host->lock, flags);
-		if (!host->claimed || host->claimer->task == current) {
+		if (!host->claimed || mmc_ctx_matches(host, NULL, task)) {
 			host->claimed = 1;
-			host->claimer->task = current;
+			mmc_ctx_set_claimer(host, NULL, task);
 			host->claim_cnt += 1;
 			claimed_host = 1;
 			if (host->claim_cnt == 1)
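
The clk-scaling hunk fixes a classic API mismatch: devfreq_add_device() reports failure with ERR_PTR(), never NULL, so the old `if (!devfreq)` check could never fire and the driver returned -EPERM while keeping a bogus pointer. Checking IS_ERR() on a local and publishing only on success is the right shape; a sketch with an illustrative storage variable:

    #include <linux/devfreq.h>
    #include <linux/err.h>

    static struct devfreq *my_devfreq;   /* illustrative */

    static int add_scaling(struct device *dev,
                           struct devfreq_dev_profile *profile, void *gov_data)
    {
        struct devfreq *df;

        df = devfreq_add_device(dev, profile, "simple_ondemand", gov_data);
        if (IS_ERR(df))          /* failure is ERR_PTR(), never NULL */
            return PTR_ERR(df);

        my_devfreq = df;         /* publish only after the check */
        return 0;
    }
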
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 00cdf6d..8cc2aac 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -32,6 +32,7 @@
 #include "pwrseq.h"
 
 #define DEFAULT_CMD6_TIMEOUT_MS	500
+#define MIN_CACHE_EN_TIMEOUT_MS 1600
 
 static const unsigned int tran_exp[] = {
 	10000,		100000,		1000000,	10000000,
@@ -547,8 +548,7 @@
 			card->cid.year += 16;
 
 		/* check whether the eMMC card supports BKOPS */
-		if (!mmc_card_broken_hpi(card) &&
-		    (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
+		if ((ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
 				card->ext_csd.hpi) {
 			card->ext_csd.bkops = 1;
 			card->ext_csd.man_bkops_en =
@@ -2146,22 +2146,30 @@
 		if (err) {
 			pr_warn("%s: Enabling HPI failed\n",
 				mmc_hostname(card->host));
+			card->ext_csd.hpi_en = 0;
 			err = 0;
-		} else
+		} else {
 			card->ext_csd.hpi_en = 1;
+		}
 	}
 
 	/*
-	 * If cache size is higher than 0, this indicates
-	 * the existence of cache and it can be turned on.
+	 * If cache size is higher than 0, this indicates the existence of cache
+	 * and it can be turned on. Note that some eMMCs from Micron have been
+	 * reported to need ~800 ms timeout, while enabling the cache after
+	 * sudden power failure tests. Let's extend the timeout to a minimum of
+	 * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
 	 * If HPI is not supported then cache shouldn't be enabled.
 	 */
-	if (!mmc_card_broken_hpi(card) && card->ext_csd.cache_size > 0) {
+	if (card->ext_csd.cache_size > 0) {
 		if (card->ext_csd.hpi_en &&
 			(!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
+			unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
+
+			timeout_ms = max(card->ext_csd.generic_cmd6_time,
+					 timeout_ms);
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-					EXT_CSD_CACHE_CTRL, 1,
-					card->ext_csd.generic_cmd6_time);
+					EXT_CSD_CACHE_CTRL, 1, timeout_ms);
 			if (err && err != -EBADMSG) {
 				pr_err("%s: %s: fail on CACHE_CTRL ON %d\n",
 					mmc_hostname(host), __func__, err);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 6ef491b..6419517 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -817,14 +817,6 @@
 		ocr |= SD_OCR_CCS;
 
 	/*
-	 * If the host supports one of UHS-I modes, request the card
-	 * to switch to 1.8V signaling level. If the card has failed
-	 * repeatedly to switch however, skip this.
-	 */
-	if (retries && mmc_host_uhs(host))
-		ocr |= SD_OCR_S18R;
-
-	/*
 	 * If the host can supply more than 150mA at current voltage,
 	 * XPC should be set to 1.
 	 */
@@ -1197,8 +1189,6 @@
 		return;
 	}
 
-	mmc_get_card(host->card, NULL);
-
 	/*
 	 * Just check if our card has been removed.
 	 */
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index be53044..fbc56ee 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1954,13 +1954,14 @@
 			}
 
 			atmci_request_end(host, host->mrq);
-			state = STATE_IDLE;
+			goto unlock; /* atmci_request_end() sets host->state */
 			break;
 		}
 	} while (state != prev_state);
 
 	host->state = state;
 
+unlock:
 	spin_unlock(&host->lock);
 }
 
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 768972a..5301302 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -286,6 +286,7 @@
 
 	if (host->dma_chan)
 		dmaengine_terminate_sync(host->dma_chan);
+	host->dma_chan = NULL;
 	bcm2835_reset_internal(host);
 }
 
@@ -772,6 +773,8 @@
 
 		if (!(sdhsts & SDHSTS_CRC7_ERROR) ||
 		    (host->cmd->opcode != MMC_SEND_OP_COND)) {
+			u32 edm, fsm;
+
 			if (sdhsts & SDHSTS_CMD_TIME_OUT) {
 				host->cmd->error = -ETIMEDOUT;
 			} else {
@@ -780,6 +783,13 @@
 				bcm2835_dumpregs(host);
 				host->cmd->error = -EILSEQ;
 			}
+			edm = readl(host->ioaddr + SDEDM);
+			fsm = edm & SDEDM_FSM_MASK;
+			if (fsm == SDEDM_FSM_READWAIT ||
+			    fsm == SDEDM_FSM_WRITESTART1)
+				/* Kick the FSM out of its wait */
+				writel(edm | SDEDM_FORCE_DATA_MODE,
+				       host->ioaddr + SDEDM);
 			bcm2835_finish_request(host);
 			return;
 		}
@@ -837,6 +847,8 @@
 		dev_err(dev, "timeout waiting for hardware interrupt.\n");
 		bcm2835_dumpregs(host);
 
+		bcm2835_reset(host->mmc);
+
 		if (host->data) {
 			host->data->error = -ETIMEDOUT;
 			bcm2835_finish_data(host);
@@ -1427,6 +1439,8 @@
 
 err:
 	dev_dbg(dev, "%s -> err %d\n", __func__, ret);
+	if (host->dma_chan_rxtx)
+		dma_release_channel(host->dma_chan_rxtx);
 	mmc_free_host(mmc);
 
 	return ret;
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 159270e..a8af682 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -201,7 +201,7 @@
 	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
 
 	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
-		(cq_host->num_slots - 1);
+		cq_host->mmc->cqe_qdepth;
 
 	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
 		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
@@ -217,12 +217,21 @@
 						 cq_host->desc_size,
 						 &cq_host->desc_dma_base,
 						 GFP_KERNEL);
+	if (!cq_host->desc_base)
+		return -ENOMEM;
+
 	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 					      cq_host->data_size,
 					      &cq_host->trans_desc_dma_base,
 					      GFP_KERNEL);
-	if (!cq_host->desc_base || !cq_host->trans_desc_base)
+	if (!cq_host->trans_desc_base) {
+		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
+				   cq_host->desc_base,
+				   cq_host->desc_dma_base);
+		cq_host->desc_base = NULL;
+		cq_host->desc_dma_base = 0;
 		return -ENOMEM;
+	}
 
 	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
 		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
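
The reworked cqhci allocation path frees the descriptor buffer and clears its DMA handle when the second allocation fails, so a later retry starts from a clean state. The unwind idiom in isolation, as a minimal sketch with malloc() standing in for dmam_alloc_coherent():

	#include <stdlib.h>

	struct bufs {
		void *desc_base;
		void *trans_desc_base;
	};

	/* Two-stage allocation: if the second stage fails, release the
	 * first and reset it to NULL so nothing stale survives. */
	static int alloc_bufs(struct bufs *b, size_t desc_sz, size_t trans_sz)
	{
		b->desc_base = malloc(desc_sz);
		if (!b->desc_base)
			return -1;

		b->trans_desc_base = malloc(trans_sz);
		if (!b->trans_desc_base) {
			free(b->desc_base);
			b->desc_base = NULL;
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct bufs b;

		return alloc_bufs(&b, 64, 4096) ? 1 : 0;
	}
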
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
index 54c3fbb..db56d4f 100644
--- a/drivers/mmc/host/dw_mmc-bluefield.c
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
@@ -1,11 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2018 Mellanox Technologies.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
 #include <linux/bitfield.h>
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 993386c9..864338e 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -983,17 +983,17 @@
 	if (!pdata->read_only_active_low)
 		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 
-	if (gpio_is_valid(pdata->gpio_card_detect)) {
-		ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
-		if (ret)
-			return ret;
-	}
+	/*
+	 * Get optional card detect and write protect GPIOs,
+	 * only back out on probe deferral.
+	 */
+	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+	if (ret == -EPROBE_DEFER)
+		return ret;
 
-	if (gpio_is_valid(pdata->gpio_read_only)) {
-		ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only);
-		if (ret)
-			return ret;
-	}
+	ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+	if (ret == -EPROBE_DEFER)
+		return ret;
 
 	return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power,
 			"MMC read only", true, pdata->power_active_low);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c201c37..ddd98cd 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -174,6 +174,8 @@
 	struct sd_emmc_desc *descs;
 	dma_addr_t descs_dma_addr;
 
+	int irq;
+
 	bool vqmmc_enabled;
 };
 
@@ -1181,7 +1183,7 @@
 	struct resource *res;
 	struct meson_host *host;
 	struct mmc_host *mmc;
-	int ret, irq;
+	int ret;
 
 	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
 	if (!mmc)
@@ -1228,8 +1230,8 @@
 		goto free_host;
 	}
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
+	host->irq = platform_get_irq(pdev, 0);
+	if (host->irq <= 0) {
 		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
 		ret = -EINVAL;
 		goto free_host;
@@ -1283,9 +1285,9 @@
 	writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
 	       host->regs + SD_EMMC_IRQ_EN);
 
-	ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
-					meson_mmc_irq_thread, IRQF_SHARED,
-					NULL, host);
+	ret = request_threaded_irq(host->irq, meson_mmc_irq,
+				   meson_mmc_irq_thread, IRQF_SHARED,
+				   dev_name(&pdev->dev), host);
 	if (ret)
 		goto err_init_clk;
 
@@ -1303,7 +1305,7 @@
 	if (host->bounce_buf == NULL) {
 		dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
 		ret = -ENOMEM;
-		goto err_init_clk;
+		goto err_free_irq;
 	}
 
 	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
@@ -1322,6 +1324,8 @@
 err_bounce_buf:
 	dma_free_coherent(host->dev, host->bounce_buf_size,
 			  host->bounce_buf, host->bounce_dma_addr);
+err_free_irq:
+	free_irq(host->irq, host);
 err_init_clk:
 	clk_disable_unprepare(host->mmc_clk);
 err_core_clk:
@@ -1339,6 +1343,7 @@
 
 	/* disable interrupts */
 	writel(0, host->regs + SD_EMMC_IRQ_EN);
+	free_irq(host->irq, host);
 
 	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
 			  host->descs, host->descs_dma_addr);
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 2cfec33..9841b44 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -596,6 +596,9 @@
 	init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
 				   "%s#fixed_factor",
 				   dev_name(host->controller_dev));
+	if (!init.name)
+		return -ENOMEM;
+
 	init.ops = &clk_fixed_factor_ops;
 	init.flags = 0;
 	init.parent_names = &clk_fixed_factor_parent;
@@ -612,6 +615,9 @@
 	clk_div_parent = __clk_get_name(host->fixed_factor_clk);
 	init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
 				   "%s#div", dev_name(host->controller_dev));
+	if (!init.name)
+		return -ENOMEM;
+
 	init.ops = &clk_divider_ops;
 	init.flags = CLK_SET_RATE_PARENT;
 	init.parent_names = &clk_div_parent;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 476e53d..67f6bd2 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1447,6 +1447,7 @@
 		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
 		mmc_gpiod_request_cd_irq(mmc);
 	}
+	mmc_detect_change(mmc, 0);
 
 	if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
 		has_ro = true;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 0484138..f171cce 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -784,7 +784,7 @@
 
 	if (timing == MMC_TIMING_MMC_HS400 &&
 	    host->dev_comp->hs400_tune)
-		sdr_set_field(host->base + PAD_CMD_TUNE,
+		sdr_set_field(host->base + tune_reg,
 			      MSDC_PAD_TUNE_CMDRRDLY,
 			      host->hs400_cmd_int_delay);
 	dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 68760d4..b23c57e 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2066,7 +2066,6 @@
 	mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
 	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
-	mmc->max_seg_size = mmc->max_req_size;
 
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
@@ -2096,6 +2095,17 @@
 		goto err_irq;
 	}
 
+	/*
+	 * Limit the maximum segment size to the lower of the request size
+	 * and the DMA engine device segment size limits.  In reality, with
+	 * 32-bit transfers, the DMA engine can do longer segments than this
+	 * but there is no way to represent that in the DMA model - if we
+	 * increase this figure here, we get warnings from the DMA API debug.
+	 */
+	mmc->max_seg_size = min3(mmc->max_req_size,
+			dma_get_max_seg_size(host->rx_chan->device->dev),
+			dma_get_max_seg_size(host->tx_chan->device->dev));
+
 	/* Request IRQ for MMC operations */
 	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
 			mmc_hostname(mmc), host);
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 5389c48..c3d63ed 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -68,6 +68,7 @@
 	.scc_offset	= 0x0300,
 	.taps		= rcar_gen2_scc_taps,
 	.taps_num	= ARRAY_SIZE(rcar_gen2_scc_taps),
+	.max_blk_count	= 0xffffffff,
 };
 
 /* Definitions for sampling clocks */
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index f44e490..753973d 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1097,11 +1097,12 @@
 		writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
 			| ESDHC_BURST_LEN_EN_INCR,
 			host->ioaddr + SDHCI_HOST_CONTROL);
+
 		/*
-		* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
-		* TO1.1, it's harmless for MX6SL
-		*/
-		writel(readl(host->ioaddr + 0x6c) | BIT(7),
+		 * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+		 * TO1.1, it's harmless for MX6SL
+		 */
+		writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
 			host->ioaddr + 0x6c);
 
 		/* disable DLL_CTRL delay line settings */
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index d0e83db..94eeed2 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -279,7 +279,10 @@
 
 	iproc_host->data = iproc_data;
 
-	mmc_of_parse(host->mmc);
+	ret = mmc_of_parse(host->mmc);
+	if (ret)
+		goto err;
+
 	sdhci_get_of_property(pdev);
 
 	host->mmc->caps |= iproc_host->data->mmc_caps;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index a82a8cb..495cab4 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1170,6 +1170,29 @@
 			drv_type);
 }
 
+static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u32 config, oldconfig = readl_relaxed(host->ioaddr +
+					      msm_host_offset->CORE_DLL_CONFIG);
+
+	config = oldconfig;
+	if (enable) {
+		config |= CORE_CDR_EN;
+		config &= ~CORE_CDR_EXT_EN;
+	} else {
+		config &= ~CORE_CDR_EN;
+		config |= CORE_CDR_EXT_EN;
+	}
+
+	if (config != oldconfig)
+		writel_relaxed(config, host->ioaddr +
+			       msm_host_offset->CORE_DLL_CONFIG);
+}
+
 int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
 {
 	unsigned long flags;
@@ -1195,8 +1218,14 @@
 	if (host->clock <= CORE_FREQ_100MHZ ||
 		!((ios.timing == MMC_TIMING_MMC_HS400) ||
 		(ios.timing == MMC_TIMING_MMC_HS200) ||
-		(ios.timing == MMC_TIMING_UHS_SDR104)))
+		(ios.timing == MMC_TIMING_UHS_SDR104))) {
+		msm_host->use_cdr = false;
+		sdhci_msm_set_cdr(host, false);
 		return 0;
+	}
+
+	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
+	msm_host->use_cdr = true;
 
 	/*
 	 * Don't allow re-tuning for CRC errors observed for any commands
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 902edd4..5c1c961 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -252,6 +252,8 @@
 	bool use_7nm_dll;
 	int soc_min_rev;
 	struct workqueue_struct *pm_qos_wq;
+	bool use_cdr;
+	u32 transfer_mode;
 };
 
 extern char *saved_command_line;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 9cb7554..a7bf851 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -526,8 +526,12 @@
 	/* Wait max 20 ms */
 	timeout = ktime_add_ms(ktime_get(), 20);
 	val = ESDHC_CLOCK_STABLE;
-	while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
-		if (ktime_after(ktime_get(), timeout)) {
+	while (1) {
+		bool timedout = ktime_after(ktime_get(), timeout);
+
+		if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
+			break;
+		if (timedout) {
 			pr_err("%s: Internal clock never stabilised.\n",
 				mmc_hostname(host->mmc));
 			break;
@@ -591,8 +595,12 @@
 
 	/* Wait max 20 ms */
 	timeout = ktime_add_ms(ktime_get(), 20);
-	while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) {
-		if (ktime_after(ktime_get(), timeout)) {
+	while (1) {
+		bool timedout = ktime_after(ktime_get(), timeout);
+
+		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
+			break;
+		if (timedout) {
 			pr_err("%s: Internal clock never stabilised.\n",
 				mmc_hostname(host->mmc));
 			return;
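
All of these loops are restructured the same way: the timeout flag is sampled first and the hardware condition last, so a thread preempted between reading the clock and reading the register still gets one final look at the register before declaring failure. The pattern in a standalone sketch (poll_stable() is illustrative; clock_gettime() stands in for ktime_get()):

	#include <stdbool.h>
	#include <time.h>

	static long long now_ms(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
	}

	/* Note the order inside the loop: deadline first, condition last,
	 * so a late preemption cannot turn a success into a false timeout. */
	static bool poll_stable(bool (*cond)(void), long long timeout_ms)
	{
		long long deadline = now_ms() + timeout_ms;

		while (1) {
			bool timedout = now_ms() > deadline;

			if (cond())
				return true;
			if (timedout)
				return false;
		}
	}

	static bool always_ready(void) { return true; }

	int main(void)
	{
		return poll_stable(always_ready, 20) ? 0 : 1;
	}
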
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index d264391..d02f5cf 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -220,8 +220,12 @@
 
 	/* wait 1ms */
 	timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
-	while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) {
-		if (WARN_ON(ktime_after(ktime_get(), timeout)))
+	while (1) {
+		bool timedout = ktime_after(ktime_get(), timeout);
+
+		if (sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)
+			break;
+		if (WARN_ON(timedout))
 			return;
 		usleep_range(5, 10);
 	}
@@ -653,8 +657,12 @@
 
 	/* wait 1ms */
 	timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
-	while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) {
-		if (WARN_ON(ktime_after(ktime_get(), timeout)))
+	while (1) {
+		bool timedout = ktime_after(ktime_get(), timeout);
+
+		if (sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)
+			break;
+		if (WARN_ON(timedout))
 			return;
 		usleep_range(5, 10);
 	}
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index c335052..caccedc 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -357,9 +357,13 @@
 
 	/* Wait max 32 ms */
 	timeout = ktime_add_ms(ktime_get(), 32);
-	while (!(sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) &
-		XENON_DLL_LOCK_STATE)) {
-		if (ktime_after(ktime_get(), timeout)) {
+	while (1) {
+		bool timedout = ktime_after(ktime_get(), timeout);
+
+		if (sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) &
+		    XENON_DLL_LOCK_STATE)
+			break;
+		if (timedout) {
 			dev_err(mmc_dev(host->mmc), "Wait for DLL Lock time-out\n");
 			return -ETIMEDOUT;
 		}
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 4d0791f..a0b5089 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -34,9 +34,13 @@
 	sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL);
 	/* Wait max 20 ms */
 	timeout = ktime_add_ms(ktime_get(), 20);
-	while (!((reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
-			& SDHCI_CLOCK_INT_STABLE)) {
-		if (ktime_after(ktime_get(), timeout)) {
+	while (1) {
+		bool timedout = ktime_after(ktime_get(), timeout);
+
+		reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+		if (reg & SDHCI_CLOCK_INT_STABLE)
+			break;
+		if (timedout) {
 			dev_err(mmc_dev(host->mmc), "Internal clock never stabilised.\n");
 			return -ETIMEDOUT;
 		}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 1eb37e1..ce02455 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -4764,7 +4764,7 @@
 	mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
 		(host->flags & SDHCI_USE_ADMA) ?
 		((host->flags & SDHCI_USE_64_BIT_DMA) ?
-		"64-bit ADMA" : "32-bit ADMA") :
+		"64-bit ADMA" : "32-bit ADMA") : "",
 		((host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"));
 
 	sdhci_enable_card_detection(host);
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 568349e..c458418 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1394,6 +1394,21 @@
 	if (ret)
 		goto error_free_dma;
 
+	/*
+	 * If we don't support delay chains in the SoC, we can't use any
+	 * of the higher speed modes. Mask them out in case the device
+	 * tree specifies the properties for them, which gets added to
+	 * the caps by mmc_of_parse() above.
+	 */
+	if (!(host->cfg->clk_delays || host->use_new_timings)) {
+		mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
+			       MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
+		mmc->caps2 &= ~MMC_CAP2_HS200;
+	}
+
+	/* TODO: This driver doesn't support HS400 mode yet */
+	mmc->caps2 &= ~MMC_CAP2_HS400;
+
 	ret = sunxi_mmc_init_host(host);
 	if (ret)
 		goto error_free_dma;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 5d141f7..7c40a7e 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -279,6 +279,11 @@
 	iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
 }
 
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+	iowrite32(val, host->ctl + (addr << host->bus_shift));
+}
+
 static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
 				       const u32 *buf, int count)
 {
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 261b4d6..7d13ca9 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -46,6 +46,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/mmc/sdio.h>
 #include <linux/scatterlist.h>
+#include <linux/sizes.h>
 #include <linux/spinlock.h>
 #include <linux/swiotlb.h>
 #include <linux/workqueue.h>
@@ -703,7 +704,7 @@
 	return false;
 }
 
-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_host *mmc = host->mmc;
 	struct tmio_mmc_data *pdata = host->pdata;
@@ -711,7 +712,7 @@
 	unsigned int sdio_status;
 
 	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
-		return;
+		return false;
 
 	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
 	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
@@ -724,6 +725,8 @@
 
 	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
 		mmc_signal_sdio_irq(mmc);
+
+	return ireg;
 }
 
 irqreturn_t tmio_mmc_irq(int irq, void *devid)
@@ -742,9 +745,10 @@
 	if (__tmio_mmc_sdcard_irq(host, ireg, status))
 		return IRQ_HANDLED;
 
-	__tmio_mmc_sdio_irq(host);
+	if (__tmio_mmc_sdio_irq(host))
+		return IRQ_HANDLED;
 
-	return IRQ_HANDLED;
+	return IRQ_NONE;
 }
 EXPORT_SYMBOL_GPL(tmio_mmc_irq);
 
@@ -774,7 +778,10 @@
 
 	/* Set transfer length / blocksize */
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
-	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+	if (host->mmc->max_blk_count >= SZ_64K)
+		sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
+	else
+		sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
 	tmio_mmc_start_dma(host, data);
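
With __tmio_mmc_sdio_irq() now reporting whether any of its status bits were pending, the top-level handler can return IRQ_NONE when the interrupt was not ours, which is what lets the core's spurious-interrupt detection work on shared lines. The handler shape, sketched outside the kernel with mocked status checks:

	typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;

	static int sdcard_pending(unsigned int status) { return status & 0x1; }
	static int sdio_pending(unsigned int status)   { return status & 0x2; }

	/* Claim the interrupt only if one of our sources was pending;
	 * IRQ_NONE tells the core this device did not raise the line. */
	static irqreturn_t mmc_irq(unsigned int status)
	{
		if (sdcard_pending(status))
			return IRQ_HANDLED;
		if (sdio_pending(status))
			return IRQ_HANDLED;
		return IRQ_NONE;
	}

	int main(void)
	{
		return mmc_irq(0x2) == IRQ_HANDLED ? 0 : 1;
	}
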
 
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 99c460f..0bbb23b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -470,6 +470,10 @@
 		/* let's register it anyway to preserve ordering */
 		slave->offset = 0;
 		slave->mtd.size = 0;
+
+		/* Initialize ->erasesize to make add_mtd_device() happy. */
+		slave->mtd.erasesize = parent->erasesize;
+
 		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
 			part->name);
 		goto out_register;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index 88ea220..322a008 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -155,9 +155,10 @@
 
 	/*
 	 * Reset BCH here, too. We got failures otherwise :(
-	 * See later BCH reset for explanation of MX23 handling
+	 * See later BCH reset for explanation of MX23 and MX28 handling
 	 */
-	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+	ret = gpmi_reset_block(r->bch_regs,
+			       GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
 	if (ret)
 		goto err_out;
 
@@ -263,12 +264,10 @@
 	/*
 	* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
 	* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
-	* On the other hand, the MX28 needs the reset, because one case has been
-	* seen where the BCH produced ECC errors constantly after 10000
-	* consecutive reboots. The latter case has not been seen on the MX23
-	* yet, still we don't know if it could happen there as well.
+	* The MX28 needs the same handling, so the reset is skipped there too.
 	*/
-	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+	ret = gpmi_reset_block(r->bch_regs,
+			       GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
 	if (ret)
 		goto err_out;
 
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index c7573cc..9c90695 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -444,9 +444,14 @@
 	writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
 }
 
-static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
 {
+	u32 reg;
+
+	reg = readl_relaxed(nfc->regs + NDSR);
 	writel_relaxed(int_mask, nfc->regs + NDSR);
+
+	return reg & int_mask;
 }
 
 static void marvell_nfc_force_byte_access(struct nand_chip *chip,
@@ -613,6 +618,7 @@
 static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
 {
 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+	u32 pending;
 	int ret;
 
 	/* Timeout is expressed in ms */
@@ -625,8 +631,13 @@
 	ret = wait_for_completion_timeout(&nfc->complete,
 					  msecs_to_jiffies(timeout_ms));
 	marvell_nfc_disable_int(nfc, NDCR_RDYM);
-	marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
-	if (!ret) {
+	pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+
+	/*
+	 * In case the interrupt was not served in the required time frame,
+	 * check if the ISR was not served or if something went actually wrong.
+	 */
+	if (ret && !pending) {
 		dev_err(nfc->dev, "Timeout waiting for RB signal\n");
 		return -ETIMEDOUT;
 	}
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 4546ac0..b1683d7 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1938,7 +1938,7 @@
 	case NAND_OMAP_PREFETCH_DMA:
 		dma_cap_zero(mask);
 		dma_cap_set(DMA_SLAVE, mask);
-		info->dma = dma_request_chan(dev, "rxtx");
+		info->dma = dma_request_chan(dev->parent, "rxtx");
 
 		if (IS_ERR(info->dma)) {
 			dev_err(dev, "DMA engine request failed\n");
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 8815f3e..880e75f 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2839,6 +2839,16 @@
 	if (ret)
 		return ret;
 
+	if (nandc->props->is_bam) {
+		free_bam_transaction(nandc);
+		nandc->bam_txn = alloc_bam_transaction(nandc);
+		if (!nandc->bam_txn) {
+			dev_err(nandc->dev,
+				"failed to allocate bam transaction\n");
+			return -ENOMEM;
+		}
+	}
+
 	ret = mtd_device_register(mtd, NULL, 0);
 	if (ret)
 		nand_cleanup(chip);
@@ -2853,16 +2863,6 @@
 	struct qcom_nand_host *host;
 	int ret;
 
-	if (nandc->props->is_bam) {
-		free_bam_transaction(nandc);
-		nandc->bam_txn = alloc_bam_transaction(nandc);
-		if (!nandc->bam_txn) {
-			dev_err(nandc->dev,
-				"failed to allocate bam transaction\n");
-			return -ENOMEM;
-		}
-	}
-
 	for_each_available_child_of_node(dn, child) {
 		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
 		if (!host) {
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 30f8364..8c7bf91 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -304,24 +304,30 @@
 	struct nand_device *nand = spinand_to_nand(spinand);
 	struct mtd_info *mtd = nanddev_to_mtd(nand);
 	struct nand_page_io_req adjreq = *req;
-	unsigned int nbytes = 0;
-	void *buf = NULL;
+	void *buf = spinand->databuf;
+	unsigned int nbytes;
 	u16 column = 0;
 	int ret;
 
-	memset(spinand->databuf, 0xff,
-	       nanddev_page_size(nand) +
-	       nanddev_per_page_oobsize(nand));
+	/*
+	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
+	 * the cache content to 0xFF (depends on vendor implementation), so we
+	 * must fill the page cache entirely even if we only want to program
+	 * the data portion of the page, otherwise we might corrupt the BBM or
+	 * user data previously programmed in OOB area.
+	 */
+	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+	memset(spinand->databuf, 0xff, nbytes);
+	adjreq.dataoffs = 0;
+	adjreq.datalen = nanddev_page_size(nand);
+	adjreq.databuf.out = spinand->databuf;
+	adjreq.ooblen = nanddev_per_page_oobsize(nand);
+	adjreq.ooboffs = 0;
+	adjreq.oobbuf.out = spinand->oobbuf;
 
-	if (req->datalen) {
+	if (req->datalen)
 		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
 		       req->datalen);
-		adjreq.dataoffs = 0;
-		adjreq.datalen = nanddev_page_size(nand);
-		adjreq.databuf.out = spinand->databuf;
-		nbytes = adjreq.datalen;
-		buf = spinand->databuf;
-	}
 
 	if (req->ooblen) {
 		if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,14 +338,6 @@
 		else
 			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
 			       req->ooblen);
-
-		adjreq.ooblen = nanddev_per_page_oobsize(nand);
-		adjreq.ooboffs = 0;
-		nbytes += nanddev_per_page_oobsize(nand);
-		if (!buf) {
-			buf = spinand->oobbuf;
-			column = nanddev_page_size(nand);
-		}
 	}
 
 	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
@@ -370,8 +368,8 @@
 
 		/*
 		 * We need to use the RANDOM LOAD CACHE operation if there's
-		 * more than one iteration, because the LOAD operation resets
-		 * the cache to 0xff.
+		 * more than one iteration, because the LOAD operation might
+		 * reset the cache to 0xff.
 		 */
 		if (nbytes) {
 			column = op.addr.val;
@@ -1016,11 +1014,11 @@
 	for (i = 0; i < nand->memorg.ntargets; i++) {
 		ret = spinand_select_target(spinand, i);
 		if (ret)
-			goto err_free_bufs;
+			goto err_manuf_cleanup;
 
 		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
 		if (ret)
-			goto err_free_bufs;
+			goto err_manuf_cleanup;
 	}
 
 	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 6cc9c92..37775fc 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -41,7 +41,7 @@
 
 config SPI_ATMEL_QUADSPI
 	tristate "Atmel Quad SPI Controller"
-	depends on ARCH_AT91 || (ARM && COMPILE_TEST)
+	depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
 	depends on OF && HAS_IOMEM
 	help
 	  This enables support for the Quad SPI controller in master mode.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3c59756..b2c42ca 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1171,29 +1171,22 @@
 		}
 	}
 
-	/* Link-local multicast packets should be passed to the
-	 * stack on the link they arrive as well as pass them to the
-	 * bond-master device. These packets are mostly usable when
-	 * stack receives it with the link on which they arrive
-	 * (e.g. LLDP) they also must be available on master. Some of
-	 * the use cases include (but are not limited to): LLDP agents
-	 * that must be able to operate both on enslaved interfaces as
-	 * well as on bonds themselves; linux bridges that must be able
-	 * to process/pass BPDUs from attached bonds when any kind of
-	 * STP version is enabled on the network.
+	/*
+	 * For packets that bond_should_deliver_exact_match() determines
+	 * should be suppressed, we make an exception for link-local packets.
+	 * This is necessary for e.g. LLDP daemons to be able to monitor
+	 * inactive slave links without being forced to bind to them
+	 * explicitly.
+	 *
+	 * At the same time, packets that are passed to the bonding master
+	 * (including link-local ones) can have their originating interface
+	 * determined via PACKET_ORIGDEV socket option.
 	 */
-	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
-		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
-		if (nskb) {
-			nskb->dev = bond->dev;
-			nskb->queue_mapping = 0;
-			netif_rx(nskb);
-		}
-		return RX_HANDLER_PASS;
-	}
-	if (bond_should_deliver_exact_match(skb, slave, bond))
+	if (bond_should_deliver_exact_match(skb, slave, bond)) {
+		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+			return RX_HANDLER_PASS;
 		return RX_HANDLER_EXACT;
+	}
 
 	skb->dev = bond->dev;
 
@@ -1947,6 +1940,9 @@
 	if (!bond_has_slaves(bond)) {
 		bond_set_carrier(bond);
 		eth_hw_addr_random(bond_dev);
+		bond->nest_level = SINGLE_DEPTH_NESTING;
+	} else {
+		bond->nest_level = dev_get_nest_level(bond_dev) + 1;
 	}
 
 	unblock_netpoll_tx();
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3b3f88f..c05e4d5 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -480,8 +480,6 @@
 struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
 {
 	struct can_priv *priv = netdev_priv(dev);
-	struct sk_buff *skb = priv->echo_skb[idx];
-	struct canfd_frame *cf;
 
 	if (idx >= priv->echo_skb_max) {
 		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
@@ -489,20 +487,21 @@
 		return NULL;
 	}
 
-	if (!skb) {
-		netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
-			   __func__, idx);
-		return NULL;
+	if (priv->echo_skb[idx]) {
+		/* Using "struct canfd_frame::len" for the frame
+		 * length is supported on both CAN and CANFD frames.
+		 */
+		struct sk_buff *skb = priv->echo_skb[idx];
+		struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+		u8 len = cf->len;
+
+		*len_ptr = len;
+		priv->echo_skb[idx] = NULL;
+
+		return skb;
 	}
 
-	/* Using "struct canfd_frame::len" for the frame
-	 * length is supported on both CAN and CANFD frames.
-	 */
-	cf = (struct canfd_frame *)skb->data;
-	*len_ptr = cf->len;
-	priv->echo_skb[idx] = NULL;
-
-	return skb;
+	return NULL;
 }
 
 /*
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 75ce113..ae219b8 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1004,7 +1004,7 @@
 		}
 	} else {
 		/* clear and invalidate unused mailboxes first */
-		for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
+		for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) {
 			priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
 				    &regs->mb[i].can_ctrl);
 		}
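
The flexcan change is a classic off-by-one: ARRAY_SIZE() yields the element count, so valid indices run from 0 to ARRAY_SIZE() - 1, and the old <= bound touched one mailbox past the end of regs->mb. The correct bound in isolation:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	int main(void)
	{
		unsigned int mb[64];
		size_t i;

		/* i stops at ARRAY_SIZE(mb) - 1, the last valid index */
		for (i = 0; i < ARRAY_SIZE(mb); i++)
			mb[i] = 0;

		printf("%zu mailboxes cleared\n", ARRAY_SIZE(mb));
		return 0;
	}
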
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8da3d39..c078c79 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -261,6 +261,7 @@
 	unsigned int sub_irq;
 	unsigned int n;
 	u16 reg;
+	u16 ctl1;
 	int err;
 
 	mutex_lock(&chip->reg_lock);
@@ -270,13 +271,28 @@
 	if (err)
 		goto out;
 
-	for (n = 0; n < chip->g1_irq.nirqs; ++n) {
-		if (reg & (1 << n)) {
-			sub_irq = irq_find_mapping(chip->g1_irq.domain, n);
-			handle_nested_irq(sub_irq);
-			++nhandled;
+	do {
+		for (n = 0; n < chip->g1_irq.nirqs; ++n) {
+			if (reg & (1 << n)) {
+				sub_irq = irq_find_mapping(chip->g1_irq.domain,
+							   n);
+				handle_nested_irq(sub_irq);
+				++nhandled;
+			}
 		}
-	}
+
+		mutex_lock(&chip->reg_lock);
+		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
+		if (err)
+			goto unlock;
+		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
+unlock:
+		mutex_unlock(&chip->reg_lock);
+		if (err)
+			goto out;
+		ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
+	} while (reg & ctl1);
+
 out:
 	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
 }
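
The reworked g1 handler keeps dispatching until a fresh read of the status register, masked by the sources enabled in CTL1, comes back clean; with an edge-triggered parent interrupt, a source asserting while earlier ones are serviced would otherwise never produce a new edge. A self-contained sketch of that loop, with fake_status and the read_* helpers as stand-ins for the device accessors:

	#include <stdio.h>

	static unsigned int fake_status = 0x5;	/* two sources pending initially */
	static unsigned int fake_enabled = 0xf;

	static unsigned int read_status(void)  { unsigned int s = fake_status; fake_status = 0; return s; }
	static unsigned int read_enabled(void) { return fake_enabled; }

	/* Keep servicing until a re-read of the status, masked by the
	 * enabled sources, shows nothing pending. */
	int main(void)
	{
		int nhandled = 0;
		unsigned int pending = read_status();

		do {
			unsigned int n;

			for (n = 0; n < 32; n++)
				if (pending & (1u << n))
					nhandled++;

			pending = read_status() & read_enabled();
		} while (pending);

		printf("%d sources handled\n", nhandled);	/* 2 */
		return 0;
	}
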
@@ -868,7 +884,7 @@
 	default:
 		return U64_MAX;
 	}
-	value = (((u64)high) << 16) | low;
+	value = (((u64)high) << 32) | low;
 	return value;
 }
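
The one-line statistics fix is worth spelling out: high holds bits 63..32 of the counter, so it must be shifted by 32; shifting by 16 landed the high word in the middle of the low one. For example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t high = 0x1, low = 0x10;

		/* high carries bits 63..32, hence the shift by 32 */
		uint64_t value = ((uint64_t)high << 32) | low;

		printf("0x%llx\n", (unsigned long long)value);	/* 0x100000010 */
		return 0;
	}
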
 
@@ -2391,6 +2407,107 @@
 	return mv88e6xxx_g1_stats_clear(chip);
 }
 
+/* The mv88e6390 has some hidden registers used for debug and
+ * development. The erratum workaround below also makes use of them.
+ */
+static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
+				  int reg, u16 val)
+{
+	u16 ctrl;
+	int err;
+
+	err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
+				   PORT_RESERVED_1A, val);
+	if (err)
+		return err;
+
+	ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
+	       PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
+	       reg;
+
+	return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
+				    PORT_RESERVED_1A, ctrl);
+}
+
+static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
+{
+	return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
+			      PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
+}
+
+
+static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
+				  int reg, u16 *val)
+{
+	u16 ctrl;
+	int err;
+
+	ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
+	       PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
+	       reg;
+
+	err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
+				   PORT_RESERVED_1A, ctrl);
+	if (err)
+		return err;
+
+	err = mv88e6390_hidden_wait(chip);
+	if (err)
+		return err;
+
+	return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
+				    PORT_RESERVED_1A, val);
+}
+
+/* Check if the errata has already been applied. */
+static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
+{
+	int port;
+	int err;
+	u16 val;
+
+	for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+		err = mv88e6390_hidden_read(chip, port, 0, &val);
+		if (err) {
+			dev_err(chip->dev,
+				"Error reading hidden register: %d\n", err);
+			return false;
+		}
+		if (val != 0x01c0)
+			return false;
+	}
+
+	return true;
+}
+
+/* The 6390 copper ports have an erratum which requires poking magic
+ * values into undocumented hidden registers and then performing a
+ * software reset.
+ */
+static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
+{
+	int port;
+	int err;
+
+	if (mv88e6390_setup_errata_applied(chip))
+		return 0;
+
+	/* Set the ports into blocking mode */
+	for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+		err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
+		if (err)
+			return err;
+	}
+
+	for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+		err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
+		if (err)
+			return err;
+	}
+
+	return mv88e6xxx_software_reset(chip);
+}
+
 static int mv88e6xxx_setup(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
@@ -2403,6 +2520,12 @@
 
 	mutex_lock(&chip->reg_lock);
 
+	if (chip->info->ops->setup_errata) {
+		err = chip->info->ops->setup_errata(chip);
+		if (err)
+			goto unlock;
+	}
+
 	/* Cache the cmode of each port. */
 	for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
 		if (chip->info->ops->port_get_cmode) {
@@ -2947,7 +3070,7 @@
 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
 	.port_link_state = mv88e6352_port_link_state,
 	.port_get_cmode = mv88e6185_port_get_cmode,
-	.stats_snapshot = mv88e6320_g1_stats_snapshot,
+	.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
 	.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
 	.stats_get_sset_count = mv88e6095_stats_get_sset_count,
 	.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3201,6 +3324,7 @@
 
 static const struct mv88e6xxx_ops mv88e6190_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3243,6 +3367,7 @@
 
 static const struct mv88e6xxx_ops mv88e6190x_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3285,6 +3410,7 @@
 
 static const struct mv88e6xxx_ops mv88e6191_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3374,6 +3500,7 @@
 
 static const struct mv88e6xxx_ops mv88e6290_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3675,6 +3802,7 @@
 
 static const struct mv88e6xxx_ops mv88e6390_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3722,6 +3850,7 @@
 
 static const struct mv88e6xxx_ops mv88e6390x_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -4059,7 +4188,7 @@
 		.name = "Marvell 88E6190",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4082,7 +4211,7 @@
 		.name = "Marvell 88E6190X",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4105,7 +4234,7 @@
 		.name = "Marvell 88E6191",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
 		.phy_base_addr = 0x0,
@@ -4152,7 +4281,7 @@
 		.name = "Marvell 88E6290",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4314,7 +4443,7 @@
 		.name = "Marvell 88E6390",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4337,7 +4466,7 @@
 		.name = "Marvell 88E6390X",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4432,6 +4561,14 @@
 	return 0;
 }
 
+static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
+{
+	int i;
+
+	for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+		chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
+}
+
 static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
 							int port)
 {
@@ -4468,6 +4605,8 @@
 	if (err)
 		goto free;
 
+	mv88e6xxx_ports_cmode_init(chip);
+
 	mutex_lock(&chip->reg_lock);
 	err = mv88e6xxx_switch_reset(chip);
 	mutex_unlock(&chip->reg_lock);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index f9ecb78..546651d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -300,6 +300,11 @@
 };
 
 struct mv88e6xxx_ops {
+	/* Switch Setup Errata, called early in the switch setup to
+	 * allow any errata actions to be performed
+	 */
+	int (*setup_errata)(struct mv88e6xxx_chip *chip);
+
 	int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
 	int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
 
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 5200e4b..ea24384 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -314,6 +314,7 @@
 {
 	struct mv88e6xxx_chip *chip = dev_id;
 	struct mv88e6xxx_atu_entry entry;
+	int spid;
 	int err;
 	u16 val;
 
@@ -336,6 +337,8 @@
 	if (err)
 		goto out;
 
+	spid = entry.state;
+
 	if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
 		dev_err_ratelimited(chip->dev,
 				    "ATU age out violation for %pM\n",
@@ -344,23 +347,23 @@
 
 	if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
 		dev_err_ratelimited(chip->dev,
-				    "ATU member violation for %pM portvec %x\n",
-				    entry.mac, entry.portvec);
-		chip->ports[entry.portvec].atu_member_violation++;
+				    "ATU member violation for %pM portvec %x spid %d\n",
+				    entry.mac, entry.portvec, spid);
+		chip->ports[spid].atu_member_violation++;
 	}
 
 	if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
 		dev_err_ratelimited(chip->dev,
-				    "ATU miss violation for %pM portvec %x\n",
-				    entry.mac, entry.portvec);
-		chip->ports[entry.portvec].atu_miss_violation++;
+				    "ATU miss violation for %pM portvec %x spid %d\n",
+				    entry.mac, entry.portvec, spid);
+		chip->ports[spid].atu_miss_violation++;
 	}
 
 	if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
 		dev_err_ratelimited(chip->dev,
-				    "ATU full violation for %pM portvec %x\n",
-				    entry.mac, entry.portvec);
-		chip->ports[entry.portvec].atu_full_violation++;
+				    "ATU full violation for %pM portvec %x spid %d\n",
+				    entry.mac, entry.portvec, spid);
+		chip->ports[spid].atu_full_violation++;
 	}
 	mutex_unlock(&chip->reg_lock);
 
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 9294584..7fffce7 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -190,7 +190,7 @@
 		/* normal duplex detection */
 		break;
 	default:
-		return -EINVAL;
+		return -EOPNOTSUPP;
 	}
 
 	err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
@@ -374,6 +374,10 @@
 		cmode = 0;
 	}
 
+	/* cmode doesn't change, nothing to do for us */
+	if (cmode == chip->ports[port].cmode)
+		return 0;
+
 	lane = mv88e6390x_serdes_get_lane(chip, port);
 	if (lane < 0)
 		return lane;
@@ -384,7 +388,7 @@
 			return err;
 	}
 
-	err = mv88e6390_serdes_power(chip, port, false);
+	err = mv88e6390x_serdes_power(chip, port, false);
 	if (err)
 		return err;
 
@@ -400,7 +404,7 @@
 		if (err)
 			return err;
 
-		err = mv88e6390_serdes_power(chip, port, true);
+		err = mv88e6390x_serdes_power(chip, port, true);
 		if (err)
 			return err;
 
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index f32f56a..95b59f5 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -52,6 +52,7 @@
 #define MV88E6185_PORT_STS_CMODE_1000BASE_X	0x0005
 #define MV88E6185_PORT_STS_CMODE_PHY		0x0006
 #define MV88E6185_PORT_STS_CMODE_DISABLED	0x0007
+#define MV88E6XXX_PORT_STS_CMODE_INVALID	0xff
 
 /* Offset 0x01: MAC (or PCS or Physical) Control Register */
 #define MV88E6XXX_PORT_MAC_CTL				0x01
@@ -251,6 +252,16 @@
 /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
 #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567	0x19
 
+/* Offset 0x1a: Magic undocumented errata register */
+#define PORT_RESERVED_1A			0x1a
+#define PORT_RESERVED_1A_BUSY			BIT(15)
+#define PORT_RESERVED_1A_WRITE			BIT(14)
+#define PORT_RESERVED_1A_READ			0
+#define PORT_RESERVED_1A_PORT_SHIFT		5
+#define PORT_RESERVED_1A_BLOCK			(0xf << 10)
+#define PORT_RESERVED_1A_CTRL_PORT		4
+#define PORT_RESERVED_1A_DATA_PORT		5
+
 int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
 			u16 *val);
 int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
index b4b839a..ad41ec6 100644
--- a/drivers/net/dsa/realtek-smi.c
+++ b/drivers/net/dsa/realtek-smi.c
@@ -347,16 +347,17 @@
 	struct device_node *mdio_np;
 	int ret;
 
-	mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
-					  "realtek,smi-mdio");
+	mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
 	if (!mdio_np) {
 		dev_err(smi->dev, "no MDIO bus node\n");
 		return -ENODEV;
 	}
 
 	smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
-	if (!smi->slave_mii_bus)
-		return -ENOMEM;
+	if (!smi->slave_mii_bus) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
 	smi->slave_mii_bus->priv = smi;
 	smi->slave_mii_bus->name = "SMI slave MII";
 	smi->slave_mii_bus->read = realtek_smi_mdio_read;
@@ -371,10 +372,15 @@
 	if (ret) {
 		dev_err(smi->dev, "unable to register MDIO bus %s\n",
 			smi->slave_mii_bus->id);
-		of_node_put(mdio_np);
+		goto err_put_node;
 	}
 
 	return 0;
+
+err_put_node:
+	of_node_put(mdio_np);
+
+	return ret;
 }
 
 static int realtek_smi_probe(struct platform_device *pdev)
@@ -457,6 +463,8 @@
 	struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
 
 	dsa_unregister_switch(smi->ds);
+	if (smi->slave_mii_bus)
+		of_node_put(smi->slave_mii_bus->dev.of_node);
 	gpiod_set_value(smi->reset, 1);
 
 	return 0;
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 0fb986b..0ae723f 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -145,7 +145,8 @@
 			& 0xffff;
 
 	if (inuse) { /* Tx FIFO is not empty */
-		ready = priv->tx_prod - priv->tx_cons - inuse - 1;
+		ready = max_t(int,
+			      priv->tx_prod - priv->tx_cons - inuse - 1, 0);
 	} else {
 		/* Check for buffered last packet */
 		status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index baca8f7..c3c1195 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -714,8 +714,10 @@
 
 		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
 				     priv->phy_iface);
-		if (IS_ERR(phydev))
+		if (IS_ERR(phydev)) {
 			netdev_err(dev, "Could not attach to PHY\n");
+			phydev = NULL;
+		}
 
 	} else {
 		int ret;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 4b73131..1b5f591 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2595,11 +2595,6 @@
 		goto err_device_destroy;
 	}
 
-	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
-	/* Make sure we don't have a race with AENQ Links state handler */
-	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
-		netif_carrier_on(adapter->netdev);
-
 	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
 						      adapter->num_queues);
 	if (rc) {
@@ -2616,6 +2611,11 @@
 	}
 
 	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
+	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
+	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+		netif_carrier_on(adapter->netdev);
+
 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
 	dev_err(&pdev->dev, "Device reset completed successfully\n");
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index d272dc6..b40d437 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -431,8 +431,6 @@
 #define MAC_MDIOSCAR_PA_WIDTH		5
 #define MAC_MDIOSCAR_RA_INDEX		0
 #define MAC_MDIOSCAR_RA_WIDTH		16
-#define MAC_MDIOSCAR_REG_INDEX		0
-#define MAC_MDIOSCAR_REG_WIDTH		21
 #define MAC_MDIOSCCDR_BUSY_INDEX	22
 #define MAC_MDIOSCCDR_BUSY_WIDTH	1
 #define MAC_MDIOSCCDR_CMD_INDEX		16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1e929a1..4666084 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1284,6 +1284,20 @@
 	}
 }
 
+static unsigned int xgbe_create_mdio_sca(int port, int reg)
+{
+	unsigned int mdio_sca, da;
+
+	da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+
+	mdio_sca = 0;
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
+
+	return mdio_sca;
+}
+
 static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
 				   int reg, u16 val)
 {
@@ -1291,9 +1305,7 @@
 
 	reinit_completion(&pdata->mdio_complete);
 
-	mdio_sca = 0;
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+	mdio_sca = xgbe_create_mdio_sca(addr, reg);
 	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
 
 	mdio_sccd = 0;
@@ -1317,9 +1329,7 @@
 
 	reinit_completion(&pdata->mdio_complete);
 
-	mdio_sca = 0;
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+	mdio_sca = xgbe_create_mdio_sca(addr, reg);
 	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
 
 	mdio_sccd = 0;
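
xgbe_create_mdio_sca() recovers the clause-45 device address from the register argument: when MII_ADDR_C45 is set, the MMD device address rides above bit 16 of the combined value and the low 16 bits remain the register address. A standalone illustration of that encoding (the MII_ADDR_C45 value matches the kernel convention of this era; the 5-bit mask mirrors the register field width):

	#include <stdio.h>

	#define MII_ADDR_C45	(1 << 30)	/* marks a clause-45 access */

	int main(void)
	{
		/* Encode: MMD device address 1, register 0x0007, clause 45. */
		int reg = MII_ADDR_C45 | (1 << 16) | 0x0007;

		unsigned int da = (reg & MII_ADDR_C45) ? (reg >> 16) & 0x1f : 0;
		unsigned int ra = reg & 0xffff;

		printf("device addr %u, register addr 0x%04x\n", da, ra);
		return 0;
	}
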
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3b889ef..50dd6bf 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -29,9 +29,6 @@
 #define RES_RING_CSR	1
 #define RES_RING_CMD	2
 
-static const struct of_device_id xgene_enet_of_match[];
-static const struct acpi_device_id xgene_enet_acpi_match[];
-
 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
 	struct xgene_enet_raw_desc16 *raw_desc;
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index 7d623e9..c81d231 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -21,4 +21,9 @@
 	---help---
 	  This enables the support for the aQuantia AQtion(tm) Ethernet card.
 
+config AQFWD
+	tristate "aQuantia Forwarding driver"
+	depends on PCI && (X86_64 || ARM64)
+	---help---
+	  This enables support for the forwarding driver for the aQuantia AQtion(tm) Ethernet card.
 endif # NET_VENDOR_AQUANTIA
diff --git a/drivers/net/ethernet/aquantia/Makefile b/drivers/net/ethernet/aquantia/Makefile
index 4f4897b..67b8226 100644
--- a/drivers/net/ethernet/aquantia/Makefile
+++ b/drivers/net/ethernet/aquantia/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_AQTION) += atlantic/
+obj-$(CONFIG_AQFWD)  += atlantic-fwd/
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/Makefile b/drivers/net/ethernet/aquantia/atlantic-fwd/Makefile
new file mode 100644
index 0000000..ca94832
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/Makefile
@@ -0,0 +1,42 @@
+################################################################################
+#
+# aQuantia Ethernet Controller AQtion Linux Driver
+# Copyright(c) 2014-2017 aQuantia Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information: <rdc-drv@aquantia.com>
+# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA
+#
+################################################################################
+
+#
+# Makefile for the AQtion(tm) Ethernet driver
+#
+
+obj-$(CONFIG_AQFWD) += atlantic-fwd.o
+
+atlantic-fwd-objs := atl_fw.o \
+		     atl_hw.o \
+		     atl_main.o \
+		     atl_ring.o \
+		     atl_ethtool.o \
+		     atl_trace.o \
+		     atl_fwd.o \
+		     atl_compat.o \
+		     atl_hwmon.o
+
+CFLAGS_atl_trace.o := -I$(src)
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_common.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_common.h
new file mode 100644
index 0000000..f60358c
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_common.h
@@ -0,0 +1,371 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_COMMON_H_
+#define _ATL_COMMON_H_
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
+
+#define ATL_VERSION "1.0.15"
+
+struct atl_nic;
+
+#include "atl_compat.h"
+#include "atl_hw.h"
+
+#define ATL_MAX_QUEUES 8
+
+#include "atl_fwd.h"
+
+struct atl_rx_ring_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t linear_dropped;
+	uint64_t alloc_skb_failed;
+	uint64_t reused_head_page;
+	uint64_t reused_data_page;
+	uint64_t alloc_head_page;
+	uint64_t alloc_data_page;
+	uint64_t alloc_head_page_failed;
+	uint64_t alloc_data_page_failed;
+	uint64_t non_eop_descs;
+	uint64_t mac_err;
+	uint64_t csum_err;
+	uint64_t multicast;
+};
+
+struct atl_tx_ring_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t tx_busy;
+	uint64_t tx_restart;
+	uint64_t dma_map_failed;
+};
+
+struct atl_ring_stats {
+	union {
+		struct atl_rx_ring_stats rx;
+		struct atl_tx_ring_stats tx;
+	};
+};
+
+struct atl_ether_stats {
+	uint64_t rx_pause;
+	uint64_t tx_pause;
+	uint64_t rx_ether_drops;
+	uint64_t rx_ether_octets;
+	uint64_t rx_ether_pkts;
+	uint64_t rx_ether_broacasts;
+	uint64_t rx_ether_multicasts;
+	uint64_t rx_ether_crc_align_errs;
+	uint64_t rx_filter_host;
+	uint64_t rx_filter_lost;
+};
+
+struct atl_global_stats {
+	struct atl_rx_ring_stats rx;
+	struct atl_tx_ring_stats tx;
+
+	/* MSM counters can't be reset without full HW reset, so
+	 * store them in relative form:
+	 * eth[i] == HW_counter - eth_base[i] */
+	struct atl_ether_stats eth;
+	struct atl_ether_stats eth_base;
+};
+
+enum {
+	ATL_RXF_VLAN_BASE = 0,
+	ATL_RXF_VLAN_MAX = ATL_VLAN_FLT_NUM,
+	ATL_RXF_ETYPE_BASE = ATL_RXF_VLAN_BASE + ATL_RXF_VLAN_MAX,
+	ATL_RXF_ETYPE_MAX = ATL_ETYPE_FLT_NUM,
+	ATL_RXF_NTUPLE_BASE = ATL_RXF_ETYPE_BASE + ATL_RXF_ETYPE_MAX,
+	ATL_RXF_NTUPLE_MAX = ATL_NTUPLE_FLT_NUM,
+};
+
+enum atl_rxf_common_cmd {
+	ATL_RXF_EN = BIT(31),
+	ATL_RXF_RXQ_MSK = BIT(5) - 1,
+	ATL_RXF_ACT_SHIFT = 16,
+	ATL_RXF_ACT_MASK = BIT(3) - 1,
+	ATL_RXF_ACT_TOHOST = BIT(0) << ATL_RXF_ACT_SHIFT,
+};
+
+enum atl_ntuple_cmd {
+	ATL_NTC_EN = ATL_RXF_EN, /* Filter enabled */
+	ATL_NTC_V6 = BIT(30),	/* IPv6 mode -- only valid in filters
+				 * 0 and 4 */
+	ATL_NTC_SA = BIT(29),	/* Match source address */
+	ATL_NTC_DA = BIT(28),	/* Match destination address */
+	ATL_NTC_SP = BIT(27),	/* Match source port */
+	ATL_NTC_DP = BIT(26),	/* Match destination port */
+	ATL_NTC_PROTO = BIT(25), /* Match L4 proto */
+	ATL_NTC_ARP = BIT(24),
+	ATL_NTC_RXQ = BIT(23),	/* Assign Rx queue */
+	ATL_NTC_ACT_SHIFT = ATL_RXF_ACT_SHIFT,
+	ATL_NTC_RXQ_SHIFT = 8,
+	ATL_NTC_RXQ_MASK = ATL_RXF_RXQ_MSK << ATL_NTC_RXQ_SHIFT,
+	ATL_NTC_L4_MASK = BIT(3) - 1,
+	ATL_NTC_L4_TCP = 0,
+	ATL_NTC_L4_UDP = 1,
+	ATL_NTC_L4_SCTP = 2,
+	ATL_NTC_L4_ICMP = 3,
+};
+
+struct atl_rxf_ntuple {
+	union {
+		struct {
+			__be32 dst_ip4[ATL_RXF_NTUPLE_MAX];
+			__be32 src_ip4[ATL_RXF_NTUPLE_MAX];
+		};
+		struct {
+			__be32 dst_ip6[ATL_RXF_NTUPLE_MAX / 4][4];
+			__be32 src_ip6[ATL_RXF_NTUPLE_MAX / 4][4];
+		};
+	};
+	__be16 dst_port[ATL_RXF_NTUPLE_MAX];
+	__be16 src_port[ATL_RXF_NTUPLE_MAX];
+	uint32_t cmd[ATL_RXF_NTUPLE_MAX];
+	int count;
+};
+
+enum atl_vlan_cmd {
+	ATL_VLAN_EN = ATL_RXF_EN,
+	ATL_VLAN_RXQ = BIT(28),
+	ATL_VLAN_RXQ_SHIFT = 20,
+	ATL_VLAN_RXQ_MASK = ATL_RXF_RXQ_MSK << ATL_VLAN_RXQ_SHIFT,
+	ATL_VLAN_ACT_SHIFT = ATL_RXF_ACT_SHIFT,
+	ATL_VLAN_VID_MASK = BIT(12) - 1,
+};
+
+#define ATL_VID_MAP_LEN BITS_TO_LONGS(BIT(12))
+
+struct atl_rxf_vlan {
+	uint32_t cmd[ATL_RXF_VLAN_MAX];
+	int count;
+	unsigned long map[ATL_VID_MAP_LEN];
+	int vlans_active;
+	int promisc_count;
+};
+
+enum atl_etype_cmd {
+	ATL_ETYPE_EN = ATL_RXF_EN,
+	ATL_ETYPE_RXQ = BIT(29),
+	ATL_ETYPE_RXQ_SHIFT = 20,
+	ATL_ETYPE_RXQ_MASK = ATL_RXF_RXQ_MSK << ATL_ETYPE_RXQ_SHIFT,
+	ATL_ETYPE_ACT_SHIFT = ATL_RXF_ACT_SHIFT,
+	ATL_ETYPE_VAL_MASK = BIT(16) - 1,
+};
+
+struct atl_rxf_etype {
+	uint32_t cmd[ATL_RXF_ETYPE_MAX];
+	int count;
+};
+
+struct atl_queue_vec;
+
+#define ATL_NUM_FWD_RINGS ATL_MAX_QUEUES
+#define ATL_FWD_RING_BASE ATL_MAX_QUEUES /* Use TC 1 for offload
+					  * engine rings */
+#define ATL_NUM_MSI_VECS 32
+#define ATL_NUM_NON_RING_IRQS 1
+
+#define ATL_RXF_RING_ANY 32
+
+#define ATL_FWD_MSI_BASE (ATL_MAX_QUEUES + ATL_NUM_NON_RING_IRQS)
+
+enum atl_fwd_dir {
+	ATL_FWDIR_RX = 0,
+	ATL_FWDIR_TX = 1,
+	ATL_FWDIR_NUM,
+};
+
+struct atl_fwd {
+	unsigned long ring_map[ATL_FWDIR_NUM];
+	struct atl_fwd_ring *rings[ATL_FWDIR_NUM][ATL_NUM_FWD_RINGS];
+	unsigned long msi_map;
+};
+
+struct atl_nic {
+	struct net_device *ndev;
+
+	struct atl_queue_vec *qvecs;
+	int nvecs;
+	struct atl_hw hw;
+	unsigned flags;
+	unsigned long state;
+	uint32_t priv_flags;
+	struct timer_list link_timer;
+	int max_mtu;
+	int requested_nvecs;
+	int requested_rx_size;
+	int requested_tx_size;
+	int rx_intr_delay;
+	int tx_intr_delay;
+	struct atl_global_stats stats;
+	spinlock_t stats_lock;
+	struct work_struct work;
+
+	struct atl_fwd fwd;
+
+	struct atl_rxf_ntuple rxf_ntuple;
+	struct atl_rxf_vlan rxf_vlan;
+	struct atl_rxf_etype rxf_etype;
+};
+
+/* Flags only modified with RTNL lock held */
+enum atl_nic_flags {
+	ATL_FL_MULTIPLE_VECTORS = BIT(0),
+	ATL_FL_WOL = BIT(1),
+};
+
+enum atl_nic_state {
+	ATL_ST_UP,
+	ATL_ST_CONFIGURED,
+	ATL_ST_ENABLED,
+	ATL_ST_WORK_SCHED,
+};
+
+#define ATL_PF(_name) ATL_PF_ ## _name
+#define ATL_PF_BIT(_name) ATL_PF_ ## _name ## _BIT
+#define ATL_DEF_PF_BIT(_name) ATL_PF_BIT(_name) = BIT(ATL_PF(_name))
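+/* For example, ATL_DEF_PF_BIT(LPB_SYS_PB) expands to
+ *	ATL_PF_LPB_SYS_PB_BIT = BIT(ATL_PF_LPB_SYS_PB)
+ * pairing each flag index from enum atl_priv_flags with its bit mask in
+ * enum atl_priv_flag_bits below.
+ */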
+
+enum atl_priv_flags {
+	ATL_PF_LPB_SYS_PB,
+	ATL_PF_LPB_SYS_DMA,
+	/* ATL_PF_LPB_NET_DMA, */
+	ATL_PF_LPI_RX_MAC,
+	ATL_PF_LPI_TX_MAC,
+	ATL_PF_LPI_RX_PHY,
+	ATL_PF_LPI_TX_PHY,
+	ATL_PF_STATS_RESET,
+	ATL_PF_STRIP_PAD,
+};
+
+enum atl_priv_flag_bits {
+	ATL_DEF_PF_BIT(LPB_SYS_PB),
+	ATL_DEF_PF_BIT(LPB_SYS_DMA),
+	/* ATL_DEF_PF_BIT(LPB_NET_DMA), */
+
+	ATL_PF_LPB_MASK = ATL_PF_BIT(LPB_SYS_DMA) | ATL_PF_BIT(LPB_SYS_PB)
+		/* | ATL_PF_BIT(LPB_NET_DMA) */,
+
+	ATL_DEF_PF_BIT(LPI_RX_MAC),
+	ATL_DEF_PF_BIT(LPI_TX_MAC),
+	ATL_DEF_PF_BIT(LPI_RX_PHY),
+	ATL_DEF_PF_BIT(LPI_TX_PHY),
+	ATL_PF_LPI_MASK = ATL_PF_BIT(LPI_RX_MAC) | ATL_PF_BIT(LPI_TX_MAC) |
+		ATL_PF_BIT(LPI_RX_PHY) | ATL_PF_BIT(LPI_TX_PHY),
+
+	ATL_DEF_PF_BIT(STATS_RESET),
+
+	ATL_DEF_PF_BIT(STRIP_PAD),
+
+	ATL_PF_RW_MASK = ATL_PF_LPB_MASK | ATL_PF_BIT(STATS_RESET) |
+		ATL_PF_BIT(STRIP_PAD),
+	ATL_PF_RO_MASK = ATL_PF_LPI_MASK,
+};
+
+#define ATL_MAX_MTU (16352 - ETH_FCS_LEN - ETH_HLEN)
+
+#define ATL_MAX_RING_SIZE (8192 - 8)
+#define ATL_RING_SIZE 4096
+
+extern const char atl_driver_name[];
+
+extern const struct ethtool_ops atl_ethtool_ops;
+
+extern int atl_max_queues;
+extern unsigned atl_rx_linear;
+extern unsigned atl_min_intr_delay;
+
+/* Logging convenience macros.
+ *
+ * atl_dev_xxx are for low-level contexts and implicitly reference
+ * struct atl_hw *hw;
+ *
+ * atl_nic_xxx are for high-level contexts and implicitly reference
+ * struct atl_nic *nic; */
+#define atl_dev_dbg(fmt, args...)			\
+	dev_dbg(&hw->pdev->dev, fmt, ## args)
+#define atl_dev_info(fmt, args...)			\
+	dev_info(&hw->pdev->dev, fmt, ## args)
+#define atl_dev_warn(fmt, args...)			\
+	dev_warn(&hw->pdev->dev, fmt, ## args)
+#define atl_dev_err(fmt, args...)			\
+	dev_err(&hw->pdev->dev, fmt, ## args)
+
+#define atl_nic_dbg(fmt, args...)		\
+	dev_dbg(&nic->hw.pdev->dev, fmt, ## args)
+#define atl_nic_info(fmt, args...)		\
+	dev_info(&nic->hw.pdev->dev, fmt, ## args)
+#define atl_nic_warn(fmt, args...)		\
+	dev_warn(&nic->hw.pdev->dev, fmt, ## args)
+#define atl_nic_err(fmt, args...)		\
+	dev_err(&nic->hw.pdev->dev, fmt, ## args)
+
+#define atl_module_param(_name, _type, _mode)			\
+	module_param_named(_name, atl_ ## _name, _type, _mode)
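+/* For example, atl_module_param(rx_linear, uint, 0444) expands to
+ *	module_param_named(rx_linear, atl_rx_linear, uint, 0444)
+ * exposing the atl_-prefixed variable as a module parameter under its
+ * unprefixed name. */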
+
+netdev_tx_t atl_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int atl_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid);
+int atl_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid);
+void atl_set_rx_mode(struct net_device *ndev);
+int atl_set_features(struct net_device *ndev, netdev_features_t features);
+void atl_get_stats64(struct net_device *ndev,
+	struct rtnl_link_stats64 *stats);
+int atl_setup_datapath(struct atl_nic *nic);
+void atl_clear_datapath(struct atl_nic *nic);
+int atl_start_rings(struct atl_nic *nic);
+void atl_stop_rings(struct atl_nic *nic);
+int atl_alloc_rings(struct atl_nic *nic);
+void atl_free_rings(struct atl_nic *nic);
+irqreturn_t atl_ring_irq(int irq, void *priv);
+void atl_start_hw_global(struct atl_nic *nic);
+int atl_intr_init(struct atl_nic *nic);
+void atl_intr_release(struct atl_nic *nic);
+int atl_hw_reset(struct atl_hw *hw);
+int atl_fw_init(struct atl_hw *hw);
+int atl_reconfigure(struct atl_nic *nic);
+void atl_reset_stats(struct atl_nic *nic);
+void atl_update_global_stats(struct atl_nic *nic);
+void atl_set_loopback(struct atl_nic *nic, int idx, bool on);
+void atl_set_intr_mod(struct atl_nic *nic);
+void atl_update_ntuple_flt(struct atl_nic *nic, int idx);
+int atl_hwsem_get(struct atl_hw *hw, int idx);
+void atl_hwsem_put(struct atl_hw *hw, int idx);
+int __atl_msm_read(struct atl_hw *hw, uint32_t addr, uint32_t *val);
+int atl_msm_read(struct atl_hw *hw, uint32_t addr, uint32_t *val);
+int __atl_msm_write(struct atl_hw *hw, uint32_t addr, uint32_t val);
+int atl_msm_write(struct atl_hw *hw, uint32_t addr, uint32_t val);
+int atl_update_eth_stats(struct atl_nic *nic);
+void atl_adjust_eth_stats(struct atl_ether_stats *stats,
+	struct atl_ether_stats *base, bool add);
+void atl_fwd_release_rings(struct atl_nic *nic);
+int atl_get_lpi_timer(struct atl_nic *nic, uint32_t *lpi_delay);
+int atl_mdio_hwsem_get(struct atl_hw *hw);
+void atl_mdio_hwsem_put(struct atl_hw *hw);
+int __atl_mdio_read(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t *val);
+int atl_mdio_read(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t *val);
+int __atl_mdio_write(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t val);
+int atl_mdio_write(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t val);
+void atl_refresh_rxfs(struct atl_nic *nic);
+void atl_schedule_work(struct atl_nic *nic);
+int atl_hwmon_init(struct atl_nic *nic);
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.c
new file mode 100644
index 0000000..62a56aa
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.c
@@ -0,0 +1,157 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * Portions Copyright (C) various contributors (see specific commit references)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include "atl_common.h"
+#include "atl_ring.h"
+#include <linux/msi.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+
+#ifdef ATL_COMPAT_PCI_IRQ_VECTOR
+/* From commit aff171641d181ea573380efc3f559c9de4741fc5 */
+int atl_compat_pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+	if (dev->msix_enabled) {
+		struct msi_desc *entry;
+		int i = 0;
+
+		for_each_pci_msi_entry(entry, dev) {
+			if (i == nr)
+				return entry->irq;
+			i++;
+		}
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	if (dev->msi_enabled) {
+		struct msi_desc *entry = first_pci_msi_entry(dev);
+
+		if (WARN_ON_ONCE(nr >= entry->nvec_used))
+			return -EINVAL;
+	} else {
+		if (WARN_ON_ONCE(nr > 0))
+			return -EINVAL;
+	}
+
+	return dev->irq + nr;
+}
+
+#endif
+
+#ifdef ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY
+
+void atl_compat_set_affinity(int vector, struct atl_queue_vec *qvec)
+{
+	cpumask_t *cpumask = qvec ? &qvec->affinity_hint : NULL;
+
+	irq_set_affinity_hint(vector, cpumask);
+}
+
+void atl_compat_calc_affinities(struct atl_nic *nic)
+{
+	struct pci_dev *pdev = nic->hw.pdev;
+	int i;
+	unsigned int cpu;
+
+	get_online_cpus();
+	cpu = cpumask_first(cpu_online_mask);
+
+	for (i = 0; i < nic->nvecs; i++) {
+		cpumask_t *cpumask = &nic->qvecs[i].affinity_hint;
+
+		/* If some cpus went offline since allocating
+		 * vectors, leave the remaining vectors' affinity
+		 * unset.
+		 */
+		if (cpu >= nr_cpumask_bits)
+			break;
+
+		cpumask_clear(cpumask);
+		cpumask_set_cpu(cpu, cpumask);
+		cpu = cpumask_next(cpu, cpu_online_mask);
+	}
+	put_online_cpus();
+}
+
+/* from commit 6f9a22bc5775d231ab8fbe2c2f3c88e45e3e7c28 */
+static int irq_calc_affinity_vectors(int minvec, int maxvec,
+	const struct irq_affinity *affd)
+{
+	int resv = affd->pre_vectors + affd->post_vectors;
+	int vecs = maxvec - resv;
+	int cpus;
+
+	if (resv > minvec)
+		return 0;
+
+	/* Stabilize the cpumasks */
+	get_online_cpus();
+	cpus = cpumask_weight(cpu_online_mask);
+	put_online_cpus();
+
+	return min(cpus, vecs) + resv;
+}
+
+/* based on commit 402723ad5c625ee052432698ae5e56b02d38d4ec */
+int atl_compat_pci_alloc_irq_vectors_affinity(struct pci_dev *dev,
+	unsigned int min_vecs, unsigned int max_vecs, unsigned int flags,
+	const struct irq_affinity *affd)
+{
+	static const struct irq_affinity msi_default_affd;
+	int vecs = -ENOSPC;
+
+	if (flags & PCI_IRQ_AFFINITY) {
+		if (!affd)
+			affd = &msi_default_affd;
+	} else {
+		if (WARN_ON(affd))
+			affd = NULL;
+	}
+
+	if (affd)
+		max_vecs = irq_calc_affinity_vectors(min_vecs, max_vecs, affd);
+
+	if (flags & PCI_IRQ_MSIX) {
+		struct msix_entry *entries;
+		int i;
+
+		entries = kcalloc(max_vecs, sizeof(*entries), GFP_KERNEL);
+		if (!entries)
+			return -ENOMEM;
+
+		for (i = 0; i < max_vecs; i++)
+			entries[i].entry = i;
+
+		vecs = pci_enable_msix_range(dev, entries, min_vecs, max_vecs);
+		kfree(entries);
+		if (vecs > 0)
+			return vecs;
+	}
+
+	if (flags & PCI_IRQ_MSI) {
+		vecs = pci_enable_msi_range(dev, min_vecs, max_vecs);
+		if (vecs > 0)
+			return vecs;
+	}
+
+	/* use legacy irq if allowed */
+	if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) {
+		pci_intx(dev, 1);
+		return 1;
+	}
+
+	return vecs;
+}
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.h
new file mode 100644
index 0000000..0ec20a3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.h
@@ -0,0 +1,177 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * Portions Copyright (C) various contributors (see specific commit references)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_COMPAT_H_
+#define _ATL_COMPAT_H_
+
+#include <linux/version.h>
+
+#include <linux/pci.h>
+#include <linux/msi.h>
+
+struct atl_queue_vec;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
+
+/* introduced in commit 686fef928bba6be13cabe639f154af7d72b63120 */
+static inline void timer_setup(struct timer_list *timer,
+	void (*callback)(struct timer_list *), unsigned int flags)
+{
+	setup_timer(timer, (void (*)(unsigned long))callback,
+			(unsigned long)timer);
+}
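+/* Usage sketch (illustrative): with this shim, callbacks written in the
+ * new style still receive the timer_list pointer and can recover the
+ * enclosing object with container_of(), e.g.:
+ *
+ *	static void my_link_timer_fn(struct timer_list *t)
+ *	{
+ *		struct atl_nic *nic =
+ *			container_of(t, struct atl_nic, link_timer);
+ *		...
+ *	}
+ */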
+
+#endif	/* 4.14.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+
+/* ->ndo_get_stats64 return type was changed to void in commit
+ * bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221. It's safe to just cast
+ * the pointer to avoid the warning because the only place
+ * ->ndo_get_stats64 was invoked before the change ignored the return
+ * value. */
+#define ATL_COMPAT_CAST_NDO_GET_STATS64
+
+#endif	/* 4.11.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)
+
+/* introduced in commit 94842b4fc4d6b1691cfc86c6f5251f299d27f4ba */
+#define ETHTOOL_LINK_MODE_2500baseT_Full_BIT 47
+#define ETHTOOL_LINK_MODE_5000baseT_Full_BIT 48
+
+/* from commit 20e407e195b29a4f5a18d713a61f54a75f992bd5 */
+struct irq_affinity {
+	int	pre_vectors;
+	int	post_vectors;
+};
+
+#define ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY
+struct atl_nic;
+int atl_compat_pci_alloc_irq_vectors_affinity(struct pci_dev *dev,
+	unsigned int min_vecs, unsigned int max_vecs, unsigned int flags,
+	const struct irq_affinity *affd);
+static inline int pci_alloc_irq_vectors_affinity(struct pci_dev *dev,
+	unsigned int min_vecs, unsigned int max_vecs, unsigned int flags,
+	const struct irq_affinity *affd)
+{
+	return atl_compat_pci_alloc_irq_vectors_affinity(dev, min_vecs,
+		max_vecs, flags, affd);
+}
+
+#else  /* 4.10.0 */
+
+#define ATL_HAVE_MINMAX_MTU
+
+#endif	/* 4.10.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+
+/* from commit be9d2e8927cef02076bb7b5b2637cd9f4be2e8df */
+static inline int
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+{
+	return pci_request_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+#define ATL_COMPAT_PCI_IRQ_VECTOR
+int atl_compat_pci_irq_vector(struct pci_dev *dev, unsigned int nr);
+static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+	return atl_compat_pci_irq_vector(dev, nr);
+}
+
+static inline void pci_free_irq_vectors(struct pci_dev *dev)
+{
+	pci_disable_msix(dev);
+	pci_disable_msi(dev);
+}
+
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+		      unsigned int max_vecs, unsigned int flags)
+{
+	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
+					      NULL);
+}
+
+/* from commit 4fe0d154880bb6eb833cbe84fa6f385f400f0b9c */
+#define PCI_IRQ_LEGACY		(1 << 0) /* allow legacy interrupts */
+#define PCI_IRQ_MSI		(1 << 1) /* allow MSI interrupts */
+#define PCI_IRQ_MSIX		(1 << 2) /* allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY	(1 << 3) /* auto-assign affinity */
+
+#endif /* 4.8.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+
+/* from commit 1dff8083a024650c75a9c961c38082473ceae8cf */
+#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
+#endif	/* 4.7.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)
+
+/* from commit fe896d1878949ea92ba547587bc3075cc688fb8f */
+static inline void page_ref_inc(struct page *page)
+{
+	atomic_inc(&page->_count);
+}
+
+/* introduced in commit 795bb1c00dd338aa0d12f9a7f1f4776fb3160416 */
+#define napi_consume_skb(__skb, __budget) dev_consume_skb_any(__skb)
+
+/* from commit 3f1ac7a700d039c61d8d8b99f28d605d489a60cf */
+#define ETHTOOL_LINK_MODE_100baseT_Full_BIT 3
+#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
+#define ETHTOOL_LINK_MODE_10000baseT_Full_BIT 12
+
+/* IPv6 NFC API introduced in commit
+ * 72bb68721f80a1441e871b6afc9ab0b3793d5031 */
+
+/* Define the IPv6 constants for kernels not supporting IPv6 in the
+ * NFC API to reduce the number of #ifdefs in the code. The constants
+ * themselves may already be defined for RSS hash management API, so
+ * #undef them first */
+#undef TCP_V6_FLOW
+#define TCP_V6_FLOW 0x05
+
+#undef UDP_V6_FLOW
+#define UDP_V6_FLOW 0x06
+
+#undef SCTP_V6_FLOW
+#define SCTP_V6_FLOW 0x07
+
+#undef IPV6_USER_FLOW
+#define IPV6_USER_FLOW 0x0e
+#define IPV4_USER_FLOW IP_USER_FLOW
+
+#else
+
+/* introduced in commit 3f1ac7a700d039c61d8d8b99f28d605d489a60cf */
+#define ATL_HAVE_ETHTOOL_KSETTINGS
+
+/* introduced in commit 72bb68721f80a1441e871b6afc9ab0b3793d5031 */
+#define ATL_HAVE_IPV6_NTUPLE
+
+#endif	/* 4.6.0 */
+
+#ifdef ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY
+void atl_compat_set_affinity(int vector, struct atl_queue_vec *qvec);
+void atl_compat_calc_affinities(struct atl_nic *nic);
+#else  /* ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY */
+static inline void atl_compat_set_affinity(int vector, struct atl_queue_vec *qvec)
+{}
+static inline void atl_compat_calc_affinities(struct atl_nic *nic)
+{}
+#endif	/* ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY */
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_desc.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_desc.h
new file mode 100644
index 0000000..63923ef
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_desc.h
@@ -0,0 +1,143 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_DESC_H_
+#define _ATL_DESC_H_
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+struct atl_tx_ctx {
+	unsigned long long :40; //0
+	unsigned tun_len:8;     //40
+	unsigned out_len:16;    //48
+	unsigned type:3;        //64
+	unsigned idx:1;         //67
+	unsigned vlan_tag:16;   //68
+	unsigned cmd:4;         //84
+	unsigned l2_len:7;      //88
+	unsigned l3_len:9;      //95
+	unsigned l4_len:8;      //104
+	unsigned mss_len:16;    //112
+} __attribute__((packed));
+
+struct atl_tx_desc {
+	unsigned long long daddr:64; //0
+	unsigned type:3;        //64
+	unsigned :1;            //67
+	unsigned len:16;        //68
+	unsigned dd:1;          //84
+	unsigned eop:1;         //85
+	unsigned cmd:8;         //86
+	unsigned :14;           //94
+	unsigned ct_idx:1;      //108
+	unsigned ct_en:1;       //109
+	unsigned pay_len:18;    //110
+} __attribute__((packed));
+
+#define ATL_DATA_PER_TXD 16384 // despite ->len being 16 bits
+
+enum atl_tx_desc_type {
+	tx_desc_type_desc = 1,
+	tx_desc_type_context = 2,
+};
+
+enum atl_tx_desc_cmd {
+	tx_desc_cmd_vlan = 1,
+	tx_desc_cmd_fcs = 2,
+	tx_desc_cmd_ipv4cs = 4,
+	tx_desc_cmd_l4cs = 8,
+	tx_desc_cmd_lso = 0x10,
+	tx_desc_cmd_wb = 0x20,
+};
+
+enum atl_tx_ctx_cmd {
+	ctx_cmd_snap = 1, // SNAP / ~802.3
+	ctx_cmd_ipv6 = 2, // IPv6 / ~IPv4
+	ctx_cmd_tcp = 4,  // TCP / ~UDP
+};
+
+struct atl_rx_desc {
+	uint64_t daddr;      			//0
+	union {
+		struct {
+			unsigned dd:1;		//64
+			uint64_t haddr63:63;	//65
+		};
+		uint64_t haddr;
+	};
+} __attribute__((packed));
+
+struct atl_rx_desc_wb {
+	unsigned rss_type:4;    //0
+	unsigned pkt_type:8;    //4
+	unsigned rdm_err:1;     //12
+	unsigned :6;            //13
+	unsigned rx_cntl:2;     //19
+	unsigned sph:1;         //21
+	unsigned hdr_len:10;    //22
+	unsigned rss_hash:32;   //32
+	unsigned dd:1;          //64
+	unsigned eop:1;         //65
+	unsigned rx_stat:4;     //66
+	unsigned rx_estat:6;    //70
+	unsigned rsc_cnt:4;     //76
+	unsigned pkt_len:16;    //80
+	unsigned next_desp:16;  //96
+	unsigned vlan_tag:16;   //112
+} __attribute__((packed));
+
+enum atl_rx_stat {
+	atl_rx_stat_mac_err = 1,
+	atl_rx_stat_ipv4_err = 2,
+	atl_rx_stat_l4_err = 4,
+	atl_rx_stat_l4_valid = 8,
+	atl_rx_stat_err_msk = atl_rx_stat_mac_err | atl_rx_stat_ipv4_err |
+		atl_rx_stat_l4_err,
+};
+
+enum atl_rx_estat {
+	atl_rx_estat_vlan_stripped = 1,
+	atl_rx_estat_l2_ucast_match = 2,
+	atl_rx_estat_vxlan = 1 << 2,
+	atl_rx_estat_nvgre = 2 << 2,
+	atl_rx_estat_geneve = 3 << 2,
+	atl_rx_estat_tun_msk = 3 << 2,
+	atl_rx_estat_outer_ipv4_err = 16,
+	atl_rx_estat_outer_ipv4_valid = 32,
+};
+
+enum atl_rx_pkt_type {
+	atl_rx_pkt_type_ipv4 = 0,
+	atl_rx_pkt_type_ipv6 = 1,
+	atl_rx_pkt_type_l3_other = 2,
+	atl_rx_pkt_type_l3_arp = 3,
+	atl_rx_pkt_type_l3_msk = 3,
+	atl_rx_pkt_type_tcp = 0 << 2,
+	atl_rx_pkt_type_udp = 1 << 2 ,
+	atl_rx_pkt_type_sctp = 2 << 2,
+	atl_rx_pkt_type_icmp = 3 << 2,
+	atl_rx_pkt_type_l4_msk = ((1 << 3) - 1) << 2,
+	atl_rx_pkt_type_vlan = 1 << 5,
+	atl_rx_pkt_type_dbl_vlan = 2 << 5,
+	atl_rx_pkt_type_vlan_msk = ((1 << 2) - 1) << 5,
+};
+
+#else // defined(__LITTLE_ENDIAN_BITFIELD)
+#error XXX Fix bigendian bitfields
+#endif // defined(__LITTLE_ENDIAN_BITFIELD)
+
+union atl_desc {
+	struct atl_rx_desc rx;
+	struct atl_rx_desc_wb wb;
+	struct atl_tx_ctx ctx;
+	struct atl_tx_desc tx;
+	uint8_t raw[16];
+} __attribute__((packed));
+
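+/* Illustrative compile-time check, not in the original source: every
+ * view of the union must be exactly 16 bytes, matching raw[16].  Assumes
+ * the includer provides BUILD_BUG_ON (e.g. via linux/kernel.h).
+ */
+static inline void atl_desc_check_size(void)
+{
+	BUILD_BUG_ON(sizeof(union atl_desc) != 16);
+}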
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_drviface.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_drviface.h
new file mode 100644
index 0000000..02449e8
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_drviface.h
@@ -0,0 +1,419 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_DRVIFACE_H_
+#define _ATL_DRVIFACE_H_
+
+typedef uint16_t in_port_t;
+typedef uint32_t in_addr_t;
+
+struct __attribute__((packed)) offloadKAv4 {
+    uint32_t timeout;
+    in_port_t local_port;
+    in_port_t remote_port;
+    uint8_t remote_mac_addr[6];
+    uint16_t win_size;
+    uint32_t seq_num;
+    uint32_t ack_num;
+    in_addr_t local_ip;
+    in_addr_t remote_ip;
+};
+
+struct __attribute__((packed)) offloadKAv6 {
+    uint32_t timeout;
+    in_port_t local_port;
+    in_port_t remote_port;
+    uint8_t remote_mac_addr[6];
+    uint16_t win_size;
+    uint32_t seq_num;
+    uint32_t ack_num;
+    struct in6_addr local_ip;
+    struct in6_addr remote_ip;
+};
+
+struct __attribute__((packed)) offloadIPInfo {
+    uint8_t v4LocalAddrCount;
+    uint8_t v4AddrCount;
+    uint8_t v6LocalAddrCount;
+    uint8_t v6AddrCount;
+    // FW will add the base to the following offset fields and will treat them as pointers.
+    // The offsets are relative to the start of this struct so that the struct is pretty much self-contained
+    // in_addr_t *
+    uint32_t v4AddrOfft;
+    // uint8_t *
+    uint32_t v4PrefixOfft;
+    // in6_addr *
+    uint32_t v6AddrOfft;
+    // uint8_t *
+    uint32_t v6PrefixOfft;
+};
+
+struct __attribute__((packed)) offloadPortInfo {
+    uint16_t UDPPortCount;
+    uint16_t TCPPortCount;
+    // in_port_t *
+    uint32_t UDPPortOfft;       // See the comment in the offloadIPInfo struct
+    // in_port_t *
+    uint32_t TCPPortOfft;
+};
+
+struct __attribute__((packed))  offloadKAInfo {
+    uint16_t v4KACount;
+    uint16_t v6KACount;
+    uint32_t retryCount;
+    uint32_t retryInterval;
+    // struct offloadKAv4 *
+    uint32_t v4KAOfft;          // See the comment in the offloadIPInfo struct
+    // struct offloadKAv6 *
+    uint32_t v6KAOfft;
+};
+
+struct  __attribute__((packed)) offloadRRInfo {
+    uint32_t RRCount;
+    uint32_t RRBufLen;
+    // Offset to RR index table relative to the start of offloadRRInfo struct. The indices
+    // themselves are relative to the start of RR buffer. FW will add the buffer address
+    // and will treat them as pointers.
+    // uint8_t **
+    uint32_t RRIdxOfft;
+    // Offset to the RR buffer relative to the start of offloadRRInfo struct.
+    // uint8_t *
+    uint32_t RRBufOfft;
+};
+
+struct __attribute__((packed)) offloadInfo {
+    uint32_t version;               // = 0 until the layout stabilizes
+    uint32_t len;                   // The whole structure length including the variable-size buf
+    uint8_t macAddr[8];
+    struct offloadIPInfo ips;
+    struct offloadPortInfo ports;
+    struct offloadKAInfo kas;
+    struct offloadRRInfo rrs;
+    uint8_t buf[0];
+};
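+/* Sketch of resolving an offset field into a host-side pointer
+ * (hypothetical helper, not part of the interface).  Per the comments
+ * above, offsets are taken relative to the start of the sub-struct that
+ * holds them.
+ */
+static inline in_addr_t *offload_v4_addrs_sketch(struct offloadInfo *info)
+{
+    return (in_addr_t *)((uint8_t *)&info->ips + info->ips.v4AddrOfft);
+}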
+
+#define FW_PACK_STRUCT __attribute__((packed))
+
+#define DRV_REQUEST_SIZE 3072
+#define DRV_MSG_PING            0x01
+#define DRV_MSG_ARP             0x02
+#define DRV_MSG_INJECT          0x03
+#define DRV_MSG_WOL_ADD         0x04
+#define DRV_MSG_WOL_REMOVE      0x05
+#define DRV_MSG_ENABLE_WAKEUP   0x06
+#define DRV_MSG_MSM             0x07
+#define DRV_MSG_PROVISIONING    0x08
+#define DRV_MSG_OFFLOAD_ADD     0x09
+#define DRV_MSG_OFFLOAD_REMOVE  0x0A
+#define DRV_MSG_MSM_EX          0x0B
+#define DRV_MSG_SMBUS_PROXY     0x0C
+
+#define DRV_PROV_APPLY         1
+#define DRV_PROV_REPLACE       2
+#define DRV_PROV_ADD           3
+
+#define FW_RPC_INJECT_PACKET_LEN 1514U
+
+typedef enum {
+    EVENT_DRIVER_ENABLE_WOL
+} eDriverEvent;
+
+//typedef enum {
+//    HOST_UNINIT = 0,
+//    HOST_RESET,
+//    HOST_INIT,
+//    HOST_RESERVED,
+//    HOST_SLEEP,
+//    HOST_INVALID
+//} hostState_t;
+
+struct drvMsgPing {
+    uint32_t ping;
+} FW_PACK_STRUCT;
+
+union IPAddr {
+    struct
+    {
+        uint8_t addr[16];
+    } FW_PACK_STRUCT v6;
+    struct
+    {
+        uint8_t padding[12];
+        uint8_t addr[4];
+    } FW_PACK_STRUCT v4;
+} FW_PACK_STRUCT;
+
+struct drvMsgArp {
+    uint8_t macAddr[6];
+    uint32_t uIpAddrCnt;
+    struct
+    {
+        union IPAddr addr;
+        union IPAddr mask;
+    } FW_PACK_STRUCT ip[1];
+} FW_PACK_STRUCT;
+
+struct drvMsgInject {
+    uint32_t len;
+    uint8_t packet[FW_RPC_INJECT_PACKET_LEN];
+} FW_PACK_STRUCT;
+
+enum ndisPmWoLPacket {
+    ndisPMWoLPacketUnspecified = 0,
+    ndisPMWoLPacketBitmapPattern,
+    ndisPMWoLPacketMagicPacket,
+    ndisPMWoLPacketIPv4TcpSyn,
+    ndisPMWoLPacketIPv6TcpSyn,
+    ndisPMWoLPacketEapolRequestIdMessage,
+    ndisPMWoLPacketMaximum
+};
+
+enum aqPmWoLPacket {
+    aqPMWoLPacketUnspecified = 0x10000,
+    aqPMWoLPacketArp,
+    aqPMWoLPacketIPv4Ping,
+    aqPMWoLPacketIpv6NsPacket,
+    aqPMWoLPacketIpv6Ping,
+    aqPMWoLReasonLinkUp,
+    aqPMWoLReasonLinkDown,
+    aqPMWoLPacketMaximum
+};
+
+enum ndisPmProtocolOffloadType {
+    ndisPMProtocolOffloadIdUnspecified,
+    ndisPMProtocolOffloadIdIPv4ARP,
+    ndisPMProtocolOffloadIdIPv6NS,
+    ndisPMProtocolOffload80211RSNRekey,
+    ndisPMProtocolOffloadIdMaximum
+};
+
+struct drvMsgEnableWakeup {
+    uint32_t patternMaskWindows;
+    uint32_t patternMaskAquantia;
+    uint32_t patternMaskOther;
+    uint32_t offloadsMaskWindows;
+    uint32_t offloadsMaskAquantia;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddIpv4TcpSynWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4SourceAddress;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4DestAddress;
+    union {
+        uint8_t v8[2];
+        uint16_t v16;
+    } TCPSourcePortNumber;
+    union {
+        uint8_t v8[2];
+        uint16_t v16;
+    } TCPDestPortNumber;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddIpv6TcpSynWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } IPv6SourceAddress;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } IPv6DestAddress;
+    union {
+        uint8_t v8[2];
+        uint16_t v16;
+    } TCPSourcePortNumber;
+    union {
+        uint8_t v8[2];
+        uint16_t v16;
+    } TCPDestPortNumber;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddIpv4PingWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4SourceAddress;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4DestAddress;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddIpv6PingWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } IPv6SourceAddress;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } IPv6DestAddress;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddEapolRequestIdMessageWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4SourceAddress;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4DestAddress;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddBitmapPattern {
+    uint32_t Flags;
+    uint32_t MaskOffset;
+    uint32_t MaskSize;
+    uint32_t PatternOffset;
+    uint32_t PatternSize;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddMagicPacketPattern {
+    uint8_t macAddr[6];
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddArpWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4Address;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddLinkUpWoLParameters {
+    uint32_t timeout;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddLinkDownWoLParameters {
+    uint32_t timeout;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAdd {
+    uint32_t priority; // Currently not used
+    uint32_t packetType; // One of ndisPmWoLPacket or aqPmWoLPacket
+    uint32_t patternId; // Id to save - will be used in remove message
+    uint32_t nextWoLPatternOffset; // For chaining multiple additions in one request
+
+    // Which member is valid depends on packetType above
+    union _WOL_PATTERN {
+        struct drvMsgWoLAddIpv4TcpSynWoLPacketParameters wolIpv4TcpSyn;
+        struct drvMsgWoLAddIpv6TcpSynWoLPacketParameters wolIpv6TcpSyn;
+        struct drvMsgWoLAddEapolRequestIdMessageWoLPacketParameters wolEapolRequestIdMessage;
+        struct drvMsgWoLAddBitmapPattern wolBitmap;
+        struct drvMsgWoLAddMagicPacketPattern wolMagicPacket;
+        struct drvMsgWoLAddIpv4PingWoLPacketParameters wolIpv4Ping;
+        struct drvMsgWoLAddIpv6PingWoLPacketParameters wolIpv6Ping;
+        struct drvMsgWoLAddArpWoLPacketParameters wolArp;
+        struct drvMsgWoLAddLinkUpWoLParameters wolLinkUpReason;
+        struct drvMsgWoLAddLinkDownWoLParameters wolLinkDownReason;
+    } wolPattern;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLRemove {
+    uint32_t id;
+} FW_PACK_STRUCT;
+
+struct ipv4ArpParameters {
+    uint32_t flags;
+    uint8_t remoteIPv4Address[4];
+    uint8_t hostIPv4Address[4];
+    uint8_t macAddress[6];
+} FW_PACK_STRUCT;
+
+struct ipv6NsParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } remoteIPv6Address;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } solicitedNodeIPv6Address;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } targetIPv6Addresses[2];
+    uint8_t macAddress[6];
+} FW_PACK_STRUCT;
+
+struct drvMsgOffloadAdd {
+    uint32_t priority;
+    uint32_t protocolOffloadType;
+    uint32_t protocolOffloadId;
+    uint32_t nextProtocolOffloadOffset;
+    union {
+        struct ipv4ArpParameters ipv4Arp;
+        struct ipv6NsParameters ipv6Ns;
+    } wolOffload;
+} FW_PACK_STRUCT;
+
+struct drvMsgOffloadRemove {
+    uint32_t id;
+} FW_PACK_STRUCT;
+
+struct drvMsmSettings {
+    uint32_t msmReg054;
+    uint32_t msmReg058;
+    uint32_t msmReg05c;
+    uint32_t msmReg060;
+    uint32_t msmReg064;
+    uint32_t msmReg068;
+    uint32_t msmReg06c;
+    uint32_t msmReg070;
+    uint32_t flags;     // Valid for message DRV_MSG_MSM_EX only
+} FW_PACK_STRUCT;
+
+//struct drvMsgProvisioning {
+//    uint32_t command;
+//    uint32_t len;
+//    provList_t list;
+//} FW_PACK_STRUCT;
+
+//struct drvMsgSmbusProxy {
+//    uint32_t typeMsg;
+//    union {
+//        struct smbusProxyWrite smbWrite;
+//        struct smbusProxyRead smbRead;
+//        struct smbusProxyGetStatus smbStatus;
+//        struct smbusProxyReadResp smbReadResp;
+//    } FW_PACK_STRUCT;
+//} FW_PACK_STRUCT;
+
+struct drvIface {
+    uint32_t msgId;
+
+    union {
+        struct drvMsgPing msgPing;
+        struct drvMsgArp msgArp;
+        struct drvMsgInject msgInject;
+        struct drvMsgWoLAdd msgWoLAdd;
+        struct drvMsgWoLRemove msgWoLRemove;
+        struct drvMsgEnableWakeup msgEnableWakeup;
+        struct drvMsmSettings msgMsm;
+//        struct drvMsgProvisioning msgProvisioning;
+        struct drvMsgOffloadAdd msgOffloadAdd;
+        struct drvMsgOffloadRemove msgOffloadRemove;
+//        struct drvMsgSmbusProxy msgSmbusProxy;
+        struct offloadInfo fw2xOffloads;
+    } FW_PACK_STRUCT;
+} FW_PACK_STRUCT;
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.c
new file mode 100644
index 0000000..624a6e3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.c
@@ -0,0 +1,2003 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/ethtool.h>
+
+#include "atl_common.h"
+#include "atl_ring.h"
+
+static uint32_t atl_ethtool_get_link(struct net_device *ndev)
+{
+	return ethtool_op_get_link(ndev);
+}
+
+static void atl_link_to_kernel(unsigned int bits, unsigned long *kernel,
+	bool legacy)
+{
+	struct atl_link_type *type;
+	int i;
+
+	atl_for_each_rate(i, type) {
+		if (legacy && type->ethtool_idx > 31)
+			continue;
+
+		if (bits & BIT(i))
+			__set_bit(type->ethtool_idx, kernel);
+	}
+}
+
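+/* The translation below is shared between the legacy get_settings and
+ * the newer get_ksettings paths, which use different mode-set types; a
+ * macro (rather than a function) lets the atl_add_link_* helpers expand
+ * differently depending on which API the caller defines them for.
+ */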
+#define atl_ethtool_get_common(base, modes, lstate, legacy)		\
+do {									\
+	struct atl_fc_state *fc = &(lstate)->fc;			\
+	(base)->port = PORT_TP;						\
+	(base)->duplex = DUPLEX_FULL;					\
+	(base)->autoneg = AUTONEG_DISABLE;				\
+	(base)->eth_tp_mdix = ETH_TP_MDI_INVALID;			\
+	(base)->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;			\
+									\
+	atl_add_link_supported(modes, Autoneg);				\
+	atl_add_link_supported(modes, TP);				\
+	atl_add_link_supported(modes, Pause);				\
+	atl_add_link_supported(modes, Asym_Pause);			\
+	atl_add_link_advertised(modes, TP);				\
+	atl_add_link_lpadvertised(modes, Autoneg);			\
+									\
+	if (lstate->autoneg) {						\
+		(base)->autoneg = AUTONEG_ENABLE;			\
+		atl_add_link_advertised(modes, Autoneg);		\
+	}								\
+									\
+	if (fc->req & atl_fc_rx)					\
+		atl_add_link_advertised(modes, Pause);			\
+									\
+	if (!!(fc->req & atl_fc_rx) ^ !!(fc->req & atl_fc_tx))		\
+		atl_add_link_advertised(modes, Asym_Pause);		\
+									\
+	if (fc->cur & atl_fc_rx)					\
+		atl_add_link_lpadvertised(modes, Pause);		\
+									\
+	if (!!(fc->cur & atl_fc_rx) ^ !!(fc->cur & atl_fc_tx))		\
+		atl_add_link_lpadvertised(modes, Asym_Pause);		\
+									\
+	atl_link_to_kernel((lstate)->supported,				\
+		(unsigned long *)&(modes)->link_modes.supported,	\
+		legacy);						\
+	atl_link_to_kernel((lstate)->advertized,			\
+		(unsigned long *)&(modes)->link_modes.advertising,	\
+		legacy);						\
+	atl_link_to_kernel((lstate)->lp_advertized,			\
+		(unsigned long *)&(modes)->link_modes.lp_advertising,	\
+		legacy);						\
+} while (0)
+
+#define atl_add_link_supported(ptr, mode) \
+	atl_add_link_mode(ptr, SUPPORTED, supported, mode)
+
+#define atl_add_link_advertised(ptr, mode) \
+	atl_add_link_mode(ptr, ADVERTISED, advertising, mode)
+
+#define atl_add_link_lpadvertised(ptr, mode) \
+	atl_add_link_mode(ptr, ADVERTISED, lp_advertising, mode)
+
+#ifndef ATL_HAVE_ETHTOOL_KSETTINGS
+
+struct atl_ethtool_compat {
+	struct {
+		unsigned long supported;
+		unsigned long advertising;
+		unsigned long lp_advertising;
+	} link_modes;
+};
+
+#define atl_add_link_mode(ptr, nameuc, namelc, mode)	\
+	do { \
+		(ptr)->link_modes.namelc |= nameuc ## _ ## mode; \
+	} while (0)
+
+static int atl_ethtool_get_settings(struct net_device *ndev,
+				 struct ethtool_cmd *cmd)
+{
+	struct atl_ethtool_compat cmd_compat = {0};
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_link_state *lstate = &nic->hw.link_state;
+
+	atl_ethtool_get_common(cmd, &cmd_compat, lstate, true);
+	cmd->supported = cmd_compat.link_modes.supported;
+	cmd->advertising = cmd_compat.link_modes.advertising;
+	cmd->lp_advertising = cmd_compat.link_modes.lp_advertising;
+
+	ethtool_cmd_speed_set(cmd, lstate->link ? lstate->link->speed : 0);
+
+	return 0;
+}
+
+#else
+
+#define atl_add_link_mode(ptr, nameuc, namelc, mode)	\
+	ethtool_link_ksettings_add_link_mode(ptr, namelc, mode)
+
+static int atl_ethtool_get_ksettings(struct net_device *ndev,
+	struct ethtool_link_ksettings *cmd)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_link_state *lstate = &nic->hw.link_state;
+
+	ethtool_link_ksettings_zero_link_mode(cmd, supported);
+	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+	ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+
+	atl_ethtool_get_common(&cmd->base, cmd, lstate, false);
+
+	cmd->base.speed = lstate->link ? lstate->link->speed : 0;
+
+	return 0;
+}
+
+#endif
+
+#undef atl_add_link_supported
+#undef atl_add_link_advertised
+#undef atl_add_link_lpadvertised
+#undef atl_add_link_mode
+
+static unsigned int atl_kernel_to_link(const unsigned long int *bits,
+	bool legacy)
+{
+	unsigned int ret = 0;
+	int i;
+	struct atl_link_type *type;
+
+	atl_for_each_rate(i, type) {
+		if (legacy && type->ethtool_idx > 31)
+			continue;
+
+		if (test_bit(type->ethtool_idx, bits))
+			ret |= BIT(i);
+	}
+
+	return ret;
+}
+
+static int atl_set_fixed_speed(struct atl_hw *hw, unsigned int speed)
+{
+	struct atl_link_state *lstate = &hw->link_state;
+	struct atl_link_type *type;
+	int i;
+
+	atl_for_each_rate(i, type)
+		if (type->speed == speed) {
+			if (!(lstate->supported & BIT(i)))
+				return -EINVAL;
+
+			lstate->advertized = BIT(i);
+			break;
+		}
+
+	lstate->autoneg = false;
+	hw->mcp.ops->set_link(hw, false);
+	return 0;
+}
+
+#define atl_ethtool_set_common(base, lstate, advertise, tmp, legacy, speed) \
+do {									\
+	struct atl_fc_state *fc = &lstate->fc;				\
+									\
+	if ((base)->port != PORT_TP || (base)->duplex != DUPLEX_FULL)	\
+		return -EINVAL;						\
+									\
+	if ((base)->autoneg != AUTONEG_ENABLE)				\
+		return atl_set_fixed_speed(hw, speed);			\
+									\
+	atl_add_link_bit(tmp, Autoneg);					\
+	atl_add_link_bit(tmp, TP);					\
+	atl_add_link_bit(tmp, Pause);					\
+	atl_add_link_bit(tmp, Asym_Pause);				\
+	atl_link_to_kernel((lstate)->supported, tmp, legacy);		\
+									\
+	if (atl_complement_intersect(advertise, tmp)) {			\
+		atl_nic_dbg("Unsupported advertising bits from ethtool\n"); \
+		return -EINVAL;						\
+	}								\
+									\
+	lstate->autoneg = true;						\
+	(lstate)->advertized &= ATL_EEE_MASK;				\
+	(lstate)->advertized |= atl_kernel_to_link(advertise, legacy);	\
+									\
+	fc->req = 0;							\
+	if (atl_test_link_bit(advertise, Pause))			\
+		fc->req	|= atl_fc_full;					\
+									\
+	if (atl_test_link_bit(advertise, Asym_Pause))			\
+		fc->req ^= atl_fc_tx;					\
+									\
+} while (0)
+
+#ifndef ATL_HAVE_ETHTOOL_KSETTINGS
+
+#define atl_add_link_bit(ptr, name)		\
+	(*(ptr) |= SUPPORTED_ ## name)
+
+#define atl_test_link_bit(ptr, name)		\
+	(*(ptr) & SUPPORTED_ ## name)
+
+static inline bool atl_complement_intersect(const unsigned long *advertised,
+	unsigned long *supported)
+{
+	return !!(*(uint32_t *)advertised & ~*(uint32_t *)supported);
+}
+
+static int atl_ethtool_set_settings(struct net_device *ndev,
+	struct ethtool_cmd *cmd)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	struct atl_link_state *lstate = &hw->link_state;
+	unsigned long tmp = 0;
+	uint32_t speed = ethtool_cmd_speed(cmd);
+
+	atl_ethtool_set_common(cmd, lstate,
+		(unsigned long *)&cmd->advertising, &tmp, true, speed);
+	hw->mcp.ops->set_link(hw, false);
+	return 0;
+}
+
+#else
+
+#define atl_add_link_bit(ptr, name)				\
+	__set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, ptr)
+
+#define atl_test_link_bit(ptr, name)				\
+	test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, ptr)
+
+static inline bool atl_complement_intersect(const unsigned long *advertised,
+	unsigned long *supported)
+{
+	bitmap_complement(supported, supported,
+		__ETHTOOL_LINK_MODE_MASK_NBITS);
+	return bitmap_intersects(advertised, supported,
+		__ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int atl_ethtool_set_ksettings(struct net_device *ndev,
+	const struct ethtool_link_ksettings *cmd)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	struct atl_link_state *lstate = &hw->link_state;
+	const struct ethtool_link_settings *base = &cmd->base;
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+	bitmap_zero(tmp, __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+	atl_ethtool_set_common(base, lstate, cmd->link_modes.advertising, tmp,
+		false, cmd->base.speed);
+	hw->mcp.ops->set_link(hw, false);
+	return 0;
+}
+
+#endif
+
+#undef atl_add_link_bit
+#undef atl_test_link_bit
+
+static uint32_t atl_rss_tbl_size(struct net_device *ndev)
+{
+	return ATL_RSS_TBL_SIZE;
+}
+
+static uint32_t atl_rss_key_size(struct net_device *ndev)
+{
+	return ATL_RSS_KEY_SIZE;
+}
+
+static int atl_rss_get_rxfh(struct net_device *ndev, uint32_t *tbl,
+	uint8_t *key, uint8_t *htype)
+{
+	struct atl_hw *hw = &((struct atl_nic *)netdev_priv(ndev))->hw;
+	int i;
+
+	if (htype)
+		*htype = ETH_RSS_HASH_TOP;
+
+	if (key)
+		memcpy(key, hw->rss_key, atl_rss_key_size(ndev));
+
+	if (tbl)
+		for (i = 0; i < atl_rss_tbl_size(ndev); i++)
+			tbl[i] = hw->rss_tbl[i];
+
+	return 0;
+}
+
+static int atl_rss_set_rxfh(struct net_device *ndev, const uint32_t *tbl,
+	const uint8_t *key, const uint8_t htype)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	int i;
+	uint32_t tbl_size = atl_rss_tbl_size(ndev);
+
+	if (htype && htype != ETH_RSS_HASH_TOP)
+		return -EINVAL;
+
+	if (tbl) {
+		for (i = 0; i < tbl_size; i++)
+			if (tbl[i] >= nic->nvecs)
+				return -EINVAL;
+
+		for (i = 0; i < tbl_size; i++)
+			hw->rss_tbl[i] = tbl[i];
+	}
+
+	if (key) {
+		memcpy(hw->rss_key, key, atl_rss_key_size(ndev));
+		atl_set_rss_key(hw);
+	}
+
+	if (tbl)
+		atl_set_rss_tbl(hw);
+
+	return 0;
+}
+
+static void atl_get_channels(struct net_device *ndev,
+	struct ethtool_channels *chan)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	chan->max_combined = ATL_MAX_QUEUES;
+	chan->combined_count = nic->nvecs;
+	if (nic->flags & ATL_FL_MULTIPLE_VECTORS)
+		chan->max_other = chan->other_count = ATL_NUM_NON_RING_IRQS;
+}
+
+static int atl_set_channels(struct net_device *ndev,
+			    struct ethtool_channels *chan)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	unsigned int nvecs = chan->combined_count;
+
+	if (!nvecs || chan->rx_count || chan->tx_count)
+		return -EINVAL;
+
+	if (nic->flags & ATL_FL_MULTIPLE_VECTORS &&
+		chan->other_count != ATL_NUM_NON_RING_IRQS)
+		return -EINVAL;
+
+	if (!(nic->flags & ATL_FL_MULTIPLE_VECTORS) &&
+		chan->other_count)
+		return -EINVAL;
+
+	if (nvecs > atl_max_queues)
+		return -EINVAL;
+
+	nic->requested_nvecs = nvecs;
+
+	return atl_reconfigure(nic);
+}
+
+static void atl_get_pauseparam(struct net_device *ndev,
+	struct ethtool_pauseparam *pause)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_fc_state *fc = &nic->hw.link_state.fc;
+
+	pause->autoneg = 1;
+	pause->rx_pause = !!(fc->cur & atl_fc_rx);
+	pause->tx_pause = !!(fc->cur & atl_fc_tx);
+}
+
+static int atl_set_pauseparam(struct net_device *ndev,
+	struct ethtool_pauseparam *pause)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	struct atl_link_state *lstate = &hw->link_state;
+	struct atl_fc_state *fc = &lstate->fc;
+
+	if (atl_fw_major(hw) < 2)
+		return -EOPNOTSUPP;
+
+	if (pause->autoneg && !lstate->autoneg)
+		return -EINVAL;
+
+	fc->req = pause->autoneg ? atl_fc_full :
+		(!!pause->rx_pause << atl_fc_rx_shift) |
+		(!!pause->tx_pause << atl_fc_tx_shift);
+
+	hw->mcp.ops->set_link(hw, false);
+	return 0;
+}
+
+static int atl_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_link_state *lstate = &nic->hw.link_state;
+	int ret = 0;
+
+	eee->supported = eee->advertised = eee->lp_advertised = 0;
+
+	/* Casting to unsigned long is safe, as atl_link_to_kernel()
+	 * will only access low 32 bits when called with legacy == true
+	 */
+	atl_link_to_kernel(lstate->supported >> ATL_EEE_BIT_OFFT,
+		(unsigned long *)&eee->supported, true);
+	atl_link_to_kernel(lstate->advertized >> ATL_EEE_BIT_OFFT,
+		(unsigned long *)&eee->advertised, true);
+	atl_link_to_kernel(lstate->lp_advertized >> ATL_EEE_BIT_OFFT,
+		(unsigned long *)&eee->lp_advertised, true);
+
+	eee->eee_enabled = eee->tx_lpi_enabled = lstate->eee_enabled;
+	eee->eee_active = lstate->eee;
+
+	ret = atl_get_lpi_timer(nic, &eee->tx_lpi_timer);
+
+	return ret;
+}
+
+static int atl_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	struct atl_link_state *lstate = &hw->link_state;
+	uint32_t tmp = 0;
+
+	if (atl_fw_major(hw) < 2)
+		return -EOPNOTSUPP;
+
+	atl_get_lpi_timer(nic, &tmp);
+	if (eee->tx_lpi_timer != tmp)
+		return -EOPNOTSUPP;
+
+	lstate->eee_enabled = eee->eee_enabled;
+
+	if (lstate->eee_enabled) {
+		atl_link_to_kernel(lstate->supported >> ATL_EEE_BIT_OFFT,
+			(unsigned long *)&tmp, true);
+		if (eee->advertised & ~tmp)
+			return -EINVAL;
+
+		/* advertise the requested modes or all supported */
+		if (eee->advertised)
+			tmp = atl_kernel_to_link(
+					(unsigned long *)&eee->advertised,
+					true);
+		else
+			tmp = atl_kernel_to_link(
+					(unsigned long *)&tmp, true);
+	}
+
+	lstate->advertized &= ~ATL_EEE_MASK;
+	if (lstate->eee_enabled)
+		lstate->advertized |= tmp << ATL_EEE_BIT_OFFT;
+
+	hw->mcp.ops->set_link(hw, false);
+	return 0;
+}
+
+static void atl_get_drvinfo(struct net_device *ndev,
+	struct ethtool_drvinfo *drvinfo)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	uint32_t fw_rev = nic->hw.mcp.fw_rev;
+
+	strlcpy(drvinfo->driver, atl_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, ATL_VERSION, sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		"%d.%d.%d", fw_rev >> 24, fw_rev >> 16 & 0xff,
+		fw_rev & 0xffff);
+	strlcpy(drvinfo->bus_info, pci_name(nic->hw.pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+static int atl_nway_reset(struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+
+	return hw->mcp.ops->restart_aneg(hw);
+}
+
+static void atl_get_ringparam(struct net_device *ndev,
+	struct ethtool_ringparam *rp)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	rp->rx_mini_max_pending = rp->rx_mini_pending = 0;
+	rp->rx_jumbo_max_pending = rp->rx_jumbo_pending = 0;
+
+	rp->rx_max_pending = rp->tx_max_pending = ATL_MAX_RING_SIZE;
+
+	rp->rx_pending = nic->requested_rx_size;
+	rp->tx_pending = nic->requested_tx_size;
+}
+
+static int atl_set_ringparam(struct net_device *ndev,
+	struct ethtool_ringparam *rp)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	if (rp->rx_mini_pending || rp->rx_jumbo_pending)
+		return -EINVAL;
+
+	if (rp->rx_pending < 8 || rp->tx_pending < 8)
+		return -EINVAL;
+
+	nic->requested_rx_size = rp->rx_pending & ~7;
+	nic->requested_tx_size = rp->tx_pending & ~7;
+
+	return atl_reconfigure(nic);
+}
+
+struct atl_stat_desc {
+	char stat_name[ETH_GSTRING_LEN];
+	int idx;
+};
+
+#define ATL_TX_STAT(_name, _field)				\
+{								\
+	.stat_name = #_name,					\
+	.idx = offsetof(struct atl_tx_ring_stats, _field) /	\
+		sizeof(uint64_t),				\
+}
+
+#define ATL_RX_STAT(_name, _field)				\
+{								\
+	.stat_name = #_name,					\
+	.idx = offsetof(struct atl_rx_ring_stats, _field) /	\
+		sizeof(uint64_t),				\
+}
+
+#define ATL_ETH_STAT(_name, _field)				\
+{								\
+	.stat_name = #_name,					\
+	.idx = offsetof(struct atl_ether_stats, _field) /	\
+		sizeof(uint64_t),				\
+}
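+/* The .idx values index the stats structs viewed as flat uint64_t
+ * arrays: a field's index is its byte offset divided by
+ * sizeof(uint64_t).  atl_write_stats() below relies on the same view.
+ */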
+
+static const struct atl_stat_desc tx_stat_descs[] = {
+	ATL_TX_STAT(tx_packets, packets),
+	ATL_TX_STAT(tx_bytes, bytes),
+	ATL_TX_STAT(tx_busy, tx_busy),
+	ATL_TX_STAT(tx_queue_restart, tx_restart),
+	ATL_TX_STAT(tx_dma_map_failed, dma_map_failed),
+};
+
+static const struct atl_stat_desc rx_stat_descs[] = {
+	ATL_RX_STAT(rx_packets, packets),
+	ATL_RX_STAT(rx_bytes, bytes),
+	ATL_RX_STAT(rx_multicast_packets, multicast),
+	ATL_RX_STAT(rx_lin_skb_overrun, linear_dropped),
+	ATL_RX_STAT(rx_skb_alloc_failed, alloc_skb_failed),
+	ATL_RX_STAT(rx_head_page_reused, reused_head_page),
+	ATL_RX_STAT(rx_data_page_reused, reused_data_page),
+	ATL_RX_STAT(rx_head_page_allocated, alloc_head_page),
+	ATL_RX_STAT(rx_data_page_allocated, alloc_data_page),
+	ATL_RX_STAT(rx_head_page_alloc_failed, alloc_head_page_failed),
+	ATL_RX_STAT(rx_data_page_alloc_failed, alloc_data_page_failed),
+	ATL_RX_STAT(rx_non_eop_descs, non_eop_descs),
+	ATL_RX_STAT(rx_mac_err, mac_err),
+	ATL_RX_STAT(rx_checksum_err, csum_err),
+};
+
+static const struct atl_stat_desc eth_stat_descs[] = {
+	ATL_ETH_STAT(tx_pause, tx_pause),
+	ATL_ETH_STAT(rx_pause, rx_pause),
+	ATL_ETH_STAT(rx_ether_drops, rx_ether_drops),
+	ATL_ETH_STAT(rx_ether_octets, rx_ether_octets),
+	ATL_ETH_STAT(rx_ether_pkts, rx_ether_pkts),
+	ATL_ETH_STAT(rx_ether_broadcasts, rx_ether_broacasts),
+	ATL_ETH_STAT(rx_ether_multicasts, rx_ether_multicasts),
+	ATL_ETH_STAT(rx_ether_crc_align_errs, rx_ether_crc_align_errs),
+	ATL_ETH_STAT(rx_filter_host, rx_filter_host),
+	ATL_ETH_STAT(rx_filter_lost, rx_filter_lost),
+};
+
+#define ATL_PRIV_FLAG(_name, _bit)		\
+	[ATL_PF(_bit)] = #_name
+
+static const char atl_priv_flags[][ETH_GSTRING_LEN] = {
+	ATL_PRIV_FLAG(PKTSystemLoopback, LPB_SYS_PB),
+	ATL_PRIV_FLAG(DMASystemLoopback, LPB_SYS_DMA),
+	/* ATL_PRIV_FLAG(DMANetworkLoopback, LPB_NET_DMA), */
+	ATL_PRIV_FLAG(RX_LPI_MAC, LPI_RX_MAC),
+	ATL_PRIV_FLAG(TX_LPI_MAC, LPI_TX_MAC),
+	ATL_PRIV_FLAG(RX_LPI_PHY, LPI_RX_PHY),
+	ATL_PRIV_FLAG(TX_LPI_PHY, LPI_TX_PHY),
+	ATL_PRIV_FLAG(ResetStatistics, STATS_RESET),
+	ATL_PRIV_FLAG(StripEtherPadding, STRIP_PAD),
+};
+
+static int atl_get_sset_count(struct net_device *ndev, int sset)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(tx_stat_descs) * (nic->nvecs + 1) +
+			ARRAY_SIZE(rx_stat_descs) * (nic->nvecs + 1) +
+			ARRAY_SIZE(eth_stat_descs);
+
+	case ETH_SS_PRIV_FLAGS:
+		return ARRAY_SIZE(atl_priv_flags);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void atl_copy_stats_strings(char **data, char *prefix,
+	const struct atl_stat_desc *descs, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		snprintf(*data, ETH_GSTRING_LEN, "%s%s",
+			prefix, descs[i].stat_name);
+		*data += ETH_GSTRING_LEN;
+	}
+}
+
+static void atl_copy_stats_string_set(char **data, char *prefix)
+{
+	atl_copy_stats_strings(data, prefix, tx_stat_descs,
+		ARRAY_SIZE(tx_stat_descs));
+	atl_copy_stats_strings(data, prefix, rx_stat_descs,
+		ARRAY_SIZE(rx_stat_descs));
+}
+
+static void atl_get_strings(struct net_device *ndev, uint32_t sset,
+	uint8_t *data)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	int i;
+	char prefix[16];
+	char *p = data;
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		atl_copy_stats_string_set(&p, "");
+
+		atl_copy_stats_strings(&p, "", eth_stat_descs,
+			ARRAY_SIZE(eth_stat_descs));
+
+		for (i = 0; i < nic->nvecs; i++) {
+			snprintf(prefix, sizeof(prefix), "ring_%d_", i);
+			atl_copy_stats_string_set(&p, prefix);
+		}
+		return;
+
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(p, atl_priv_flags, sizeof(atl_priv_flags));
+		return;
+	}
+}
+
+#define atl_write_stats(stats, descs, data, type)	\
+do {							\
+	type *_stats = (type *)(stats);			\
+	int i;						\
+							\
+	for (i = 0; i < ARRAY_SIZE(descs); i++)		\
+		*(data)++ = _stats[descs[i].idx];	\
+} while (0)
+
+
+static void atl_get_ethtool_stats(struct net_device *ndev,
+	struct ethtool_stats *stats, u64 *data)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	int i;
+
+	atl_update_global_stats(nic);
+
+	atl_write_stats(&nic->stats.tx, tx_stat_descs, data, uint64_t);
+	atl_write_stats(&nic->stats.rx, rx_stat_descs, data, uint64_t);
+
+	atl_write_stats(&nic->stats.eth, eth_stat_descs, data, uint64_t);
+
+	for (i = 0; i < nic->nvecs; i++) {
+		struct atl_queue_vec *qvec = &nic->qvecs[i];
+		struct atl_ring_stats tmp;
+
+		atl_get_ring_stats(&qvec->tx, &tmp);
+		atl_write_stats(&tmp.tx, tx_stat_descs, data, uint64_t);
+		atl_get_ring_stats(&qvec->rx, &tmp);
+		atl_write_stats(&tmp.rx, rx_stat_descs, data, uint64_t);
+	}
+}
+
+static int atl_update_eee_pflags(struct atl_nic *nic)
+{
+	int ret = 0;
+	uint8_t prtad = 0;
+	uint32_t val;
+	uint16_t phy_val;
+	uint32_t flags = nic->priv_flags;
+	struct atl_link_type *link = nic->hw.link_state.link;
+	struct atl_hw *hw = &nic->hw;
+
+	flags &= ~ATL_PF_LPI_MASK;
+
+	if (!link || link->speed == 100)
+		goto done;
+
+	if (link->speed == 1000) {
+		ret = atl_mdio_read(hw, prtad, 3, 1, &phy_val);
+		if (ret)
+			goto done;
+
+		if (phy_val & BIT(9))
+			flags |= ATL_PF_BIT(LPI_TX_PHY);
+
+		if (phy_val & BIT(8))
+			flags |= ATL_PF_BIT(LPI_RX_PHY);
+	} else {
+		ret = atl_mdio_read(hw, prtad, 3, 0xc830, &phy_val);
+		if (ret)
+			goto done;
+
+		if (phy_val & BIT(0))
+			flags |= ATL_PF_BIT(LPI_TX_PHY);
+
+		ret = atl_mdio_read(hw, prtad, 3, 0xe834, &phy_val);
+		if (ret)
+			goto done;
+
+		if (phy_val & BIT(0))
+			flags |= ATL_PF_BIT(LPI_RX_PHY);
+
+	}
+
+	ret = atl_msm_read(&nic->hw, ATL_MSM_GEN_STS, &val);
+	if (ret)
+		goto done;
+
+	if (val & BIT(8))
+		flags |= ATL_PF_BIT(LPI_TX_MAC);
+	if (val & BIT(4))
+		flags |= ATL_PF_BIT(LPI_RX_MAC);
+
+done:
+	nic->priv_flags = flags;
+	return ret;
+}
+
+void atl_reset_stats(struct atl_nic *nic)
+{
+	struct atl_queue_vec *qvec;
+
+	/* Fetch current MSM stats */
+	atl_update_eth_stats(nic);
+
+	spin_lock(&nic->stats_lock);
+	/* Adding current relative values to base makes it equal to
+	 * current absolute values, thus zeroing the relative values. */
+	atl_adjust_eth_stats(&nic->stats.eth_base, &nic->stats.eth, true);
+
+	atl_for_each_qvec(nic, qvec) {
+		memset(&qvec->rx.stats, 0, sizeof(qvec->rx.stats));
+		memset(&qvec->tx.stats, 0, sizeof(qvec->tx.stats));
+	}
+
+	spin_unlock(&nic->stats_lock);
+}
+
+static int atl_set_pad_stripping(struct atl_nic *nic, bool on)
+{
+	struct atl_hw *hw = &nic->hw;
+	int ret;
+	uint32_t ctrl;
+
+	ret = atl_hwsem_get(hw, ATL_MCP_SEM_MSM);
+	if (ret)
+		return ret;
+
+	ret = __atl_msm_read(hw, ATL_MSM_GEN_CTRL, &ctrl);
+	if (ret)
+		goto unlock;
+
+	if (on)
+		ctrl |= BIT(5);
+	else
+		ctrl &= ~BIT(5);
+
+	ret = __atl_msm_write(hw, ATL_MSM_GEN_CTRL, ctrl);
+
+unlock:
+	atl_hwsem_put(hw, ATL_MCP_SEM_MSM);
+	return ret;
+}
+
+static uint32_t atl_get_priv_flags(struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	atl_update_eee_pflags(nic);
+	return nic->priv_flags;
+}
+
+static int atl_set_priv_flags(struct net_device *ndev, uint32_t flags)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	uint32_t diff = flags ^ nic->priv_flags;
+	uint32_t curr = nic->priv_flags & ATL_PF_LPB_MASK;
+	uint32_t lpb = flags & ATL_PF_LPB_MASK;
+	int ret;
+
+	if (diff & ATL_PF_RO_MASK)
+		return -EINVAL;
+
+	if (diff & ~ATL_PF_RW_MASK)
+		return -EOPNOTSUPP;
+
+	if (flags & ATL_PF_BIT(STATS_RESET))
+		atl_reset_stats(nic);
+	flags &= ~ATL_PF_BIT(STATS_RESET);
+
+	if (diff & ATL_PF_BIT(STRIP_PAD)) {
+		ret = atl_set_pad_stripping(nic,
+			!!(flags & ATL_PF_BIT(STRIP_PAD)));
+		if (ret)
+			return ret;
+	}
+
+	if (hweight32(lpb) > 1) {
+		atl_nic_err("Can't enable more than one loopback simultaneously\n");
+		return -EINVAL;
+	}
+
+	if (lpb & ATL_PF_BIT(LPB_SYS_DMA) && !atl_rx_linear) {
+		atl_nic_err("System DMA loopback suported only in rx_linear mode\n");
+		return -EINVAL;
+	}
+
+	if (curr)
+		atl_set_loopback(nic, ffs(curr) - 1, false);
+
+	if (lpb)
+		atl_set_loopback(nic, ffs(lpb) - 1, true);
+
+	nic->priv_flags = flags;
+	return 0;
+}
+
+static int atl_get_coalesce(struct net_device *ndev,
+			    struct ethtool_coalesce *ec)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	memset(ec, 0, sizeof(*ec));
+	ec->rx_coalesce_usecs = nic->rx_intr_delay;
+	ec->tx_coalesce_usecs = nic->tx_intr_delay;
+
+	return 0;
+}
+
+static int atl_set_coalesce(struct net_device *ndev,
+			    struct ethtool_coalesce *ec)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce ||
+		ec->rx_max_coalesced_frames || ec->tx_max_coalesced_frames ||
+		ec->rx_max_coalesced_frames_irq || ec->rx_coalesce_usecs_irq ||
+		ec->tx_max_coalesced_frames_irq || ec->tx_coalesce_usecs_irq)
+		return -EOPNOTSUPP;
+
+	if (ec->rx_coalesce_usecs < atl_min_intr_delay ||
+		ec->tx_coalesce_usecs < atl_min_intr_delay) {
+		atl_nic_err("Interrupt coalescing delays less than min_intr_delay (%d uS) not supported\n",
+			atl_min_intr_delay);
+		return -EINVAL;
+	}
+
+	nic->rx_intr_delay = ec->rx_coalesce_usecs;
+	nic->tx_intr_delay = ec->tx_coalesce_usecs;
+
+	atl_set_intr_mod(nic);
+
+	return 0;
+}
+
+struct atl_rxf_flt_desc {
+	int base;
+	int count;
+	uint32_t rxq_bit;
+	int rxq_shift;
+	size_t cmd_offt;
+	size_t count_offt;
+	int (*get_rxf)(const struct atl_rxf_flt_desc *desc,
+		struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp);
+	int (*set_rxf)(const struct atl_rxf_flt_desc *desc,
+		struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp);
+	void (*update_rxf)(struct atl_nic *nic, int idx);
+	int (*check_rxf)(const struct atl_rxf_flt_desc *desc,
+		struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp);
+};
+
+#define atl_for_each_rxf_desc(_desc)				\
+for (_desc = atl_rxf_descs;					\
+	_desc < atl_rxf_descs + ARRAY_SIZE(atl_rxf_descs);	\
+	_desc++)
+
+#define atl_for_each_rxf_idx(_desc, _idx)		\
+	for (_idx = 0; _idx < _desc->count; _idx++)
+
+static inline int atl_rxf_idx(const struct atl_rxf_flt_desc *desc,
+	struct ethtool_rx_flow_spec *fsp)
+{
+	return fsp->location - desc->base;
+}
+
+static inline uint64_t atl_ring_cookie(const struct atl_rxf_flt_desc *desc,
+	uint32_t cmd)
+{
+	if (cmd & desc->rxq_bit)
+		return (cmd >> desc->rxq_shift) & ATL_RXF_RXQ_MSK;
+	else if (cmd & ATL_RXF_ACT_TOHOST)
+		return ATL_RXF_RING_ANY;
+	else
+		return RX_CLS_FLOW_DISC;
+}
+
+static int atl_rxf_get_vlan(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	int idx = atl_rxf_idx(desc, fsp);
+	uint32_t cmd = vlan->cmd[idx];
+
+	if (!(cmd & ATL_RXF_EN))
+		return -EINVAL;
+
+	fsp->flow_type = ETHER_FLOW | FLOW_EXT;
+	fsp->h_ext.vlan_tci = htons(cmd & ATL_VLAN_VID_MASK);
+	fsp->m_ext.vlan_tci = htons(BIT(12) - 1);
+	fsp->ring_cookie = atl_ring_cookie(desc, cmd);
+
+	return 0;
+}
+
+static int atl_rxf_get_etype(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_etype *etype = &nic->rxf_etype;
+	int idx = atl_rxf_idx(desc, fsp);
+	uint32_t cmd = etype->cmd[idx];
+
+	if (!(cmd & ATL_RXF_EN))
+		return -EINVAL;
+
+	fsp->flow_type = ETHER_FLOW;
+	fsp->m_u.ether_spec.h_proto = 0xffff;
+	fsp->h_u.ether_spec.h_proto = htons(cmd & ATL_ETYPE_VAL_MASK);
+	fsp->ring_cookie = atl_ring_cookie(desc, cmd);
+
+	return 0;
+}
+
+static inline void atl_ntuple_swap_v6(__be32 dst[4], __be32 src[4])
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		dst[i] = src[3 - i];
+}
+
+static int atl_rxf_get_ntuple(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_ntuple *ntuples = &nic->rxf_ntuple;
+	uint32_t idx = atl_rxf_idx(desc, fsp);
+	uint32_t cmd = ntuples->cmd[idx];
+
+	if (!(cmd & ATL_RXF_EN))
+		return -EINVAL;
+
+	if (cmd & ATL_NTC_PROTO) {
+		switch (cmd & ATL_NTC_L4_MASK) {
+		case ATL_NTC_L4_TCP:
+			fsp->flow_type = cmd & ATL_NTC_V6 ?
+				TCP_V6_FLOW : TCP_V4_FLOW;
+			break;
+
+		case ATL_NTC_L4_UDP:
+			fsp->flow_type = cmd & ATL_NTC_V6 ?
+				UDP_V6_FLOW : UDP_V4_FLOW;
+			break;
+
+		case ATL_NTC_L4_SCTP:
+			fsp->flow_type = cmd & ATL_NTC_V6 ?
+				SCTP_V6_FLOW : SCTP_V4_FLOW;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	} else {
+#ifdef ATL_HAVE_IPV6_NTUPLE
+		if (cmd & ATL_NTC_V6) {
+			fsp->flow_type = IPV6_USER_FLOW;
+		} else
+#endif
+		{
+			fsp->flow_type = IPV4_USER_FLOW;
+			fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+		}
+	}
+
+#ifdef ATL_HAVE_IPV6_NTUPLE
+	if (cmd & ATL_NTC_V6) {
+		struct ethtool_tcpip6_spec *rule = &fsp->h_u.tcp_ip6_spec;
+		struct ethtool_tcpip6_spec *mask = &fsp->m_u.tcp_ip6_spec;
+
+		if (cmd & ATL_NTC_SA) {
+			atl_ntuple_swap_v6(rule->ip6src,
+				ntuples->src_ip6[idx / 4]);
+			memset(mask->ip6src, 0xff, sizeof(mask->ip6src));
+		}
+
+		if (cmd & ATL_NTC_DA) {
+			atl_ntuple_swap_v6(rule->ip6dst,
+				ntuples->dst_ip6[idx / 4]);
+			memset(mask->ip6dst, 0xff, sizeof(mask->ip6dst));
+		}
+
+		if (cmd & ATL_NTC_SP) {
+			rule->psrc = ntuples->src_port[idx];
+			mask->psrc = -1;
+		}
+
+		if (cmd & ATL_NTC_DP) {
+			rule->pdst = ntuples->dst_port[idx];
+			mask->pdst = -1;
+		}
+	} else
+#endif
+	{
+		struct ethtool_tcpip4_spec *rule = &fsp->h_u.tcp_ip4_spec;
+		struct ethtool_tcpip4_spec *mask = &fsp->m_u.tcp_ip4_spec;
+
+		if (cmd & ATL_NTC_SA) {
+			rule->ip4src = ntuples->src_ip4[idx];
+			mask->ip4src = -1;
+		}
+
+		if (cmd & ATL_NTC_DA) {
+			rule->ip4dst = ntuples->dst_ip4[idx];
+			mask->ip4dst = -1;
+		}
+
+		if (cmd & ATL_NTC_SP) {
+			rule->psrc = ntuples->src_port[idx];
+			mask->psrc = -1;
+		}
+
+		if (cmd & ATL_NTC_DP) {
+			rule->pdst = ntuples->dst_port[idx];
+			mask->pdst = -1;
+		}
+	}
+
+	fsp->ring_cookie = atl_ring_cookie(desc, cmd);
+
+	return 0;
+}
+
+static int atl_get_rxf_locs(struct atl_nic *nic, struct ethtool_rxnfc *rxnfc,
+	uint32_t *rule_locs)
+{
+	struct atl_rxf_ntuple *ntuple = &nic->rxf_ntuple;
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	struct atl_rxf_etype *etype = &nic->rxf_etype;
+	int count = ntuple->count + vlan->count + etype->count;
+	int i;
+
+	if (rxnfc->rule_cnt < count)
+		return -EMSGSIZE;
+
+	for (i = 0; i < ATL_RXF_VLAN_MAX; i++)
+		if (vlan->cmd[i] & ATL_RXF_EN)
+			*rule_locs++ = i + ATL_RXF_VLAN_BASE;
+
+	for (i = 0; i < ATL_RXF_ETYPE_MAX; i++)
+		if (etype->cmd[i] & ATL_RXF_EN)
+			*rule_locs++ = i + ATL_RXF_ETYPE_BASE;
+
+	for (i = 0; i < ATL_RXF_NTUPLE_MAX; i++)
+		if (ntuple->cmd[i] & ATL_RXF_EN)
+			*rule_locs++ = i + ATL_RXF_NTUPLE_BASE;
+
+	rxnfc->rule_cnt = count;
+	return 0;
+}
+
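+/* Per-field ethtool masks are supported only as match-all (all ones)
+ * or ignore (all zeroes). An all-ones mask sets @flag in @cmd and
+ * requires @cmd to be non-NULL. */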
+static int atl_check_mask(uint8_t *mask, int len, uint32_t *cmd, uint32_t flag)
+{
+	uint8_t first = mask[0];
+	uint8_t *p;
+
+	if (first != 0 && first != 0xff)
+		return -EINVAL;
+
+	for (p = mask; p < &mask[len]; p++)
+		if (*p != first)
+			return -EINVAL;
+
+	if (first == 0xff) {
+		if (cmd)
+			*cmd |= flag;
+		else
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
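+/* Validate @fsp->ring_cookie and fold the action into @cmd: a drop
+ * rule leaves @cmd unchanged, a host-bound rule sets
+ * ATL_RXF_ACT_TOHOST plus, unless "any ring" was requested, the
+ * target queue in the filter's rxq field. */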
+static int atl_rxf_set_ring(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp, uint32_t *cmd)
+{
+	uint64_t ring_cookie = fsp->ring_cookie;
+	uint32_t ring;
+
+	if (ring_cookie == RX_CLS_FLOW_DISC)
+		return 0;
+
+	ring = ethtool_get_flow_spec_ring(ring_cookie);
+	if (ring > ATL_RXF_RING_ANY ||
+		(ring >= nic->nvecs && ring != ATL_RXF_RING_ANY &&
+			!test_bit(ring, &nic->fwd.ring_map[ATL_FWDIR_RX]))) {
+		atl_nic_err("Invalid Rx filter queue %d\n", ring);
+		return -EINVAL;
+	}
+
+	if (ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+		atl_nic_err("Rx filter queue VF must be zero");
+		return -EINVAL;
+	}
+
+	*cmd |= ATL_RXF_ACT_TOHOST;
+
+	if (ring != ATL_RXF_RING_ANY)
+		*cmd |= ring << desc->rxq_shift | desc->rxq_bit;
+
+	return 0;
+}
+
+static int atl_rxf_check_vlan_etype_common(struct ethtool_rx_flow_spec *fsp)
+{
+	int ret;
+
+	ret = atl_check_mask((uint8_t *)&fsp->m_u.ether_spec.h_source,
+		sizeof(fsp->m_u.ether_spec.h_source), NULL, 0);
+	if (ret)
+		return ret;
+
+	ret = atl_check_mask((uint8_t *)&fsp->m_ext.data,
+		sizeof(fsp->m_ext.data), NULL, 0);
+	if (ret)
+		return ret;
+
+	ret = atl_check_mask((uint8_t *)&fsp->m_ext.vlan_etype,
+		sizeof(fsp->m_ext.vlan_etype), NULL, 0);
+
+	return ret;
+}
+
+static int atl_rxf_check_vlan(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	uint16_t vid, mask;
+	int ret;
+
+	if (fsp->flow_type != (ETHER_FLOW | FLOW_EXT)) {
+		if (!(fsp->location & RX_CLS_LOC_SPECIAL))
+			atl_nic_err("Only ether flow-type supported for VLAN filters\n");
+		return -EINVAL;
+	}
+
+	ret = atl_rxf_check_vlan_etype_common(fsp);
+	if (ret)
+		return ret;
+
+	if (fsp->m_u.ether_spec.h_proto)
+		return -EINVAL;
+
+	vid = ntohs(fsp->h_ext.vlan_tci);
+	mask = ntohs(fsp->m_ext.vlan_tci);
+
+	if (mask & 0xf000 && vid & 0xf000 & mask)
+		return -EINVAL;
+
+	if ((mask & 0xfff) != 0xfff)
+		return -EINVAL;
+
+	return 0;
+}
+
+enum atl_rxf_vlan_idx {
+	ATL_VIDX_FOUND = BIT(31),
+	ATL_VIDX_FREE = BIT(30),
+	ATL_VIDX_REPL = BIT(29),
+	ATL_VIDX_NONE = BIT(28),
+	ATL_VIDX_MASK = BIT(28) - 1,
+};
+
+/* If a filter is enabled for VID, return its index ored with
+ * ATL_VIDX_FOUND.  Otherwise find an unused filter index and return
+ * it ored with ATL_VIDX_FREE.  If no unused filter exists and
+ * try_repl is set, try finding a candidate for replacement and return
+ * its index ored with ATL_VIDX_REPL. If all of the above fail,
+ * return ATL_VIDX_NONE.
+ *
+ * A replacement candidate filter must be configured to accept
+ * packets, not set to direct to a specific ring and must match a VID
+ * from a VLAN subinterface.
+ */
+static uint32_t atl_rxf_find_vid(struct atl_nic *nic, uint16_t vid,
+	bool try_repl)
+{
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	int idx, free = ATL_RXF_VLAN_MAX, repl = ATL_RXF_VLAN_MAX;
+
+	for (idx = 0; idx < ATL_RXF_VLAN_MAX; idx++) {
+		uint32_t cmd = vlan->cmd[idx];
+
+		if (!(cmd & ATL_RXF_EN)) {
+			if (free == ATL_RXF_VLAN_MAX) {
+				free = idx;
+				if (vid == -1)
+					break;
+			}
+			continue;
+		}
+
+		if ((cmd & ATL_VLAN_VID_MASK) == vid)
+			return idx | ATL_VIDX_FOUND;
+
+		if (try_repl && repl == ATL_RXF_VLAN_MAX &&
+			(cmd & ATL_RXF_ACT_TOHOST) &&
+			!(cmd & ATL_VLAN_RXQ)) {
+
+			if (!test_bit(cmd & ATL_VLAN_VID_MASK, vlan->map))
+				continue;
+
+			repl = idx;
+		}
+	}
+
+	if (free != ATL_RXF_VLAN_MAX)
+		return free | ATL_VIDX_FREE;
+
+	if (try_repl && repl != ATL_RXF_VLAN_MAX)
+		return repl | ATL_VIDX_REPL;
+
+	return ATL_VIDX_NONE;
+}
+
+static uint16_t atl_rxf_vid(struct atl_rxf_vlan *vlan, int idx)
+{
+	uint32_t cmd = vlan->cmd[idx];
+
+	return cmd & ATL_RXF_EN ? cmd & ATL_VLAN_VID_MASK : -1;
+}
+
+static int atl_rxf_dup_vid(struct atl_rxf_vlan *vlan, int idx, uint16_t vid)
+{
+	int i;
+
+	for (i = 0; i < ATL_RXF_VLAN_MAX; i++) {
+		if (i == idx)
+			continue;
+
+		if (atl_rxf_vid(vlan, i) == vid)
+			return i;
+	}
+
+	return -1;
+}
+
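+/* Insert or update a VLAN filter. With an explicit location, reject
+ * duplicate VIDs and refuse to replace a VID backing a Linux VLAN
+ * subdevice with a different one. With RX_CLS_LOC_SPECIAL, pick a
+ * slot via atl_rxf_find_vid(), possibly repurposing a filter that
+ * only mirrors a subdevice VID. Returns 1 if a new slot became
+ * active, 0 on update, negative on error. */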
+static int atl_rxf_set_vlan(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	int idx;
+	int ret, promisc_delta = 0;
+	uint32_t cmd = ATL_RXF_EN;
+	int present;
+	uint16_t old_vid, vid = ntohs(fsp->h_ext.vlan_tci) & 0xfff;
+
+	if (!(fsp->location & RX_CLS_LOC_SPECIAL)) {
+		int dup;
+
+		idx = atl_rxf_idx(desc, fsp);
+		dup = atl_rxf_dup_vid(vlan, idx, vid);
+		if (dup >= 0) {
+			atl_nic_err("Can't add duplicate VLAN filter @%d (existing @%d)\n",
+				idx, dup);
+			return -EINVAL;
+		}
+
+		old_vid = atl_rxf_vid(vlan, idx);
+		if (old_vid != -1 && vid != old_vid &&
+			test_bit(old_vid, vlan->map)) {
+			atl_nic_err("Can't overwrite Linux VLAN filter @%d VID %hd with a different VID %hd\n",
+				idx, old_vid, vid);
+			return -EINVAL;
+		}
+
+		ret = atl_rxf_check_vlan(desc, nic, fsp);
+		if (ret)
+			return ret;
+
+	} else {
+		/* atl_rxf_check_vlan() already succeeded */
+		idx = atl_rxf_find_vid(nic, vid, true);
+
+		if (idx == ATL_VIDX_NONE)
+			return -EINVAL;
+
+		/* If a filter is being added for a VID without a
+		 * corresponding VLAN subdevice, and we're reusing a
+		 * filter previously used for a VLAN subdevice-covered
+		 * VID, the promisc count needs to be bumped (but
+		 * only if filter change succeeds). */
+		if ((idx & ATL_VIDX_REPL) && !test_bit(vid, vlan->map))
+			promisc_delta++;
+
+		idx &= ATL_VIDX_MASK;
+		fsp->location = idx + desc->base;
+	}
+
+	cmd |= vid;
+
+	ret = atl_rxf_set_ring(desc, nic, fsp, &cmd);
+	if (ret)
+		return ret;
+
+	/* If a VLAN subdevice exists, override filter to accept
+	 * packets */
+	if (test_bit(vid, vlan->map))
+		cmd |= ATL_RXF_ACT_TOHOST;
+
+	present = !!(vlan->cmd[idx] & ATL_RXF_EN);
+	vlan->cmd[idx] = cmd;
+	vlan->promisc_count += promisc_delta;
+
+	return !present;
+}
+
+static int atl_rxf_set_etype(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_etype *etype = &nic->rxf_etype;
+	int idx = atl_rxf_idx(desc, fsp);
+	int ret;
+	uint32_t cmd = ATL_RXF_EN;
+	int present = !!(etype->cmd[idx] & ATL_RXF_EN);
+
+	if (fsp->flow_type != ETHER_FLOW) {
+		atl_nic_err("Only ether flow-type supported for ethertype filters\n");
+		return -EINVAL;
+	}
+
+	ret = atl_rxf_check_vlan_etype_common(fsp);
+	if (ret)
+		return ret;
+
+	if (fsp->m_ext.vlan_tci)
+		return -EINVAL;
+
+	if (fsp->m_u.ether_spec.h_proto != 0xffff)
+		return -EINVAL;
+
+	cmd |= ntohs(fsp->h_u.ether_spec.h_proto);
+
+	ret = atl_rxf_set_ring(desc, nic, fsp, &cmd);
+	if (ret)
+		return ret;
+
+	etype->cmd[idx] = cmd;
+
+	return !present;
+}
+
+static int atl_rxf_set_ntuple(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_ntuple *ntuple = &nic->rxf_ntuple;
+	int idx = atl_rxf_idx(desc, fsp);
+	uint32_t cmd = ATL_NTC_EN;
+	int ret;
+	__be16 sport, dport;
+	int present = !!(ntuple->cmd[idx] & ATL_RXF_EN);
+
+	ret = atl_rxf_set_ring(desc, nic, fsp, &cmd);
+	if (ret)
+		return ret;
+
+	switch (fsp->flow_type) {
+#ifdef ATL_HAVE_IPV6_NTUPLE
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		if (fsp->m_u.tcp_ip6_spec.tclass != 0) {
+			atl_nic_err("Unsupported match field\n");
+			return -EINVAL;
+		}
+		cmd |= ATL_NTC_PROTO | ATL_NTC_V6;
+		break;
+
+	case IPV6_USER_FLOW:
+		if (fsp->m_u.usr_ip6_spec.l4_4_bytes != 0 ||
+			fsp->m_u.usr_ip6_spec.tclass != 0 ||
+			fsp->m_u.usr_ip6_spec.l4_proto != 0) {
+			atl_nic_err("Unsupported match field\n");
+			return -EINVAL;
+		}
+		cmd |= ATL_NTC_V6;
+		break;
+#endif
+
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+		if (fsp->m_u.tcp_ip4_spec.tos != 0) {
+			atl_nic_err("Unsupported match field\n");
+			return -EINVAL;
+		}
+		cmd |= ATL_NTC_PROTO;
+		break;
+
+	case IPV4_USER_FLOW:
+		if (fsp->m_u.usr_ip4_spec.l4_4_bytes != 0 ||
+			fsp->m_u.usr_ip4_spec.tos != 0 ||
+			fsp->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
+			fsp->h_u.usr_ip4_spec.proto != 0) {
+			atl_nic_err("Unsupported match field\n");
+			return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	switch (fsp->flow_type) {
+	case TCP_V6_FLOW:
+	case TCP_V4_FLOW:
+		cmd |= ATL_NTC_L4_TCP;
+		break;
+
+	case UDP_V6_FLOW:
+	case UDP_V4_FLOW:
+		cmd |= ATL_NTC_L4_UDP;
+		break;
+
+	case SCTP_V6_FLOW:
+	case SCTP_V4_FLOW:
+		cmd |= ATL_NTC_L4_SCTP;
+		break;
+	}
+
+#ifdef ATL_HAVE_IPV6_NTUPLE
+	if (cmd & ATL_NTC_V6) {
+		int i;
+
+		if (idx & 3) {
+			atl_nic_err("IPv6 filters only supported in locations 8 and 12\n");
+			return -EINVAL;
+		}
+
+		for (i = idx + 1; i < idx + 4; i++)
+			if (ntuple->cmd[i] & ATL_NTC_EN) {
+				atl_nic_err("IPv6 filter %d overlaps an IPv4 filter %d\n",
+					    idx, i);
+				return -EINVAL;
+			}
+
+		ret = atl_check_mask((uint8_t *)fsp->m_u.tcp_ip6_spec.ip6src,
+			sizeof(fsp->m_u.tcp_ip6_spec.ip6src), &cmd, ATL_NTC_SA);
+		if (ret)
+			return ret;
+
+		ret = atl_check_mask((uint8_t *)fsp->m_u.tcp_ip6_spec.ip6dst,
+			sizeof(fsp->m_u.tcp_ip6_spec.ip6dst), &cmd, ATL_NTC_DA);
+		if (ret)
+			return ret;
+
+		sport = fsp->h_u.tcp_ip6_spec.psrc;
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip6_spec.psrc,
+			sizeof(fsp->m_u.tcp_ip6_spec.psrc), &cmd, ATL_NTC_SP);
+		if (ret)
+			return ret;
+
+		dport = fsp->h_u.tcp_ip6_spec.pdst;
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip6_spec.pdst,
+			sizeof(fsp->m_u.tcp_ip6_spec.pdst), &cmd, ATL_NTC_DP);
+		if (ret)
+			return ret;
+
+		if (cmd & ATL_NTC_SA)
+			atl_ntuple_swap_v6(ntuple->src_ip6[idx / 4],
+				fsp->h_u.tcp_ip6_spec.ip6src);
+
+		if (cmd & ATL_NTC_DA)
+			atl_ntuple_swap_v6(ntuple->dst_ip6[idx / 4],
+				fsp->h_u.tcp_ip6_spec.ip6dst);
+
+	} else
+#endif
+	{
+
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip4_spec.ip4src,
+			sizeof(fsp->m_u.tcp_ip4_spec.ip4src), &cmd, ATL_NTC_SA);
+		if (ret)
+			return ret;
+
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip4_spec.ip4dst,
+			sizeof(fsp->m_u.tcp_ip4_spec.ip4dst), &cmd, ATL_NTC_DA);
+		if (ret)
+			return ret;
+
+		sport = fsp->h_u.tcp_ip4_spec.psrc;
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip4_spec.psrc,
+			sizeof(fsp->m_u.tcp_ip4_spec.psrc), &cmd, ATL_NTC_SP);
+		if (ret)
+			return ret;
+
+		dport = fsp->h_u.tcp_ip4_spec.pdst;
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip4_spec.pdst,
+			sizeof(fsp->m_u.tcp_ip4_spec.pdst), &cmd, ATL_NTC_DP);
+		if (ret)
+			return ret;
+
+		if (cmd & ATL_NTC_SA)
+			ntuple->src_ip4[idx] = fsp->h_u.tcp_ip4_spec.ip4src;
+
+		if (cmd & ATL_NTC_DA)
+			ntuple->dst_ip4[idx] = fsp->h_u.tcp_ip4_spec.ip4dst;
+	}
+
+	if (cmd & ATL_NTC_SP)
+		ntuple->src_port[idx] = sport;
+
+	if (cmd & ATL_NTC_DP)
+		ntuple->dst_port[idx] = dport;
+
+	ntuple->cmd[idx] = cmd;
+
+	return !present;
+}
+
+static void atl_rxf_update_vlan(struct atl_nic *nic, int idx)
+{
+	atl_write(&nic->hw, ATL_RX_VLAN_FLT(idx), nic->rxf_vlan.cmd[idx]);
+}
+
+static void atl_rxf_update_etype(struct atl_nic *nic, int idx)
+{
+	atl_write(&nic->hw, ATL_RX_ETYPE_FLT(idx), nic->rxf_etype.cmd[idx]);
+}
+
+static const struct atl_rxf_flt_desc atl_rxf_descs[] = {
+	{
+		.base = ATL_RXF_VLAN_BASE,
+		.count = ATL_RXF_VLAN_MAX,
+		.rxq_bit = ATL_VLAN_RXQ,
+		.rxq_shift = ATL_VLAN_RXQ_SHIFT,
+		.cmd_offt = offsetof(struct atl_nic, rxf_vlan.cmd),
+		.count_offt = offsetof(struct atl_nic, rxf_vlan.count),
+		.get_rxf = atl_rxf_get_vlan,
+		.set_rxf = atl_rxf_set_vlan,
+		.update_rxf = atl_rxf_update_vlan,
+		.check_rxf = atl_rxf_check_vlan,
+	},
+	{
+		.base = ATL_RXF_ETYPE_BASE,
+		.count = ATL_RXF_ETYPE_MAX,
+		.rxq_bit = ATL_ETYPE_RXQ,
+		.rxq_shift = ATL_ETYPE_RXQ_SHIFT,
+		.cmd_offt = offsetof(struct atl_nic, rxf_etype.cmd),
+		.count_offt = offsetof(struct atl_nic, rxf_etype.count),
+		.get_rxf = atl_rxf_get_etype,
+		.set_rxf = atl_rxf_set_etype,
+		.update_rxf = atl_rxf_update_etype,
+	},
+	{
+		.base = ATL_RXF_NTUPLE_BASE,
+		.count = ATL_RXF_NTUPLE_MAX,
+		.rxq_bit = ATL_NTC_RXQ,
+		.rxq_shift = ATL_NTC_RXQ_SHIFT,
+		.cmd_offt = offsetof(struct atl_nic, rxf_ntuple.cmd),
+		.count_offt = offsetof(struct atl_nic, rxf_ntuple.count),
+		.get_rxf = atl_rxf_get_ntuple,
+		.set_rxf = atl_rxf_set_ntuple,
+		.update_rxf = atl_update_ntuple_flt,
+	},
+};
+
+static uint32_t *atl_rxf_cmd(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic)
+{
+	return (uint32_t *)((char *)nic + desc->cmd_offt);
+}
+
+static int *atl_rxf_count(const struct atl_rxf_flt_desc *desc, struct atl_nic *nic)
+{
+	return (int *)((char *)nic + desc->count_offt);
+}
+
+static const struct atl_rxf_flt_desc *atl_rxf_desc(struct atl_nic *nic,
+	struct ethtool_rx_flow_spec *fsp)
+{
+	uint32_t loc = fsp->location;
+	const struct atl_rxf_flt_desc *desc;
+
+	atl_for_each_rxf_desc(desc) {
+		if (loc & RX_CLS_LOC_SPECIAL) {
+			if (desc->check_rxf && !desc->check_rxf(desc, nic, fsp))
+				return desc;
+
+			continue;
+		}
+
+		if (loc < desc->base)
+			return NULL;
+
+		if (loc < desc->base + desc->count)
+			return desc;
+	}
+
+	return NULL;
+}
+
+static void atl_refresh_rxf_desc(struct atl_nic *nic,
+	const struct atl_rxf_flt_desc *desc)
+{
+	int idx;
+
+	atl_for_each_rxf_idx(desc, idx)
+		desc->update_rxf(nic, idx);
+
+	atl_set_vlan_promisc(&nic->hw, nic->rxf_vlan.promisc_count);
+}
+
+void atl_refresh_rxfs(struct atl_nic *nic)
+{
+	const struct atl_rxf_flt_desc *desc;
+
+	atl_for_each_rxf_desc(desc)
+		atl_refresh_rxf_desc(nic, desc);
+
+	atl_set_vlan_promisc(&nic->hw, nic->rxf_vlan.promisc_count);
+}
+
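+/* A VLAN filter slot was freed: try to promote VIDs previously forced
+ * into promiscuous mode into hardware filters, using a scratch copy
+ * of the VID map with already-filtered VIDs cleared. */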
+static bool atl_vlan_pull_from_promisc(struct atl_nic *nic, uint32_t idx)
+{
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	unsigned long *map;
+	int i;
+	long vid = -1;
+
+	if (!vlan->promisc_count)
+		return false;
+
+	map = kcalloc(ATL_VID_MAP_LEN, sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return false;
+
+	memcpy(map, vlan->map, ATL_VID_MAP_LEN * sizeof(*map));
+	for (i = 0; i < ATL_RXF_VLAN_MAX; i++) {
+		uint32_t cmd = vlan->cmd[i];
+
+		if (cmd & ATL_RXF_EN)
+			clear_bit(cmd & ATL_VLAN_VID_MASK, map);
+	}
+
+	do {
+		idx &= ATL_VIDX_MASK;
+		vid = find_next_bit(map, BIT(12), vid + 1);
+		vlan->cmd[idx] = ATL_RXF_EN | ATL_RXF_ACT_TOHOST | vid;
+		atl_rxf_update_vlan(nic, idx);
+		__clear_bit(vid, map);
+		vlan->promisc_count--;
+		vlan->count++;
+		if (vlan->promisc_count == 0)
+			break;
+
+		idx = atl_rxf_find_vid(nic, -1, false);
+	} while (idx & ATL_VIDX_FREE);
+
+	kfree(map);
+	atl_set_vlan_promisc(&nic->hw, vlan->promisc_count);
+	return true;
+}
+
+static bool atl_rxf_del_vlan_override(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	uint32_t *cmd = &vlan->cmd[atl_rxf_idx(desc, fsp)];
+	uint16_t vid = *cmd & ATL_VLAN_VID_MASK;
+
+	if (!test_bit(vid, vlan->map))
+		return false;
+
+	/* Trying to delete filter via ethtool while VLAN subdev still
+	 * exists. Just drop queue assignment. */
+	*cmd &= ~ATL_VLAN_RXQ;
+	return true;
+}
+
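+/* Common handler for ETHTOOL_SRXCLSRLINS / ETHTOOL_SRXCLSRLDEL.
+ * ->set_rxf() returns 1 when a previously-unused slot became active,
+ * so the per-type filter count can be adjusted here. VLAN deletions
+ * may instead be downgraded to dropping the queue assignment, or the
+ * freed slot backfilled from the promiscuous overflow. */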
+static int atl_set_rxf(struct atl_nic *nic,
+	struct ethtool_rx_flow_spec *fsp, bool delete)
+{
+	const struct atl_rxf_flt_desc *desc;
+	uint32_t *cmd;
+	int *count, ret, idx;
+
+	desc = atl_rxf_desc(nic, fsp);
+	if (!desc)
+		return -EINVAL;
+
+	count = atl_rxf_count(desc, nic);
+
+	if (delete) {
+		idx = atl_rxf_idx(desc, fsp);
+		cmd = &atl_rxf_cmd(desc, nic)[idx];
+
+		if (!(*cmd & ATL_RXF_EN))
+			/* Attempting to delete non-existent filter */
+			return -EINVAL;
+
+		if (desc->base == ATL_RXF_VLAN_BASE &&
+			atl_rxf_del_vlan_override(desc, nic, fsp))
+			goto done;
+
+		*cmd = 0;
+		(*count)--;
+
+		if (desc->base == ATL_RXF_VLAN_BASE &&
+			atl_vlan_pull_from_promisc(nic, idx))
+			/* Filter already updated by
+			 * atl_vlan_pull_from_promisc(), can just
+			 * return */
+			return 0;
+	} else {
+		ret = desc->set_rxf(desc, nic, fsp);
+		if (ret < 0)
+			return ret;
+
+		/* fsp->location may have been set in
+		 * ->set_rxf(). Guaranteed to be valid now. */
+		idx = atl_rxf_idx(desc, fsp);
+		*count += ret;
+	}
+
+done:
+	desc->update_rxf(nic, idx);
+	return 0;
+}
+
+static int atl_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
+	uint32_t *rule_locs)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct ethtool_rx_flow_spec *fsp = &rxnfc->fs;
+	int ret = -EOPNOTSUPP;
+	const struct atl_rxf_flt_desc *desc;
+
+	switch (rxnfc->cmd) {
+	case ETHTOOL_GRXRINGS:
+		rxnfc->data = nic->nvecs;
+		return 0;
+
+	case ETHTOOL_GRXCLSRLCNT:
+		rxnfc->rule_cnt = nic->rxf_ntuple.count + nic->rxf_vlan.count +
+			nic->rxf_etype.count;
+		rxnfc->data = (ATL_RXF_VLAN_MAX + ATL_RXF_ETYPE_MAX +
+			ATL_RXF_NTUPLE_MAX) | RX_CLS_LOC_SPECIAL;
+		return 0;
+
+	case ETHTOOL_GRXCLSRULE:
+		desc = atl_rxf_desc(nic, fsp);
+		if (!desc)
+			return -EINVAL;
+
+		memset(&fsp->h_u, 0, sizeof(fsp->h_u));
+		memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+		memset(&fsp->h_ext, 0, sizeof(fsp->h_ext));
+		memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+		ret = desc->get_rxf(desc, nic, fsp);
+		break;
+
+	case ETHTOOL_GRXCLSRLALL:
+		ret = atl_get_rxf_locs(nic, rxnfc, rule_locs);
+		break;
+
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int atl_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct ethtool_rx_flow_spec *fsp = &rxnfc->fs;
+
+	switch (rxnfc->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		return atl_set_rxf(nic, fsp, false);
+
+	case ETHTOOL_SRXCLSRLDEL:
+		return atl_set_rxf(nic, fsp, true);
+	}
+
+	return -EOPNOTSUPP;
+}
+
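+/* ndo_vlan_rx_add_vid handler, called on VLAN subdevice creation.
+ * Reuses a matching ethtool filter when one exists, programs a free
+ * filter otherwise, and falls back to VLAN promiscuous mode when all
+ * filters are in use. */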
+int atl_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	int idx;
+
+	atl_nic_dbg("Add vlan id %hd\n", vid);
+
+	vid &= 0xfff;
+	if (__test_and_set_bit(vid, vlan->map))
+		/* Already created -- shouldn't happen? */
+		return 0;
+
+	vlan->vlans_active++;
+	idx = atl_rxf_find_vid(nic, vid, false);
+
+	if (idx == ATL_VIDX_NONE) {
+		/* VID not found and no unused filters */
+		vlan->promisc_count++;
+		atl_set_vlan_promisc(&nic->hw, vlan->promisc_count);
+		return 0;
+	}
+
+	if (idx & ATL_VIDX_FREE) {
+		/* VID not found, program unused filter */
+		idx &= ATL_VIDX_MASK;
+		vlan->cmd[idx] = ATL_VLAN_EN | ATL_RXF_ACT_TOHOST | vid;
+		vlan->count++;
+		atl_rxf_update_vlan(nic, idx);
+		return 0;
+	}
+
+	idx &= ATL_VIDX_MASK;
+	if (vlan->cmd[idx]  & ATL_RXF_ACT_TOHOST)
+		/* VID already added via ethtool */
+		return 0;
+
+	/* Ethtool filter set to drop. Override. */
+	atl_nic_warn("%s: Overriding VLAN filter for VID %hd @%d set to drop\n",
+		__func__, vid, idx);
+
+	vlan->cmd[idx] = ATL_RXF_EN | ATL_RXF_ACT_TOHOST | vid;
+	atl_rxf_update_vlan(nic, idx);
+	return 0;
+}
+
+int atl_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	uint32_t cmd;
+	int idx;
+
+	atl_nic_dbg("Kill vlan id %hd\n", vid);
+
+	vid &= 0xfff;
+	if (!__test_and_clear_bit(vid, vlan->map))
+		return -EINVAL;
+
+	vlan->vlans_active--;
+
+	idx = atl_rxf_find_vid(nic, vid, false);
+	if (!(idx & ATL_VIDX_FOUND)) {
+		/* VID not present in filters, decrease promisc count */
+		vlan->promisc_count--;
+		atl_set_vlan_promisc(&nic->hw, vlan->promisc_count);
+		return 0;
+	}
+
+	idx &= ATL_VIDX_MASK;
+	cmd = vlan->cmd[idx];
+	if (cmd & ATL_VLAN_RXQ)
+		/* Queue explicitly set via ethtool, leave the filter
+		 * intact */
+		return 0;
+
+	/* Delete filter, maybe pull vid from promisc overflow */
+	vlan->cmd[idx] = 0;
+	vlan->count--;
+	if (!atl_vlan_pull_from_promisc(nic, idx))
+		atl_rxf_update_vlan(nic, idx);
+
+	return 0;
+}
+
+static void atl_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	wol->supported = WAKE_MAGIC;
+	wol->wolopts = nic->flags & ATL_FL_WOL ? WAKE_MAGIC : 0;
+}
+
+static int atl_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	int ret;
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	if (wol->wolopts & ~WAKE_MAGIC) {
+		atl_nic_err("%s: unsupported WoL mode %x\n", __func__,
+			wol->wolopts);
+		return -EINVAL;
+	}
+
+	if (wol->wolopts & WAKE_MAGIC)
+		nic->flags |= ATL_FL_WOL;
+	else
+		nic->flags &= ~ATL_FL_WOL;
+
+	ret = device_set_wakeup_enable(&nic->hw.pdev->dev,
+		!!(nic->flags & ATL_FL_WOL));
+
+	if (ret)
+		atl_nic_err("device_set_wakeup_enable failed: %d\n", -ret);
+
+	return ret;
+}
+
+const struct ethtool_ops atl_ethtool_ops = {
+	.get_link = atl_ethtool_get_link,
+#ifndef ATL_HAVE_ETHTOOL_KSETTINGS
+	.get_settings = atl_ethtool_get_settings,
+	.set_settings = atl_ethtool_set_settings,
+#else
+	.get_link_ksettings = atl_ethtool_get_ksettings,
+	.set_link_ksettings = atl_ethtool_set_ksettings,
+#endif
+
+	.get_rxfh_indir_size = atl_rss_tbl_size,
+	.get_rxfh_key_size = atl_rss_key_size,
+	.get_rxfh = atl_rss_get_rxfh,
+	.set_rxfh = atl_rss_set_rxfh,
+	.get_channels = atl_get_channels,
+	.set_channels = atl_set_channels,
+	.get_rxnfc = atl_get_rxnfc,
+	.set_rxnfc = atl_set_rxnfc,
+	.get_pauseparam = atl_get_pauseparam,
+	.set_pauseparam = atl_set_pauseparam,
+	.get_eee = atl_get_eee,
+	.set_eee = atl_set_eee,
+	.get_drvinfo = atl_get_drvinfo,
+	.nway_reset = atl_nway_reset,
+	.get_ringparam = atl_get_ringparam,
+	.set_ringparam = atl_set_ringparam,
+	.get_sset_count = atl_get_sset_count,
+	.get_strings = atl_get_strings,
+	.get_ethtool_stats = atl_get_ethtool_stats,
+	.get_priv_flags = atl_get_priv_flags,
+	.set_priv_flags = atl_set_priv_flags,
+	.get_coalesce = atl_get_coalesce,
+	.set_coalesce = atl_set_coalesce,
+	.get_wol = atl_get_wol,
+	.set_wol = atl_set_wol,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.c
new file mode 100644
index 0000000..86bc2eb
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.c
@@ -0,0 +1,470 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+#include "atl_common.h"
+#include "atl_hw.h"
+#include "atl_drviface.h"
+
+struct atl_link_type atl_link_types[] = {
+#define LINK_TYPE(_name, _speed, _ethtl_idx, _fw1_bit, _fw2_bit)	\
+	{								\
+		.name = _name,						\
+		.speed = _speed,					\
+		.ethtool_idx = _ethtl_idx,				\
+		.fw_bits = {						\
+		[0] = _fw1_bit,						\
+		[1] = _fw2_bit,						\
+		},							\
+	},
+
+	LINK_TYPE("100BaseTX-FD", 100, ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+		0x20, 1 << 5)
+	LINK_TYPE("1000BaseT-FD", 1000, ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+		0x10, 1 << 8)
+	LINK_TYPE("2.5GBaseT-FD", 2500, ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+		8, 1 << 9)
+	LINK_TYPE("5GBaseT-FD", 5000, ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+		2, 1 << 10)
+	LINK_TYPE("10GBaseT-FD", 10000, ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+		1, 1 << 11)
+};
+
+const int atl_num_rates = ARRAY_SIZE(atl_link_types);
+
+static inline void atl_lock_fw(struct atl_hw *hw)
+{
+	mutex_lock(&hw->mcp.lock);
+}
+
+static inline void atl_unlock_fw(struct atl_hw *hw)
+{
+	mutex_unlock(&hw->mcp.lock);
+}
+
+static int atl_fw1_wait_fw_init(struct atl_hw *hw)
+{
+	uint32_t hostData_addr;
+	uint32_t id, new_id;
+	int ret;
+
+	mdelay(10);
+
+	busy_wait(2000, mdelay(1), hostData_addr,
+		  atl_read(hw, ATL_MCP_SCRATCH(FW_STAT_STRUCT)),
+		  hostData_addr == 0);
+
+	atl_dev_dbg("got hostData address: 0x%x\n", hostData_addr);
+
+	ret = atl_read_mcp_mem(hw, hostData_addr + 4, &id, 4);
+	if (ret)
+		return ret;
+
+	busy_wait(10000, mdelay(1), ret,
+		  atl_read_mcp_mem(hw, hostData_addr + 4, &new_id, 4),
+		  !ret && new_id == id);
+	if (ret)
+		return ret;
+	if (new_id == id) {
+		atl_dev_err("timeout waiting for FW to start (initial transactionId 0x%x, hostData addr 0x%x)\n",
+			    id, hostData_addr);
+		return -EIO;
+	}
+
+	/* return fw1_wait_drviface(hw, NULL); */
+	return 0;
+}
+
+static int atl_fw2_wait_fw_init(struct atl_hw *hw)
+{
+	uint32_t reg;
+
+	busy_wait(1000, mdelay(1), reg, atl_read(hw, ATL_GLOBAL_FW_IMAGE_ID),
+		!reg);
+	if (!reg)
+		return -EIO;
+	return 0;
+}
+
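+/* Decode a FW link word: @low holds per-rate link bits, @high the
+ * corresponding EEE bits, indexed via fw_bits[@fw_idx]. Records the
+ * link partner's abilities in lstate->lp_advertized and returns the
+ * highest resolved rate we also advertise, or NULL if none. */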
+static struct atl_link_type *atl_parse_fw_bits(struct atl_hw *hw,
+	uint32_t low, uint32_t high, int fw_idx)
+{
+	struct atl_link_state *lstate = &hw->link_state;
+	unsigned int lp_adv = 0, adv = lstate->advertized;
+	struct atl_link_type *link;
+	bool eee = false;
+	int last = -1;
+	int i;
+
+	atl_for_each_rate(i, link) {
+		uint32_t link_bit = link->fw_bits[fw_idx];
+
+		if (!(low & link_bit))
+			continue;
+
+		if (high & link_bit)
+			lp_adv |= BIT(i + ATL_EEE_BIT_OFFT);
+
+		lp_adv |= BIT(i);
+		if (adv & BIT(i))
+			last = i;
+	}
+
+	lstate->lp_advertized = lp_adv;
+
+	link = NULL;
+	if (last >= 0) {
+		link = &atl_link_types[last];
+		if ((lp_adv & BIT(last + ATL_EEE_BIT_OFFT)) &&
+			(adv & BIT(last + ATL_EEE_BIT_OFFT)))
+			eee = true;
+	}
+
+	lstate->link = link;
+	lstate->eee = eee;
+	return link;
+}
+
+static struct atl_link_type *atl_fw1_check_link(struct atl_hw *hw)
+{
+	uint32_t reg;
+	struct atl_link_type *link;
+
+	atl_lock_fw(hw);
+	reg = atl_read(hw, ATL_MCP_SCRATCH(FW1_LINK_STS));
+
+	if ((reg & 0xf) != 2)
+		reg = 0;
+
+	reg = (reg >> 16) & 0xff;
+
+	link = atl_parse_fw_bits(hw, reg, 0, 0);
+
+	atl_unlock_fw(hw);
+	return link;
+}
+
+static struct atl_link_type *atl_fw2_check_link(struct atl_hw *hw)
+{
+	struct atl_link_type *link;
+	struct atl_link_state *lstate = &hw->link_state;
+	uint32_t low;
+	uint32_t high;
+	enum atl_fc_mode fc = atl_fc_none;
+
+	atl_lock_fw(hw);
+
+	low = atl_read(hw, ATL_MCP_SCRATCH(FW2_LINK_RES_LOW));
+	high = atl_read(hw, ATL_MCP_SCRATCH(FW2_LINK_RES_HIGH));
+
+	link = atl_parse_fw_bits(hw, low, high, 1);
+	if (!link)
+		goto unlock;
+
+	if (high & atl_fw2_pause)
+		fc |= atl_fc_rx;
+	if (high & atl_fw2_asym_pause)
+		fc |= atl_fc_tx;
+
+	lstate->fc.cur = fc;
+
+unlock:
+	atl_unlock_fw(hw);
+	return link;
+}
+
+static int atl_fw1_get_link_caps(struct atl_hw *hw)
+{
+	return 0;
+}
+
+static int atl_fw2_get_link_caps(struct atl_hw *hw)
+{
+	uint32_t fw_stat_addr = hw->mcp.fw_stat_addr;
+	unsigned int supported = 0;
+	uint32_t caps[2];
+	int i, ret;
+
+	atl_lock_fw(hw);
+
+	atl_dev_dbg("Host data struct addr: %#x\n", fw_stat_addr);
+	ret = atl_read_mcp_mem(hw, fw_stat_addr + atl_fw2_stat_lcaps,
+		caps, 8);
+	if (ret)
+		goto unlock;
+
+	for (i = 0; i < atl_num_rates; i++)
+		if (atl_link_types[i].fw_bits[1] & caps[0]) {
+			supported |= BIT(i);
+			if (atl_link_types[i].fw_bits[1] & caps[1])
+				supported |= BIT(i + ATL_EEE_BIT_OFFT);
+		}
+
+	hw->link_state.supported = supported;
+
+unlock:
+	atl_unlock_fw(hw);
+	return ret;
+}
+
+static inline unsigned int atl_link_adv(struct atl_link_state *lstate)
+{
+	return lstate->force_off ? 0 : lstate->advertized;
+}
+
+static inline bool atl_fw1_set_link_needed(struct atl_link_state *lstate)
+{
+	bool ret = false;
+
+	if (atl_link_adv(lstate) != lstate->prev_advertized) {
+		ret = true;
+		lstate->prev_advertized = atl_link_adv(lstate);
+	}
+
+	return ret;
+}
+
+static inline bool atl_fw2_set_link_needed(struct atl_link_state *lstate)
+{
+	struct atl_fc_state *fc = &lstate->fc;
+	bool ret = false;
+
+	if (fc->req != fc->prev_req) {
+		ret = true;
+		fc->prev_req = fc->req;
+	}
+
+	return atl_fw1_set_link_needed(lstate) || ret;
+}
+
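+/* Build the 64-bit FW link request from the advertised mask: the low
+ * 32 bits carry the per-rate enable bits, the high 32 bits the
+ * matching EEE enable bits. */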
+static uint64_t atl_set_fw_bits(struct atl_hw *hw, int fw_idx)
+{
+	unsigned int adv = atl_link_adv(&hw->link_state);
+	struct atl_link_type *ltype;
+	uint64_t link = 0;
+	int i;
+
+	atl_for_each_rate(i, ltype) {
+		uint32_t bit = ltype->fw_bits[fw_idx];
+
+		if (adv & BIT(i)) {
+			link |= bit;
+			if (adv & BIT(i + ATL_EEE_BIT_OFFT))
+				link |= (uint64_t)bit << 32;
+		}
+	}
+
+	return link;
+}
+
+static void atl_fw1_set_link(struct atl_hw *hw, bool force)
+{
+	uint32_t bits;
+
+	if (!force && !atl_fw1_set_link_needed(&hw->link_state))
+		return;
+
+	atl_lock_fw(hw);
+
+	bits = (atl_set_fw_bits(hw, 0) << 16) | 2;
+	atl_write(hw, ATL_MCP_SCRATCH(FW1_LINK_REQ), bits);
+
+	atl_unlock_fw(hw);
+}
+
+static void atl_fw2_set_link(struct atl_hw *hw, bool force)
+{
+	struct atl_link_state *lstate = &hw->link_state;
+	uint32_t hi_bits = 0;
+	uint64_t bits;
+
+	if (!force && !atl_fw2_set_link_needed(lstate))
+		return;
+
+	atl_lock_fw(hw);
+
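+	/* 802.3-style pause encoding: rx-only -> pause | asym_pause,
+	 * rx+tx (full) -> pause only, tx-only -> asym_pause only */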
+	if (lstate->fc.req & atl_fc_rx)
+		hi_bits |= atl_fw2_pause | atl_fw2_asym_pause;
+
+	if (lstate->fc.req & atl_fc_tx)
+		hi_bits ^= atl_fw2_asym_pause;
+
+	bits = atl_set_fw_bits(hw, 1);
+
+	hi_bits |= bits >> 32;
+
+	atl_write(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_LOW), bits);
+	atl_write(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_HIGH), hi_bits);
+
+	atl_unlock_fw(hw);
+}
+
+static int atl_fw1_unsupported(struct atl_hw *hw)
+{
+	return -EOPNOTSUPP;
+}
+
+static int atl_fw2_restart_aneg(struct atl_hw *hw)
+{
+	atl_lock_fw(hw);
+	atl_set_bits(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_HIGH), BIT(31));
+	atl_unlock_fw(hw);
+	return 0;
+}
+
+static void atl_fw1_set_default_link(struct atl_hw *hw)
+{
+	struct atl_link_state *lstate = &hw->link_state;
+
+	lstate->autoneg = true;
+	lstate->advertized = hw->link_state.supported;
+}
+
+static void atl_fw2_set_default_link(struct atl_hw *hw)
+{
+	struct atl_link_state *lstate = &hw->link_state;
+
+	atl_fw1_set_default_link(hw);
+	lstate->fc.req = atl_fc_full;
+	lstate->eee_enabled = 1;
+}
+
+static int atl_fw2_enable_wol(struct atl_hw *hw)
+{
+	int ret;
+	struct offloadInfo *info;
+	struct drvIface *msg;
+	uint32_t val, wol_bits = atl_fw2_nic_proxy | atl_fw2_wol;
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	info = &msg->fw2xOffloads;
+	info->version = 0;
+	info->len = sizeof(*info);
+	memcpy(info->macAddr, hw->mac_addr, ETH_ALEN);
+
+	atl_lock_fw(hw);
+
+	ret = atl_write_mcp_mem(hw, 0, msg,
+		(info->len + offsetof(struct drvIface, fw2xOffloads) + 3) & ~3);
+	if (ret) {
+		atl_dev_err("Failed to upload sleep proxy info to FW\n");
+		goto free;
+	}
+
+	atl_write(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_LOW), 0);
+	atl_write(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_HIGH), wol_bits);
+	busy_wait(100, mdelay(1), val,
+		atl_read(hw, ATL_MCP_SCRATCH(FW2_LINK_RES_HIGH)),
+		(val & wol_bits) != wol_bits);
+
+	ret = (val & wol_bits) == wol_bits ? 0 : -EIO;
+	if (ret)
+		atl_dev_err("Timeout waiting for WoL enable\n");
+
+free:
+	atl_unlock_fw(hw);
+	kfree(msg);
+	return ret;
+}
+
+int atl_read_fwstat_word(struct atl_hw *hw, uint32_t offt, uint32_t *val)
+{
+	int ret;
+	uint32_t addr = hw->mcp.fw_stat_addr + (offt & ~3);
+
+	ret = atl_read_mcp_mem(hw, addr, val, 4);
+	if (ret)
+		return ret;
+
+	*val >>= 8 * (offt & 3);
+	return 0;
+}
+
+static int atl_fw2_get_phy_temperature(struct atl_hw *hw, int *temp)
+{
+	uint32_t req, res;
+	int ret = 0;
+
+	atl_lock_fw(hw);
+
+	req = atl_read(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_HIGH));
+	req ^= atl_fw2_phy_temp;
+	atl_write(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_HIGH), req);
+
+	busy_wait(1000, udelay(10), res,
+		atl_read(hw, ATL_MCP_SCRATCH(FW2_LINK_RES_HIGH)),
+		((res ^ req) & atl_fw2_phy_temp) != 0);
+	if (((res ^ req) & atl_fw2_phy_temp) != 0) {
+		atl_dev_err("Timeout waiting for PHY temperature\n");
+		ret = -EIO;
+		goto unlock;
+	}
+
+	ret = atl_read_fwstat_word(hw, atl_fw2_stat_temp, &res);
+	if (ret)
+		goto unlock;
+
+	*temp = (res & 0xffff) * 1000 / 256;
+
+unlock:
+	atl_unlock_fw(hw);
+	return ret;
+}
+
+static struct atl_fw_ops atl_fw_ops[2] = {
+	[0] = {
+		.wait_fw_init = atl_fw1_wait_fw_init,
+		.set_link = atl_fw1_set_link,
+		.check_link = atl_fw1_check_link,
+		.get_link_caps = atl_fw1_get_link_caps,
+		.restart_aneg = atl_fw1_unsupported,
+		.set_default_link = atl_fw1_set_default_link,
+		.enable_wol = atl_fw1_unsupported,
+		.get_phy_temperature = (void *)atl_fw1_unsupported,
+		.efuse_shadow_addr_reg = ATL_MCP_SCRATCH(FW1_EFUSE_SHADOW),
+	},
+	[1] = {
+		.wait_fw_init = atl_fw2_wait_fw_init,
+		.set_link = atl_fw2_set_link,
+		.check_link = atl_fw2_check_link,
+		.get_link_caps = atl_fw2_get_link_caps,
+		.restart_aneg = atl_fw2_restart_aneg,
+		.set_default_link = atl_fw2_set_default_link,
+		.enable_wol = atl_fw2_enable_wol,
+		.get_phy_temperature = atl_fw2_get_phy_temperature,
+		.efuse_shadow_addr_reg = ATL_MCP_SCRATCH(FW2_EFUSE_SHADOW),
+	},
+};
+
+int atl_fw_init(struct atl_hw *hw)
+{
+	uint32_t tries, reg, major;
+
+	tries = busy_wait(10000, mdelay(1), reg, atl_read(hw, 0x18), !reg);
+	if (!reg) {
+		atl_dev_err("Timeout waiting for FW version\n");
+		return -EIO;
+	}
+	atl_dev_dbg("FW startup took %d ms\n", tries);
+
+	major = (reg >> 24) & 0xff;
+	if (!major || major > 3) {
+		atl_dev_err("Unsupported FW major version: %u\n", major);
+		return -EINVAL;
+	}
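+	/* FW major 1 selects atl_fw_ops[0]; majors 2 and 3 share the
+	 * FW 2.x interface, atl_fw_ops[1] */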
+	if (major > 2)
+		major--;
+	hw->mcp.ops = &atl_fw_ops[major - 1];
+	hw->mcp.poll_link = major == 1;
+	hw->mcp.fw_rev = reg;
+	hw->mcp.fw_stat_addr = atl_read(hw, ATL_MCP_SCRATCH(FW_STAT_STRUCT));
+
+	return hw->mcp.ops->wait_fw_init(hw);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.h
new file mode 100644
index 0000000..a3712e2
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.h
@@ -0,0 +1,94 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_FW_H_
+#define _ATL_FW_H_
+
+struct atl_hw;
+
+struct atl_link_type {
+	unsigned speed;
+	unsigned ethtool_idx;
+	uint32_t fw_bits[2];
+	const char *name;
+};
+
+extern struct atl_link_type atl_link_types[];
+extern const int atl_num_rates;
+
+#define atl_for_each_rate(idx, type)		\
+	for (idx = 0, type = atl_link_types;	\
+	     idx < atl_num_rates;		\
+	     idx++, type++)
+
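+/* Expand to both a _shift constant and the bit mask itself, e.g.
+ * atl_define_bit(atl_fw2_pause, 3) yields atl_fw2_pause_shift = 3
+ * and atl_fw2_pause = BIT(3). */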
+#define atl_define_bit(_name, _bit)		\
+	_name ## _shift = (_bit),		\
+	_name = BIT(_name ## _shift),
+
+enum atl_fw2_opts {
+	atl_define_bit(atl_fw2_pause, 3)
+	atl_define_bit(atl_fw2_asym_pause, 4)
+	atl_fw2_pause_mask = atl_fw2_pause | atl_fw2_asym_pause,
+	atl_define_bit(atl_fw2_phy_temp, 18)
+	atl_define_bit(atl_fw2_nic_proxy, 0x17)
+	atl_define_bit(atl_fw2_wol, 0x18)
+};
+
+enum atl_fw2_stat_offt {
+	atl_fw2_stat_temp = 0x50,
+	atl_fw2_stat_lcaps = 0x84,
+};
+
+enum atl_fc_mode {
+	atl_fc_none = 0,
+	atl_define_bit(atl_fc_rx, 0)
+	atl_define_bit(atl_fc_tx, 1)
+	atl_fc_full = atl_fc_rx | atl_fc_tx,
+};
+
+struct atl_fc_state {
+	enum atl_fc_mode req;
+	enum atl_fc_mode prev_req;
+	enum atl_fc_mode cur;
+};
+
+#define ATL_EEE_BIT_OFFT 16
+#define ATL_EEE_MASK ~(BIT(ATL_EEE_BIT_OFFT) - 1)
+
+struct atl_link_state {
+	/* The following three bitmaps use atl_link_types[] indices
+	 * as link bit positions. Conversion to/from ethtool bits is
+	 * done in atl_ethtool.c. */
+	unsigned supported;
+	unsigned advertized;
+	unsigned lp_advertized;
+	unsigned prev_advertized;
+	bool force_off;
+	bool autoneg;
+	bool eee;
+	bool eee_enabled;
+	struct atl_link_type *link;
+	struct atl_fc_state fc;
+};
+
+struct atl_fw_ops {
+	void (*set_link)(struct atl_hw *hw, bool force);
+	struct atl_link_type *(*check_link)(struct atl_hw *hw);
+	int (*wait_fw_init)(struct atl_hw *hw);
+	int (*get_link_caps)(struct atl_hw *hw);
+	int (*restart_aneg)(struct atl_hw *hw);
+	void (*set_default_link)(struct atl_hw *hw);
+	int (*enable_wol)(struct atl_hw *hw);
+	int (*get_phy_temperature)(struct atl_hw *hw, int *temp);
+	unsigned efuse_shadow_addr_reg;
+};
+
+int atl_read_fwstat_word(struct atl_hw *hw, uint32_t offt, uint32_t *val);
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.c
new file mode 100644
index 0000000..d6cdfb3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.c
@@ -0,0 +1,598 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2018 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/etherdevice.h>
+#include "atl_common.h"
+#include "atl_desc.h"
+
+static const char *atl_fwd_dir_str(struct atl_fwd_ring *ring)
+{
+	return ring->flags & ATL_FWR_TX ? "Tx" : "Rx";
+}
+
+static int atl_fwd_ring_tx(struct atl_fwd_ring *ring)
+{
+	return !!(ring->flags & ATL_FWR_TX);
+}
+
+static int atl_fwd_get_page(struct atl_fwd_buf_page *bpg, struct device *dev,
+	int order)
+{
+	struct page *pg = dev_alloc_pages(order);
+	dma_addr_t daddr;
+
+	if (!pg)
+		return -ENOMEM;
+
+	daddr = dma_map_page(dev, pg, 0, PAGE_SIZE << order, DMA_FROM_DEVICE);
+
+	if (dma_mapping_error(dev, daddr)) {
+		__free_pages(pg, order);
+		return -ENOMEM;
+	}
+
+	bpg->daddr = daddr;
+	bpg->page = pg;
+
+	return 0;
+}
+
+static void atl_fwd_free_bufs(struct atl_fwd_ring *ring)
+{
+	struct atl_nic *nic = ring->nic;
+	struct device *dev = &nic->hw.pdev->dev;
+	struct atl_fwd_bufs *bufs = ring->bufs;
+	int ring_size = ring->hw.size;
+	int order;
+	int i;
+
+	if (!bufs)
+		return;
+
+	order = bufs->order;
+
+	if (bufs->daddr_vec) {
+		dma_free_coherent(dev, ring_size * sizeof(dma_addr_t),
+			bufs->daddr_vec, bufs->daddr_vec_base);
+		kfree(bufs->vaddr_vec);
+	}
+
+	for (i = 0; i < bufs->num_pages; i++) {
+		struct atl_fwd_buf_page *bpg = &bufs->bpgs[i];
+
+		if (bpg->page) {
+			dma_unmap_page(dev, bpg->daddr,
+				PAGE_SIZE << order,
+				DMA_FROM_DEVICE);
+			__free_pages(bpg->page, order);
+		}
+	}
+
+	kfree(bufs);
+	ring->bufs = NULL;
+}
+
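+/* Allocate buffer pages for an offload ring and point each descriptor
+ * at its buffer. ATL_FWR_CONTIG_BUFS packs the whole buffer space
+ * into one compound page; otherwise pages of order @order are filled
+ * with as many buffers as fit. */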
+static int atl_fwd_alloc_bufs(struct atl_fwd_ring *ring,
+	int order)
+{
+	struct atl_nic *nic = ring->nic;
+	int flags = ring->flags;
+	int ring_size = ring->hw.size;
+	int buf_size = ring->buf_size;
+	struct device *dev = &nic->hw.pdev->dev;
+	struct atl_fwd_buf_page *bpg;
+	struct atl_fwd_bufs *bufs;
+	int num_pages, i;
+	int ret;
+	unsigned int pg_off = 0;
+	bool want_vecs = !!(flags & ATL_FWR_WANT_BUF_VECS);
+
+	if (!(flags & ATL_FWR_ALLOC_BUFS))
+		return 0;
+
+	if (flags & ATL_FWR_CONTIG_BUFS) {
+		order = get_order(buf_size * ring_size);
+		num_pages = 1;
+	} else {
+		int bufs_per_page = (PAGE_SIZE << order) / buf_size;
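+		/* Round up: enough pages to hold all ring_size buffers */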
+		num_pages = ring_size / bufs_per_page +
+			!!(ring_size % bufs_per_page);
+	}
+
+	bufs = kzalloc(sizeof(*bufs) +
+			sizeof(struct atl_fwd_buf_page) * num_pages,
+		GFP_KERNEL);
+	if (!bufs)
+		return -ENOMEM;
+
+	ring->bufs = bufs;
+	bufs->num_pages = num_pages;
+	bufs->order = order;
+
+	bpg = bufs->bpgs;
+	for (i = 0; i < num_pages; i++) {
+		ret = atl_fwd_get_page(&bpg[i], dev, order);
+		if (ret)
+			goto free;
+	}
+
+	if (want_vecs) {
+		ret = -ENOMEM;
+		bufs->daddr_vec = dma_alloc_coherent(dev,
+			ring_size * sizeof(dma_addr_t),
+			&bufs->daddr_vec_base, GFP_KERNEL);
+		if (!bufs->daddr_vec)
+			goto free;
+
+		bufs->vaddr_vec = kcalloc(ring_size, sizeof(void *),
+			GFP_KERNEL);
+		if (!bufs->vaddr_vec)
+			goto free;
+	} else {
+		bufs->daddr_vec_base = bpg[0].daddr;
+		bufs->vaddr_vec = page_to_virt(bpg[0].page);
+	}
+
+	bufs->paddr = page_to_phys(bpg[0].page);
+
+	bpg = bufs->bpgs;
+	for (i = 0; i < ring_size; i++) {
+		union atl_desc *desc = &ring->hw.descs[i];
+		dma_addr_t daddr = bpg->daddr + pg_off;
+
+		if (want_vecs) {
+			bufs->daddr_vec[i] = daddr;
+			bufs->vaddr_vec[i] = page_to_virt(bpg->page) + pg_off;
+		}
+
+		if (atl_fwd_ring_tx(ring))
+			desc->tx.daddr = daddr;
+		else
+			desc->rx.daddr = daddr;
+
+		pg_off += buf_size;
+		if (pg_off + buf_size <= (PAGE_SIZE << order))
+			continue;
+
+		bpg++;
+		pg_off = 0;
+	}
+
+	return 0;
+
+free:
+	atl_fwd_free_bufs(ring);
+	return ret;
+}
+
+static void atl_fwd_update_im(struct atl_fwd_ring *ring)
+{
+	struct atl_hw *hw = &ring->nic->hw;
+	int idx = ring->idx;
+	uint32_t addr;
+
+	addr = atl_fwd_ring_tx(ring) ? ATL_TX_INTR_MOD_CTRL(idx) :
+		ATL_RX_INTR_MOD_CTRL(idx);
+
+	atl_write(hw, addr, (ring->intr_mod_max / 2) << 0x10 |
+		(ring->intr_mod_min / 2) << 8 | 2);
+}
+
+static void atl_fwd_init_ring(struct atl_fwd_ring *fwd_ring)
+{
+	struct atl_hw *hw = &fwd_ring->nic->hw;
+	struct atl_hw_ring *ring = &fwd_ring->hw;
+	unsigned int flags = fwd_ring->flags;
+	int dir_tx = atl_fwd_ring_tx(fwd_ring);
+	int idx = fwd_ring->idx;
+	int lxo_bit = !!(flags & ATL_FWR_LXO);
+
+	atl_write(hw, ATL_RING_BASE_LSW(ring), ring->daddr);
+	atl_write(hw, ATL_RING_BASE_MSW(ring), ring->daddr >> 32);
+
+	if (dir_tx) {
+		atl_write(hw, ATL_TX_RING_THRESH(ring),
+			8 << 8 | 8 << 0x10 | 24 << 0x18);
+		atl_write(hw, ATL_TX_RING_CTL(ring), ring->size);
+
+		atl_write_bit(hw, ATL_TX_LSO_CTRL, idx, lxo_bit);
+	} else {
+		uint32_t ctrl = ring->size |
+			!!(flags & ATL_FWR_VLAN) << 29;
+
+		atl_write(hw, ATL_RX_RING_BUF_SIZE(ring),
+			fwd_ring->buf_size / 1024);
+		atl_write(hw, ATL_RX_RING_THRESH(ring),
+			8 << 0x10 | 24 << 0x18);
+		atl_write(hw, ATL_RX_RING_TAIL(ring), ring->size - 1);
+		atl_write(hw, ATL_RX_RING_CTL(ring), ctrl);
+
+		if (lxo_bit)
+			atl_write_bits(hw, ATL_RX_LRO_PKT_LIM(idx),
+				(idx & 7) * 4, 2, 3);
+
+		atl_write_bit(hw, ATL_RX_LRO_CTRL1, idx, lxo_bit);
+		atl_write_bit(hw, ATL_INTR_RSC_EN, idx, lxo_bit);
+	}
+
+	atl_fwd_update_im(fwd_ring);
+}
+
+void atl_fwd_release_ring(struct atl_fwd_ring *ring)
+{
+	struct atl_nic *nic = ring->nic;
+	int idx = ring->idx;
+	int dir_tx = atl_fwd_ring_tx(ring);
+	struct atl_fwd *fwd = &nic->fwd;
+	unsigned long *map = &fwd->ring_map[dir_tx];
+	struct atl_fwd_ring **rings = fwd->rings[dir_tx];
+
+	atl_fwd_disable_ring(ring);
+
+	if (ring->evt) {
+		atl_fwd_disable_event(ring->evt);
+		atl_fwd_release_event(ring->evt);
+	}
+
+	__clear_bit(idx, map);
+	rings[idx - ATL_FWD_RING_BASE] = NULL;
+	atl_fwd_free_bufs(ring);
+	atl_free_descs(nic, &ring->hw);
+	kfree(ring);
+}
+EXPORT_SYMBOL(atl_fwd_release_ring);
+
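+/* Addresses returned by dma_alloc_coherent() may be direct-mapped or
+ * vmalloc'ed depending on the platform; resolve either to a physical
+ * address for the offload engine. */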
+static phys_addr_t atl_dma_coherent_virt_to_phys(void *vaddr)
+{
+	if (is_vmalloc_addr(vaddr))
+		return page_to_phys(vmalloc_to_page(vaddr));
+	else
+		return virt_to_phys(vaddr);
+}
+
+static unsigned int atl_fwd_rx_mod_max = 25, atl_fwd_rx_mod_min = 15,
+	atl_fwd_tx_mod_max = 25, atl_fwd_tx_mod_min = 15;
+atl_module_param(fwd_rx_mod_max, uint, 0644);
+atl_module_param(fwd_rx_mod_min, uint, 0644);
+atl_module_param(fwd_tx_mod_max, uint, 0644);
+atl_module_param(fwd_tx_mod_min, uint, 0644);
+
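+/* Hypothetical offload-engine caller, for illustration only (the
+ * sizes are arbitrary values satisfying the checks below):
+ *
+ *	ring = atl_fwd_request_ring(ndev, ATL_FWR_TX | ATL_FWR_ALLOC_BUFS,
+ *		512, 2048, 0);
+ *	if (IS_ERR(ring))
+ *		return PTR_ERR(ring);
+ *	atl_fwd_enable_ring(ring);
+ */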
+struct atl_fwd_ring *atl_fwd_request_ring(struct net_device *ndev,
+	int flags, int ring_size, int buf_size, int page_order)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_fwd *fwd = &nic->fwd;
+	int dir_tx = !!(flags & ATL_FWR_TX);
+	unsigned long *map = &fwd->ring_map[dir_tx];
+	struct atl_fwd_ring **rings = fwd->rings[dir_tx], *ring;
+	int ret = -ENOMEM;
+	int idx;
+
+	if (ring_size & 7 || ring_size > ATL_MAX_RING_SIZE) {
+		atl_nic_err("%s: bad ring size %d, must be no more than %d "
+			"and a multiple of 8\n", __func__, ring_size,
+			ATL_MAX_RING_SIZE);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (buf_size & 1023 || buf_size > 16 * 1024) {
+		atl_nic_err("%s: bad buffer size %d, must be no more than 16k "
+			"and a multiple of 1024\n",
+			__func__, buf_size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	idx = find_next_zero_bit(map, ATL_FWD_RING_BASE + ATL_NUM_FWD_RINGS,
+		ATL_FWD_RING_BASE);
+	if (idx >= ATL_FWD_RING_BASE + ATL_NUM_FWD_RINGS)
+		return ERR_PTR(ret);
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		return ERR_PTR(ret);
+
+	ring->nic = nic;
+	ring->idx = idx;
+	ring->flags = flags;
+	ring->hw.size = ring_size;
+	ring->buf_size = buf_size;
+
+	ret = atl_alloc_descs(nic, &ring->hw);
+	if (ret)
+		goto free_ring;
+
+	ring->hw.reg_base = dir_tx ? ATL_TX_RING(idx) : ATL_RX_RING(idx);
+
+	ret = atl_fwd_alloc_bufs(ring, page_order);
+	if (ret)
+		goto free_descs;
+
+	__set_bit(idx, map);
+	rings[idx - ATL_FWD_RING_BASE] = ring;
+
+	if (dir_tx) {
+		ring->intr_mod_max = atl_fwd_tx_mod_max;
+		ring->intr_mod_min = atl_fwd_tx_mod_min;
+	} else {
+		ring->intr_mod_max = atl_fwd_rx_mod_max;
+		ring->intr_mod_min = atl_fwd_rx_mod_min;
+	}
+
+	ring->desc_paddr = atl_dma_coherent_virt_to_phys(ring->hw.descs);
+
+	atl_fwd_init_ring(ring);
+	return ring;
+
+free_descs:
+	atl_free_descs(nic, &ring->hw);
+
+free_ring:
+	kfree(ring);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(atl_fwd_request_ring);
+
+int atl_fwd_set_ring_intr_mod(struct atl_fwd_ring *ring, int min, int max)
+{
+	if (atl_fwd_ring_tx(ring) && ring->evt &&
+		ring->evt->flags & ATL_FWD_EVT_TXWB) {
+		struct atl_nic *nic = ring->nic;
+
+		atl_nic_err("%s: Interrupt moderation not supported for head pointer writeback events\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (min >= 0)
+		ring->intr_mod_min = min;
+
+	if (max >= 0)
+		ring->intr_mod_max = max;
+
+	atl_fwd_update_im(ring);
+	return 0;
+}
+EXPORT_SYMBOL(atl_fwd_set_ring_intr_mod);
+
+void atl_fwd_release_rings(struct atl_nic *nic)
+{
+	struct atl_fwd_ring **rings = nic->fwd.rings[0];
+	int i;
+
+	for (i = 0; i < ATL_NUM_FWD_RINGS * 2; i++)
+		if (rings[i])
+			atl_fwd_release_ring(rings[i]);
+}
+
+static void atl_fwd_reset_ring(struct atl_fwd_ring *fwd_ring)
+{
+	struct atl_hw *hw = &fwd_ring->nic->hw;
+	struct atl_hw_ring *ring = &fwd_ring->hw;
+
+	atl_write(hw, ATL_RING_CTL(ring), BIT(19));
+	udelay(10);
+	atl_write(hw, ATL_RING_CTL(ring), 0);
+}
+
+int atl_fwd_enable_ring(struct atl_fwd_ring *ring)
+{
+	struct atl_hw *hw = &ring->nic->hw;
+
+	atl_set_bits(hw, ATL_RING_CTL(&ring->hw), BIT(31));
+	ring->state |= ATL_FWR_ST_ENABLED;
+
+	return 0;
+}
+EXPORT_SYMBOL(atl_fwd_enable_ring);
+
+void atl_fwd_disable_ring(struct atl_fwd_ring *ring)
+{
+	if (!(ring->state & ATL_FWR_ST_ENABLED))
+		return;
+
+	atl_fwd_reset_ring(ring);
+	atl_fwd_init_ring(ring);
+	ring->state &= ~ATL_FWR_ST_ENABLED;
+}
+EXPORT_SYMBOL(atl_fwd_disable_ring);
+
+static void __iomem *atl_msix_bar(struct atl_nic *nic)
+{
+	struct pci_dev *pdev = nic->hw.pdev;
+	struct msi_desc *msi;
+
+	if (!pdev->msix_enabled)
+		return NULL;
+
+	msi = list_first_entry(dev_to_msi_list(&pdev->dev),
+		struct msi_desc, list);
+	return msi->mask_base;
+}
+
+static int atl_fwd_set_msix_vec(struct atl_nic *nic, struct atl_fwd_event *evt)
+{
+	int idx = evt->idx;
+	uint64_t addr = evt->msi_addr;
+	uint32_t data = evt->msi_data;
+	uint32_t ctrl;
+	void __iomem *desc = atl_msix_bar(nic);
+
+	if (!desc)
+		return -EIO;
+
+	desc += idx * PCI_MSIX_ENTRY_SIZE;
+
+	/* MSI-X table updates must be atomic, so mask first */
+	ctrl = readl(desc + PCI_MSIX_ENTRY_VECTOR_CTRL);
+	writel(ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT,
+		desc + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+	/* Program the vector */
+	writel(addr & (BIT_ULL(32) - 1), desc + PCI_MSIX_ENTRY_LOWER_ADDR);
+	writel(addr >> 32, desc + PCI_MSIX_ENTRY_UPPER_ADDR);
+	writel(data, desc + PCI_MSIX_ENTRY_DATA);
+
+	/* Unmask */
+	writel(ctrl & ~PCI_MSIX_ENTRY_CTRL_MASKBIT,
+		desc + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+	return 0;
+}
+
+void atl_fwd_release_event(struct atl_fwd_event *evt)
+{
+	struct atl_fwd_ring *ring = evt->ring;
+	struct atl_nic *nic = ring->nic;
+	unsigned long *map = &nic->fwd.msi_map;
+	int idx = evt->idx;
+
+	if (ring->evt != evt) {
+		atl_nic_err("%s: attempt to release unset event\n", __func__);
+		return;
+	}
+
+	atl_fwd_disable_event(evt);
+
+	if (evt->flags & ATL_FWD_EVT_TXWB)
+		return;
+
+	__clear_bit(idx, map);
+	atl_set_intr_bits(&nic->hw, ring->idx,
+		atl_fwd_ring_tx(ring) ? -1 : ATL_NUM_MSI_VECS,
+		atl_fwd_ring_tx(ring) ? ATL_NUM_MSI_VECS : -1);
+}
+EXPORT_SYMBOL(atl_fwd_release_event);
+
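+/* Attach a notification event to a ring: either a Tx head-pointer
+ * writeback address (ATL_FWD_EVT_TXWB, Tx rings only) or a dedicated
+ * MSI-X table entry programmed from @evt->msi_addr / @evt->msi_data. */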
+int atl_fwd_request_event(struct atl_fwd_event *evt)
+{
+	struct atl_fwd_ring *ring = evt->ring;
+	int dir_tx = atl_fwd_ring_tx(ring);
+	struct atl_nic *nic = ring->nic;
+	struct atl_hw *hw = &nic->hw;
+	unsigned long *map = &nic->fwd.msi_map;
+	bool tx_wb = !!(evt->flags & ATL_FWD_EVT_TXWB);
+	int idx;
+	int ret;
+
+	if (ring->evt) {
+		atl_nic_err("%s: event already set for %s ring %d\n",
+			__func__, atl_fwd_dir_str(ring), ring->idx);
+		return -EEXIST;
+	}
+
+	if (!tx_wb && !(nic->flags & ATL_FL_MULTIPLE_VECTORS)) {
+		atl_nic_err("%s: MSI-X interrupts are disabled\n", __func__);
+		return -EINVAL;
+	}
+
+	if (tx_wb && !atl_fwd_ring_tx(ring)) {
+		atl_nic_err("%s: head pointer writeback events supported "
+			"on Tx rings only\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((evt->flags & (ATL_FWD_EVT_TXWB | ATL_FWD_EVT_AUTOMASK)) ==
+		(ATL_FWD_EVT_TXWB | ATL_FWD_EVT_AUTOMASK)) {
+		atl_nic_err("%s: event automasking supported "
+			"for MSI events only\n", __func__);
+		return -EINVAL;
+	}
+
+	ring->evt = evt;
+
+	if (tx_wb) {
+		struct atl_hw_ring *hwring = &ring->hw;
+
+		atl_write(hw, ATL_TX_RING_HEAD_WB_LSW(hwring),
+			evt->tx_head_wrb);
+		atl_write(hw, ATL_TX_RING_HEAD_WB_MSW(hwring),
+			evt->tx_head_wrb >> 32);
+		return 0;
+	}
+
+	idx = find_next_zero_bit(map, ATL_NUM_MSI_VECS, ATL_FWD_MSI_BASE);
+	if (idx >= ATL_NUM_MSI_VECS) {
+		atl_nic_err("%s: no MSI vectors left\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	evt->idx = idx;
+
+	ret = atl_fwd_set_msix_vec(nic, evt);
+	if (ret)
+		goto fail;
+
+	__set_bit(idx, map);
+
+	atl_set_intr_bits(&nic->hw, ring->idx,
+		dir_tx ? -1 : idx,
+		dir_tx ? idx : -1);
+
+	atl_write_bit(hw, ATL_INTR_AUTO_CLEAR, idx, 1);
+	atl_write_bit(hw, ATL_INTR_AUTO_MASK, idx,
+		!!(evt->flags & ATL_FWD_EVT_AUTOMASK));
+
+	return 0;
+
+fail:
+	ring->evt = NULL;
+	return ret;
+}
+EXPORT_SYMBOL(atl_fwd_request_event);
+
+int atl_fwd_enable_event(struct atl_fwd_event *evt)
+{
+	struct atl_fwd_ring *ring = evt->ring;
+	struct atl_hw *hw = &ring->nic->hw;
+
+	if (evt->flags & ATL_FWD_EVT_TXWB) {
+		if (ring->state & ATL_FWR_ST_ENABLED)
+			return -EINVAL;
+
+		atl_write_bit(hw, ATL_TX_RING_CTL(&ring->hw), 28, 1);
+		return 0;
+	}
+
+	atl_intr_enable(hw, BIT(evt->idx));
+	return 0;
+}
+EXPORT_SYMBOL(atl_fwd_enable_event);
+
+int atl_fwd_disable_event(struct atl_fwd_event *evt)
+{
+	struct atl_fwd_ring *ring = evt->ring;
+	struct atl_hw *hw = &ring->nic->hw;
+
+	if (evt->flags & ATL_FWD_EVT_TXWB) {
+		if (ring->state & ATL_FWR_ST_ENABLED)
+			return -EINVAL;
+
+		atl_write_bit(hw, ATL_TX_RING_CTL(&ring->hw), 28, 0);
+		return 0;
+	}
+
+	atl_intr_disable(hw, BIT(evt->idx));
+	return 0;
+}
+EXPORT_SYMBOL(atl_fwd_disable_event);
+
+int atl_fwd_receive_skb(struct net_device *ndev, struct sk_buff *skb)
+{
+	skb->protocol = eth_type_trans(skb, ndev);
+	return netif_rx(skb);
+}
+EXPORT_SYMBOL(atl_fwd_receive_skb);
+
+int atl_fwd_transmit_skb(struct net_device *ndev, struct sk_buff *skb)
+{
+	skb->dev = ndev;
+	return dev_queue_xmit(skb);
+}
+EXPORT_SYMBOL(atl_fwd_transmit_skb);
+
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.h
new file mode 100644
index 0000000..a6b2658
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.h
@@ -0,0 +1,300 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2018 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_FWD_H_
+#define _ATL_FWD_H_
+
+#include "atl_common.h"
+
+struct atl_fwd_event;
+
+struct atl_fwd_buf_page {
+	struct page *page;
+	dma_addr_t daddr;
+};
+
+/**
+ *	atl_fwd_rxbufs - offload engine's ring's Rx buffers
+ *
+ *	Buffers are allocated by the driver when a ring is created
+ *
+ *	The entire buffer space for the ring may optionally be
+ *	allocated as a single physically-contiguous block.
+ *
+ *	Descriptors are overwritten with the write-back descriptor
+ *	format on Rx and optionally on Tx. To simplify Rx descriptor
+ *	refill by the offload engine, vectors containing virtual addresses and
+ *	DMA-addresses of each buffer are provided in @vaddr_vec and
+ *	@daddr_vec respectively if @ATL_FWR_WANT_BUF_VECS flag is set
+ *	on @atl_fwd_request_ring().
+ *
+ *	If @ATL_FWR_WANT_BUF_VECS is not set, @daddr_vec_base contains
+ *	the DMA address of the first buffer page and @vaddr_vec
+ *	contains its virtual address.
+ *
+ *	@daddr_vec_base:	DMA address of the base of the @daddr_vec
+ *	@daddr_vec:		A vector of buffers' DMA addresses
+ *	@vaddr_vec:		A vector of buffers' virtual addresses
+ *				or first buffer's virtual address
+ *				depending on ring flags
+ *	@paddr:			Physical address of the first (or
+ *				only) buffer page
+ */
+struct atl_fwd_bufs {
+	dma_addr_t daddr_vec_base;
+	dma_addr_t *daddr_vec;
+	void **vaddr_vec;
+	phys_addr_t paddr;
+
+	/* The following is not part of the API and subject to change */
+	int num_pages;
+	int order;
+	struct atl_fwd_buf_page bpgs[0];
+};
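+
+/*
+ * Illustrative sketch (not part of the driver): with
+ * @ATL_FWR_WANT_BUF_VECS set, an offload engine could walk the
+ * per-buffer vectors when handing Rx buffers to its hardware:
+ *
+ *	int i;
+ *
+ *	for (i = 0; i < ring->hw.size; i++) {
+ *		dma_addr_t daddr = ring->bufs->daddr_vec[i];
+ *		void *vaddr = ring->bufs->vaddr_vec[i];
+ *
+ *		engine_post_rx_buf(daddr, vaddr);
+ *	}
+ *
+ * engine_post_rx_buf() is a hypothetical engine callback, and one
+ * buffer per descriptor is assumed.
+ */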
+
+union atl_desc;
+
+/**
+ * 	atl_hw_ring - low-level descriptor ring structure
+ *
+ * 	@descs:		Pointer to the descriptor ring
+ * 	@size:		Number of descriptors in the ring
+ * 	@reg_base:	Offset of ring's register block from start of
+ * 			BAR 0
+ * 	@daddr:		DMA address of the ring
+ */
+/* atl_hw_ring defined in "atl_hw.h" */
+
+/**
+ *	atl_fwd_ring - Offload engine-controlled ring
+ *
+ *	Buffer space is allocated by the driver on ring creation.
+ *
+ *	@hw:    	Low-level ring information
+ *	@evt:		Ring's event, either an MSI-X vector (either
+ *			Tx or Rx) or head pointer writeback address
+ *			(Tx ring only). NULL on ring allocation, set
+ *			by atl_fwd_request_event()
+ *	@bufs:		Ring's buffers. Allocated only if
+ *			@ATL_FWR_ALLOC_BUFS flag is set on ring
+ *			request.
+ *	@nic:		struct atl_nic backreference
+ *	@idx:		Ring index
+ *	@desc_paddr:	Physical address of the descriptor ring
+ */
+struct atl_fwd_ring {
+	struct atl_hw_ring hw;
+	struct atl_fwd_event *evt;
+	struct atl_fwd_bufs *bufs;
+	struct atl_nic *nic;
+	int idx;
+	phys_addr_t desc_paddr;
+
+	/* The following is not part of the API and subject to change */
+	unsigned int flags;
+	unsigned long state;
+	int buf_size;
+	unsigned intr_mod_min;
+	unsigned intr_mod_max;
+};
+
+enum atl_fwd_event_flags {
+	ATL_FWD_EVT_TXWB = BIT(0), /* Event type: 0 for MSI, 1 for Tx
+				    * head WB */
+	ATL_FWD_EVT_AUTOMASK = BIT(1), /* Disable event after
+					* raising, MSI only. */
+};
+
+/**
+ * 	atl_fwd_event - Ring's notification event
+ *
+ * 	@flags:		Event type and flags
+ * 	@ring:		Ring backreference
+ * 	@msi_addr:	MSI message address
+ * 	@msi_data:	MSI message data
+ * 	@idx:		MSI index (0 .. 31)
+ * 	@tx_head_wrb:	Tx head writeback location
+ */
+struct atl_fwd_event {
+	enum atl_fwd_event_flags flags;
+	struct atl_fwd_ring *ring;
+	union {
+		struct {
+			dma_addr_t msi_addr;
+			uint32_t msi_data;
+			int idx;
+		};
+		dma_addr_t tx_head_wrb;
+	};
+};
+
+enum atl_fwd_ring_flags {
+	ATL_FWR_TX = BIT(0),	/* Direction: 0 for Rx, 1 for Tx */
+	ATL_FWR_VLAN = BIT(1),	/* Enable VLAN tag stripping / insertion */
+	ATL_FWR_LXO = BIT(2),	/* Enable LRO / LSO */
+	ATL_FWR_ALLOC_BUFS = BIT(3), /* Allocate buffers */
+	ATL_FWR_CONTIG_BUFS = BIT(4), /* Alloc buffers as physically
+				       * contiguous. May fail if
+				       * total buffer space required
+				       * is larger than a max-order
+				       * compound page. */
+	ATL_FWR_WANT_BUF_VECS = BIT(5), /* Alloc and fill per-buffer
+					 * DMA and virt address
+					 * vectors. If unset, first
+					 * buffer's daddr and vaddr
+					 * are provided in ring's
+					 * @daddr_vec_base and @vaddr_vec */
+};
+
+/**
+ * atl_fwd_request_ring() - Create a ring for an offload engine
+ *
+ * 	@ndev:		network device
+ * 	@flags:		ring flags
+ * 	@ring_size:	number of descriptors
+ * 	@buf_size:	individual buffer's size
+ * 	@page_order:	page order to use when @ATL_FWR_CONTIG_BUFS is
+ * 			not set
+ *
+ * atl_fwd_request_ring() creates a ring for an offload engine,
+ * allocates buffer memory if @ATL_FWR_ALLOC_BUFS flag is set,
+ * initializes ring's registers and fills the address fields in
+ * descriptors. Ring is inactive until explicitly enabled via
+ * atl_fwd_enable_ring().
+ *
+ * Buffers can be allocated either as a single physically-contiguous
+ * compound page, or as a sequence of compound pages of @page_order
+ * order. In the latter case, depending on the requested buffer size,
+ * tweaking the page order allows buffers to be packed into buffer
+ * pages with less wasted space.
+ *
+ * Returns the ring pointer on success, ERR_PTR(error code) on failure
+ */
+struct atl_fwd_ring *atl_fwd_request_ring(struct net_device *ndev,
+	int flags, int ring_size, int buf_size, int page_order);
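+
+/*
+ * Example (an illustrative sketch, not a prescribed usage): a
+ * hypothetical offload engine requesting a 512-descriptor Tx ring
+ * with driver-allocated 2 KB buffers and per-buffer address vectors;
+ * the sizes are arbitrary:
+ *
+ *	struct atl_fwd_ring *ring;
+ *
+ *	ring = atl_fwd_request_ring(ndev,
+ *		ATL_FWR_TX | ATL_FWR_ALLOC_BUFS | ATL_FWR_WANT_BUF_VECS,
+ *		512, 2048, 0);
+ *	if (IS_ERR(ring))
+ *		return PTR_ERR(ring);
+ */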
+
+/**
+ * atl_fwd_release_ring() - Free offload engine's ring
+ *
+ * 	@ring:	ring to be freed
+ *
+ * Stops the ring, frees buffers if they were allocated, disables and
+ * releases ring's event if non-NULL, and frees the ring.
+ */
+void atl_fwd_release_ring(struct atl_fwd_ring *ring);
+
+/**
+ * atl_fwd_set_ring_intr_mod() - Set ring's interrupt moderation
+ * delays
+ *
+ * 	@ring:	ring
+ * 	@min:	min delay
+ * 	@max:	max delay
+ *
+ * Each ring has two configurable interrupt moderation timers. When an
+ * interrupt condition occurs (write-back of the final descriptor of a
+ * packet on receive, or writeback on a transmit descriptor with WB
+ * bit set), the min timer is restarted unconditionally and the max
+ * timer is started only if it's not already running. When either
+ * timer expires, the interrupt is signalled.
+ *
+ * Thus if a single interrupt event occurs it will be subjected to min
+ * delay. If subsequent events keep occurring with intervals less than
+ * min_delay between each other, the interrupt will be triggered
+ * max_delay after the initial event.
+ *
+ * When called with negative @min or @max, the corresponding setting
+ * is left unchanged.
+ *
+ * Interrupt moderation is only supported for MSI-X vectors, not head
+ * pointer writeback events.
+ *
+ * Returns 0 on success or -EINVAL on attempt to set moderation delays
+ * for a ring with attached Tx WB event.
+ */
+int atl_fwd_set_ring_intr_mod(struct atl_fwd_ring *ring, int min, int max);
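+
+/*
+ * Worked example: with min = 20 and max = 60 (in whatever units the
+ * moderation registers are programmed with), an isolated event
+ * raises the interrupt 20 units after it occurs; a burst of events
+ * spaced closer than 20 units keeps restarting the min timer, so the
+ * interrupt fires 60 units after the first event of the burst. The
+ * concrete values are illustrative only.
+ */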
+
+/**
+ * atl_fwd_enable_ring() - Enable offload engine's ring
+ *
+ * 	@ring: ring to be enabled
+ *
+ * Starts the ring. Returns 0 on success or negative error code.
+ */
+int atl_fwd_enable_ring(struct atl_fwd_ring *ring);
+/**
+ * atl_fwd_disable_ring() - Disable offload engine's ring
+ *
+ * 	@ring: ring to be disabled
+ *
+ * Stops and resets the ring. On the next ring enable, the head and
+ * tail pointers will be zero.
+ */
+void atl_fwd_disable_ring(struct atl_fwd_ring *ring);
+
+/**
+ * atl_fwd_request_event() - Create and attach a ring notification
+ * event
+ *
+ * 	@evt:		event structure
+ *
+ * Caller must allocate a struct atl_fwd_event and fill the @flags,
+ * @ring and either @tx_head_wrb or @msi_addr and @msi_data depending
+ * on the type bit in @flags. Event is created in disabled state.
+ *
+ * For an MSI event type, an MSI vector table slot is
+ * allocated and programmed, and its index is saved in @evt->idx.
+ *
+ * @evt is then attached to the ring.
+ *
+ * Returns 0 on success or negative error code.
+ */
+int atl_fwd_request_event(struct atl_fwd_event *evt);
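+
+/*
+ * Example (an illustrative sketch): requesting an auto-masked MSI
+ * event for a ring. my_msi_addr and my_msi_data stand for the
+ * engine's own MSI message setup and are hypothetical; a real engine
+ * would embed the struct in its own state, since it stays attached
+ * to the ring:
+ *
+ *	struct atl_fwd_event evt = {
+ *		.flags = ATL_FWD_EVT_AUTOMASK,
+ *		.ring = ring,
+ *		.msi_addr = my_msi_addr,
+ *		.msi_data = my_msi_data,
+ *	};
+ *	int ret = atl_fwd_request_event(&evt);
+ *
+ *	if (ret)
+ *		return ret;
+ *
+ * On success evt.idx holds the allocated MSI vector index.
+ */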
+
+/**
+ * atl_fwd_release_event() - Release a ring notification event
+ *
+ * 	@evt:		event structure
+ *
+ * Disables the event if enabled, frees the MSI vector for an MSI-type
+ * event and detaches @evt from the ring. The @evt structure itself is
+ * not freed.
+ */
+void atl_fwd_release_event(struct atl_fwd_event *evt);
+
+/**
+ * atl_fwd_enable_event() - Enable a ring event
+ *
+ * 	@evt:		event structure
+ *
+ * Enables the event.
+ *
+ * Returns 0 on success or negative error code.
+ */
+int atl_fwd_enable_event(struct atl_fwd_event *evt);
+
+/**
+ * atl_fwd_disable_event() - Disable a ring event
+ *
+ * 	@evt:		event structure
+ *
+ * Disables the event.
+ *
+ * Returns 0 on success or negative error code.
+ */
+int atl_fwd_disable_event(struct atl_fwd_event *evt);
+
+int atl_fwd_receive_skb(struct net_device *ndev, struct sk_buff *skb);
+int atl_fwd_transmit_skb(struct net_device *ndev, struct sk_buff *skb);
+
+enum atl_fwd_ring_state {
+	ATL_FWR_ST_ENABLED = BIT(0),
+};
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.c
new file mode 100644
index 0000000..1afd7b7
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.c
@@ -0,0 +1,1042 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+
+#include "atl_common.h"
+#include "atl_hw.h"
+#include "atl_ring.h"
+
+struct atl_board_info {
+	unsigned int link_mask;
+};
+
+static struct atl_board_info atl_boards[] = {
+	[ATL_UNKNOWN] = {
+		.link_mask = 0x1f,
+	},
+	[ATL_AQC107] = {
+		.link_mask = 0x1f,
+	},
+	[ATL_AQC108] = {
+		.link_mask = 0xf,
+	},
+	[ATL_AQC109] = {
+		.link_mask = 7,
+	},
+	[ATL_AQC100] = {
+		.link_mask = 0x1f,
+	},
+};
+
+static void atl_unplugged(struct atl_hw *hw)
+{
+	if (!hw->regs)
+		return;
+	hw->regs = NULL;
+	dev_err(&hw->pdev->dev, "Device removed\n");
+}
+
+void atl_check_unplug(struct atl_hw *hw, uint32_t addr)
+{
+	uint32_t val;
+
+	if (addr == ATL_GLOBAL_MIF_ID) {
+		atl_unplugged(hw);
+		return;
+	}
+
+	val = atl_read(hw, ATL_GLOBAL_MIF_ID);
+	if (val == 0xffffffff)
+		atl_unplugged(hw);
+}
+
+int atl_read_mcp_mem(struct atl_hw *hw, uint32_t mcp_addr, void *host_addr,
+		      unsigned int size)
+{
+	uint32_t *addr = (uint32_t *)host_addr;
+
+	size = (size + 3) & ~3u;
+	atl_write(hw, ATL_GLOBAL_MBOX_ADDR, mcp_addr);
+	while (size) {
+		uint32_t next;
+
+		atl_write(hw, ATL_GLOBAL_MBOX_CTRL, 0x8000);
+
+		busy_wait(100, udelay(10), next,
+			  atl_read(hw, ATL_GLOBAL_MBOX_ADDR), next == mcp_addr);
+		if (next == mcp_addr) {
+			atl_dev_err("mcp mem read timed out (%u remaining)\n",
+				    size);
+			return -EIO;
+		}
+		*addr = atl_read(hw, ATL_GLOBAL_MBOX_DATA);
+		mcp_addr += 4;
+		addr++;
+		size -= 4;
+	}
+	return 0;
+}
+
+
+static inline void atl_glb_soft_reset(struct atl_hw *hw)
+{
+	atl_write_bit(hw, ATL_GLOBAL_STD_CTRL, 14, 0);
+	atl_write_bit(hw, ATL_GLOBAL_STD_CTRL, 15, 1);
+}
+
+static inline void atl_glb_soft_reset_full(struct atl_hw *hw)
+{
+	atl_write_bit(hw, ATL_TX_CTRL1, 29, 0);
+	atl_write_bit(hw, ATL_RX_CTRL1, 29, 0);
+	atl_write_bit(hw, ATL_INTR_CTRL, 29, 0);
+	atl_write_bit(hw, ATL_MPI_CTRL1, 29, 0);
+	atl_glb_soft_reset(hw);
+}
+
+static int atl_hw_reset_nonrbl(struct atl_hw *hw)
+{
+	uint32_t tries;
+	uint32_t reg = atl_read(hw, ATL_GLOBAL_DAISY_CHAIN_STS1);
+
+	bool daisychain_running = (reg & 0x30) != 0x30;
+
+	if (daisychain_running)
+		atl_dev_dbg("AQDBG: daisychain running (0x18: %#x)\n",
+			    atl_read(hw, ATL_GLOBAL_FW_IMAGE_ID));
+
+	atl_write(hw, 0x404, 0x40e1);
+	mdelay(50);
+
+	atl_write(hw, 0x534, 0xa0);
+	atl_write(hw, 0x100, 0x9f);
+	atl_write(hw, 0x100, 0x809f);
+	mdelay(50);
+
+	atl_glb_soft_reset(hw);
+
+	atl_write(hw, 0x404, 0x80e0);
+	atl_write(hw, 0x32a8, 0);
+	atl_write(hw, 0x520, 1);
+	mdelay(50);
+	atl_write(hw, 0x404, 0x180e0);
+
+	tries = busy_wait(10000, mdelay(1), reg, atl_read(hw, 0x704),
+		!(reg & 0x10));
+	if (!(reg & 0x10)) {
+		atl_dev_err("FLB kickstart timed out: %#x\n", reg);
+		return -EIO;
+	}
+	atl_dev_dbg("FLB kickstart took %d ms\n", tries);
+
+	atl_write(hw, 0x404, 0x80e0);
+	mdelay(50);
+	atl_write(hw, 0x3a0, 1);
+
+	atl_glb_soft_reset_full(hw);
+
+	return atl_fw_init(hw);
+}
+
+int atl_hw_reset(struct atl_hw *hw)
+{
+	uint32_t reg = atl_read(hw, ATL_MCP_SCRATCH(RBL_STS));
+	uint32_t flb_stat = atl_read(hw, ATL_GLOBAL_DAISY_CHAIN_STS1);
+	int tries = 0;
+	/* bool host_load_done = false; */
+
+	while (!reg && flb_stat == 0x6000000 && tries++ < 1000) {
+		mdelay(1);
+		reg = atl_read(hw, ATL_MCP_SCRATCH(RBL_STS));
+		flb_stat = atl_read(hw, ATL_GLOBAL_DAISY_CHAIN_STS1);
+	}
+
+	atl_dev_dbg("0x388: %#x 0x704: %#x\n", reg, flb_stat);
+	if (tries >= 1000) {
+		atl_dev_err("Timeout waiting to choose RBL or FLB path\n");
+		return -EIO;
+	}
+
+	if (!reg)
+		return atl_hw_reset_nonrbl(hw);
+
+	atl_write(hw, 0x404, 0x40e1);
+	atl_write(hw, 0x3a0, 1);
+	atl_write(hw, 0x32a8, 0);
+
+	atl_write(hw, ATL_MCP_SCRATCH(RBL_STS), 0xdead);
+
+	atl_glb_soft_reset_full(hw);
+
+	atl_write(hw, ATL_GLOBAL_CTRL2, 0x40e0);
+
+	for (tries = 0; tries < 10000; mdelay(1)) {
+		tries++;
+		reg = atl_read(hw, ATL_MCP_SCRATCH(RBL_STS)) & 0xffff;
+
+		if (!reg || reg == 0xdead)
+			continue;
+
+		/* if (reg != 0xf1a7) */
+			break;
+
+		/* if (host_load_done) */
+		/* 	continue; */
+
+		/* ret = atl_load_mac_fw(hw); */
+		/* if (ret) { */
+		/* 	atl_dev_err("MAC FW host load failed\n"); */
+		/* 	return ret; */
+		/* } */
+		/* host_load_done = true; */
+	}
+
+	if (reg == 0xf1a7) {
+		atl_dev_err("MAC FW Host load not supported yet\n");
+		return -EIO;
+	}
+	if (!reg || reg == 0xdead) {
+		atl_dev_err("RBL restart timeout: %#x\n", reg);
+		return -EIO;
+	}
+	atl_dev_dbg("RBL restart took %d ms result %#x\n", tries, reg);
+
+	/* if (host_load_done) { */
+	/* 	// Wait for MAC FW to decide whether it wants to reload the PHY FW */
+	/* 	busy_wait(10, mdelay(1), reg, atl_read(hw, 0x340), !(reg & (1 << 9 | 1 << 1 | 1 << 0))); */
+
+	/* 	if (reg & 1 << 9) { */
+	/* 		ret = atl_load_phy_fw(hw); */
+	/* 		if (ret) { */
+	/* 			atl_dev_err("PHY FW host load failed\n"); */
+	/* 			return ret; */
+	/* 		} */
+	/* 	} */
+	/* } */
+
+	return atl_fw_init(hw);
+}
+
+static int atl_get_mac_addr(struct atl_hw *hw, uint8_t *buf)
+{
+	uint32_t efuse_shadow_addr =
+		atl_read(hw, hw->mcp.ops->efuse_shadow_addr_reg);
+	uint8_t tmp[8];
+	int ret;
+
+	if (!efuse_shadow_addr)
+		return -EIO;
+
+	ret = atl_read_mcp_mem(hw, efuse_shadow_addr + 40 * 4, tmp, 8);
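+	/* The eFuse shadow stores the MAC as two 32-bit words; swap
+	 * them into on-wire (big-endian) byte order when assembling
+	 * the six-byte address. */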
+	*(uint32_t *)buf = htonl(*(uint32_t *)tmp);
+	*(uint16_t *)&buf[4] = (uint16_t)htonl(*(uint32_t *)&tmp[4]);
+
+	return ret;
+}
+
+int atl_hwinit(struct atl_nic *nic, enum atl_board brd_id)
+{
+	struct atl_hw *hw = &nic->hw;
+	struct atl_board_info *brd = &atl_boards[brd_id];
+	int ret;
+
+	/* Default supported speed set based on device id. */
+	hw->link_state.supported = brd->link_mask;
+
+	ret = atl_hw_reset(hw);
+
+	atl_dev_info("rev 0x%x chip 0x%x FW img 0x%x\n",
+		 atl_read(hw, ATL_GLOBAL_CHIP_REV) & 0xffff,
+		 atl_read(hw, ATL_GLOBAL_CHIP_ID) & 0xffff,
+		 atl_read(hw, ATL_GLOBAL_FW_IMAGE_ID));
+
+	if (ret)
+		return ret;
+
+	ret = atl_get_mac_addr(hw, hw->mac_addr);
+	if (ret) {
+		atl_dev_err("couldn't read MAC address\n");
+		return ret;
+	}
+
+	return hw->mcp.ops->get_link_caps(hw);
+}
+
+static void atl_rx_xoff_set(struct atl_nic *nic, bool fc)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	atl_write_bit(hw, ATL_RX_PBUF_REG2(0), 31, fc);
+}
+
+void atl_refresh_link(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+	struct atl_link_type *link, *prev_link = hw->link_state.link;
+
+	link = hw->mcp.ops->check_link(hw);
+
+	if (link) {
+		if (link != prev_link)
+			atl_nic_info("Link up: %s\n", link->name);
+		netif_carrier_on(nic->ndev);
+	} else {
+		if (link != prev_link)
+			atl_nic_info("Link down\n");
+		netif_carrier_off(nic->ndev);
+	}
+	atl_rx_xoff_set(nic, !!(hw->link_state.fc.cur & atl_fc_rx));
+}
+
+static irqreturn_t atl_link_irq(int irq, void *priv)
+{
+	struct atl_nic *nic = (struct atl_nic *)priv;
+
+	atl_schedule_work(nic);
+	atl_intr_enable(&nic->hw, BIT(0));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t atl_legacy_irq(int irq, void *priv)
+{
+	struct atl_nic *nic = priv;
+	struct atl_hw *hw = &nic->hw;
+	uint32_t mask = hw->intr_mask | atl_qvec_intr(nic->qvecs);
+	uint32_t stat;
+
+	stat = atl_read(hw, ATL_INTR_STS);
+
+	/* Mask asserted intr sources */
+	atl_intr_disable(hw, stat);
+
+	if (!(stat & mask))
+		/* Interrupt from another device on a shared int
+		 * line. As no status bits were set, nothing was
+		 * masked above, so no need to unmask anything. */
+		return IRQ_NONE;
+
+	if (likely(stat & BIT(ATL_NUM_NON_RING_IRQS)))
+		/* Only one qvec when using legacy interrupts */
+		atl_ring_irq(irq, &nic->qvecs[0].napi);
+
+	if (unlikely(stat & BIT(0)))
+		atl_link_irq(irq, nic);
+	return IRQ_HANDLED;
+}
+
+int atl_alloc_link_intr(struct atl_nic *nic)
+{
+	struct pci_dev *pdev = nic->hw.pdev;
+	int ret;
+
+	if (nic->flags & ATL_FL_MULTIPLE_VECTORS) {
+		ret = request_irq(pci_irq_vector(pdev, 0), atl_link_irq, 0,
+		nic->ndev->name, nic);
+		if (ret)
+			atl_nic_err("request MSI link vector failed: %d\n",
+				-ret);
+		return ret;
+	}
+
+	ret = request_irq(pci_irq_vector(pdev, 0), atl_legacy_irq, IRQF_SHARED,
+		nic->ndev->name, nic);
+	if (ret)
+		atl_nic_err("request legacy irq failed: %d\n", -ret);
+
+	return ret;
+}
+
+void atl_free_link_intr(struct atl_nic *nic)
+{
+	free_irq(pci_irq_vector(nic->hw.pdev, 0), nic);
+}
+
+void atl_set_uc_flt(struct atl_hw *hw, int idx, uint8_t mac_addr[ETH_ALEN])
+{
+	atl_write(hw, ATL_RX_UC_FLT_REG1(idx),
+		be32_to_cpu(*(uint32_t *)&mac_addr[2]));
+	atl_write(hw, ATL_RX_UC_FLT_REG2(idx),
+		(uint32_t)be16_to_cpu(*(uint16_t *)mac_addr) |
+		1 << 16 | 1 << 31);
+}
+
+static void atl_disable_uc_flt(struct atl_hw *hw, int idx)
+{
+	atl_write(hw, ATL_RX_UC_FLT_REG2(idx), 0);
+}
+
+void atl_set_rss_key(struct atl_hw *hw)
+{
+	int i;
+	uint32_t val;
+
+	for (i = 0; i < ATL_RSS_KEY_SIZE / 4; i++) {
+		val = swab32(((uint32_t *)hw->rss_key)[i]);
+		atl_write(hw, ATL_RX_RSS_KEY_WR_DATA, val);
+		atl_write(hw, ATL_RX_RSS_KEY_ADDR, i | BIT(5));
+		busy_wait(100, udelay(1), val,
+			atl_read(hw, ATL_RX_RSS_KEY_ADDR),
+			val & BIT(5));
+		if (val & BIT(5)) {
+			atl_dev_err("Timeout writing RSS key[%d]: %#x\n",
+				i, val);
+			return;
+		}
+	}
+}
+
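+/*
+ * The RSS redirection table is written through an indirect register
+ * interface that accepts 16 bits of data per transaction, while each
+ * table entry is only 3 bits wide. The loop below therefore packs
+ * entries into an accumulator and flushes it (BIT(4) is the write
+ * strobe/busy bit) whenever 16 or more bits have accumulated,
+ * carrying any leftover bits into the next write.
+ */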
+void atl_set_rss_tbl(struct atl_hw *hw)
+{
+	int i, shift = 0, addr = 0;
+	uint32_t val = 0, stat;
+
+	for (i = 0; i < ATL_RSS_TBL_SIZE; i++) {
+		val |= (uint32_t)(hw->rss_tbl[i]) << shift;
+		shift += 3;
+
+		if (shift < 16)
+			continue;
+
+		atl_write(hw, ATL_RX_RSS_TBL_WR_DATA, val & 0xffff);
+		atl_write(hw, ATL_RX_RSS_TBL_ADDR, addr | BIT(4));
+
+		busy_wait(100, udelay(1), stat,
+			atl_read(hw, ATL_RX_RSS_TBL_ADDR), stat & BIT(4));
+		if (stat & BIT(4)) {
+			atl_dev_err("Timeout writing RSS redir table[%d] (addr %d): %#x\n",
+				    i, addr, stat);
+			return;
+		}
+
+		shift -= 16;
+		val >>= 16;
+		addr++;
+	}
+}
+
+unsigned int atl_fwd_rx_buf_reserve = 0, atl_fwd_tx_buf_reserve = 0;
+module_param_named(fwd_tx_buf_reserve, atl_fwd_tx_buf_reserve, uint, 0444);
+module_param_named(fwd_rx_buf_reserve, atl_fwd_rx_buf_reserve, uint, 0444);
+
+void atl_start_hw_global(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	/* Enable TPO2 */
+	atl_write(hw, 0x7040, 0x10000);
+	/* Enable RPF2, filter logic 3 */
+	atl_write(hw, 0x5040, BIT(16) | (3 << 17));
+
+	/* Alloc TPB */
+	/* TC1: space for offload engine iface */
+	atl_write(hw, ATL_TX_PBUF_REG1(1), atl_fwd_tx_buf_reserve);
+	/* TC0: 160k minus TC1 size */
+	atl_write(hw, ATL_TX_PBUF_REG1(0), 160 - atl_fwd_tx_buf_reserve);
+	/* 4-TC | Enable TPB */
+	atl_set_bits(hw, ATL_TX_PBUF_CTRL1, BIT(8) | BIT(0));
+
+	/* Alloc RPB */
+	/* TC1: space for offload engine iface */
+	atl_write(hw, ATL_RX_PBUF_REG1(1), atl_fwd_rx_buf_reserve);
+	atl_write(hw, ATL_RX_PBUF_REG2(1), BIT(31) |
+		(atl_fwd_rx_buf_reserve * 32 * 66 / 100) << 16 |
+		(atl_fwd_rx_buf_reserve * 32 * 50 / 100));
+	/* TC0: 320k minus TC1 size */
+	atl_write(hw, ATL_RX_PBUF_REG1(0), 320 - atl_fwd_rx_buf_reserve);
+	atl_write(hw, ATL_RX_PBUF_REG2(0), BIT(31) |
+		((320 - atl_fwd_rx_buf_reserve) * 32 * 66 / 100) << 16 |
+		((320 - atl_fwd_rx_buf_reserve) * 32 * 50 / 100));
+	/* 4-TC | Enable RPB */
+	atl_set_bits(hw, ATL_RX_PBUF_CTRL1, BIT(8) | BIT(4) | BIT(0));
+
+	/* TPO */
+	/* Enable L3 | L4 chksum */
+	atl_set_bits(hw, ATL_TX_PO_CTRL1, 3);
+	/* TSO TCP flags bitmask first / middle */
+	atl_write(hw, ATL_TX_LSO_TCP_CTRL1, 0x0ff60ff6);
+	/* TSO TCP flags bitmask last */
+	atl_write(hw, ATL_TX_LSO_TCP_CTRL2, 0xf7f);
+
+	/* RPO */
+	/* Enable  L3 | L4 chksum */
+	atl_set_bits(hw, ATL_RX_PO_CTRL1, 3);
+	atl_write_bits(hw, ATL_RX_LRO_CTRL2, 12, 2, 0);
+	atl_write_bits(hw, ATL_RX_LRO_CTRL2, 5, 2, 0);
+	/* 10uS base, 20uS inactive timeout, 60 uS max coalescing
+	 * interval
+	 */
+	atl_write(hw, ATL_RX_LRO_TMRS, 0xc35 << 20 | 2 << 10 | 6);
+	atl_write(hw, ATL_INTR_RSC_DELAY, (atl_min_intr_delay / 2) - 1);
+
+	/* RPF */
+	/* Default RPF2 parser options */
+	atl_write(hw, ATL_RX_FLT_CTRL2, 0x0);
+	atl_set_uc_flt(hw, 0, hw->mac_addr);
+	/* BC action host */
+	atl_write_bits(hw, ATL_RX_FLT_CTRL1, 12, 3, 1);
+	/* Enable BC */
+	atl_write_bit(hw, ATL_RX_FLT_CTRL1, 0, 1);
+	/* BC thresh */
+	atl_write_bits(hw, ATL_RX_FLT_CTRL1, 16, 16, 0x1000);
+
+	/* Enable untagged packets */
+	atl_write(hw, ATL_RX_VLAN_FLT_CTRL1, 1 << 2 | 1 << 3);
+
+	/* Reprogram ethtool Rx filters */
+	atl_refresh_rxfs(nic);
+
+	atl_set_rss_key(hw);
+	/* Enable RSS | 8 queues per TC */
+	atl_write(hw, ATL_RX_RSS_CTRL, BIT(31) | 3);
+
+	/* Global interrupt block init */
+	if (nic->flags & ATL_FL_MULTIPLE_VECTORS) {
+		/* MSI or MSI-X interrupt mode */
+		uint32_t ctrl = hw->pdev->msix_enabled ? 2 : 1;
+
+		/* Enable multi-vector mode and mask autoclear
+		 * register */
+		ctrl |= BIT(2) | BIT(5);
+
+		atl_write(hw, ATL_INTR_CTRL, ctrl);
+
+		/* Enable auto-masking of link interrupt on intr generation */
+		atl_set_bits(hw, ATL_INTR_AUTO_MASK, BIT(0));
+		/* Enable status auto-clear on link intr generation */
+		atl_set_bits(hw, ATL_INTR_AUTO_CLEAR, BIT(0));
+	} else
+		/* Enable legacy INTx mode and status clear-on-read */
+		atl_write(hw, ATL_INTR_CTRL, BIT(7));
+
+	/* Map link interrupt to cause 0 */
+	atl_write(hw, ATL_INTR_GEN_INTR_MAP4, BIT(7) | (0 << 0));
+
+	atl_write(hw, ATL_TX_INTR_CTRL, BIT(4));
+	atl_write(hw, ATL_RX_INTR_CTRL, 2 << 4 | BIT(3));
+
+	/* Reset Rx/Tx on unexpected PERST# */
+	atl_write_bit(hw, 0x1000, 29, 0);
+	atl_write(hw, 0x448, 3);
+
+	/* Enable non-ring interrupts */
+	atl_intr_enable(hw, hw->intr_mask | (uint32_t)(nic->fwd.msi_map));
+}
+
+#define atl_vlan_flt_val(vid) ((uint32_t)(vid) | 1 << 16 | 1 << 31)
+
+static void atl_set_all_multi(struct atl_hw *hw, bool all_multi)
+{
+	atl_write_bit(hw, ATL_RX_MC_FLT_MSK, 14, all_multi);
+	atl_write(hw, ATL_RX_MC_FLT(0), all_multi ? 0x80010000 : 0);
+}
+
+void atl_set_rx_mode(struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	int uc_count = netdev_uc_count(ndev), mc_count = netdev_mc_count(ndev);
+	int promisc_needed = !!(ndev->flags & IFF_PROMISC);
+	int all_multi_needed = !!(ndev->flags & IFF_ALLMULTI);
+	int i = 1; /* UC filter 0 reserved for MAC address */
+	struct netdev_hw_addr *hwaddr;
+
+	if (uc_count > ATL_UC_FLT_NUM - 1)
+		promisc_needed |= 1;
+	else if (uc_count + mc_count > ATL_UC_FLT_NUM - 1)
+		all_multi_needed |= 1;
+
+	/* Enable promisc VLAN mode iff IFF_PROMISC explicitly
+	 * requested or too many VIDs registered
+	 */
+	atl_set_vlan_promisc(hw,
+		ndev->flags & IFF_PROMISC || nic->rxf_vlan.promisc_count);
+
+	atl_write_bit(hw, ATL_RX_FLT_CTRL1, 3, promisc_needed);
+	if (promisc_needed)
+		return;
+
+	netdev_for_each_uc_addr(hwaddr, ndev)
+		atl_set_uc_flt(hw, i++, hwaddr->addr);
+
+	atl_set_all_multi(hw, all_multi_needed);
+
+	if (!all_multi_needed)
+		netdev_for_each_mc_addr(hwaddr, ndev)
+			atl_set_uc_flt(hw, i++, hwaddr->addr);
+
+	while (i < ATL_UC_FLT_NUM)
+		atl_disable_uc_flt(hw, i++);
+}
+
+int atl_alloc_descs(struct atl_nic *nic, struct atl_hw_ring *ring)
+{
+	struct device *dev = &nic->hw.pdev->dev;
+
+	ring->descs = dma_alloc_coherent(dev, ring->size * sizeof(*ring->descs),
+					 &ring->daddr, GFP_KERNEL);
+
+	if (!ring->descs)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void atl_free_descs(struct atl_nic *nic, struct atl_hw_ring *ring)
+{
+	struct device *dev = &nic->hw.pdev->dev;
+
+	if (!ring->descs)
+		return;
+
+	dma_free_coherent(dev, ring->size * sizeof(*ring->descs),
+		ring->descs, ring->daddr);
+	ring->descs = NULL;
+}
+
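+/*
+ * Each ATL_INTR_RING_INTR_MAP register holds interrupt mappings for
+ * two rings: even-numbered rings use the upper byte of each 16-bit
+ * half (shift 8), odd-numbered rings the lower byte. Within a
+ * mapping the low 5 bits select the vector, BIT(7) enables the
+ * mapping, and the Tx mapping sits 16 bits above the Rx one (layout
+ * as inferred from the masks below). A negative rxbit/txbit leaves
+ * that direction untouched; a value >= ATL_NUM_MSI_VECS clears the
+ * mapping.
+ */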
+void atl_set_intr_bits(struct atl_hw *hw, int idx, int rxbit, int txbit)
+{
+	int shift = idx & 1 ? 0 : 8;
+	uint32_t clear_mask = 0;
+	uint32_t set_mask = 0;
+	uint32_t val;
+
+	if (rxbit >= 0) {
+		clear_mask |= BIT(7) | (BIT(5) - 1);
+		if (rxbit < ATL_NUM_MSI_VECS)
+			set_mask |= BIT(7) | rxbit;
+	}
+	if (txbit >= 0) {
+		clear_mask |= (BIT(7) | (BIT(5) - 1)) << 0x10;
+		if (txbit < ATL_NUM_MSI_VECS)
+			set_mask |= (BIT(7) | txbit) << 0x10;
+	}
+
+	val = atl_read(hw, ATL_INTR_RING_INTR_MAP(idx));
+	val &= ~(clear_mask << shift);
+	val |= set_mask << shift;
+	atl_write(hw, ATL_INTR_RING_INTR_MAP(idx), val);
+}
+
+void atl_set_loopback(struct atl_nic *nic, int idx, bool on)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	switch (idx) {
+	case ATL_PF_LPB_SYS_DMA:
+		atl_write_bit(hw, ATL_TX_CTRL1, 6, on);
+		atl_write_bit(hw, ATL_RX_CTRL1, 6, on);
+		break;
+	case ATL_PF_LPB_SYS_PB:
+		atl_write_bit(hw, ATL_TX_CTRL1, 7, on);
+		atl_write_bit(hw, ATL_RX_CTRL1, 8, on);
+		break;
+	/* case ATL_PF_LPB_NET_DMA: */
+	/* 	atl_write_bit(hw, ATL_TX_CTRL1, 4, on); */
+	/* 	atl_write_bit(hw, ATL_RX_CTRL1, 4, on); */
+	/* 	break; */
+	}
+}
+
+void atl_update_ntuple_flt(struct atl_nic *nic, int idx)
+{
+	struct atl_hw *hw = &nic->hw;
+	struct atl_rxf_ntuple *ntuple = &nic->rxf_ntuple;
+	uint32_t cmd = ntuple->cmd[idx];
+	int i, len = 1;
+
+	if (!(cmd & ATL_NTC_EN)) {
+		atl_write(hw, ATL_NTUPLE_CTRL(idx), cmd);
+		return;
+	}
+
+	if (cmd & ATL_NTC_V6)
+		len = 4;
+
+	for (i = idx; i < idx + len; i++) {
+		if (cmd & ATL_NTC_SA)
+			atl_write(hw, ATL_NTUPLE_SADDR(i),
+				swab32(ntuple->src_ip4[i]));
+
+		if (cmd & ATL_NTC_DA)
+			atl_write(hw, ATL_NTUPLE_DADDR(i),
+				swab32(ntuple->dst_ip4[i]));
+	}
+
+	if (cmd & ATL_NTC_SP)
+		atl_write(hw, ATL_NTUPLE_SPORT(idx),
+			swab16(ntuple->src_port[idx]));
+
+	if (cmd & ATL_NTC_DP)
+		atl_write(hw, ATL_NTUPLE_DPORT(idx),
+			swab16(ntuple->dst_port[idx]));
+
+	if (cmd & ATL_NTC_RXQ)
+		cmd |= 1 << ATL_NTC_ACT_SHIFT;
+
+	atl_write(hw, ATL_NTUPLE_CTRL(idx), cmd);
+}
+
+int atl_hwsem_get(struct atl_hw *hw, int idx)
+{
+	uint32_t val;
+
+	busy_wait(10000, udelay(1), val, atl_read(hw, ATL_MCP_SEM(idx)), !val);
+
+	if (!val)
+		return -ETIME;
+
+	return 0;
+}
+
+void atl_hwsem_put(struct atl_hw *hw, int idx)
+{
+	atl_write(hw, ATL_MCP_SEM(idx), 1);
+}
+
+static int atl_msm_wait(struct atl_hw *hw)
+{
+	uint32_t val;
+
+	busy_wait(10, udelay(1), val, atl_read(hw, ATL_MPI_MSM_ADDR),
+		val & BIT(12));
+	if (val & BIT(12))
+		return -ETIME;
+
+	return 0;
+}
+
+int __atl_msm_read(struct atl_hw *hw, uint32_t addr, uint32_t *val)
+{
+	int ret;
+
+	ret = atl_msm_wait(hw);
+	if (ret)
+		return ret;
+
+	atl_write(hw, ATL_MPI_MSM_ADDR, (addr >> 2) | BIT(9));
+	ret = atl_msm_wait(hw);
+	if (ret)
+		return ret;
+
+	*val = atl_read(hw, ATL_MPI_MSM_RD);
+	return 0;
+}
+
+int atl_msm_read(struct atl_hw *hw, uint32_t addr, uint32_t *val)
+{
+	int ret;
+
+	ret = atl_hwsem_get(hw, ATL_MCP_SEM_MSM);
+	if (ret)
+		return ret;
+
+	ret = __atl_msm_read(hw, addr, val);
+	atl_hwsem_put(hw, ATL_MCP_SEM_MSM);
+
+	return ret;
+}
+
+int __atl_msm_write(struct atl_hw *hw, uint32_t addr, uint32_t val)
+{
+	int ret;
+
+	ret = atl_msm_wait(hw);
+	if (ret)
+		return ret;
+
+	atl_write(hw, ATL_MPI_MSM_WR, val);
+	atl_write(hw, ATL_MPI_MSM_ADDR, addr | BIT(8));
+	ret = atl_msm_wait(hw);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int atl_msm_write(struct atl_hw *hw, uint32_t addr, uint32_t val)
+{
+	int ret;
+
+	ret = atl_hwsem_get(hw, ATL_MCP_SEM_MSM);
+	if (ret)
+		return ret;
+
+	ret = __atl_msm_write(hw, addr, val);
+	atl_hwsem_put(hw, ATL_MCP_SEM_MSM);
+
+	return ret;
+}
+
+static int atl_mdio_wait(struct atl_hw *hw)
+{
+	uint32_t val;
+
+	busy_wait(20, udelay(1), val, atl_read(hw, ATL_GLOBAL_MDIO_CMD),
+		val & BIT(31));
+	if (val & BIT(31))
+		return -ETIME;
+
+	return 0;
+}
+
+int atl_mdio_hwsem_get(struct atl_hw *hw)
+{
+	int ret;
+
+	ret = atl_hwsem_get(hw, ATL_MCP_SEM_MDIO);
+	if (ret)
+		return ret;
+
+	/* Enable MDIO Clock (active low) in case the MBU has
+	 * disabled it. */
+	atl_write_bit(hw, ATL_GLOBAL_MDIO_CTL, 14, 0);
+	return 0;
+}
+
+void atl_mdio_hwsem_put(struct atl_hw *hw)
+{
+	/* According to the firmware team it's OK to leave the MDIO
+	 * clock running; in fact that's what the firmware does. */
+	atl_hwsem_put(hw, ATL_MCP_SEM_MDIO);
+}
+
+static void atl_mdio_set_addr(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr)
+{
+	/* Set address */
+	atl_write(hw, ATL_GLOBAL_MDIO_ADDR, addr & (BIT(16) - 1));
+	/* Address operation | execute | prtad + mmd */
+	atl_write(hw, ATL_GLOBAL_MDIO_CMD, BIT(15) | 3 << 12 |
+		prtad << 5 | mmd);
+}
+
+int __atl_mdio_read(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t *val)
+{
+	int ret;
+
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	atl_mdio_set_addr(hw, prtad, mmd, addr);
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	/* Read operation | execute | prtad + mmd */
+	atl_write(hw, ATL_GLOBAL_MDIO_CMD, BIT(15) | 1 << 12 |
+		prtad << 5 | mmd);
+
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	*val = atl_read(hw, ATL_GLOBAL_MDIO_RDATA);
+	return 0;
+}
+
+int atl_mdio_read(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t *val)
+{
+	int ret;
+
+	ret = atl_mdio_hwsem_get(hw);
+	if (ret)
+		return ret;
+
+	ret = __atl_mdio_read(hw, prtad, mmd, addr, val);
+	atl_mdio_hwsem_put(hw);
+
+	return ret;
+}
+
+int __atl_mdio_write(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t val)
+{
+	int ret;
+
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	atl_mdio_set_addr(hw, prtad, mmd, addr);
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	atl_write(hw, ATL_GLOBAL_MDIO_WDATA, val);
+	/* Write operation | execute | prtad + mmd */
+	atl_write(hw, ATL_GLOBAL_MDIO_CMD, BIT(15) | 2 << 12 |
+		prtad << 5 | mmd);
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int atl_mdio_write(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t val)
+{
+	int ret;
+
+	ret = atl_mdio_hwsem_get(hw);
+	if (ret)
+		return ret;
+
+	ret = __atl_mdio_write(hw, prtad, mmd, addr, val);
+	atl_mdio_hwsem_put(hw);
+
+	return ret;
+}
+
+#define __READ_MSM_OR_GOTO(RET, HW, REGISTER, PVARIABLE, label) \
+	RET = __atl_msm_read(HW, REGISTER, PVARIABLE); \
+	if (RET)							\
+		goto label;
+
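+/*
+ * atl_adjust_eth_stats() adds or subtracts a base snapshot of the
+ * counters; atl_update_eth_stats() below uses it to subtract
+ * nic->stats.eth_base from freshly read values. The struct is
+ * treated as a flat array of 64-bit counters, so atl_ether_stats
+ * must contain nothing but uint64_t fields.
+ */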
+void atl_adjust_eth_stats(struct atl_ether_stats *stats,
+	struct atl_ether_stats *base, bool add)
+{
+	int i;
+	uint64_t *_stats = (uint64_t *)stats;
+	uint64_t *_base = (uint64_t *)base;
+
+	for (i = 0; i < sizeof(*stats) / sizeof(uint64_t); i++)
+		_stats[i] += add ? _base[i] : -_base[i];
+}
+
+int atl_update_eth_stats(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+	struct atl_ether_stats stats = {0};
+	uint32_t reg = 0, reg2 = 0;
+	int ret;
+
+	ret = atl_hwsem_get(hw, ATL_MCP_SEM_MSM);
+	if (ret)
+		return ret;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_TX_PAUSE, &reg, hwsem_put);
+	stats.tx_pause = reg;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_PAUSE, &reg, hwsem_put);
+	stats.rx_pause = reg;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_OCTETS_LO, &reg, hwsem_put);
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_OCTETS_HI, &reg2, hwsem_put);
+	stats.rx_ether_octets = ((uint64_t)reg2 << 32) | reg;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_PKTS_GOOD, &reg, hwsem_put);
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_ERRS, &reg2, hwsem_put);
+	stats.rx_ether_pkts = reg + reg2;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_BROADCAST, &reg, hwsem_put);
+	stats.rx_ether_broacasts = reg;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_MULTICAST, &reg, hwsem_put);
+	stats.rx_ether_multicasts = reg;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_FCS_ERRS, &reg, hwsem_put);
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_ALIGN_ERRS, &reg2, hwsem_put);
+	stats.rx_ether_crc_align_errs = reg + reg2;
+
+	stats.rx_ether_drops = atl_read(hw, ATL_RX_DMA_STATS_CNT7);
+
+	/* Capture debug counters */
+	atl_write_bit(hw, ATL_RX_RPF_DBG_CNT_CTRL, 0x1f, 1);
+
+	reg = atl_read(hw, ATL_RX_RPF_HOST_CNT_LO);
+	reg2 = atl_read(hw, ATL_RX_RPF_HOST_CNT_HI);
+	stats.rx_filter_host = ((uint64_t)reg2 << 32) | reg;
+
+	reg = atl_read(hw, ATL_RX_RPF_LOST_CNT_LO);
+	reg2 = atl_read(hw, ATL_RX_RPF_LOST_CNT_HI);
+	stats.rx_filter_lost = ((uint64_t)reg2 << 32) | reg;
+
+	spin_lock(&nic->stats_lock);
+
+	atl_adjust_eth_stats(&stats, &nic->stats.eth_base, false);
+	nic->stats.eth = stats;
+
+	spin_unlock(&nic->stats_lock);
+
+	ret = 0;
+
+hwsem_put:
+	atl_hwsem_put(hw, ATL_MCP_SEM_MSM);
+	return ret;
+}
+#undef __READ_MSM_OR_GOTO
+
+int atl_get_lpi_timer(struct atl_nic *nic, uint32_t *lpi_delay)
+{
+	struct atl_hw *hw = &nic->hw;
+	uint32_t lpi;
+	int ret = 0;
+
+	ret = atl_msm_read(hw, ATL_MSM_TX_LPI_DELAY, &lpi);
+	if (ret)
+		return ret;
+	*lpi_delay = ATL_HW_CLOCK_TO_US(lpi);
+
+	return ret;
+}
+
+static uint32_t atl_mcp_mbox_wait(struct atl_hw *hw, int loops)
+{
+	uint32_t stat;
+
+	busy_wait(loops, cpu_relax(), stat,
+		(atl_read(hw, ATL_MCP_SCRATCH(FW2_MBOX_CMD)) >> 28) & 0xf,
+		stat == 8);
+
+	return stat;
+}
+
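+/*
+ * Firmware mailbox write protocol as implemented below: each 32-bit
+ * word is placed in the FW2_MBOX_DATA scratchpad and the target
+ * offset, together with a "go" bit (BIT(31)), in FW2_MBOX_CMD. The
+ * status nibble in bits 31:28 of FW2_MBOX_CMD is then polled: 8
+ * means the firmware is still busy (in which case the MCP mailbox
+ * interrupt is raised and a longer poll follows), 4 means the word
+ * was accepted, and anything else is treated as an error.
+ */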
+int atl_write_mcp_mem(struct atl_hw *hw, uint32_t offt, void *host_addr,
+	size_t size)
+{
+	uint32_t *addr = (uint32_t *)host_addr;
+
+	while (size) {
+		uint32_t stat;
+
+		atl_write(hw, ATL_MCP_SCRATCH(FW2_MBOX_DATA), *addr++);
+		atl_write(hw, ATL_MCP_SCRATCH(FW2_MBOX_CMD), BIT(31) | offt);
+		ndelay(750);
+		stat = atl_mcp_mbox_wait(hw, 5);
+
+		if (stat == 8) {
+			/* Send MCP mbox interrupt */
+			atl_set_bits(hw, ATL_GLOBAL_CTRL2, BIT(1));
+			ndelay(1200);
+			stat = atl_mcp_mbox_wait(hw, 10000);
+		}
+
+		if (stat == 8) {
+			atl_dev_err("FW mbox timeout offt %x, remaining %zx\n",
+				offt, size);
+			return -ETIME;
+		} else if (stat != 4) {
+			atl_dev_err("FW mbox error status %x, offt %x, remaining %zx\n",
+				stat, offt, size);
+			return -EIO;
+		}
+
+		offt += 4;
+		size -= 4;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.h
new file mode 100644
index 0000000..5acc58a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.h
@@ -0,0 +1,185 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_HW_H_
+#define _ATL_HW_H_
+#include <linux/pci.h>
+#include <linux/if_ether.h>
+
+#include "atl_regs.h"
+#include "atl_fw.h"
+
+#define PCI_VENDOR_ID_AQUANTIA 0x1d6a
+
+/* Clock tick is 3.2 ns */
+#define ATL_HW_CLOCK_TO_US(clk)  ((clk) * 32 / 10000)
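+/* e.g. a raw reading of 10000 ticks is 10000 * 3.2 ns = 32 us,
+ * matching ATL_HW_CLOCK_TO_US(10000) == 32 above */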
+
+#define busy_wait(tries, wait, lvalue, fetch, cond)	\
+({							\
+	uint32_t _dummy = 0;				\
+	int i = (tries);				\
+	int orig = i;					\
+	(void)_dummy;					\
+	do {						\
+		wait;					\
+		(lvalue) = (fetch);			\
+	} while ((cond) && --i);			\
+	(orig - i);					\
+})
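+
+/*
+ * Usage sketch: poll a status bit for up to 100 iterations, waiting
+ * 1 us between reads. busy_wait() evaluates to the number of polls
+ * performed, and the lvalue keeps the last fetched value, so the
+ * condition can be re-tested to detect a timeout:
+ *
+ *	uint32_t val;
+ *
+ *	busy_wait(100, udelay(1), val, atl_read(hw, SOME_REG),
+ *		val & BIT(0));
+ *	if (val & BIT(0))
+ *		return -ETIME;
+ *
+ * SOME_REG is a placeholder, not a real register name.
+ */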
+
+enum atl_board {
+	ATL_UNKNOWN,
+	ATL_AQC107,
+	ATL_AQC108,
+	ATL_AQC109,
+	ATL_AQC100,
+};
+
+struct atl_hw {
+	uint8_t __iomem *regs;
+	struct pci_dev *pdev;
+	struct atl_link_state link_state;
+	struct {
+		uint32_t fw_rev;
+		bool poll_link;
+		struct atl_fw_ops *ops;
+		uint32_t fw_stat_addr;
+		struct mutex lock;
+	} mcp;
+	uint32_t intr_mask;
+	uint8_t mac_addr[ETH_ALEN];
+#define ATL_RSS_KEY_SIZE 40
+	uint8_t rss_key[ATL_RSS_KEY_SIZE];
+#define ATL_RSS_TBL_SIZE (1 << 6)
+	uint8_t rss_tbl[ATL_RSS_TBL_SIZE];
+};
+
+union atl_desc;
+struct atl_hw_ring {
+	union atl_desc *descs;
+	uint32_t size;
+	uint32_t reg_base;
+	dma_addr_t daddr;
+};
+
+#define offset_ptr(ptr, ring, amount)					\
+	({								\
+		uint32_t size = ((struct atl_hw_ring *)(ring))->size;	\
+									\
+		uint32_t res = (ptr) + (amount);			\
+		if ((int32_t)res < 0)					\
+			res += size;					\
+		else if (res >= size)					\
+			res -= size;					\
+		res;							\
+	})
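+
+/*
+ * offset_ptr() advances or rewinds a ring index with wraparound,
+ * e.g. offset_ptr(0, ring, -1) yields size - 1 and
+ * offset_ptr(size - 1, ring, 1) yields 0. The amount must be smaller
+ * in magnitude than the ring size for the single correction to
+ * suffice.
+ */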
+
+void atl_check_unplug(struct atl_hw *hw, uint32_t addr);
+
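+/*
+ * The MMIO accessors below tolerate surprise removal: once the
+ * device is detected as gone, hw->regs is cleared, after which reads
+ * return all-ones and writes are dropped. A read that returns
+ * 0xffffffff triggers atl_check_unplug(), which probes
+ * ATL_GLOBAL_MIF_ID to tell a legitimate all-ones register value
+ * from a vanished device.
+ */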
+static inline uint32_t atl_read(struct atl_hw *hw, uint32_t addr)
+{
+	uint8_t __iomem *base = READ_ONCE(hw->regs);
+	uint32_t val = 0xffffffff;
+
+	if (unlikely(!base))
+		return val;
+
+	val = readl(base + addr);
+	if (unlikely(val == 0xffffffff))
+		atl_check_unplug(hw, addr);
+	return val;
+}
+
+static inline void atl_write(struct atl_hw *hw, uint32_t addr, uint32_t val)
+{
+	uint8_t __iomem *base = READ_ONCE(hw->regs);
+
+	if (unlikely(!base))
+		return;
+
+	writel(val, base + addr);
+}
+
+static inline void atl_write_bits(struct atl_hw *hw, uint32_t addr,
+			     uint32_t shift, uint32_t width, uint32_t val)
+{
+	uint32_t mask = ((1u << width) - 1) << shift;
+
+	atl_write(hw, addr,
+		  (atl_read(hw, addr) & ~mask) | ((val << shift) & mask));
+}
+
+static inline void atl_write_bit(struct atl_hw *hw, uint32_t addr,
+			    uint32_t shift, uint32_t val)
+{
+	atl_write_bits(hw, addr, shift, 1, val);
+}
+
+static inline void atl_set_bits(struct atl_hw *hw, uint32_t addr,
+	uint32_t bits)
+{
+	atl_write(hw, addr, atl_read(hw, addr) | bits);
+}
+
+static inline void atl_clear_bits(struct atl_hw *hw, uint32_t addr,
+	uint32_t bits)
+{
+	atl_write(hw, addr, atl_read(hw, addr) & ~bits);
+}
+
+static inline void atl_intr_enable(struct atl_hw *hw, uint32_t mask)
+{
+	atl_write(hw, ATL_INTR_MSK_SET, mask);
+}
+
+static inline void atl_intr_disable(struct atl_hw *hw, uint32_t mask)
+{
+	atl_write(hw, ATL_INTR_MSK_CLEAR, mask);
+}
+
+static inline void atl_intr_disable_all(struct atl_hw *hw)
+{
+	atl_intr_disable(hw, 0xffffffff);
+}
+
+static inline unsigned atl_fw_major(struct atl_hw *hw)
+{
+	return (hw->mcp.fw_rev >> 24) & 0xff;
+}
+
+static inline void atl_init_rss_table(struct atl_hw *hw, int nvecs)
+{
+	int i;
+
+	for (i = 0; i < ATL_RSS_TBL_SIZE; i++)
+		hw->rss_tbl[i] = i % nvecs;
+}
+
+static inline void atl_set_vlan_promisc(struct atl_hw *hw, int promisc)
+{
+	atl_write_bit(hw, ATL_RX_VLAN_FLT_CTRL1, 1, !!promisc);
+}
+
+int atl_read_mcp_mem(struct atl_hw *hw, uint32_t mcp_addr, void *host_addr,
+	unsigned size);
+int atl_hwinit(struct atl_nic *nic, enum atl_board brd_id);
+void atl_refresh_link(struct atl_nic *nic);
+void atl_set_rss_key(struct atl_hw *hw);
+void atl_set_rss_tbl(struct atl_hw *hw);
+void atl_set_uc_flt(struct atl_hw *hw, int idx, uint8_t mac_addr[ETH_ALEN]);
+
+int atl_alloc_descs(struct atl_nic *nic, struct atl_hw_ring *ring);
+void atl_free_descs(struct atl_nic *nic, struct atl_hw_ring *ring);
+void atl_set_intr_bits(struct atl_hw *hw, int idx, int rxbit, int txbit);
+int atl_alloc_link_intr(struct atl_nic *nic);
+void atl_free_link_intr(struct atl_nic *nic);
+int atl_write_mcp_mem(struct atl_hw *hw, uint32_t offt, void *addr,
+	size_t size);
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hwmon.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hwmon.c
new file mode 100644
index 0000000..21e3ea0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hwmon.c
@@ -0,0 +1,84 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2018 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include "atl_common.h"
+#include <linux/hwmon.h>
+
+static char *atl_hwmon_labels[] = {
+	"PHY Temperature",
+};
+
+static const uint32_t atl_hwmon_temp_config[] = {
+	HWMON_T_INPUT | HWMON_T_LABEL,
+	0,
+};
+
+static const struct hwmon_channel_info atl_hwmon_temp = {
+	.type = hwmon_temp,
+	.config = atl_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *atl_hwmon_info[] = {
+	&atl_hwmon_temp,
+	NULL,
+};
+
+static umode_t atl_hwmon_is_visible(const void *p,
+	enum hwmon_sensor_types type, uint32_t attr, int channel)
+{
+	return type == hwmon_temp ? S_IRUGO : 0;
+}
+
+static int atl_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+	uint32_t attr, int channel, long *val)
+{
+	struct atl_hw *hw = dev_get_drvdata(dev);
+	int temp, ret;
+
+	if (type != hwmon_temp || attr != hwmon_temp_input)
+		return -EINVAL;
+
+	ret = hw->mcp.ops->get_phy_temperature(hw, &temp);
+	if (ret)
+		return ret;
+
+	*val = temp;
+	return 0;
+}
+
+static int atl_hwmon_read_string(struct device *dev,
+	enum hwmon_sensor_types type, u32 attr, int channel, const char **str)
+{
+	if (type != hwmon_temp || attr != hwmon_temp_label)
+		return -EINVAL;
+
+	*str = atl_hwmon_labels[channel];
+	return 0;
+}
+
+static const struct hwmon_ops atl_hwmon_ops = {
+	.is_visible = atl_hwmon_is_visible,
+	.read = atl_hwmon_read,
+	.read_string = atl_hwmon_read_string,
+};
+
+static const struct hwmon_chip_info atl_hwmon = {
+	.ops = &atl_hwmon_ops,
+	.info = atl_hwmon_info,
+};
+
+int atl_hwmon_init(struct atl_nic *nic)
+{
+	struct device *hwmon_dev;
+
+	hwmon_dev = devm_hwmon_device_register_with_info(&nic->hw.pdev->dev,
+		nic->ndev->name, &nic->hw, &atl_hwmon, NULL);
+
+	return PTR_ERR_OR_ZERO(hwmon_dev);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_main.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_main.c
new file mode 100644
index 0000000..4f76d9b
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_main.c
@@ -0,0 +1,612 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include "atl_common.h"
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+
+const char atl_driver_name[] = "atlantic-fwd";
+
+int atl_max_queues = ATL_MAX_QUEUES;
+module_param_named(max_queues, atl_max_queues, int, 0444);
+
+static unsigned int atl_rx_mod = 15, atl_tx_mod = 15;
+module_param_named(rx_mod, atl_rx_mod, uint, 0444);
+module_param_named(tx_mod, atl_tx_mod, uint, 0444);
+
+static unsigned int atl_keep_link;
+module_param_named(keep_link, atl_keep_link, uint, 0644);
+
+static void atl_link_up(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	if (hw->mcp.poll_link)
+		mod_timer(&nic->link_timer, jiffies + HZ);
+
+	hw->link_state.force_off = 0;
+	hw->mcp.ops->set_link(hw, true);
+}
+
+static int atl_do_open(struct atl_nic *nic)
+{
+	int ret;
+
+	ret = atl_start_rings(nic);
+	if (ret)
+		return ret;
+
+	if (!atl_keep_link)
+		atl_link_up(nic);
+
+	return 0;
+}
+
+static int atl_open(struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	int ret;
+
+	if (!test_bit(ATL_ST_CONFIGURED, &nic->state)) {
+		/* A previous atl_reconfigure() had failed. Try once more. */
+		ret = atl_setup_datapath(nic);
+		if (ret)
+			return ret;
+	}
+
+	ret = atl_alloc_rings(nic);
+	if (ret)
+		return ret;
+
+	ret = netif_set_real_num_tx_queues(ndev, nic->nvecs);
+	if (ret)
+		goto free_rings;
+	ret = netif_set_real_num_rx_queues(ndev, nic->nvecs);
+	if (ret)
+		goto free_rings;
+
+	ret = atl_do_open(nic);
+	if (ret)
+		goto free_rings;
+
+	netif_tx_start_all_queues(ndev);
+
+	set_bit(ATL_ST_UP, &nic->state);
+	return 0;
+
+free_rings:
+	atl_free_rings(nic);
+	return ret;
+}
+
+static void atl_link_down(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	del_timer_sync(&nic->link_timer);
+	hw->link_state.force_off = 1;
+	hw->mcp.ops->set_link(hw, true);
+	hw->link_state.link = 0;
+	netif_carrier_off(nic->ndev);
+}
+
+static void atl_do_close(struct atl_nic *nic)
+{
+	if (!atl_keep_link)
+		atl_link_down(nic);
+
+	atl_stop_rings(nic);
+}
+
+static int atl_close(struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	/* atl_close() can be called a second time if
+	 * atl_reconfigure() fails. Just return
+	 */
+	if (!test_and_clear_bit(ATL_ST_UP, &nic->state))
+		return 0;
+
+	netif_tx_stop_all_queues(ndev);
+
+	atl_do_close(nic);
+	atl_free_rings(nic);
+
+	return 0;
+}
+
+#ifndef ATL_HAVE_MINMAX_MTU
+
+static int atl_change_mtu(struct net_device *ndev, int mtu)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	if (mtu < 64 || mtu > nic->max_mtu)
+		return -EINVAL;
+
+	ndev->mtu = mtu;
+	return 0;
+}
+
+#endif
+
+static int atl_set_mac_address(struct net_device *ndev, void *priv)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	struct sockaddr *addr = priv;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	ether_addr_copy(hw->mac_addr, addr->sa_data);
+	ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+	if (netif_running(ndev))
+		atl_set_uc_flt(hw, 0, hw->mac_addr);
+
+	return 0;
+}
+
+static const struct net_device_ops atl_ndev_ops = {
+	.ndo_open = atl_open,
+	.ndo_stop = atl_close,
+	.ndo_start_xmit = atl_start_xmit,
+	.ndo_vlan_rx_add_vid = atl_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = atl_vlan_rx_kill_vid,
+	.ndo_set_rx_mode = atl_set_rx_mode,
+#ifndef ATL_HAVE_MINMAX_MTU
+	.ndo_change_mtu = atl_change_mtu,
+#endif
+	.ndo_set_features = atl_set_features,
+	.ndo_set_mac_address = atl_set_mac_address,
+#ifdef ATL_COMPAT_CAST_NDO_GET_STATS64
+	.ndo_get_stats64 = (void *)atl_get_stats64,
+#else
+	.ndo_get_stats64 = atl_get_stats64,
+#endif
+};
+
+/* RTNL lock must be held */
+int atl_reconfigure(struct atl_nic *nic)
+{
+	struct net_device *ndev = nic->ndev;
+	int was_up = netif_running(ndev);
+	int ret = 0;
+
+	if (was_up)
+		atl_close(ndev);
+
+	atl_clear_datapath(nic);
+
+	ret = atl_setup_datapath(nic);
+	if (ret)
+		goto err;
+
+	/* Number of rings might have changed, re-init RSS
+	 * redirection table.
+	 */
+	atl_init_rss_table(&nic->hw, nic->nvecs);
+
+	if (was_up) {
+		ret = atl_open(ndev);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	if (was_up)
+		dev_close(ndev);
+	return ret;
+}
+
+static struct workqueue_struct *atl_wq;
+
+void atl_schedule_work(struct atl_nic *nic)
+{
+	if (!test_and_set_bit(ATL_ST_WORK_SCHED, &nic->state))
+		queue_work(atl_wq, &nic->work);
+}
+
+static void atl_work(struct work_struct *work)
+{
+	struct atl_nic *nic = container_of(work, struct atl_nic, work);
+
+	atl_refresh_link(nic);
+	clear_bit(ATL_ST_WORK_SCHED, &nic->state);
+}
+
+static void atl_link_timer(struct timer_list *timer)
+{
+	struct atl_nic *nic =
+		container_of(timer, struct atl_nic, link_timer);
+
+	atl_schedule_work(nic);
+	mod_timer(&nic->link_timer, jiffies + HZ);
+}
+
+static const struct pci_device_id atl_pci_tbl[] = {
+	{ PCI_VDEVICE(AQUANTIA, 0x0001), ATL_UNKNOWN},
+	{ PCI_VDEVICE(AQUANTIA, 0xd107), ATL_AQC107},
+	{ PCI_VDEVICE(AQUANTIA, 0x07b1), ATL_AQC107},
+	{ PCI_VDEVICE(AQUANTIA, 0x87b1), ATL_AQC107},
+	{ PCI_VDEVICE(AQUANTIA, 0xd108), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0x08b1), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0x88b1), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0xd109), ATL_AQC109},
+	{ PCI_VDEVICE(AQUANTIA, 0x09b1), ATL_AQC109},
+	{ PCI_VDEVICE(AQUANTIA, 0x89b1), ATL_AQC109},
+	{ PCI_VDEVICE(AQUANTIA, 0xd100), ATL_AQC100},
+	{ PCI_VDEVICE(AQUANTIA, 0x00b1), ATL_AQC107},
+	{ PCI_VDEVICE(AQUANTIA, 0x80b1), ATL_AQC107},
+	{ PCI_VDEVICE(AQUANTIA, 0x11b1), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0x91b1), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0x51b1), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0x12b1), ATL_AQC109},
+	{ PCI_VDEVICE(AQUANTIA, 0x92b1), ATL_AQC109},
+	{ PCI_VDEVICE(AQUANTIA, 0x52b1), ATL_AQC109},
+	{}
+};
+
+static uint8_t atl_def_rss_key[ATL_RSS_KEY_SIZE] = {
+	0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
+	0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
+	0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
+	0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
+	0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
+};
+
+static void atl_setup_rss(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	memcpy(hw->rss_key, atl_def_rss_key, sizeof(hw->rss_key));
+
+	atl_init_rss_table(hw, nic->nvecs);
+}
+
+static int atl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int ret, pci_64 = 0;
+	struct net_device *ndev;
+	struct atl_nic *nic = NULL;
+	struct atl_hw *hw;
+	int disable_needed;
+
+	if (atl_max_queues < 1 || atl_max_queues > ATL_MAX_QUEUES) {
+		dev_err(&pdev->dev, "Bad atl_max_queues value %d, must be between 1 and %d inclusive\n",
+			 atl_max_queues, ATL_MAX_QUEUES);
+		return -EINVAL;
+	}
+
+	ret = pci_enable_device_mem(pdev);
+	if (ret)
+		return ret;
+
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+		pci_64 = 1;
+	else {
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (ret) {
+			dev_err(&pdev->dev, "Set DMA mask failed: %d\n", ret);
+			goto err_dma;
+		}
+	}
+
+	ret = pci_request_mem_regions(pdev, atl_driver_name);
+	if (ret) {
+		dev_err(&pdev->dev, "Request PCI regions failed: %d\n", ret);
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+
+	ndev = alloc_etherdev_mq(sizeof(struct atl_nic), atl_max_queues);
+	if (!ndev) {
+		ret = -ENOMEM;
+		goto err_alloc_ndev;
+	}
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	nic = netdev_priv(ndev);
+	nic->ndev = ndev;
+	nic->hw.pdev = pdev;
+	spin_lock_init(&nic->stats_lock);
+	INIT_WORK(&nic->work, atl_work);
+	mutex_init(&nic->hw.mcp.lock);
+	__set_bit(ATL_ST_ENABLED, &nic->state);
+
+	hw = &nic->hw;
+	hw->regs = ioremap(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
+	if (!hw->regs) {
+		ret = -EIO;
+		goto err_ioremap;
+	}
+
+	ret = atl_hwinit(nic, id->driver_data);
+	if (ret)
+		goto err_hwinit;
+
+	eth_platform_get_mac_address(&hw->pdev->dev, hw->mac_addr);
+	if (!is_valid_ether_addr(hw->mac_addr)) {
+		atl_dev_err("invalid MAC address: %*phC\n", ETH_ALEN,
+			    hw->mac_addr);
+		/* XXX Workaround for bad MAC addr in efuse. Maybe
+		 * switch to some predefined one later.
+		 */
+		eth_random_addr(hw->mac_addr);
+		/* ret = -EIO; */
+		/* goto err_hwinit; */
+	}
+
+	ether_addr_copy(ndev->dev_addr, hw->mac_addr);
+	atl_dev_dbg("got MAC address: %pM\n", hw->mac_addr);
+
+	nic->requested_nvecs = atl_max_queues;
+	nic->requested_tx_size = ATL_RING_SIZE;
+	nic->requested_rx_size = ATL_RING_SIZE;
+	nic->rx_intr_delay = atl_rx_mod;
+	nic->tx_intr_delay = atl_tx_mod;
+
+	ret = atl_setup_datapath(nic);
+	if (ret)
+		goto err_datapath;
+
+	atl_setup_rss(nic);
+
+	ndev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+		NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_RXHASH | NETIF_F_LRO;
+
+	ndev->vlan_features |= ndev->features;
+	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+		NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	ndev->hw_features |= ndev->features | NETIF_F_RXALL;
+
+	if (pci_64)
+		ndev->features |= NETIF_F_HIGHDMA;
+
+	ndev->features |= NETIF_F_NTUPLE;
+
+	ndev->priv_flags |= IFF_UNICAST_FLT;
+
+	timer_setup(&nic->link_timer, &atl_link_timer, 0);
+
+	hw->mcp.ops->set_default_link(hw);
+	hw->link_state.force_off = 1;
+	hw->intr_mask = BIT(ATL_NUM_NON_RING_IRQS) - 1;
+	ndev->netdev_ops = &atl_ndev_ops;
+	ndev->mtu = 1500;
+#ifdef ATL_HAVE_MINMAX_MTU
+	ndev->max_mtu = nic->max_mtu;
+#endif
+	ndev->ethtool_ops = &atl_ethtool_ops;
+	ret = register_netdev(ndev);
+	if (ret)
+		goto err_register;
+
+	pci_set_drvdata(pdev, nic);
+	netif_carrier_off(ndev);
+
+	ret = atl_alloc_link_intr(nic);
+	if (ret)
+		goto err_link_intr;
+
+	ret = atl_hwmon_init(nic);
+	if (ret)
+		goto err_hwmon_init;
+
+	atl_start_hw_global(nic);
+	if (atl_keep_link)
+		atl_link_up(nic);
+
+	return 0;
+
+err_hwmon_init:
+	atl_free_link_intr(nic);
+err_link_intr:
+	unregister_netdev(nic->ndev);
+err_register:
+	atl_clear_datapath(nic);
+err_datapath:
+err_hwinit:
+	iounmap(hw->regs);
+err_ioremap:
+	disable_needed = test_and_clear_bit(ATL_ST_ENABLED, &nic->state);
+	free_netdev(ndev);
+err_alloc_ndev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	if (!nic || disable_needed)
+		pci_disable_device(pdev);
+	return ret;
+}
+
+static void atl_remove(struct pci_dev *pdev)
+{
+	int disable_needed;
+	struct atl_nic *nic = pci_get_drvdata(pdev);
+
+	if (!nic)
+		return;
+
+	netif_carrier_off(nic->ndev);
+	atl_intr_disable_all(&nic->hw);
+	/* atl_hw_reset(&nic->hw); */
+	atl_free_link_intr(nic);
+	unregister_netdev(nic->ndev);
+	atl_fwd_release_rings(nic);
+	atl_clear_datapath(nic);
+	iounmap(nic->hw.regs);
+	disable_needed = test_and_clear_bit(ATL_ST_ENABLED, &nic->state);
+	cancel_work_sync(&nic->work);
+	free_netdev(nic->ndev);
+	pci_release_regions(pdev);
+	if (disable_needed)
+		pci_disable_device(pdev);
+}
+
+static int atl_suspend_common(struct device *dev, bool deep)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct atl_nic *nic = pci_get_drvdata(pdev);
+	struct atl_hw *hw = &nic->hw;
+	int ret;
+
+	rtnl_lock();
+	netif_device_detach(nic->ndev);
+
+	if (netif_running(nic->ndev))
+		atl_do_close(nic);
+
+	if (deep && atl_keep_link)
+		atl_link_down(nic);
+
+	if (deep && nic->flags & ATL_FL_WOL) {
+		ret = hw->mcp.ops->enable_wol(hw);
+		if (ret)
+			atl_dev_err("Enable WoL failed: %d\n", -ret);
+	}
+
+	pci_disable_device(pdev);
+	__clear_bit(ATL_ST_ENABLED, &nic->state);
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+static int atl_suspend_poweroff(struct device *dev)
+{
+	return atl_suspend_common(dev, true);
+}
+
+static int atl_freeze(struct device *dev)
+{
+	return atl_suspend_common(dev, false);
+}
+
+static int atl_resume_common(struct device *dev, bool deep)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct atl_nic *nic = pci_get_drvdata(pdev);
+	int ret;
+
+	rtnl_lock();
+
+	ret = pci_enable_device_mem(pdev);
+	if (ret)
+		goto exit;
+
+	pci_set_master(pdev);
+	__set_bit(ATL_ST_ENABLED, &nic->state);
+
+	if (deep) {
+		ret = atl_hw_reset(&nic->hw);
+		if (ret)
+			goto exit;
+
+		atl_start_hw_global(nic);
+	}
+
+	if (netif_running(nic->ndev))
+		ret = atl_do_open(nic);
+
+	if (deep && atl_keep_link)
+		atl_link_up(nic);
+
+	if (ret)
+		goto exit;
+
+	netif_device_attach(nic->ndev);
+
+exit:
+	rtnl_unlock();
+
+	return ret;
+}
+
+static int atl_resume_restore(struct device *dev)
+{
+	return atl_resume_common(dev, true);
+}
+
+static int atl_thaw(struct device *dev)
+{
+	return atl_resume_common(dev, false);
+}
+
+static void atl_shutdown(struct pci_dev *pdev)
+{
+	atl_suspend_common(&pdev->dev, true);
+}
+
+const struct dev_pm_ops atl_pm_ops = {
+	.suspend = atl_suspend_poweroff,
+	.poweroff = atl_suspend_poweroff,
+	.freeze = atl_freeze,
+	.resume = atl_resume_restore,
+	.restore = atl_resume_restore,
+	.thaw = atl_thaw,
+};
+
+static struct pci_driver atl_pci_ops = {
+	.name = atl_driver_name,
+	.id_table = atl_pci_tbl,
+	.probe = atl_probe,
+	.remove = atl_remove,
+	.shutdown = atl_shutdown,
+#ifdef CONFIG_PM
+	.driver.pm = &atl_pm_ops,
+#endif
+};
+
+static int __init atl_module_init(void)
+{
+	int ret;
+
+	atl_wq = create_singlethread_workqueue(atl_driver_name);
+	if (!atl_wq) {
+		pr_err("%s: Couldn't create workqueue\n", atl_driver_name);
+		return -ENOMEM;
+	}
+
+	ret = pci_register_driver(&atl_pci_ops);
+	if (ret) {
+		destroy_workqueue(atl_wq);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(atl_module_init);
+
+static void __exit atl_module_exit(void)
+{
+	pci_unregister_driver(&atl_pci_ops);
+
+	if (atl_wq) {
+		destroy_workqueue(atl_wq);
+		atl_wq = NULL;
+	}
+}
+module_exit(atl_module_exit);
+
+MODULE_DEVICE_TABLE(pci, atl_pci_tbl);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(ATL_VERSION);
+MODULE_AUTHOR("Aquantia Corp.");
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_regs.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_regs.h
new file mode 100644
index 0000000..aefb5d5
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_regs.h
@@ -0,0 +1,182 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_REGS_H_
+#define _ATL_REGS_H_
+
+#define ATL_REG_STRIDE(base, stride, idx) ((base) + (stride) * (idx))
+
+/* Ring registers common for Rx and Tx */
+#define ATL_RING_OFFT(ring, offt)				\
+	(((struct atl_hw_ring *)(ring))->reg_base + (offt))
+#define ATL_RING_BASE_LSW(ring) ATL_RING_OFFT(ring, 0)
+#define ATL_RING_BASE_MSW(ring) ATL_RING_OFFT(ring, 4)
+#define ATL_RING_CTL(ring) ATL_RING_OFFT(ring, 8)
+#define ATL_RING_HEAD(ring) ATL_RING_OFFT(ring, 0xc)
+#define ATL_RING_TAIL(ring) ATL_RING_OFFT(ring, 0x10)
+#define ATL_RING_STS(ring) ATL_RING_OFFT(ring, 0x14)
+
+/* MIF @ 0x0000*/
+#define ATL_GLOBAL_STD_CTRL 0
+#define ATL_GLOBAL_FW_ID 0xc
+#define ATL_GLOBAL_CHIP_ID 0x10
+#define ATL_GLOBAL_CHIP_REV 0x14
+#define ATL_GLOBAL_FW_IMAGE_ID 0x18
+#define ATL_GLOBAL_MIF_ID 0x1c
+#define ATL_GLOBAL_MBOX_CTRL 0x200
+#define ATL_GLOBAL_MBOX_CRC 0x204
+#define ATL_GLOBAL_MBOX_ADDR 0x208
+#define ATL_GLOBAL_MBOX_DATA 0x20c
+#define ATL_GLOBAL_MDIO_CTL 0x280
+#define ATL_GLOBAL_MDIO_CMD 0x284
+#define ATL_GLOBAL_MDIO_WDATA 0x288
+#define ATL_GLOBAL_MDIO_ADDR 0x28c
+#define ATL_GLOBAL_MDIO_RDATA 0x290
+/* Scratch pads numbered starting from 1 */
+#define ATL_MCP_SCRATCH(idx) ATL_REG_STRIDE(0x300 - 0x4, 0x4, idx)
+#define ATL_MCP_SEM(idx) ATL_REG_STRIDE(0x3a0, 0x4, idx)
+#define ATL_MCP_SEM_MDIO 0
+#define ATL_MCP_SEM_MSM 1
+#define ATL_GLOBAL_CTRL2 0x404
+#define ATL_GLOBAL_DAISY_CHAIN_STS1 0x704
+
+enum mcp_scratchpad {
+	FW2_MBOX_DATA = 11,	/* 0x328 */
+	FW2_MBOX_CMD = 12,	/* 0x32c */
+	FW_STAT_STRUCT = 25,	/* 0x360 */
+	FW2_EFUSE_SHADOW = 26,	/* 0x364 */
+	FW1_LINK_REQ = 27,
+	FW2_LINK_REQ_LOW = 27,	/* 0x368 */
+	FW1_LINK_STS = 28,
+	FW2_LINK_REQ_HIGH = 28,	/* 0x36c */
+	FW2_LINK_RES_LOW = 29,	/* 0x370 */
+	FW1_EFUSE_SHADOW = 30,
+	FW2_LINK_RES_HIGH = 30,	/* 0x374 */
+	RBL_STS = 35,		/* 0x388 */
+};
+
+/* INTR @ 0x2000 */
+#define ATL_INTR_STS 0x2000
+#define ATL_INTR_MSK 0x2010
+#define ATL_INTR_MSK_SET 0x2060
+#define ATL_INTR_MSK_CLEAR 0x2070
+#define ATL_INTR_AUTO_CLEAR 0x2080
+#define ATL_INTR_AUTO_MASK 0x2090
+#define ATL_INTR_RING_INTR_MAP(idx) ATL_REG_STRIDE(0x2100, 0x4, (idx) >> 1)
+#define ATL_INTR_GEN_INTR_MAP4 0x218c
+#define ATL_INTR_RSC_EN 0x2200
+#define ATL_INTR_RSC_DELAY 0x2204
+#define ATL_INTR_CTRL 0x2300
+#define ATL_INTR_THRTL(idx) ATL_REG_STRIDE(0x2800, 4, idx)
+
+/* MPI @ 0x4000 */
+#define ATL_MPI_CTRL1 0x4000
+#define ATL_MPI_MSM_ADDR 0x4400
+#define ATL_MPI_MSM_WR 0x4404
+#define ATL_MPI_MSM_RD 0x4408
+
+/* RX @ 0x5000 */
+#define ATL_RX_CTRL1 0x5000
+#define ATL_RX_FLT_CTRL1 0x5100
+#define ATL_RX_FLT_CTRL2 0x5104
+#define ATL_UC_FLT_NUM 37
+#define ATL_RX_UC_FLT_REG1(idx) ATL_REG_STRIDE(0x5110, 8, idx)
+#define ATL_RX_UC_FLT_REG2(idx) ATL_REG_STRIDE(0x5114, 8, idx)
+#define ATL_MC_FLT_NUM 8
+#define ATL_RX_MC_FLT(idx) ATL_REG_STRIDE(0x5250, 4, idx)
+#define ATL_RX_MC_FLT_MSK 0x5270
+#define ATL_RX_VLAN_FLT_CTRL1 0x5280
+#define ATL_VLAN_FLT_NUM 16
+#define ATL_RX_VLAN_FLT(idx) ATL_REG_STRIDE(0x5290, 4, idx)
+#define ATL_RX_ETYPE_FLT(idx) ATL_REG_STRIDE(0x5300, 4, idx)
+#define ATL_ETYPE_FLT_NUM 16
+#define ATL_NTUPLE_CTRL(idx) ATL_REG_STRIDE(0x5380, 4, idx)
+#define ATL_NTUPLE_SADDR(idx) ATL_REG_STRIDE(0x53b0, 4, idx)
+#define ATL_NTUPLE_DADDR(idx) ATL_REG_STRIDE(0x53d0, 4, idx)
+#define ATL_NTUPLE_SPORT(idx) ATL_REG_STRIDE(0x5400, 4, idx)
+#define ATL_NTUPLE_DPORT(idx) ATL_REG_STRIDE(0x5420, 4, idx)
+#define ATL_NTUPLE_FLT_NUM 8
+#define ATL_RX_RSS_CTRL 0x54c0
+#define ATL_RX_RSS_KEY_ADDR 0x54d0
+#define ATL_RX_RSS_KEY_WR_DATA 0x54d4
+#define ATL_RX_RSS_KEY_RD_DATA 0x54d8
+#define ATL_RX_RSS_TBL_ADDR 0x54e0
+#define ATL_RX_RSS_TBL_WR_DATA 0x54e4
+#define ATL_RX_RSS_TBL_RD_DATA 0x54e8
+#define ATL_RX_RPF_DBG_CNT_CTRL 0x5518
+#define ATL_RX_RPF_HOST_CNT_LO 0x552c
+#define ATL_RX_RPF_HOST_CNT_HI 0x5530
+#define ATL_RX_RPF_LOST_CNT_LO 0x554c
+#define ATL_RX_RPF_LOST_CNT_HI 0x5550
+#define ATL_RX_PO_CTRL1 0x5580
+#define ATL_RX_LRO_CTRL1 0x5590
+#define ATL_RX_LRO_CTRL2 0x5594
+#define ATL_RX_LRO_PKT_LIM_EN 0x5598
+#define ATL_RX_LRO_PKT_LIM(idx) ATL_REG_STRIDE(0x55a0, 4, (idx) >> 3)
+#define ATL_RX_LRO_TMRS 0x5620
+#define ATL_RX_PBUF_CTRL1 0x5700
+#define ATL_RX_PBUF_REG1(idx) ATL_REG_STRIDE(0x5710, 0x10, idx)
+#define ATL_RX_PBUF_REG2(idx) ATL_REG_STRIDE(0x5714, 0x10, idx)
+#define ATL_RX_INTR_CTRL 0x5a30
+#define ATL_RX_INTR_MOD_CTRL(idx) ATL_REG_STRIDE(0x5a40, 4, idx)
+
+/* Rx rings */
+#define ATL_RX_RING(idx) ATL_REG_STRIDE(0x5b00, 0x20, idx)
+#define ATL_RX_RING_BASE_LSW(ring) ATL_RING_BASE_LSW(ring)
+#define ATL_RX_RING_BASE_MSW(ring) ATL_RING_BASE_MSW(ring)
+#define ATL_RX_RING_CTL(ring) ATL_RING_CTL(ring)
+#define ATL_RX_RING_HEAD(ring) ATL_RING_HEAD(ring)
+#define ATL_RX_RING_TAIL(ring) ATL_RING_TAIL(ring)
+#define ATL_RX_RING_STS(ring) ATL_RING_STS(ring)
+#define ATL_RX_RING_BUF_SIZE(ring) ATL_RING_OFFT(ring, 0x18)
+#define ATL_RX_RING_THRESH(ring) ATL_RING_OFFT(ring, 0x1c)
+
+#define ATL_RX_DMA_STATS_CNT7 0x6818
+
+/* TX @ 0x7000 */
+#define ATL_TX_CTRL1 0x7000
+#define ATL_TX_PO_CTRL1 0x7800
+#define ATL_TX_LSO_CTRL 0x7810
+#define ATL_TX_LSO_TCP_CTRL1 0x7820
+#define ATL_TX_LSO_TCP_CTRL2 0x7824
+#define ATL_TX_PBUF_CTRL1 0x7900
+#define ATL_TX_PBUF_REG1(idx) ATL_REG_STRIDE(0x7910, 0x10, idx)
+#define ATL_TX_INTR_CTRL 0x7b40
+
+/* Tx rings */
+#define ATL_TX_RING(idx) ATL_REG_STRIDE(0x7c00, 0x40, idx)
+#define ATL_TX_RING_BASE_LSW(ring) ATL_RING_BASE_LSW(ring)
+#define ATL_TX_RING_BASE_MSW(ring) ATL_RING_BASE_MSW(ring)
+#define ATL_TX_RING_CTL(ring) ATL_RING_CTL(ring)
+#define ATL_TX_RING_HEAD(ring) ATL_RING_HEAD(ring)
+#define ATL_TX_RING_TAIL(ring) ATL_RING_TAIL(ring)
+#define ATL_TX_RING_STS(ring) ATL_RING_STS(ring)
+#define ATL_TX_RING_THRESH(ring) ATL_RING_OFFT(ring, 0x18)
+#define ATL_TX_RING_HEAD_WB_LSW(ring) ATL_RING_OFFT(ring, 0x1c)
+#define ATL_TX_RING_HEAD_WB_MSW(ring) ATL_RING_OFFT(ring, 0x20)
+
+#define ATL_TX_INTR_MOD_CTRL(idx) ATL_REG_STRIDE(0x8980, 0x4, idx)
+
+/* MSM */
+#define ATL_MSM_GEN_CTRL 0x8
+#define ATL_MSM_GEN_STS 0x40
+#define ATL_MSM_TX_LPI_DELAY 0x78
+#define ATL_MSM_CTR_RX_PKTS_GOOD 0x88
+#define ATL_MSM_CTR_RX_FCS_ERRS 0x90
+#define ATL_MSM_CTR_RX_ALIGN_ERRS 0x98
+#define ATL_MSM_CTR_TX_PAUSE 0xa0
+#define ATL_MSM_CTR_RX_PAUSE 0xa8
+#define ATL_MSM_CTR_RX_OCTETS_LO 0xd8
+#define ATL_MSM_CTR_RX_OCTETS_HI 0xdc
+#define ATL_MSM_CTR_RX_MULTICAST 0xe8
+#define ATL_MSM_CTR_RX_BROADCAST 0xf0
+#define ATL_MSM_CTR_RX_ERRS 0x120
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.c
new file mode 100644
index 0000000..2496fe0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.c
@@ -0,0 +1,1680 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include "atl_ring.h"
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "atl_trace.h"
+
+#define atl_update_ring_stat(ring, stat, delta)			\
+do {								\
+	struct atl_desc_ring *_ring = (ring);			\
+								\
+	u64_stats_update_begin(&_ring->syncp);			\
+	_ring->stats.stat += (delta);				\
+	u64_stats_update_end(&_ring->syncp);			\
+} while (0)
+
+static inline uint32_t fetch_tx_head(struct atl_desc_ring *ring)
+{
+#ifdef ATL_TX_HEAD_WB
+	//XXX
+#else
+	return atl_read(ring_hw(ring), ATL_TX_RING_HEAD(ring));
+#endif
+}
+
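+/* Check for at least @needed free descriptors, stopping the subqueue
+ * if there are fewer. Returns -EAGAIN if the ring is still full after
+ * a recheck, 0 if there is enough space (possibly after restarting
+ * the queue).
+ */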
+static int tx_full(struct atl_desc_ring *ring, int needed)
+{
+	struct atl_nic *nic = ring->qvec->nic;
+
+	if (likely(ring_space(ring) >= needed))
+		return 0;
+
+	netif_stop_subqueue(ring->qvec->nic->ndev, ring->qvec->idx);
+	atl_nic_dbg("Stopping tx queue\n");
+
+	smp_mb();
+
+	// Check if another CPU freed some space
+	if (likely(ring_space(ring) < needed))
+		return -EAGAIN;
+
+	netif_start_subqueue(ring->qvec->nic->ndev, ring->qvec->idx);
+	atl_nic_dbg("Restarting tx queue in %s...\n", __func__);
+	atl_update_ring_stat(ring, tx.tx_restart, 1);
+	return 0;
+}
+
+static void atl_txbuf_free(struct atl_txbuf *txbuf, struct device *dev,
+	uint32_t idx)
+{
+	if (txbuf->skb) {
+		if (dma_unmap_len(txbuf, len)) {
+			dma_unmap_single(dev, dma_unmap_addr(txbuf, daddr),
+					 dma_unmap_len(txbuf, len),
+					 DMA_TO_DEVICE);
+			trace_atl_dma_unmap_head(-1, idx,
+				dma_unmap_addr(txbuf, daddr),
+				dma_unmap_len(txbuf, len),
+				txbuf->skb);
+		}
+		dev_kfree_skb_any(txbuf->skb);
+	} else if (dma_unmap_len(txbuf, len)) {
+		dma_unmap_page(dev, dma_unmap_addr(txbuf, daddr),
+			       dma_unmap_len(txbuf, len),
+			       DMA_TO_DEVICE);
+		trace_atl_dma_unmap_frag(-1, idx, dma_unmap_addr(txbuf, daddr),
+			dma_unmap_len(txbuf, len), txbuf->skb);
+	}
+
+	txbuf->last = -1;
+	txbuf->skb = NULL;
+	dma_unmap_len_set(txbuf, len, 0);
+}
+
+static inline struct netdev_queue *atl_txq(struct atl_desc_ring *ring)
+{
+	return netdev_get_tx_queue(ring->qvec->nic->ndev,
+		ring->qvec->idx);
+}
+
+static unsigned int atl_tx_free_low = MAX_SKB_FRAGS + 4;
+module_param_named(tx_free_low, atl_tx_free_low, uint, 0644);
+
+static unsigned int atl_tx_free_high = MAX_SKB_FRAGS * 3;
+module_param_named(tx_free_high, atl_tx_free_high, uint, 0644);
+
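+/* skb->xmit_more hints that the stack has more packets queued for
+ * this ring, which lets atl_map_xmit_skb() delay the tail doorbell
+ * write.
+ */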
+static inline int skb_xmit_more(struct sk_buff *skb)
+{
+	return skb->xmit_more;
+}
+
+static netdev_tx_t atl_map_xmit_skb(struct sk_buff *skb,
+	struct atl_desc_ring *ring, struct atl_txbuf *first_buf)
+{
+	int idx = ring->tail;
+	struct device *dev = ring->qvec->dev;
+	struct atl_tx_desc *desc = &ring->desc.tx;
+	struct skb_frag_struct *frag;
+	/* Header's DMA mapping must be stored in the txbuf that has
+	 * ->skb set, even if it corresponds to the context
+	 * descriptor and not the first data descriptor
+	 */
+	struct atl_txbuf *txbuf = first_buf;
+	unsigned int len = skb_headlen(skb);
+	unsigned int frags = skb_shinfo(skb)->nr_frags;
+	dma_addr_t daddr = dma_map_single(dev, skb->data, len,
+					  DMA_TO_DEVICE);
+	trace_atl_dma_map_head(-1, idx, daddr, len, skb, skb->data);
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(dev, daddr))
+			goto err_dma;
+
+		dma_unmap_len_set(txbuf, len, len);
+		dma_unmap_addr_set(txbuf, daddr, daddr);
+
+		desc->daddr = cpu_to_le64(daddr);
+		while (len > ATL_DATA_PER_TXD) {
+			desc->len = cpu_to_le16(ATL_DATA_PER_TXD);
+			WRITE_ONCE(ring->hw.descs[idx].tx, *desc);
+			bump_ptr(idx, ring, 1);
+			daddr += ATL_DATA_PER_TXD;
+			len -= ATL_DATA_PER_TXD;
+			desc->daddr = cpu_to_le64(daddr);
+		}
+		desc->len = cpu_to_le16(len);
+
+		if (!frags)
+			break;
+
+		WRITE_ONCE(ring->hw.descs[idx].tx, *desc);
+		bump_ptr(idx, ring, 1);
+		txbuf = &ring->txbufs[idx];
+		len = skb_frag_size(frag);
+		daddr = skb_frag_dma_map(dev, frag, 0, len,
+					 DMA_TO_DEVICE);
+		trace_atl_dma_map_frag(frag - &skb_shinfo(skb)->frags[0], idx,
+				       daddr, len, skb, skb_frag_address(frag));
+
+		frags--;
+	}
+
+	// Last descriptor
+	desc->eop = 1;
+#if defined(ATL_TX_DESC_WB) || defined(ATL_TX_HEAD_WB)
+	desc->cmd |= tx_desc_cmd_wb;
+#endif
+	WRITE_ONCE(ring->hw.descs[idx].tx, *desc);
+	first_buf->last = idx;
+	bump_ptr(idx, ring, 1);
+	ring->txbufs[idx].last = -1;
+	ring->tail = idx;
+
+	/* Stop queue if no space for another packet */
+	tx_full(ring, atl_tx_free_low);
+
+	/* Delay bumping the HW tail if another packet is pending and
+	 * there's space for it.
+	 */
+	if (skb_xmit_more(skb) && !netif_xmit_stopped(atl_txq(ring)))
+		return NETDEV_TX_OK;
+
+	wmb();
+	atl_write(ring_hw(ring), ATL_TX_RING_TAIL(ring), ring->tail);
+
+	return NETDEV_TX_OK;
+
+err_dma:
+	dev_err(dev, "atl_map_xmit_skb failed\n");
+	for (;;) {
+		atl_txbuf_free(txbuf, dev, idx);
+		if (txbuf == first_buf)
+			break;
+		bump_ptr(idx, ring, -1);
+		txbuf = &ring->txbufs[idx];
+	}
+	ring->tail = idx;
+	atl_update_ring_stat(ring, tx.dma_map_failed, 1);
+	return -EFAULT;
+}
+
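+/* Optionally write a context descriptor for LSO and/or VLAN insertion
+ * and fill in the completion accounting in @txbuf. Returns the command
+ * flags the data descriptors must carry, 0 if no context descriptor
+ * was needed.
+ */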
+static uint32_t atl_insert_context(struct atl_txbuf *txbuf,
+	struct atl_desc_ring *ring, unsigned int *len)
+{
+	struct sk_buff *skb = txbuf->skb;
+	struct atl_tx_ctx *ctx;
+	unsigned int hdr_len;
+	uint32_t tx_cmd = 0;
+	int mss;
+	DECLARE_SCRATCH_DESC(scratch);
+
+	ctx = &DESC_PTR(ring, ring->tail, scratch)->ctx;
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	txbuf->bytes = *len;
+	txbuf->packets = 1;
+
+	mss = skb_shinfo(skb)->gso_size;
+
+	if (mss && (skb_shinfo(skb)->gso_type &
+		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+		tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
+		ctx->mss_len = mss;
+		ctx->cmd = ctx_cmd_tcp;
+
+		ctx->l2_len = skb_network_offset(skb);
+
+		if (skb_is_gso_v6(skb))
+			ctx->cmd |= ctx_cmd_ipv6;
+
+		ctx->l3_len = skb_transport_offset(skb) - ctx->l2_len;
+		ctx->l4_len = tcp_hdrlen(skb);
+
+		hdr_len = ctx->l2_len + ctx->l3_len + ctx->l4_len;
+
+		*len -= hdr_len;
+		txbuf->packets = skb_shinfo(skb)->gso_segs;
+		txbuf->bytes += (txbuf->packets - 1) * hdr_len;
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		tx_cmd |= tx_desc_cmd_vlan;
+		ctx->vlan_tag = skb_vlan_tag_get(skb);
+	}
+
+	if (tx_cmd) {
+		ctx->type = tx_desc_type_context;
+		ctx->idx = 0;
+		COMMIT_DESC(ring, ring->tail, scratch);
+		bump_tail(ring, 1);
+	}
+
+	return tx_cmd;
+}
+
+netdev_tx_t atl_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_desc_ring *ring = &nic->qvecs[skb->queue_mapping].tx;
+	unsigned int len = skb->len;
+	struct atl_tx_desc *desc;
+	struct atl_txbuf *txbuf;
+	uint32_t cmd_from_ctx;
+
+	if (tx_full(ring, skb_shinfo(skb)->nr_frags + 4)) {
+		atl_update_ring_stat(ring, tx.tx_busy, 1);
+		return NETDEV_TX_BUSY;
+	}
+
+	txbuf = &ring->txbufs[ring->tail];
+
+	txbuf->skb = skb;
+	cmd_from_ctx = atl_insert_context(txbuf, ring, &len);
+
+	/* use ring->desc unconditionally as it will serve as a
+	 * template for all descriptors
+	 */
+	desc = &ring->desc.tx;
+
+	memset(desc, 0, sizeof(*desc));
+
+	desc->cmd = cmd_from_ctx;
+	desc->cmd |= tx_desc_cmd_fcs;
+	desc->ct_en = !!cmd_from_ctx;
+	desc->type = tx_desc_type_desc;
+
+	desc->pay_len = len;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		uint8_t l4_proto = 0;
+
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			desc->cmd |= tx_desc_cmd_ipv4cs;
+			l4_proto = ip_hdr(skb)->protocol;
+			break;
+		case htons(ETH_P_IPV6):
+			l4_proto = ipv6_hdr(skb)->nexthdr;
+			break;
+		}
+
+		switch (l4_proto) {
+		case IPPROTO_TCP:
+		case IPPROTO_UDP:
+			desc->cmd |= tx_desc_cmd_l4cs;
+			break;
+		}
+	}
+
+	return atl_map_xmit_skb(skb, ring, txbuf);
+}
+
+static unsigned int atl_tx_clean_budget = 256;
+module_param_named(tx_clean_budget, atl_tx_clean_budget, uint, 0644);
+
+// Returns true if all work done
+static bool atl_clean_tx(struct atl_desc_ring *ring)
+{
+	struct atl_nic *nic = ring->qvec->nic;
+	struct device *dev = ring->qvec->dev;
+	uint32_t first = READ_ONCE(ring->head);
+#ifndef ATL_TX_DESC_WB
+	uint32_t done = atl_get_tx_head(ring);
+#endif
+	uint32_t budget = atl_tx_clean_budget;
+	unsigned int bytes = 0, packets = 0;
+	struct atl_tx_desc *last_desc;
+
+	atl_nic_dbg("descs in ring: %d\n", ring_occupied(ring));
+	do {
+		struct atl_txbuf *txbuf = &ring->txbufs[first];
+		struct sk_buff *skb = txbuf->skb;
+		uint32_t last = txbuf->last;
+
+		if (last == -1)
+			break;
+
+#ifdef ATL_TX_DESC_WB
+		last_desc = &ring->hw.descs[last].tx;
+
+		if (!last_desc->dd)
+			break;
+#else
+		if ((first <= last && done >= first && done <= last) ||
+		    ((first > last) && (done >= first || done <= last)))
+			break;
+#endif
+
+		bump_ptr(last, ring, 1);
+		napi_consume_skb(txbuf->skb, budget);
+		trace_atl_dma_unmap_head(-1, first,
+					 dma_unmap_addr(txbuf, daddr),
+					 dma_unmap_len(txbuf, len), skb);
+
+		txbuf->skb = NULL;
+		txbuf->last = -1;
+		dma_unmap_single(dev, dma_unmap_addr(txbuf, daddr),
+				 dma_unmap_len(txbuf, len), DMA_TO_DEVICE);
+		dma_unmap_len_set(txbuf, len, 0);
+
+		bytes += txbuf->bytes;
+		packets += txbuf->packets;
+
+		for (bump_ptr(first, ring, 1); first != last;
+		     bump_ptr(first, ring, 1)) {
+			txbuf = &ring->txbufs[first];
+			if (dma_unmap_len(txbuf, len)) {
+				dma_unmap_page(dev,
+					dma_unmap_addr(txbuf, daddr),
+					dma_unmap_len(txbuf, len),
+					DMA_TO_DEVICE);
+				trace_atl_dma_unmap_frag(-1, first,
+					dma_unmap_addr(txbuf, daddr),
+					dma_unmap_len(txbuf, len), skb);
+				dma_unmap_len_set(txbuf, len, 0);
+			}
+		}
+	} while (--budget);
+
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.tx.bytes += bytes;
+	ring->stats.tx.packets += packets;
+	u64_stats_update_end(&ring->syncp);
+
+	WRITE_ONCE(ring->head, first);
+
+	if (ring_space(ring) > atl_tx_free_high) {
+		struct net_device *ndev = nic->ndev;
+
+		smp_mb();
+		if (__netif_subqueue_stopped(ndev, ring->qvec->idx) &&
+			test_bit(ATL_ST_UP, &nic->state)) {
+			atl_nic_dbg("restarting tx queue\n");
+			netif_wake_subqueue(ndev, ring->qvec->idx);
+			atl_update_ring_stat(ring, tx.tx_restart, 1);
+		}
+	}
+
+	return !!budget;
+}
+
+static bool atl_rx_checksum(struct sk_buff *skb, struct atl_rx_desc_wb *desc,
+	struct atl_desc_ring *ring)
+{
+	struct atl_nic *nic = ring->qvec->nic;
+	struct net_device *ndev = nic->ndev;
+	int csum_ok = 1, recheck = 0;
+
+	skb_checksum_none_assert(skb);
+
+	if (desc->rx_stat & atl_rx_stat_mac_err) {
+		atl_update_ring_stat(ring, rx.mac_err, 1);
+		atl_nic_dbg("rx MAC err: rx_stat %d pkt_type %d len %d\n",
+			desc->rx_stat, desc->pkt_type, desc->pkt_len);
+		goto drop;
+	}
+
+	if (!(ndev->features & NETIF_F_RXCSUM))
+		return true;
+
+	switch (desc->pkt_type & atl_rx_pkt_type_l3_msk) {
+	case atl_rx_pkt_type_ipv4:
+		csum_ok &= !(desc->rx_stat & atl_rx_stat_ipv4_err);
+		/* Fallthrough */
+	case atl_rx_pkt_type_ipv6:
+		break;
+	default:
+		return true;
+	}
+
+	switch (desc->pkt_type & atl_rx_pkt_type_l4_msk) {
+	case atl_rx_pkt_type_tcp:
+	case atl_rx_pkt_type_udp:
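+		/* Frames padded to the 60-byte minimum may be flagged
+		 * with an L4 checksum error; treat those as unverified
+		 * instead of dropping them.
+		 */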
+		recheck = desc->pkt_len <= 60;
+		csum_ok &= !(desc->rx_stat & atl_rx_stat_l4_err);
+		break;
+	default:
+		return true;
+	}
+
+	if (csum_ok) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return true;
+	} else if (recheck)
+		return true;
+
+	atl_update_ring_stat(ring, rx.csum_err, 1);
+
+	atl_nic_dbg("bad rx checksum: rx_stat %d pkt_type %d len %d\n",
+		    desc->rx_stat, desc->pkt_type, desc->pkt_len);
+
+	if (ndev->features & NETIF_F_RXALL)
+		return true;
+
+drop:
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+static void atl_rx_hash(struct sk_buff *skb, struct atl_rx_desc_wb *desc,
+	struct net_device *ndev)
+{
+	uint8_t rss_type = desc->rss_type;
+
+	if (!(ndev->features & NETIF_F_RXHASH) || rss_type < 2 || rss_type > 7)
+		return;
+
+	skb_set_hash(skb, le32_to_cpu(desc->rss_hash),
+		(rss_type > 3 && rss_type < 8) ? PKT_HASH_TYPE_L4 :
+		PKT_HASH_TYPE_L3);
+}
+
+static bool atl_rx_packet(struct sk_buff *skb, struct atl_rx_desc_wb *desc,
+			  struct atl_desc_ring *ring)
+{
+	struct net_device *ndev = ring->qvec->nic->ndev;
+	struct napi_struct *napi = &ring->qvec->napi;
+
+	if (!atl_rx_checksum(skb, desc, ring))
+		return false;
+
+	if (!skb_is_nonlinear(skb) && eth_skb_pad(skb))
+		return false;
+
+	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX
+	    && desc->rx_estat & atl_rx_estat_vlan_stripped) {
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       le16_to_cpu(desc->vlan_tag));
+	}
+
+	atl_rx_hash(skb, desc, ndev);
+
+	skb_record_rx_queue(skb, ring->qvec->idx);
+	skb->protocol = eth_type_trans(skb, ndev);
+	if (skb->pkt_type == PACKET_MULTICAST)
+		atl_update_ring_stat(ring, rx.multicast, 1);
+	napi_gro_receive(napi, skb);
+	return true;
+}
+
+unsigned int atl_rx_linear;
+module_param_named(rx_linear, atl_rx_linear, uint, 0444);
+
+/* DMA mappings of buffer pages are accounted via struct
+ * atl_rxpage. Being mapped counts as a single additional reference
+ * for the target page.
+ */
+static int atl_get_page(struct atl_pgref *pgref, unsigned int order,
+	struct device *dev)
+{
+	struct atl_rxpage *rxpage;
+	struct page *page;
+	dma_addr_t daddr;
+	int ret = -ENOMEM;
+
+	rxpage = kmalloc(sizeof(*rxpage), GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!rxpage))
+		return ret;
+
+	page = dev_alloc_pages(order);
+	if (unlikely(!page))
+		goto free_rxpage;
+
+	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order, DMA_FROM_DEVICE);
+	trace_atl_dma_map_rxbuf(-1, -1, daddr, PAGE_SIZE << order, NULL,
+		page_to_virt(page));
+
+	if (unlikely(dma_mapping_error(dev, daddr)))
+		goto free_page;
+
+	rxpage->page = page;
+	rxpage->daddr = daddr;
+	rxpage->order = order;
+	rxpage->mapcount = 1;
+
+	pgref->rxpage = rxpage;
+	pgref->pg_off = 0;
+
+	return 0;
+
+free_page:
+	__free_pages(page, order);
+free_rxpage:
+	kfree(rxpage);
+
+	return ret;
+}
+
+static int atl_get_pages(struct atl_rxbuf *rxbuf,
+	struct atl_desc_ring *ring)
+{
+	int ret;
+	struct device *dev = ring->qvec->dev;
+
+	if (likely((rxbuf->head.rxpage || atl_rx_linear)
+			&& rxbuf->data.rxpage))
+		return 0;
+
+	if (!rxbuf->head.rxpage && !atl_rx_linear) {
+		ret = atl_get_page(&rxbuf->head, ATL_RX_HEAD_ORDER, dev);
+		if (ret) {
+			atl_update_ring_stat(ring,
+				rx.alloc_head_page_failed, 1);
+			return ret;
+		}
+		atl_update_ring_stat(ring, rx.alloc_head_page, 1);
+	}
+
+	if (!rxbuf->data.rxpage) {
+		ret = atl_get_page(&rxbuf->data, ATL_RX_DATA_ORDER, dev);
+		if (ret) {
+			atl_update_ring_stat(ring,
+				rx.alloc_data_page_failed, 1);
+			return ret;
+		}
+		atl_update_ring_stat(ring, rx.alloc_data_page, 1);
+	}
+
+	return 0;
+}
+
+static inline void atl_fill_rx_desc(struct atl_desc_ring *ring,
+	struct atl_rxbuf *rxbuf)
+{
+	struct atl_rx_desc *desc;
+	DECLARE_SCRATCH_DESC(scratch);
+
+	desc  = &DESC_PTR(ring, ring->tail, scratch)->rx;
+
+	desc->daddr = atl_buf_daddr(&rxbuf->data) +
+		(atl_rx_linear ? ATL_RX_HEADROOM : 0);
+
+	/* Assigning haddr clears dd as bufs are cacheline-aligned
+	 * and ATL_RX_HEADROOM is even
+	 */
+	desc->haddr = atl_rx_linear ? 0 :
+		atl_buf_daddr(&rxbuf->head) + ATL_RX_HEADROOM;
+
+	trace_atl_fill_rx_desc(ring->tail, desc);
+	COMMIT_DESC(ring, ring->tail, scratch);
+}
+
+static int atl_fill_rx(struct atl_desc_ring *ring, uint32_t count)
+{
+	int ret = 0;
+
+	while (count) {
+		struct atl_rxbuf *rxbuf = &ring->rxbufs[ring->tail];
+
+		ret = atl_get_pages(rxbuf, ring);
+		if (ret)
+			break;
+
+		atl_fill_rx_desc(ring, rxbuf);
+		bump_tail(ring, 1);
+		count--;
+	}
+
+	/* If the tail ptr has passed the next_to_recycle ptr, clamp the
+	 * latter to the former.
+	 */
+	if (ring->next_to_recycle < ring->head ?
+		ring->next_to_recycle < ring->tail &&
+		ring->tail < ring->head :
+		ring->tail > ring->next_to_recycle ||
+		ring->tail < ring->head)
+		ring->next_to_recycle = ring->tail;
+
+	wmb();
+	atl_write(ring_hw(ring), ATL_RX_RING_TAIL(ring), ring->tail);
+	return ret;
+}
+
+static inline void atl_get_rxpage(struct atl_pgref *pgref)
+{
+	pgref->rxpage->mapcount++;
+}
+
+static inline void __atl_free_rxpage(struct atl_rxpage *rxpage,
+	struct device *dev)
+{
+	unsigned int len = PAGE_SIZE << rxpage->order;
+
+	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);
+	trace_atl_dma_unmap_rxbuf(-1, -1, rxpage->daddr, len, NULL);
+
+	/* Drop the ref for dma mapping. */
+	__free_pages(rxpage->page, rxpage->order);
+	kfree(rxpage);
+}
+
+static inline void atl_put_rxpage(struct atl_pgref *pgref, struct device *dev)
+{
+	struct atl_rxpage *rxpage = pgref->rxpage;
+
+	if (!rxpage)
+		return;
+
+	if (--rxpage->mapcount)
+		return;
+
+	__atl_free_rxpage(rxpage, dev);
+	pgref->rxpage = 0;
+}
+
+static bool atl_recycle_or_put_page(struct atl_pgref *pgref,
+	unsigned int buf_len, struct device *dev)
+{
+	unsigned int order = pgref->rxpage->order;
+	unsigned int size = PAGE_SIZE << order;
+	struct page *page = pgref->rxpage->page;
+
+	if (!page_is_pfmemalloc(page) && pgref->pg_off + buf_len < size)
+		return true;
+
+	atl_put_rxpage(pgref, dev);
+
+	return false;
+}
+
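+/* Try to hand the just-consumed head and data pages to the rxbuf at
+ * ->next_to_recycle. A page is reused only while enough room is left
+ * in it for another buffer; otherwise its DMA-mapping reference is
+ * dropped.
+ */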
+static void atl_maybe_recycle_rxbuf(struct atl_desc_ring *ring,
+	struct atl_rxbuf *rxbuf)
+{
+	int reused = 0;
+	struct atl_pgref *head = &rxbuf->head, *data = &rxbuf->data;
+	struct atl_rxbuf *new = &ring->rxbufs[ring->next_to_recycle];
+	unsigned int data_len = ATL_RX_BUF_SIZE +
+		(atl_rx_linear ? ATL_RX_HDR_OVRHD : 0);
+
+	if (!atl_rx_linear
+		&& atl_recycle_or_put_page(head,
+			ATL_RX_HDR_SIZE + ATL_RX_HDR_OVRHD, ring->qvec->dev)) {
+		new->head = *head;
+		reused = 1;
+		atl_update_ring_stat(ring, rx.reused_head_page, 1);
+	}
+	head->rxpage = 0;
+
+	if (atl_recycle_or_put_page(data, data_len, ring->qvec->dev)) {
+		new->data = *data;
+		reused = 1;
+		atl_update_ring_stat(ring, rx.reused_data_page, 1);
+	}
+	data->rxpage = 0;
+
+	if (reused)
+		bump_ptr(ring->next_to_recycle, ring, 1);
+}
+
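+/* Payload length DMA'd into the data buffer for this descriptor: a
+ * full buffer for non-EOP descriptors, otherwise the remainder of
+ * pkt_len (minus the split-off header, when applicable).
+ */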
+static unsigned int atl_data_len(struct atl_rx_desc_wb *wb)
+{
+	unsigned int len = le16_to_cpu(wb->pkt_len);
+
+	if (!wb->eop)
+		return ATL_RX_BUF_SIZE;
+
+	if (!wb->rsc_cnt && wb->sph)
+		len -= wb->hdr_len;
+
+	len &= ATL_RX_BUF_SIZE - 1;
+	return len ?: ATL_RX_BUF_SIZE;
+}
+
+static void atl_sync_range(struct atl_desc_ring *ring,
+	struct atl_pgref *pgref, unsigned int offt, unsigned int len)
+{
+	dma_addr_t daddr = pgref->rxpage->daddr;
+	unsigned int pg_off = pgref->pg_off + offt;
+
+	dma_sync_single_range_for_cpu(ring->qvec->dev, daddr, pg_off, len,
+		DMA_FROM_DEVICE);
+	trace_atl_sync_rx_range(-1, daddr, pg_off, len);
+}
+
+static struct sk_buff *atl_init_skb(struct atl_desc_ring *ring,
+	struct atl_rxbuf *rxbuf, struct atl_rx_desc_wb *wb)
+{
+	struct sk_buff *skb;
+	unsigned int hdr_len, alloc, tailroom, len;
+	unsigned int data_len = atl_data_len(wb);
+	void *hdr;
+	struct atl_pgref *pgref;
+	struct atl_nic *nic = ring->qvec->nic;
+
+	if (atl_rx_linear) {
+		if (!wb->eop) {
+			atl_nic_err("Multi-frag packet in linear mode\n");
+			atl_update_ring_stat(ring, rx.linear_dropped, 1);
+			return (void *)-1l;
+		}
+
+		hdr_len = len = data_len;
+		tailroom = 0;
+		pgref = &rxbuf->data;
+	} else {
+		hdr_len = wb->hdr_len;
+		if (hdr_len == 0) {
+			atl_nic_err("Header parse error\n");
+			return (void *)-1l;
+		}
+
+		/* If entire packet fits into ATL_RX_HDR_SIZE, reserve
+		 * enough space to pull the data part into skb head
+		 * and make it linear, otherwise allocate space for
+		 * hdr_len only
+		 */
+		len = (wb->sph ? hdr_len : 0) + data_len;
+		if (!wb->eop || len > ATL_RX_HDR_SIZE)
+			len = hdr_len;
+
+		/* reserve space for potential __pskb_pull_tail() */
+		tailroom = min(ATL_RX_TAILROOM, ATL_RX_HDR_SIZE - len);
+		pgref = &rxbuf->head;
+	}
+
+	if (atl_rx_linear || (wb->sph && (wb->eop || !wb->rsc_cnt)))
+		atl_sync_range(ring, pgref,
+			ATL_RX_HEADROOM, hdr_len);
+
+	alloc = len + tailroom + ATL_RX_HEADROOM;
+	alloc += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	alloc = SKB_DATA_ALIGN(alloc);
+
+	hdr = atl_buf_vaddr(pgref);
+	skb = build_skb(hdr, alloc);
+	if (unlikely(!skb)) {
+		atl_update_ring_stat(ring, rx.alloc_skb_failed, 1);
+		return NULL;
+	}
+
+	if (wb->rsc_cnt && !wb->eop) {
+		struct atl_cb *atl_cb = ATL_CB(skb);
+
+		/* First frag of a multi-frag RSC packet. Either head or
+		 * data buffer, depending on whether the header was
+		 * split off by HW, might still be accessed by
+		 * RSC. Delay processing till EOP.
+		 */
+		if (wb->sph) {
+			atl_cb->pgref = *pgref;
+			atl_cb->head = true;
+			/* Safe to sync the data buf. !wb->eop
+			 * implies the data buffer is completely filled.
+			 */
+			atl_sync_range(ring, &rxbuf->data, 0, ATL_RX_BUF_SIZE);
+		} else {
+			atl_cb->pgref = rxbuf->data;
+			atl_cb->head = false;
+			/* No need to sync head fragment as nothing
+			 * was DMA'd into it
+			 */
+		}
+		atl_get_rxpage(&atl_cb->pgref);
+	}
+
+	pgref->pg_off += alloc;
+	page_ref_inc(pgref->rxpage->page);
+
+	if (!atl_rx_linear && !wb->sph) {
+		atl_nic_dbg("Header not split despite non-zero hdr_len (%d)\n",
+			hdr_len);
+		/* Make skb head empty -- will copy the real header
+		 * from the data buffer later
+		 */
+		hdr_len = 0;
+	}
+
+	skb_reserve(skb, ATL_RX_HEADROOM);
+	skb_put(skb, hdr_len);
+	return skb;
+}
+
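+/* Local equivalent of skb_put_data(): copy @len bytes to the skb tail
+ * and grow the skb accordingly.
+ */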
+static inline void atl_skb_put_data(struct sk_buff *skb,
+	void *data, unsigned int len)
+{
+	memcpy(skb_tail_pointer(skb), data, len);
+	skb->tail += len;
+	skb->len += len;
+}
+
+static struct sk_buff *atl_process_rx_frag(struct atl_desc_ring *ring,
+	struct atl_rxbuf *rxbuf, struct atl_rx_desc_wb *wb)
+{
+	bool first_frag = false;
+	bool hdr_split = !!wb->sph;
+	unsigned int hdr_len, data_len, aligned_data_len;
+	unsigned int data_offt = 0, to_pull = 0;
+	struct sk_buff *skb = rxbuf->skb;
+	struct atl_cb *atl_cb;
+	struct atl_pgref *headref = &rxbuf->head, *dataref = &rxbuf->data;
+	struct device *dev = ring->qvec->dev;
+
+	if (!skb) {
+		 /* First buffer of a packet */
+		skb = atl_init_skb(ring, rxbuf, wb);
+		first_frag = true;
+	} else
+		rxbuf->skb = NULL;
+
+	if (unlikely(!skb || skb == (void *)-1l))
+		return skb;
+
+	hdr_len = wb->hdr_len;
+	data_len = atl_data_len(wb);
+
+	if (atl_rx_linear) {
+		/* Linear skb mode. The entire packet was DMA'd into
+		 * the data buffer and skb has already been built
+		 * around it and dataref's pg_off has been increased
+		 * in atl_init_skb()
+		 */
+
+		atl_maybe_recycle_rxbuf(ring, rxbuf);
+		return skb;
+	}
+
+	/* Align the start of the next buffer in the page. This also
+	 * serves as truesize increment when the paged frag is added
+	 * to skb.
+	 */
+	aligned_data_len = ALIGN(data_len, L1_CACHE_BYTES);
+
+	if (first_frag && !hdr_split)
+		/* Header was not split off, so skip over it
+		 * when adding the paged frag
+		 */
+		data_offt = hdr_len;
+
+	if (!first_frag || wb->eop || !wb->rsc_cnt) {
+		atl_sync_range(ring, dataref, 0, data_len);
+
+		/* If header was not split off by HW, remember to pull
+		 * it into the skb head later. The rest of the data
+		 * buffer might need to be pulled too for small
+		 * packets, so delay the actual copying till later
+		 */
+		if (first_frag && !hdr_split)
+			to_pull = hdr_len;
+	}
+
+	/* If the entire packet fits within ATL_RX_HDR_SIZE bytes,
+	 * pull it into the skb head. This handles the header not
+	 * having been split by HW case correctly too, as
+	 * skb_headlen() will be zero in that case and data_len will
+	 * hold the whole packet length.
+	 */
+	if (first_frag && skb_headlen(skb) + data_len <= ATL_RX_HDR_SIZE) {
+		to_pull = data_len;
+		/* Recycle the data buffer as we're copying the
+		 * contents to skb head.
+		 */
+		aligned_data_len = 0;
+	} else {
+		/* Add the data buffer to paged frag list, skipping
+		 * the un-split header if any -- it will be copied to
+		 * skb head later.
+		 */
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+			dataref->rxpage->page, dataref->pg_off + data_offt,
+			data_len - data_offt, aligned_data_len);
+		page_ref_inc(dataref->rxpage->page);
+	}
+
+	if (to_pull)
+		atl_skb_put_data(skb, atl_buf_vaddr(dataref), to_pull);
+
+	/* Update the data buf's pg_off to point to free
+	 * space. Header buf's offset was updated in atl_init_skb()
+	 * for first frag of the packet only.
+	 */
+	dataref->pg_off += aligned_data_len;
+	atl_maybe_recycle_rxbuf(ring, rxbuf);
+
+	if (first_frag || !wb->eop || !wb->rsc_cnt)
+		return skb;
+
+	/* The last descriptor of RSC packet is done, unmap the head
+	 * fragment.
+	 */
+	atl_cb = ATL_CB(skb);
+
+	headref = &atl_cb->pgref;
+	if (unlikely(!headref->rxpage))
+		return skb;
+
+	if (likely(atl_cb->head)) {
+		atl_sync_range(ring, headref, ATL_RX_HEADROOM, hdr_len);
+		atl_put_rxpage(headref, dev);
+	} else {
+		atl_sync_range(ring, headref, 0, ATL_RX_BUF_SIZE);
+		/* Data buf's sync being delayed implies header was
+		 * not split off by HW. Fix that now.
+		 */
+		atl_skb_put_data(skb, atl_buf_vaddr(headref), hdr_len);
+		atl_put_rxpage(headref, dev);
+	}
+
+	return skb;
+}
+
+unsigned int atl_rx_refill_batch = 16;
+module_param_named(rx_refill_batch, atl_rx_refill_batch, uint, 0644);
+
+static int atl_clean_rx(struct atl_desc_ring *ring, int budget)
+{
+	unsigned int packets = 0;
+	unsigned int bytes = 0;
+	struct sk_buff *skb;
+
+	while (packets < budget) {
+		uint32_t space = ring_space(ring);
+		struct atl_rx_desc_wb *wb;
+		struct atl_rxbuf *rxbuf;
+		unsigned int len;
+		DECLARE_SCRATCH_DESC(scratch);
+
+		if (space >= atl_rx_refill_batch)
+			atl_fill_rx(ring, space);
+
+		rxbuf = &ring->rxbufs[ring->head];
+
+		wb = &DESC_PTR(ring, ring->head, scratch)->wb;
+		FETCH_DESC(ring, ring->head, scratch);
+
+		if (!wb->dd)
+			break;
+		DESC_RMB();
+
+		skb = atl_process_rx_frag(ring, rxbuf, wb);
+
+		/* Treat allocation errors as transient and retry later */
+		if (!skb) {
+			struct atl_nic *nic = ring->qvec->nic;
+
+			atl_nic_err("failed to alloc skb for RX packet\n");
+			break;
+		}
+
+		if (skb == (void *)-1l)
+			atl_maybe_recycle_rxbuf(ring, rxbuf);
+
+		bump_head(ring, 1);
+		if (!wb->eop) {
+			uint32_t next = wb->rsc_cnt ?
+				le16_to_cpu(wb->next_desp) :
+				ring->head;
+			/* If atl_process_rx_frag() returned an error
+			 * marker, this propagates it to the next
+			 * descriptor of the packet, preventing it
+			 * from being treated as the start of a new
+			 * packet later.
+			 */
+			ring->rxbufs[next].skb = skb;
+			atl_update_ring_stat(ring, rx.non_eop_descs, 1);
+			continue;
+		}
+
+		if (skb == (void *)-1l)
+			continue;
+
+		len = skb->len;
+		if (atl_rx_packet(skb, wb, ring)) {
+			packets++;
+			bytes += len;
+		}
+	}
+
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.rx.bytes += bytes;
+	ring->stats.rx.packets += packets;
+	u64_stats_update_end(&ring->syncp);
+
+	return packets;
+}
+
+unsigned int atl_min_intr_delay = 10;
+module_param_named(min_intr_delay, atl_min_intr_delay, uint, 0644);
+
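+/* Program the per-vector interrupt throttle: the enable bit (bit 31)
+ * and the minimum inter-interrupt delay in bits 16-24 (the hardware
+ * appears to count in 2us units, hence the divide by 2).
+ */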
+static void atl_set_intr_throttle(struct atl_queue_vec *qvec)
+{
+	struct atl_hw *hw = &qvec->nic->hw;
+	atl_write(hw, ATL_INTR_THRTL(atl_qvec_intr(qvec)),
+		1 << 0x1f | ((atl_min_intr_delay / 2) & 0x1ff) << 0x10);
+}
+
+static int atl_poll(struct napi_struct *napi, int budget)
+{
+	struct atl_queue_vec *qvec;
+	struct atl_nic *nic;
+	bool clean_done;
+	int rx_cleaned;
+
+	qvec = container_of(napi, struct atl_queue_vec, napi);
+	nic = qvec->nic;
+
+	clean_done = atl_clean_tx(&qvec->tx);
+	rx_cleaned = atl_clean_rx(&qvec->rx, budget);
+
+	clean_done &= (rx_cleaned < budget);
+
+	if (!clean_done)
+		return budget;
+
+	napi_complete_done(napi, rx_cleaned);
+	atl_intr_enable(&nic->hw, BIT(atl_qvec_intr(qvec)));
+	/* atl_set_intr_throttle(qvec); */
+	return rx_cleaned;
+}
+
+/* XXX NOTE: only checked on device probe for now */
+static int enable_msi = 1;
+module_param_named(msi, enable_msi, int, 0444);
+
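+/* Allocate interrupt vectors, preferring MSI-X / MSI with affinity
+ * spreading over the ring vectors and falling back to a single legacy
+ * INTx interrupt. Returns the number of ring vectors on success.
+ */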
+static int atl_config_interrupts(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+	unsigned int flags;
+	int ret;
+	struct irq_affinity iaff = {
+		.pre_vectors = ATL_NUM_NON_RING_IRQS,
+		.post_vectors = 0,
+	};
+
+	if (enable_msi) {
+		flags = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_AFFINITY;
+		ret = pci_alloc_irq_vectors_affinity(hw->pdev,
+			ATL_NUM_NON_RING_IRQS + 1,
+			ATL_NUM_NON_RING_IRQS + nic->requested_nvecs,
+			flags, &iaff);
+
+		/* pci_alloc_irq_vectors() never allocates fewer
+		 * than min_vectors
+		 */
+		if (ret > 0) {
+			ret -= ATL_NUM_NON_RING_IRQS;
+			nic->nvecs = ret;
+			nic->flags |= ATL_FL_MULTIPLE_VECTORS;
+			return ret;
+		}
+	}
+
+	atl_nic_warn("Couldn't allocate MSI-X / MSI vectors, falling back to legacy interrupts\n");
+
+	ret = pci_alloc_irq_vectors(hw->pdev, 1, 1, PCI_IRQ_LEGACY);
+	if (ret < 0) {
+		atl_nic_err("Couldn't allocate legacy IRQ\n");
+		return ret;
+	}
+
+	nic->nvecs = 1;
+	nic->flags &= ~ATL_FL_MULTIPLE_VECTORS;
+
+	return 1;
+}
+
+irqreturn_t atl_ring_irq(int irq, void *priv)
+{
+	struct napi_struct *napi = priv;
+
+	napi_schedule_irqoff(napi);
+	return IRQ_HANDLED;
+}
+
+void atl_clear_datapath(struct atl_nic *nic)
+{
+	int i;
+	struct atl_queue_vec *qvecs = nic->qvecs;
+
+	/* If atl_reconfigure() has failed previously,
+	 * atl_clear_datapath() can be called again on
+	 * pci_ops->remove(), without an intervening
+	 * atl_setup_datapath().
+	 */
+	if (!test_and_clear_bit(ATL_ST_CONFIGURED, &nic->state))
+		return;
+
+#ifdef ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY
+	for (i = 0; i < nic->nvecs; i++) {
+		int vector = pci_irq_vector(nic->hw.pdev,
+			i + ATL_NUM_NON_RING_IRQS);
+		irq_set_affinity_hint(vector, NULL);
+	}
+#endif
+
+	pci_free_irq_vectors(nic->hw.pdev);
+
+	if (!qvecs)
+		return;
+
+	for (i = 0; i < nic->nvecs; i++)
+		netif_napi_del(&qvecs[i].napi);
+	kfree(qvecs);
+	nic->qvecs = NULL;
+}
+
+int atl_setup_datapath(struct atl_nic *nic)
+{
+	int nvecs, i, ret;
+	struct atl_queue_vec *qvec;
+
+	nvecs = atl_config_interrupts(nic);
+	if (nvecs < 0)
+		return nvecs;
+
+	qvec = kcalloc(nvecs, sizeof(*qvec), GFP_KERNEL);
+	if (!qvec) {
+		atl_nic_err("Couldn't alloc qvecs\n");
+		ret = -ENOMEM;
+		goto exit_free;
+	}
+	nic->qvecs = qvec;
+
+	for (i = 0; i < nvecs; i++, qvec++) {
+		qvec->nic = nic;
+		qvec->idx = i;
+		qvec->dev = &nic->hw.pdev->dev;
+
+		qvec->rx.hw.reg_base = ATL_RX_RING(i);
+		qvec->rx.qvec = qvec;
+		qvec->rx.hw.size = nic->requested_rx_size;
+
+		qvec->tx.hw.reg_base = ATL_TX_RING(i);
+		qvec->tx.qvec = qvec;
+		qvec->tx.hw.size = nic->requested_tx_size;
+
+		u64_stats_init(&qvec->rx.syncp);
+		u64_stats_init(&qvec->tx.syncp);
+
+		netif_napi_add(nic->ndev, &qvec->napi, atl_poll, 64);
+	}
+
+	atl_compat_calc_affinities(nic);
+
+	nic->max_mtu = atl_rx_linear ? ATL_MAX_RX_LINEAR_MTU : ATL_MAX_MTU;
+
+	set_bit(ATL_ST_CONFIGURED, &nic->state);
+	return 0;
+
+exit_free:
+	atl_clear_datapath(nic);
+	return ret;
+}
+
+static inline void atl_free_rxpage(struct atl_pgref *pgref, struct device *dev)
+{
+	struct atl_rxpage *rxpage = pgref->rxpage;
+
+	if (!rxpage)
+		return;
+
+	/* Unmap, dropping the ref for being mapped */
+	__atl_free_rxpage(rxpage, dev);
+	pgref->rxpage = 0;
+}
+
+/* Releases any skbs that may have been queued on ring positions yet
+ * to be processed by poll. The buffers are kept to be reused after
+ * resume / thaw. */
+static void atl_clear_rx_bufs(struct atl_desc_ring *ring)
+{
+	unsigned int bufs = ring_occupied(ring);
+	struct device *dev = ring->qvec->dev;
+
+	while (bufs) {
+		struct atl_rxbuf *rxbuf = &ring->rxbufs[ring->head];
+		struct sk_buff *skb = rxbuf->skb;
+
+		if (skb) {
+			struct atl_pgref *pgref = &ATL_CB(skb)->pgref;
+
+			atl_put_rxpage(pgref, dev);
+			dev_kfree_skb_any(skb);
+			rxbuf->skb = NULL;
+		}
+
+		bump_head(ring, 1);
+		bufs--;
+	}
+}
+
+static void atl_free_rx_bufs(struct atl_desc_ring *ring)
+{
+	struct device *dev = ring->qvec->dev;
+	struct atl_rxbuf *rxbuf;
+
+	if (!ring->rxbufs)
+		return;
+
+	for (rxbuf = ring->rxbufs;
+	     rxbuf < &ring->rxbufs[ring->hw.size]; rxbuf++) {
+		atl_free_rxpage(&rxbuf->head, dev);
+		atl_free_rxpage(&rxbuf->data, dev);
+	}
+}
+
+static void atl_free_tx_bufs(struct atl_desc_ring *ring)
+{
+	unsigned int bufs = ring_occupied(ring);
+
+	if (!ring->txbufs)
+		return;
+
+	while (bufs) {
+		struct atl_txbuf *txbuf;
+
+		bump_tail(ring, -1);
+		txbuf = &ring->txbufs[ring->tail];
+
+		atl_txbuf_free(txbuf, ring->qvec->dev, ring->tail);
+		bufs--;
+	}
+}
+
+static void atl_free_ring(struct atl_desc_ring *ring)
+{
+	if (ring->bufs) {
+		vfree(ring->bufs);
+		ring->bufs = 0;
+	}
+
+	atl_free_descs(ring->qvec->nic, &ring->hw);
+}
+
+static int atl_alloc_ring(struct atl_desc_ring *ring, size_t buf_size,
+	char *type)
+{
+	int ret;
+	struct atl_nic *nic = ring->qvec->nic;
+	int idx = ring->qvec->idx;
+
+	ret = atl_alloc_descs(nic, &ring->hw);
+	if (ret) {
+		atl_nic_err("Couldn't alloc %s[%d] descriptors\n", type, idx);
+		return ret;
+	}
+
+	ring->bufs = vzalloc(ring->hw.size * buf_size);
+	if (!ring->bufs) {
+		atl_nic_err("Couldn't alloc %s[%d] %sbufs\n", type, idx, type);
+		ret = -ENOMEM;
+		goto free;
+	}
+
+	ring->head = ring->tail =
+		atl_read(&nic->hw, ATL_RING_HEAD(ring)) & 0x1fff;
+	return 0;
+
+free:
+	atl_free_ring(ring);
+	return ret;
+}
+
+static int atl_alloc_qvec_intr(struct atl_queue_vec *qvec)
+{
+	struct atl_nic *nic = qvec->nic;
+	int vector;
+	int ret;
+
+	snprintf(qvec->name, sizeof(qvec->name), "%s-ring-%d",
+		nic->ndev->name, qvec->idx);
+
+	if (!(nic->flags & ATL_FL_MULTIPLE_VECTORS))
+		return 0;
+
+	vector = pci_irq_vector(nic->hw.pdev, atl_qvec_intr(qvec));
+	ret = request_irq(vector, atl_ring_irq, 0, qvec->name, &qvec->napi);
+	if (ret) {
+		atl_nic_err("request MSI ring vector failed: %d\n", -ret);
+		return ret;
+	}
+
+	atl_compat_set_affinity(vector, qvec);
+
+	return 0;
+}
+
+static void atl_free_qvec_intr(struct atl_queue_vec *qvec)
+{
+	int vector = pci_irq_vector(qvec->nic->hw.pdev, atl_qvec_intr(qvec));
+
+	if (!(qvec->nic->flags & ATL_FL_MULTIPLE_VECTORS))
+		return;
+
+	atl_compat_set_affinity(vector, NULL);
+	free_irq(vector, &qvec->napi);
+}
+
+static int atl_alloc_qvec(struct atl_queue_vec *qvec)
+{
+	struct atl_txbuf *txbuf;
+	int count = qvec->tx.hw.size;
+	int ret;
+
+	ret = atl_alloc_qvec_intr(qvec);
+	if (ret)
+		return ret;
+
+	ret = atl_alloc_ring(&qvec->tx, sizeof(struct atl_txbuf), "tx");
+	if (ret)
+		goto free_irq;
+
+	ret = atl_alloc_ring(&qvec->rx, sizeof(struct atl_rxbuf), "rx");
+	if (ret)
+		goto free_tx;
+
+	for (txbuf = qvec->tx.txbufs; count; count--)
+		(txbuf++)->last = -1;
+
+	return 0;
+
+free_tx:
+	atl_free_ring(&qvec->tx);
+free_irq:
+	atl_free_qvec_intr(qvec);
+
+	return ret;
+}
+
+static void atl_free_qvec(struct atl_queue_vec *qvec)
+{
+	struct atl_desc_ring *rx = &qvec->rx;
+	struct atl_desc_ring *tx = &qvec->tx;
+
+	atl_free_rx_bufs(rx);
+	atl_free_ring(rx);
+
+	atl_free_ring(tx);
+	atl_free_qvec_intr(qvec);
+}
+
+int atl_alloc_rings(struct atl_nic *nic)
+{
+	struct atl_queue_vec *qvec;
+	int ret;
+
+	atl_for_each_qvec(nic, qvec) {
+		ret = atl_alloc_qvec(qvec);
+		if (ret)
+			goto free;
+	}
+
+	return 0;
+
+free:
+	while (--qvec >= &nic->qvecs[0])
+		atl_free_qvec(qvec);
+
+	return ret;
+}
+
+void atl_free_rings(struct atl_nic *nic)
+{
+	struct atl_queue_vec *qvec;
+
+	atl_for_each_qvec(nic, qvec)
+		atl_free_qvec(qvec);
+}
+
+static unsigned int atl_rx_mod_hyst = 10, atl_tx_mod_hyst = 10;
+module_param_named(rx_mod_hyst, atl_rx_mod_hyst, uint, 0644);
+module_param_named(tx_mod_hyst, atl_tx_mod_hyst, uint, 0644);
+
+static void atl_set_intr_mod_qvec(struct atl_queue_vec *qvec)
+{
+	struct atl_nic *nic = qvec->nic;
+	struct atl_hw *hw = &nic->hw;
+	unsigned int min, max;
+	int idx = qvec->idx;
+
+	min = nic->rx_intr_delay - atl_min_intr_delay;
+	max = min + atl_rx_mod_hyst;
+
+	atl_write(hw, ATL_RX_INTR_MOD_CTRL(idx),
+		(max / 2) << 0x10 | (min / 2) << 8 | 2);
+
+	min = nic->tx_intr_delay - atl_min_intr_delay;
+	max = min + atl_tx_mod_hyst;
+
+	atl_write(hw, ATL_TX_INTR_MOD_CTRL(idx),
+		(max / 2) << 0x10 | (min / 2) << 8 | 2);
+}
+
+void atl_set_intr_mod(struct atl_nic *nic)
+{
+	struct atl_queue_vec *qvec;
+
+	atl_for_each_qvec(nic, qvec)
+		atl_set_intr_mod_qvec(qvec);
+}
+
+static void atl_start_rx_ring(struct atl_desc_ring *ring)
+{
+	struct atl_hw *hw = &ring->qvec->nic->hw;
+	int idx = ring->qvec->idx;
+	unsigned int rx_ctl;
+
+	atl_write(hw, ATL_RING_BASE_LSW(ring), ring->hw.daddr);
+	atl_write(hw, ATL_RING_BASE_MSW(ring), ring->hw.daddr >> 32);
+
+	atl_write(hw, ATL_RX_RING_TAIL(ring), ring->tail);
+	atl_write(hw, ATL_RX_RING_BUF_SIZE(ring),
+		(ATL_RX_HDR_SIZE / 64) << 8 | ATL_RX_BUF_SIZE / 1024);
+	atl_write(hw, ATL_RX_RING_THRESH(ring), 8 << 0x10 | 24 << 0x18);
+
+	/* LRO */
+	atl_write_bits(hw, ATL_RX_LRO_PKT_LIM(idx),
+		(idx & 7) * 4, 2, 3);
+
+	/* Enable ring | VLAN offload | header split in non-linear mode */
+	rx_ctl = BIT(31) | BIT(29) | ring->hw.size |
+		(atl_rx_linear ? 0 : BIT(28));
+	atl_write(hw, ATL_RX_RING_CTL(ring), rx_ctl);
+}
+
+static void atl_start_tx_ring(struct atl_desc_ring *ring)
+{
+	struct atl_nic *nic = ring->qvec->nic;
+	struct atl_hw *hw = &nic->hw;
+
+	atl_write(hw, ATL_RING_BASE_LSW(ring), ring->hw.daddr);
+	atl_write(hw, ATL_RING_BASE_MSW(ring), ring->hw.daddr >> 32);
+
+	/* Enable TSO on all active Tx rings */
+	atl_write(hw, ATL_TX_LSO_CTRL, BIT(nic->nvecs) - 1);
+
+	atl_write(hw, ATL_TX_RING_TAIL(ring), ring->tail);
+	atl_write(hw, ATL_TX_RING_THRESH(ring), 8 << 8 | 8 << 0x10 |
+		24 << 0x18);
+	atl_write(hw, ATL_TX_RING_CTL(ring), BIT(31) | ring->hw.size);
+}
+
+static int atl_start_qvec(struct atl_queue_vec *qvec)
+{
+	struct atl_desc_ring *rx = &qvec->rx;
+	struct atl_desc_ring *tx = &qvec->tx;
+	struct atl_hw *hw = &qvec->nic->hw;
+	int intr = atl_qvec_intr(qvec);
+	struct atl_rxbuf *rxbuf;
+	int ret;
+
+	rx->head = rx->tail = atl_read(hw, ATL_RING_HEAD(rx)) & 0x1fff;
+	tx->head = tx->tail = atl_read(hw, ATL_RING_HEAD(tx)) & 0x1fff;
+
+	ret = atl_fill_rx(rx, ring_space(rx));
+	if (ret)
+		return ret;
+
+	rx->next_to_recycle = rx->tail;
+	/* The rxbuf at ->next_to_recycle is always kept empty so that
+	 * atl_maybe_recycle_rxbuf() always has a spot to recycle into
+	 * without overwriting a pgref to an already allocated page,
+	 * leaking memory. It's also the guard element in the ring
+	 * that keeps ->tail from overrunning ->head. If it's nonempty
+	 * on ring init (e.g. after a sleep-wake cycle), just release
+	 * the pages. */
+	rxbuf = &rx->rxbufs[rx->next_to_recycle];
+	atl_put_rxpage(&rxbuf->head, qvec->dev);
+	atl_put_rxpage(&rxbuf->data, qvec->dev);
+
+	/* Map ring interrupts into the corresponding cause bits */
+	atl_set_intr_bits(hw, qvec->idx, intr, intr);
+	atl_set_intr_throttle(qvec);
+
+	napi_enable(&qvec->napi);
+	atl_set_intr_mod_qvec(qvec);
+	atl_intr_enable(hw, BIT(atl_qvec_intr(qvec)));
+
+	atl_start_tx_ring(tx);
+	atl_start_rx_ring(rx);
+
+	return 0;
+}
+
+static void atl_stop_qvec(struct atl_queue_vec *qvec)
+{
+	struct atl_desc_ring *rx = &qvec->rx;
+	struct atl_desc_ring *tx = &qvec->tx;
+	struct atl_hw *hw = &qvec->nic->hw;
+
+	/* Disable and reset rings */
+	atl_write(hw, ATL_RING_CTL(rx), BIT(25));
+	atl_write(hw, ATL_RING_CTL(tx), BIT(25));
+	udelay(10);
+	atl_write(hw, ATL_RING_CTL(rx), 0);
+	atl_write(hw, ATL_RING_CTL(tx), 0);
+
+	atl_intr_disable(hw, BIT(atl_qvec_intr(qvec)));
+	napi_disable(&qvec->napi);
+
+	atl_clear_rx_bufs(rx);
+	atl_free_tx_bufs(tx);
+}
+
+static void atl_set_lro(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+	uint32_t val = nic->ndev->features & NETIF_F_LRO ?
+		BIT(nic->nvecs) - 1 : 0;
+
+	atl_write_bits(hw, ATL_RX_LRO_CTRL1, 0, nic->nvecs, val);
+	atl_write_bits(hw, ATL_INTR_RSC_EN, 0, nic->nvecs, val);
+}
+
+int atl_start_rings(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+	uint32_t mask;
+	struct atl_queue_vec *qvec;
+	int ret;
+
+	mask = BIT(nic->nvecs + ATL_NUM_NON_RING_IRQS) -
+		BIT(ATL_NUM_NON_RING_IRQS);
+	/* Enable auto-masking of ring interrupts on intr generation */
+	atl_set_bits(hw, ATL_INTR_AUTO_MASK, mask);
+	/* Enable status auto-clear on intr generation */
+	atl_set_bits(hw, ATL_INTR_AUTO_CLEAR, mask);
+
+	atl_set_lro(nic);
+	atl_set_rss_tbl(hw);
+
+	atl_for_each_qvec(nic, qvec) {
+		ret = atl_start_qvec(qvec);
+		if (ret)
+			goto stop;
+	}
+
+	return 0;
+
+stop:
+	while (--qvec >= &nic->qvecs[0])
+		atl_stop_qvec(qvec);
+
+	return ret;
+}
+
+void atl_stop_rings(struct atl_nic *nic)
+{
+	struct atl_queue_vec *qvec;
+	struct atl_hw *hw = &nic->hw;
+
+	atl_for_each_qvec(nic, qvec)
+		atl_stop_qvec(qvec);
+
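+	/* Pulse bit 0 of register 0x5a00 (not named in atl_regs.h);
+	 * presumably this resets the Rx path after the rings stop.
+	 */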
+	atl_write_bit(hw, 0x5a00, 0, 1);
+	udelay(10);
+	atl_write_bit(hw, 0x5a00, 0, 0);
+}
+
+int atl_set_features(struct net_device *ndev, netdev_features_t features)
+{
+	netdev_features_t changed = ndev->features ^ features;
+
+	ndev->features = features;
+
+	if (changed & NETIF_F_LRO)
+		atl_set_lro(netdev_priv(ndev));
+
+	return 0;
+}
+
+void atl_get_ring_stats(struct atl_desc_ring *ring,
+	struct atl_ring_stats *stats)
+{
+	unsigned int start;
+
+	do {
+		start = u64_stats_fetch_begin_irq(&ring->syncp);
+		memcpy(stats, &ring->stats, sizeof(*stats));
+	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+}
+
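+/* Sum two stats structs field by field; relies on both being plain
+ * arrays of uint64_t counters. */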
+#define atl_add_stats(_dst, _src)				\
+do {								\
+	int i;							\
+	uint64_t *dst = (uint64_t *)(&(_dst));			\
+	uint64_t *src = (uint64_t *)(&(_src));			\
+								\
+	for (i = 0; i < sizeof(_dst) / sizeof(uint64_t); i++)	\
+		dst[i] += src[i];				\
+} while (0)
+
+void atl_update_global_stats(struct atl_nic *nic)
+{
+	int i;
+	struct atl_ring_stats stats;
+
+	memset(&stats, 0, sizeof(stats));
+	atl_update_eth_stats(nic);
+
+	spin_lock(&nic->stats_lock);
+
+	memset(&nic->stats.rx, 0, sizeof(nic->stats.rx));
+	memset(&nic->stats.tx, 0, sizeof(nic->stats.tx));
+
+	for (i = 0; i < nic->nvecs; i++) {
+		atl_get_ring_stats(&nic->qvecs[i].rx, &stats);
+		atl_add_stats(nic->stats.rx, stats.rx);
+
+		atl_get_ring_stats(&nic->qvecs[i].tx, &stats);
+		atl_add_stats(nic->stats.tx, stats.tx);
+	}
+
+	spin_unlock(&nic->stats_lock);
+}
+
+void atl_get_stats64(struct net_device *ndev,
+	struct rtnl_link_stats64 *nstats)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_global_stats *stats = &nic->stats;
+
+	atl_update_global_stats(nic);
+
+	nstats->rx_bytes = stats->rx.bytes;
+	nstats->rx_packets = stats->rx.packets;
+	nstats->tx_bytes = stats->tx.bytes;
+	nstats->tx_packets = stats->tx.packets;
+	nstats->rx_crc_errors = stats->rx.csum_err;
+	nstats->rx_frame_errors = stats->rx.mac_err;
+	nstats->rx_errors = nstats->rx_crc_errors + nstats->rx_frame_errors;
+	nstats->multicast = stats->rx.multicast;
+	nstats->tx_aborted_errors = stats->tx.dma_map_failed;
+	nstats->tx_errors = nstats->tx_aborted_errors;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.h
new file mode 100644
index 0000000..bc433db
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.h
@@ -0,0 +1,199 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_RING_H_
+#define _ATL_RING_H_
+
+#include <linux/compiler.h>
+
+#include "atl_common.h"
+#include "atl_desc.h"
+
+//#define ATL_RINGS_IN_UC_MEM
+
+#define ATL_TX_DESC_WB
+//#define ATL_TX_HEAD_WB
+
+#define ATL_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+#define ATL_RX_TAILROOM 64u
+#define ATL_RX_HEAD_ORDER 0
+#define ATL_RX_DATA_ORDER 0
+
+/* Header space in skb. Must be a multiple of L1_CACHE_BYTES */
+#define ATL_RX_HDR_SIZE 256u
+#define ATL_RX_HDR_OVRHD SKB_DATA_ALIGN(ATL_RX_HEADROOM +	\
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define ATL_RX_BUF_SIZE 2048
+
+#define ATL_MAX_RX_LINEAR_MTU (ATL_RX_BUF_SIZE - ETH_HLEN)
+
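+/* Number of free descriptors in a ring. One slot is always kept
+ * empty so that ->tail can never catch up with ->head on wrap. */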
+#define ring_space(ring)						\
+	({								\
+		typeof(ring) __ring = (ring);				\
+		uint32_t space = READ_ONCE(__ring->head) -		\
+			READ_ONCE(__ring->tail) - 1;			\
+		(int32_t)space < 0 ? space + __ring->hw.size : space;	\
+	})
+
+#define ring_occupied(ring)						\
+	({								\
+		typeof(ring) __ring = (ring);				\
+		uint32_t occupied = READ_ONCE(__ring->tail) -		\
+			READ_ONCE(__ring->head);			\
+		(int32_t)occupied < 0 ? occupied + __ring->hw.size	\
+			: occupied;					\
+	})
+
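+/* Advance a ring index by amount (possibly negative), wrapping modulo
+ * the ring size, and return the new value. */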
+#define bump_ptr(ptr, ring, amount)					\
+	({								\
+		uint32_t __res = offset_ptr(ptr, ring, amount);		\
+		(ptr) = __res;						\
+		__res;							\
+	})
+
+/* These don't have to be atomic, because Tx tail is only adjusted
+ * in ndo->start_xmit which is serialized by the stack and the rest are
+ * only adjusted in NAPI poll which is serialized by NAPI */
+#define bump_tail(ring, amount) do {					\
+	uint32_t __ptr = READ_ONCE((ring)->tail);			\
+	WRITE_ONCE((ring)->tail, offset_ptr(__ptr, ring, amount));	\
+	} while (0)
+
+#define bump_head(ring, amount) do {					\
+	uint32_t __ptr = READ_ONCE((ring)->head);			\
+	WRITE_ONCE((ring)->head, offset_ptr(__ptr, ring, amount));	\
+	} while (0)
+
+struct atl_rxpage {
+	struct page *page;
+	dma_addr_t daddr;
+	unsigned mapcount; 	/* not atomic_t because accesses are
+				 * serialized by NAPI */
+	unsigned order;
+};
+
+struct atl_pgref {
+	struct atl_rxpage *rxpage;
+	unsigned pg_off;
+};
+
+struct atl_cb {
+	struct atl_pgref pgref;
+	bool head;
+};
+#define ATL_CB(skb) ((struct atl_cb *)(skb)->cb)
+
+struct atl_rxbuf {
+	struct sk_buff *skb;
+	struct atl_pgref head;
+	struct atl_pgref data;
+};
+
+struct atl_txbuf {
+	struct sk_buff *skb;
+	uint32_t last; /* index of eop descriptor */
+	unsigned bytes;
+	unsigned packets;
+	DEFINE_DMA_UNMAP_ADDR(daddr);
+	DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct atl_desc_ring {
+	struct atl_hw_ring hw;
+	uint32_t head, tail;
+	union {
+		/* Rx ring only */
+		uint32_t next_to_recycle;
+		/* Tx ring only, template desc for atl_map_tx_skb() */
+		union atl_desc desc;
+	};
+	union {
+		struct atl_rxbuf *rxbufs;
+		struct atl_txbuf *txbufs;
+		void *bufs;
+	};
+	struct atl_queue_vec *qvec;
+	struct u64_stats_sync syncp;
+	struct atl_ring_stats stats;
+};
+
+struct atl_queue_vec {
+	struct atl_desc_ring tx;
+	struct atl_desc_ring rx;
+	struct device *dev;	/* pdev->dev for DMA */
+	struct napi_struct napi;
+	struct atl_nic *nic;
+	unsigned idx;
+	char name[IFNAMSIZ + 10];
+#ifdef ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY
+	cpumask_t affinity_hint;
+#endif
+};
+
+#define atl_for_each_qvec(nic, qvec)				\
+	for (qvec = &(nic)->qvecs[0];				\
+	     qvec < &(nic)->qvecs[(nic)->nvecs]; qvec++)
+
+static inline struct atl_hw *ring_hw(struct atl_desc_ring *ring)
+{
+	return &ring->qvec->nic->hw;
+}
+
+static inline int atl_qvec_intr(struct atl_queue_vec *qvec)
+{
+	return qvec->idx + ATL_NUM_NON_RING_IRQS;
+}
+
+static inline void *atl_buf_vaddr(struct atl_pgref *pgref)
+{
+	return page_to_virt(pgref->rxpage->page) + pgref->pg_off;
+}
+
+static inline dma_addr_t atl_buf_daddr(struct atl_pgref *pgref)
+{
+	return pgref->rxpage->daddr + pgref->pg_off;
+}
+
+void atl_get_ring_stats(struct atl_desc_ring *ring,
+	struct atl_ring_stats *stats);
+
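+/* With rings in uncached memory, descriptors are staged in an
+ * on-stack scratch copy and committed / fetched with a single wide
+ * access; otherwise they are accessed in place in the coherent ring.
+ */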
+#ifdef ATL_RINGS_IN_UC_MEM
+
+#define DECLARE_SCRATCH_DESC(_name) union atl_desc _name
+#define DESC_PTR(_ring, _idx, _scratch) (&(_scratch))
+#define COMMIT_DESC(_ring, _idx, _scratch)		\
+	WRITE_ONCE((_ring)->hw.descs[_idx], (_scratch))
+#define FETCH_DESC(_ring, _idx, _scratch)			\
+do {								\
+	(_scratch) = READ_ONCE((_ring)->hw.descs[_idx]);	\
+	dma_rmb();						\
+} while (0)
+
+#define DESC_RMB()
+
+#else // ATL_RINGS_IN_UC_MEM
+
+#define DECLARE_SCRATCH_DESC(_name)
+#define DESC_PTR(_ring, _idx, _scratch) (&(_ring)->hw.descs[_idx])
+#define COMMIT_DESC(_ring, _idx, _scratch)
+#define FETCH_DESC(_ring, _idx, _scratch)
+#define DESC_RMB() dma_rmb()
+
+#endif // ATL_RINGS_IN_UC_MEM
+
+#ifdef ATL_TX_HEAD_WB
+#error Head ptr writeback not implemented
+#elif !defined(ATL_TX_DESC_WB)
+static inline uint32_t atl_get_tx_head(struct atl_desc_ring *ring)
+{
+	return atl_read(ring_hw(ring), ATL_TX_RING_HEAD(ring)) & 0x1fff;
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.c
new file mode 100644
index 0000000..70c5806
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.c
@@ -0,0 +1,14 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#define CREATE_TRACE_POINTS
+#include "atl_trace.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.h
new file mode 100644
index 0000000..e7333d5
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.h
@@ -0,0 +1,130 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM atlnew
+
+#if !defined(_ATL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ATL_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "atl_desc.h"
+
+DECLARE_EVENT_CLASS(atl_dma_map_class,
+	TP_PROTO(int frag_idx, int ring_idx, dma_addr_t daddr, size_t size, struct sk_buff *skb,
+		void *vaddr),
+	TP_ARGS(frag_idx, ring_idx, daddr, size, skb, vaddr),
+	TP_STRUCT__entry(
+		__field(int, frag_idx)
+		__field(int, ring_idx)
+		__field(dma_addr_t, daddr)
+		__field(size_t, size)
+		__field(struct sk_buff *, skb)
+		__field(void *, vaddr)
+	),
+	TP_fast_assign(
+		__entry->frag_idx = frag_idx;
+		__entry->ring_idx = ring_idx;
+		__entry->daddr = daddr;
+		__entry->size = size;
+		__entry->skb = skb;
+		__entry->vaddr = vaddr;
+	),
+	TP_printk("idx %d ring idx %d daddr %pad len %#zx skb %p vaddr %p",
+		__entry->frag_idx, __entry->ring_idx, &__entry->daddr,
+		__entry->size, __entry->skb, __entry->vaddr)
+);
+
+#define DEFINE_MAP_EVENT(name)						\
+	DEFINE_EVENT(atl_dma_map_class, name,				\
+		TP_PROTO(int frag_idx, int ring_idx,			\
+			dma_addr_t daddr, size_t size,			\
+			struct sk_buff *skb, void *vaddr),		\
+		TP_ARGS(frag_idx, ring_idx, daddr, size, skb, vaddr))
+
+DEFINE_MAP_EVENT(atl_dma_map_head);
+DEFINE_MAP_EVENT(atl_dma_map_frag);
+DEFINE_MAP_EVENT(atl_dma_map_rxbuf);
+
+DECLARE_EVENT_CLASS(atl_dma_unmap_class,
+	TP_PROTO(int frag_idx, int ring_idx, dma_addr_t daddr, size_t size,
+		struct sk_buff *skb),
+	TP_ARGS(frag_idx, ring_idx, daddr, size, skb),
+	TP_STRUCT__entry(
+		__field(int, frag_idx)
+		__field(int, ring_idx)
+		__field(dma_addr_t, daddr)
+		__field(size_t, size)
+		__field(struct sk_buff *, skb)
+	),
+	TP_fast_assign(
+		__entry->frag_idx = frag_idx;
+		__entry->ring_idx = ring_idx;
+		__entry->daddr = daddr;
+		__entry->size = size;
+		__entry->skb = skb;
+	),
+	TP_printk("idx %d ring idx %d daddr %pad len %#zx skb %p",
+		__entry->frag_idx, __entry->ring_idx, &__entry->daddr,
+		__entry->size, __entry->skb)
+);
+
+#define DEFINE_UNMAP_EVENT(name)					\
+	DEFINE_EVENT(atl_dma_unmap_class, name,				\
+		TP_PROTO(int frag_idx, int ring_idx, dma_addr_t daddr,	\
+			size_t size, struct sk_buff *skb),		\
+		TP_ARGS(frag_idx, ring_idx, daddr, size, skb))
+
+DEFINE_UNMAP_EVENT(atl_dma_unmap_head);
+DEFINE_UNMAP_EVENT(atl_dma_unmap_frag);
+DEFINE_UNMAP_EVENT(atl_dma_unmap_rxbuf);
+
+TRACE_EVENT(atl_fill_rx_desc,
+	TP_PROTO(int ring_idx, struct atl_rx_desc *desc),
+	TP_ARGS(ring_idx, desc),
+	TP_STRUCT__entry(
+		__field(int, ring_idx)
+		__field(dma_addr_t, daddr)
+		__field(dma_addr_t, haddr)
+	),
+	TP_fast_assign(
+		__entry->ring_idx = ring_idx;
+		__entry->daddr = desc->daddr;
+		__entry->haddr = desc->haddr;
+	),
+	TP_printk("[%d] daddr %pad", __entry->ring_idx, &__entry->daddr)
+);
+
+TRACE_EVENT(atl_sync_rx_range,
+	TP_PROTO(int ring_idx, dma_addr_t daddr, unsigned long pg_off,
+		size_t size),
+	TP_ARGS(ring_idx, daddr, pg_off, size),
+	TP_STRUCT__entry(
+		__field(int, ring_idx)
+		__field(dma_addr_t, daddr)
+		__field(unsigned long, pg_off)
+		__field(size_t, size)
+	),
+	TP_fast_assign(
+		__entry->ring_idx = ring_idx;
+		__entry->daddr = daddr;
+		__entry->pg_off = pg_off;
+		__entry->size = size;
+	),
+	TP_printk("[%d] daddr %pad pg_off %#lx size %#zx", __entry->ring_idx,
+		&__entry->daddr, __entry->pg_off, __entry->size)
+);
+
+#endif /* _ATL_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef  TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE atl_trace
+#include <trace/define_trace.h>
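
Each DEFINE_*_EVENT above generates a corresponding trace_<name>() call for the driver's DMA paths. A usage sketch for the map side (the call site, `dev`, and the unwind label are illustrative, not part of this patch):

    dma_addr_t daddr = dma_map_single(dev, skb->data,
    				      skb_headlen(skb), DMA_TO_DEVICE);
    if (dma_mapping_error(dev, daddr))
    	goto err_map;			/* hypothetical unwind label */

    trace_atl_dma_map_head(0, ring->idx, daddr, skb_headlen(skb),
    		       skb, skb->data);
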
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index c965e65..9939cca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -262,6 +262,8 @@
 		AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
 			       HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
 			       10, 1000U);
+		if (err)
+			return err;
 	}
 
 	if (self->rbl_enabled)
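
AQ_HW_WAIT_FOR is a polling macro that assigns a locally scoped `err`; the two added lines make the deinit path actually propagate a poll timeout instead of silently continuing. A simplified sketch of a macro of that shape (the real one differs in detail, and `err` must already be declared in the enclosing scope):

    #define WAIT_FOR_COND(_cond, _usec, _iters)		\
    do {						\
    	unsigned int i_ = (_iters);			\
    	while (!(_cond) && i_--)			\
    		udelay(_usec);				\
    	err = (_cond) ? 0 : -ETIME;			\
    } while (0)
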
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 7b6859e..fc16b2b 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -519,7 +519,6 @@
 				struct ethtool_wolinfo *wol)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
-	u32 reg;
 
 	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
 	wol->wolopts = priv->wolopts;
@@ -527,11 +526,7 @@
 	if (!(priv->wolopts & WAKE_MAGICSECURE))
 		return;
 
-	/* Return the programmed SecureOn password */
-	reg = umac_readl(priv, UMAC_PSW_MS);
-	put_unaligned_be16(reg, &wol->sopass[0]);
-	reg = umac_readl(priv, UMAC_PSW_LS);
-	put_unaligned_be32(reg, &wol->sopass[2]);
+	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
 }
 
 static int bcm_sysport_set_wol(struct net_device *dev,
@@ -547,13 +542,8 @@
 	if (wol->wolopts & ~supported)
 		return -EINVAL;
 
-	/* Program the SecureOn password */
-	if (wol->wolopts & WAKE_MAGICSECURE) {
-		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
-			    UMAC_PSW_MS);
-		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
-			    UMAC_PSW_LS);
-	}
+	if (wol->wolopts & WAKE_MAGICSECURE)
+		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
 
 	/* Flag the device and relevant IRQ as wakeup capable */
 	if (wol->wolopts) {
@@ -2588,13 +2578,18 @@
 	unsigned int index, i = 0;
 	u32 reg;
 
-	/* Password has already been programmed */
 	reg = umac_readl(priv, UMAC_MPD_CTRL);
 	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
 		reg |= MPD_EN;
 	reg &= ~PSW_EN;
-	if (priv->wolopts & WAKE_MAGICSECURE)
+	if (priv->wolopts & WAKE_MAGICSECURE) {
+		/* Program the SecureOn password */
+		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
+			    UMAC_PSW_MS);
+		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
+			    UMAC_PSW_LS);
 		reg |= PSW_EN;
+	}
 	umac_writel(priv, reg, UMAC_MPD_CTRL);
 
 	if (priv->wolopts & WAKE_FILTER) {
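
The WoL rework splits SecureOn handling in two: the ethtool set/get paths only touch a cached copy in the private struct, and the UMAC_PSW_* registers are programmed just before wake-up is armed, likely so the password cannot be lost to register resets between configuration and suspend. The two halves, reduced to a sketch (names as in this driver):

    /* ethtool path: cache only, no register access */
    if (wol->wolopts & WAKE_MAGICSECURE)
    	memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

    /* suspend path: push the cached password right before PSW_EN */
    umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), UMAC_PSW_MS);
    umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), UMAC_PSW_LS);
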
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 046c6c1..36e0adf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -12,6 +12,7 @@
 #define __BCM_SYSPORT_H
 
 #include <linux/bitmap.h>
+#include <linux/ethtool.h>
 #include <linux/if_vlan.h>
 #include <linux/net_dim.h>
 
@@ -776,6 +777,7 @@
 	unsigned int		crc_fwd:1;
 	u16			rev;
 	u32			wolopts;
+	u8			sopass[SOPASS_MAX];
 	unsigned int		wol_irq_disabled:1;
 
 	/* MIB related fields */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0de487a..3db54b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1282,6 +1282,7 @@
 	BNX2X_SP_RTNL_TX_STOP,
 	BNX2X_SP_RTNL_GET_DRV_VERSION,
 	BNX2X_SP_RTNL_CHANGE_UDP_PORT,
+	BNX2X_SP_RTNL_UPDATE_SVID,
 };
 
 enum bnx2x_iov_flag {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index fcc2328..a585f10 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2925,6 +2925,10 @@
 	func_params.f_obj = &bp->func_obj;
 	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
 	if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
 		int func = BP_ABS_FUNC(bp);
 		u32 val;
@@ -4301,7 +4305,8 @@
 				bnx2x_handle_eee_event(bp);
 
 			if (val & DRV_STATUS_OEM_UPDATE_SVID)
-				bnx2x_handle_update_svid_cmd(bp);
+				bnx2x_schedule_sp_rtnl(bp,
+					BNX2X_SP_RTNL_UPDATE_SVID, 0);
 
 			if (bp->link_vars.periodic_flags &
 			    PERIODIC_FLAGS_LINK_EVENT) {
@@ -8462,6 +8467,7 @@
 	/* Fill a user request section if needed */
 	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
 		ramrod_param.user_req.u.vlan.vlan = vlan;
+		__set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
 		/* Set the command: ADD or DEL */
 		if (set)
 			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
@@ -8482,6 +8488,27 @@
 	return rc;
 }
 
+static int bnx2x_del_all_vlans(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
+	unsigned long ramrod_flags = 0, vlan_flags = 0;
+	struct bnx2x_vlan_entry *vlan;
+	int rc;
+
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	__set_bit(BNX2X_VLAN, &vlan_flags);
+	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
+	if (rc)
+		return rc;
+
+	/* Mark that hw forgot all entries */
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		vlan->hw = false;
+	bp->vlan_cnt = 0;
+
+	return 0;
+}
+
 int bnx2x_del_all_macs(struct bnx2x *bp,
 		       struct bnx2x_vlan_mac_obj *mac_obj,
 		       int mac_type, bool wait_for_comp)
@@ -9320,6 +9347,17 @@
 		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
 			  rc);
 
+	/* The whole *vlan_obj structure may not be initialized if VLAN
+	 * filtering offload is not supported by hardware. Currently this is
+	 * true for all hardware covered by CHIP_IS_E1x().
+	 */
+	if (!CHIP_IS_E1x(bp)) {
+		/* Remove all currently configured VLANs */
+		rc = bnx2x_del_all_vlans(bp);
+		if (rc < 0)
+			BNX2X_ERR("Failed to delete all VLANs\n");
+	}
+
 	/* Disable LLH */
 	if (!CHIP_IS_E1(bp))
 		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
@@ -10349,6 +10387,9 @@
 			       &bp->sp_rtnl_state))
 		bnx2x_update_mng_version(bp);
 
+	if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
+		bnx2x_handle_update_svid_cmd(bp);
+
 	if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
 			       &bp->sp_rtnl_state)) {
 		if (bnx2x_udp_port_update(bp)) {
@@ -11740,8 +11781,10 @@
 	 * If maximum allowed number of connections is zero -
 	 * disable the feature.
 	 */
-	if (!bp->cnic_eth_dev.max_fcoe_conn)
+	if (!bp->cnic_eth_dev.max_fcoe_conn) {
 		bp->flags |= NO_FCOE_FLAG;
+		eth_zero_addr(bp->fip_mac);
+	}
 }
 
 static void bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -13014,13 +13057,6 @@
 
 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
 {
-	struct bnx2x_vlan_entry *vlan;
-
-	/* The hw forgot all entries after reload */
-	list_for_each_entry(vlan, &bp->vlan_reg, link)
-		vlan->hw = false;
-	bp->vlan_cnt = 0;
-
 	/* Don't set rx mode here. Our caller will do it. */
 	bnx2x_vlan_configure(bp, false);
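
The SVID change moves the handling out of the attention handler and into the sp_rtnl worker, where it runs with rtnl_lock held and may sleep; the handler itself now only marks the work. The defer half and the handle half, side by side (both appear in the hunks above):

    /* attention/interrupt context: just mark the pending work */
    bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_UPDATE_SVID, 0);

    /* bnx2x_sp_rtnl_task(), running with rtnl_lock held: */
    if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
    	bnx2x_handle_update_svid_cmd(bp);
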
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 0bf2fd4..7a6e82d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -265,6 +265,7 @@
 	BNX2X_ETH_MAC,
 	BNX2X_ISCSI_ETH_MAC,
 	BNX2X_NETQ_ETH_MAC,
+	BNX2X_VLAN,
 	BNX2X_DONT_CONSUME_CAM_CREDIT,
 	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
 };
@@ -272,7 +273,8 @@
 #define BNX2X_VLAN_MAC_CMP_MASK	(1 << BNX2X_UC_LIST_MAC | \
 				 1 << BNX2X_ETH_MAC | \
 				 1 << BNX2X_ISCSI_ETH_MAC | \
-				 1 << BNX2X_NETQ_ETH_MAC)
+				 1 << BNX2X_NETQ_ETH_MAC | \
+				 1 << BNX2X_VLAN)
 #define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
 	((flags) & BNX2X_VLAN_MAC_CMP_MASK)
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e2d9254..1fdaf86b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -463,6 +463,12 @@
 	}
 
 	length >>= 9;
+	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
+		dev_warn_ratelimited(&pdev->dev, "Dropped oversize TX packet (%d bytes).\n",
+				     skb->len);
+		i = 0;
+		goto tx_dma_error;
+	}
 	flags |= bnxt_lhint_arr[length];
 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 
@@ -6073,23 +6079,26 @@
 int bnxt_reserve_rings(struct bnxt *bp)
 {
 	int tcs = netdev_get_num_tc(bp->dev);
+	bool reinit_irq = false;
 	int rc;
 
 	if (!bnxt_need_reserve_rings(bp))
 		return 0;
 
-	rc = __bnxt_reserve_rings(bp);
-	if (rc) {
-		netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
-		return rc;
-	}
 	if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
 		bnxt_ulp_irq_stop(bp);
 		bnxt_clear_int_mode(bp);
-		rc = bnxt_init_int_mode(bp);
+		reinit_irq = true;
+	}
+	rc = __bnxt_reserve_rings(bp);
+	if (reinit_irq) {
+		if (!rc)
+			rc = bnxt_init_int_mode(bp);
 		bnxt_ulp_irq_restart(bp, rc);
-		if (rc)
-			return rc;
+	}
+	if (rc) {
+		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
+		return rc;
 	}
 	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
 		netdev_err(bp->dev, "tx ring reservation failure\n");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 3d45f4c..9bbaad9 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -643,6 +643,7 @@
 #define MACB_CAPS_JUMBO				0x00000020
 #define MACB_CAPS_GEM_HAS_PTP			0x00000040
 #define MACB_CAPS_BD_RD_PREFETCH		0x00000080
+#define MACB_CAPS_NEEDS_RSTONUBR		0x00000100
 #define MACB_CAPS_FIFO_MODE			0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
 #define MACB_CAPS_SG_DISABLED			0x40000000
@@ -1214,6 +1215,8 @@
 
 	int	rx_bd_rd_prefetch;
 	int	tx_bd_rd_prefetch;
+
+	u32	rx_intr_mask;
 };
 
 #ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 58b9744..8abea1c 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -56,12 +56,12 @@
 /* level of occupied TX descriptors under which we wake up TX process */
 #define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
 
-#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
-				 | MACB_BIT(ISR_ROVR))
+#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
 #define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 					| MACB_BIT(ISR_RLE)		\
 					| MACB_BIT(TXERR))
-#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
+					| MACB_BIT(TXUBR))
 
 /* Max length of transmit frame must be a multiple of 8 bytes */
 #define MACB_TX_LEN_ALIGN	8
@@ -681,6 +681,11 @@
 	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
 		desc_64 = macb_64b_desc(bp, desc);
 		desc_64->addrh = upper_32_bits(addr);
+		/* The low bits of RX address contain the RX_USED bit, clearing
+		 * of which allows packet RX. Make sure the high bits are also
+		 * visible to HW at that point.
+		 */
+		dma_wmb();
 	}
 #endif
 	desc->addr = lower_32_bits(addr);
@@ -929,14 +934,19 @@
 
 			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
-			macb_set_addr(bp, desc, paddr);
 			desc->ctrl = 0;
+			/* Setting addr clears RX_USED and allows reception,
+			 * so make sure ctrl is cleared first to avoid a race.
+			 */
+			dma_wmb();
+			macb_set_addr(bp, desc, paddr);
 
 			/* properly align Ethernet header */
 			skb_reserve(skb, NET_IP_ALIGN);
 		} else {
-			desc->addr &= ~MACB_BIT(RX_USED);
 			desc->ctrl = 0;
+			dma_wmb();
+			desc->addr &= ~MACB_BIT(RX_USED);
 		}
 	}
 
@@ -990,11 +1000,15 @@
 
 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
 		addr = macb_get_addr(bp, desc);
-		ctrl = desc->ctrl;
 
 		if (!rxused)
 			break;
 
+		/* Ensure ctrl is at least as up-to-date as rxused */
+		dma_rmb();
+
+		ctrl = desc->ctrl;
+
 		queue->rx_tail++;
 		count++;
 
@@ -1169,11 +1183,14 @@
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		ctrl = desc->ctrl;
-
 		if (!(desc->addr & MACB_BIT(RX_USED)))
 			break;
 
+		/* Ensure ctrl is at least as up-to-date as addr */
+		dma_rmb();
+
+		ctrl = desc->ctrl;
+
 		if (ctrl & MACB_BIT(RX_SOF)) {
 			if (first_frag != -1)
 				discard_partial_frame(queue, first_frag, tail);
@@ -1253,7 +1270,7 @@
 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 			napi_reschedule(napi);
 		} else {
-			queue_writel(queue, IER, MACB_RX_INT_FLAGS);
+			queue_writel(queue, IER, bp->rx_intr_mask);
 		}
 	}
 
@@ -1271,7 +1288,7 @@
 	u32 ctrl;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+		queue_writel(queue, IDR, bp->rx_intr_mask |
 					 MACB_TX_INT_FLAGS |
 					 MACB_BIT(HRESP));
 	}
@@ -1301,7 +1318,7 @@
 
 		/* Enable interrupts */
 		queue_writel(queue, IER,
-			     MACB_RX_INT_FLAGS |
+			     bp->rx_intr_mask |
 			     MACB_TX_INT_FLAGS |
 			     MACB_BIT(HRESP));
 	}
@@ -1313,6 +1330,21 @@
 	netif_tx_start_all_queues(dev);
 }
 
+static void macb_tx_restart(struct macb_queue *queue)
+{
+	unsigned int head = queue->tx_head;
+	unsigned int tail = queue->tx_tail;
+	struct macb *bp = queue->bp;
+
+	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+		queue_writel(queue, ISR, MACB_BIT(TXUBR));
+
+	if (head == tail)
+		return;
+
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+}
+
 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 {
 	struct macb_queue *queue = dev_id;
@@ -1340,14 +1372,14 @@
 			    (unsigned int)(queue - bp->queues),
 			    (unsigned long)status);
 
-		if (status & MACB_RX_INT_FLAGS) {
+		if (status & bp->rx_intr_mask) {
 			/* There's no point taking any more interrupts
 			 * until we have processed the buffers. The
 			 * scheduling call may fail if the poll routine
 			 * is already scheduled, so disable interrupts
 			 * now.
 			 */
-			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
+			queue_writel(queue, IDR, bp->rx_intr_mask);
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 
@@ -1370,6 +1402,9 @@
 		if (status & MACB_BIT(TCOMP))
 			macb_tx_interrupt(queue);
 
+		if (status & MACB_BIT(TXUBR))
+			macb_tx_restart(queue);
+
 		/* Link change detection isn't possible with RMII, so we'll
 		 * add that if/when we get our hands on a full-blown MII PHY.
 		 */
@@ -1377,8 +1412,9 @@
 		/* There is a hardware issue under heavy load where DMA can
 		 * stop, this causes endless "used buffer descriptor read"
 		 * interrupts but it can be cleared by re-enabling RX. See
-		 * the at91 manual, section 41.3.1 or the Zynq manual
-		 * section 16.7.4 for details.
+		 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
+		 * section 16.7.4 for details. RXUBR is only enabled for
+		 * these two versions.
 		 */
 		if (status & MACB_BIT(RXUBR)) {
 			ctrl = macb_readl(bp, NCR);
@@ -2228,7 +2264,7 @@
 
 		/* Enable interrupts */
 		queue_writel(queue, IER,
-			     MACB_RX_INT_FLAGS |
+			     bp->rx_intr_mask |
 			     MACB_TX_INT_FLAGS |
 			     MACB_BIT(HRESP));
 	}
@@ -3876,6 +3912,7 @@
 };
 
 static const struct macb_config emac_config = {
+	.caps = MACB_CAPS_NEEDS_RSTONUBR,
 	.clk_init = at91ether_clk_init,
 	.init = at91ether_init,
 };
@@ -3897,7 +3934,8 @@
 };
 
 static const struct macb_config zynq_config = {
-	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
+	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
+		MACB_CAPS_NEEDS_RSTONUBR,
 	.dma_burst_length = 16,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
@@ -4052,6 +4090,10 @@
 						macb_dma_desc_get_size(bp);
 	}
 
+	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
+	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
+		bp->rx_intr_mask |= MACB_BIT(RXUBR);
+
 	mac = of_get_mac_address(np);
 	if (mac) {
 		ether_addr_copy(bp->dev->dev_addr, mac);
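
The macb changes are dominated by descriptor-ownership ordering: the RX_USED bit in desc->addr is the hand-off flag between CPU and MAC, so every other descriptor field must be ordered against it with DMA barriers. Both directions, reduced to a sketch (names follow macb_main.c; 64-bit and ring-wrap handling elided):

    /* CPU -> HW: make ctrl visible before clearing RX_USED in addr */
    static void macb_give_to_hw(struct macb_dma_desc *desc, dma_addr_t paddr)
    {
    	desc->ctrl = 0;
    	dma_wmb();
    	desc->addr = lower_32_bits(paddr);	/* RX_USED now clear */
    }

    /* HW -> CPU: never read ctrl from before HW set RX_USED */
    static bool macb_take_from_hw(struct macb_dma_desc *desc, u32 *ctrl)
    {
    	if (!(desc->addr & MACB_BIT(RX_USED)))
    		return false;
    	dma_rmb();
    	*ctrl = desc->ctrl;
    	return true;
    }
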
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index cd5296b8..a6dc47e 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -319,6 +319,8 @@
 	desc_ptp = macb_ptp_desc(queue->bp, desc);
 	tx_timestamp = &queue->tx_timestamps[head];
 	tx_timestamp->skb = skb;
+	/* ensure ts_1/ts_2 are loaded after ctrl (TX_USED check) */
+	dma_rmb();
 	tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
 	tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
 	/* move head */
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 60641e20..9a7f70d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1434,7 +1434,8 @@
 		 * csum is correct or is zero.
 		 */
 		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
-		    tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
+		    tcp_udp_csum_ok && outer_csum_ok &&
+		    (ipv4_csum_ok || ipv6)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			skb->csum_level = encap;
 		}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 65a22cd..029730b 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2052,6 +2052,7 @@
 	bool nonlinear = skb_is_nonlinear(skb);
 	struct rtnl_link_stats64 *percpu_stats;
 	struct dpaa_percpu_priv *percpu_priv;
+	struct netdev_queue *txq;
 	struct dpaa_priv *priv;
 	struct qm_fd fd;
 	int offset = 0;
@@ -2101,6 +2102,11 @@
 	if (unlikely(err < 0))
 		goto skb_to_fd_failed;
 
+	txq = netdev_get_tx_queue(net_dev, queue_mapping);
+
+	/* LLTX requires us to update trans_start ourselves */
+	txq->trans_start = jiffies;
+
 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 		fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
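
Because this driver sets NETIF_F_LLTX, the core transmit path skips the per-queue lock and never refreshes trans_start itself; without the update above, the TX watchdog would eventually see a stale timestamp and declare a bogus timeout. The whole fix reduces to:

    struct netdev_queue *txq = netdev_get_tx_queue(net_dev, queue_mapping);

    txq->trans_start = jiffies;	/* report progress to the TX watchdog */
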
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index bc6eb30..41c6fa20 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -928,7 +928,7 @@
 	hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
 
 	/* Create element to be added to the driver hash table */
-	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
 	if (!hash_entry)
 		return -ENOMEM;
 	hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 4070593..f75b9c1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -553,7 +553,7 @@
 	hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
 
 	/* Create element to be added to the driver hash table */
-	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
 	if (!hash_entry)
 		return -ENOMEM;
 	hash_entry->addr = addr;
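
Both fman hunks make the same change: these hash-entry allocations sit on the ndo_set_rx_mode path, which runs under the netif address spinlock, so a sleeping GFP_KERNEL allocation is not allowed there. The rule being applied:

    /* atomic context: the allocator must not sleep */
    hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
    if (!hash_entry)
    	return -ENOMEM;		/* atomic allocations fail more readily */
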
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 22a817d..1e2b53a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1888,6 +1888,8 @@
 	u16 i, j;
 	u8 __iomem *bd;
 
+	netdev_reset_queue(ugeth->ndev);
+
 	ug_info = ugeth->ug_info;
 	uf_info = &ug_info->uf_info;
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index b52029e..a78bfaf 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -147,12 +147,10 @@
 	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
 	int i;
 
-	vf_cb->mac_cb	 = NULL;
-
-	kfree(vf_cb);
-
 	for (i = 0; i < handle->q_num; i++)
 		hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+	kfree(vf_cb);
 }
 
 static int hns_ae_wait_flow_down(struct hnae_handle *handle)
@@ -379,6 +377,9 @@
 
 	hns_ae_ring_enable_all(handle, 0);
 
+	/* clean rx fbd. */
+	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
+
 	(void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 09e4061..aa2c25d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -67,11 +67,14 @@
 	struct mac_driver *drv = (struct mac_driver *)mac_drv;
 
 	/*enable GE rX/tX */
-	if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+	if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
 		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
 
-	if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+	if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+		/* enable rx pcs */
+		dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
 		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+	}
 }
 
 static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
@@ -79,11 +82,14 @@
 	struct mac_driver *drv = (struct mac_driver *)mac_drv;
 
 	/*disable GE rX/tX */
-	if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+	if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
 		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
 
-	if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+	if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+		/* disable rx pcs */
+		dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
 		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+	}
 }
 
 /* hns_gmac_get_en - get port enable
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 6ed6f14..cfdc92d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -778,6 +778,17 @@
 	return rc;
 }
 
+static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
+{
+	if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
+		return;
+
+	phy_device_remove(mac_cb->phy_dev);
+	phy_device_free(mac_cb->phy_dev);
+
+	mac_cb->phy_dev = NULL;
+}
+
 #define MAC_MEDIA_TYPE_MAX_LEN		16
 
 static const struct {
@@ -1117,7 +1128,11 @@
 	int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
 
 	for (i = 0; i < max_port_num; i++) {
+		if (!dsaf_dev->mac_cb[i])
+			continue;
+
 		dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
+		hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
 		dsaf_dev->mac_cb[i] = NULL;
 	}
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index e557a4e..3b9e74b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -935,6 +935,62 @@
 }
 
 /**
+ * hns_dsaf_tcam_uc_cfg_vague - config a unicast tcam entry with mask
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: index of the tcam entry
+ * @tcam_data: tcam data; @tcam_mask: match mask; @tcam_uc: ucast config
+ */
+static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
+				       u32 address,
+				       struct dsaf_tbl_tcam_data *tcam_data,
+				       struct dsaf_tbl_tcam_data *tcam_mask,
+				       struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
+{
+	spin_lock_bh(&dsaf_dev->tcam_lock);
+	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+	hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
+	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+	hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+	/*Restore Match Data*/
+	tcam_mask->tbl_tcam_data_high = 0xffffffff;
+	tcam_mask->tbl_tcam_data_low = 0xffffffff;
+	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+	spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg_vague - config a multicast tcam entry with mask
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: index of the tcam entry
+ * @tcam_data: tcam data to write
+ * @tcam_mask: match mask for the entry
+ * @tcam_mc: multicast config for the entry
+ */
+static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
+				       u32 address,
+				       struct dsaf_tbl_tcam_data *tcam_data,
+				       struct dsaf_tbl_tcam_data *tcam_mask,
+				       struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
+{
+	spin_lock_bh(&dsaf_dev->tcam_lock);
+	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+	hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
+	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+	hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+	/*Restore Match Data*/
+	tcam_mask->tbl_tcam_data_high = 0xffffffff;
+	tcam_mask->tbl_tcam_data_low = 0xffffffff;
+	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+	spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
  * hns_dsaf_tcam_mc_invld - INT
  * @dsaf_id: dsa fabric id
  * @address
@@ -1493,6 +1549,27 @@
 }
 
 /**
+ * hns_dsaf_find_empty_mac_entry_reverse - find a free soft mac entry
+ * search the dsa fabric soft mac table for an empty entry, from the end
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
+{
+	struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+	int i;
+
+	soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
+	for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
+		/* search all entries from end to start. */
+		if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+			return i;
+		soft_mac_entry--;
+	}
+	return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
  * hns_dsaf_set_mac_key - set mac key
  * @dsaf_dev: dsa fabric device struct pointer
  * @mac_key: tcam key pointer
@@ -2166,9 +2243,9 @@
 		DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
 
 	hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
-		DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
+		DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
 	hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
-		DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+		DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
 
 	/* pfc pause frame statistics stored in dsaf inode*/
 	if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
@@ -2285,237 +2362,237 @@
 				DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
 		p[223 + i] = dsaf_read_dev(ddev,
 				DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
-		p[224 + i] = dsaf_read_dev(ddev,
+		p[226 + i] = dsaf_read_dev(ddev,
 				DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
 	}
 
-	p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+	p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
 
 	for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
 		j = i * DSAF_COMM_CHN + port;
-		p[228 + i] = dsaf_read_dev(ddev,
+		p[230 + i] = dsaf_read_dev(ddev,
 				DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
 	}
 
-	p[231] = dsaf_read_dev(ddev,
-		DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
+	p[233] = dsaf_read_dev(ddev,
+		DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
 
 	/* dsaf inode registers */
 	for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
 		j = i * DSAF_COMM_CHN + port;
-		p[232 + i] = dsaf_read_dev(ddev,
+		p[234 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_CFG_REG_0_REG + j * 0x80);
-		p[235 + i] = dsaf_read_dev(ddev,
+		p[237 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
-		p[238 + i] = dsaf_read_dev(ddev,
+		p[240 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
-		p[241 + i] = dsaf_read_dev(ddev,
+		p[243 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
-		p[244 + i] = dsaf_read_dev(ddev,
+		p[246 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
-		p[245 + i] = dsaf_read_dev(ddev,
+		p[249 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
-		p[248 + i] = dsaf_read_dev(ddev,
+		p[252 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
-		p[251 + i] = dsaf_read_dev(ddev,
+		p[255 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
-		p[254 + i] = dsaf_read_dev(ddev,
+		p[258 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
-		p[257 + i] = dsaf_read_dev(ddev,
+		p[261 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
-		p[260 + i] = dsaf_read_dev(ddev,
+		p[264 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_INER_ST_0_REG + j * 0x80);
-		p[263 + i] = dsaf_read_dev(ddev,
+		p[267 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
-		p[266 + i] = dsaf_read_dev(ddev,
+		p[270 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
-		p[269 + i] = dsaf_read_dev(ddev,
+		p[273 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
-		p[272 + i] = dsaf_read_dev(ddev,
+		p[276 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
-		p[275 + i] = dsaf_read_dev(ddev,
+		p[279 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
-		p[278 + i] = dsaf_read_dev(ddev,
+		p[282 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
-		p[281 + i] = dsaf_read_dev(ddev,
+		p[285 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
-		p[284 + i] = dsaf_read_dev(ddev,
+		p[288 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
-		p[287 + i] = dsaf_read_dev(ddev,
+		p[291 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
-		p[290 + i] = dsaf_read_dev(ddev,
+		p[294 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
-		p[293 + i] = dsaf_read_dev(ddev,
+		p[297 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
-		p[296 + i] = dsaf_read_dev(ddev,
+		p[300 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
-		p[299 + i] = dsaf_read_dev(ddev,
+		p[303 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
-		p[302 + i] = dsaf_read_dev(ddev,
+		p[306 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
-		p[305 + i] = dsaf_read_dev(ddev,
+		p[309 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
-		p[308 + i] = dsaf_read_dev(ddev,
+		p[312 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
 	}
 
 	/* dsaf onode registers */
 	for (i = 0; i < DSAF_XOD_NUM; i++) {
-		p[311 + i] = dsaf_read_dev(ddev,
+		p[315 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
-		p[319 + i] = dsaf_read_dev(ddev,
+		p[323 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
-		p[327 + i] = dsaf_read_dev(ddev,
+		p[331 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
-		p[335 + i] = dsaf_read_dev(ddev,
+		p[339 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
-		p[343 + i] = dsaf_read_dev(ddev,
+		p[347 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
-		p[351 + i] = dsaf_read_dev(ddev,
+		p[355 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
 	}
 
-	p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
-	p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
-	p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+	p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+	p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+	p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
 
 	for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
 		j = i * DSAF_COMM_CHN + port;
-		p[362 + i] = dsaf_read_dev(ddev,
+		p[366 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_GNT_L_0_REG + j * 0x90);
-		p[365 + i] = dsaf_read_dev(ddev,
+		p[369 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_GNT_H_0_REG + j * 0x90);
-		p[368 + i] = dsaf_read_dev(ddev,
+		p[372 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
-		p[371 + i] = dsaf_read_dev(ddev,
+		p[375 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
-		p[374 + i] = dsaf_read_dev(ddev,
+		p[378 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
-		p[377 + i] = dsaf_read_dev(ddev,
+		p[381 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
-		p[380 + i] = dsaf_read_dev(ddev,
+		p[384 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
-		p[383 + i] = dsaf_read_dev(ddev,
+		p[387 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
-		p[386 + i] = dsaf_read_dev(ddev,
+		p[390 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
-		p[389 + i] = dsaf_read_dev(ddev,
+		p[393 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
 	}
 
-	p[392] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
-	p[393] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
-	p[394] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
-	p[395] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
 	p[396] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
 	p[397] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
 	p[398] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
 	p[399] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
 	p[400] = dsaf_read_dev(ddev,
-		DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
 	p[401] = dsaf_read_dev(ddev,
-		DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
 	p[402] = dsaf_read_dev(ddev,
-		DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
 	p[403] = dsaf_read_dev(ddev,
-		DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
 	p[404] = dsaf_read_dev(ddev,
+		DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+	p[405] = dsaf_read_dev(ddev,
+		DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+	p[406] = dsaf_read_dev(ddev,
+		DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+	p[407] = dsaf_read_dev(ddev,
+		DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+	p[408] = dsaf_read_dev(ddev,
 		DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
 
 	/* dsaf voq registers */
 	for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
 		j = (i * DSAF_COMM_CHN + port) * 0x90;
-		p[405 + i] = dsaf_read_dev(ddev,
+		p[409 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
-		p[408 + i] = dsaf_read_dev(ddev,
+		p[412 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
-		p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
-		p[414 + i] = dsaf_read_dev(ddev,
+		p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+		p[418 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
-		p[417 + i] = dsaf_read_dev(ddev,
+		p[421 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
-		p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
-		p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
-		p[426 + i] = dsaf_read_dev(ddev,
+		p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+		p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+		p[430 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
-		p[429 + i] = dsaf_read_dev(ddev,
+		p[433 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
-		p[432 + i] = dsaf_read_dev(ddev,
+		p[436 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
-		p[435 + i] = dsaf_read_dev(ddev,
+		p[439 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
-		p[438 + i] = dsaf_read_dev(ddev,
+		p[442 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_BP_ALL_THRD_0_REG + j);
 	}
 
 	/* dsaf tbl registers */
-	p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
-	p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
-	p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
-	p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
-	p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
-	p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
-	p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
-	p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
-	p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
-	p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
-	p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
-	p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
-	p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
-	p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
-	p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
-	p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
-	p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
-	p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
-	p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
-	p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
-	p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
-	p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
-	p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+	p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+	p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+	p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+	p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+	p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+	p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+	p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+	p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+	p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+	p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+	p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+	p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+	p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+	p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+	p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+	p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+	p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+	p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+	p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+	p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+	p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+	p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+	p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
 
 	for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
 		j = i * 0x8;
-		p[464 + 2 * i] = dsaf_read_dev(ddev,
+		p[468 + 2 * i] = dsaf_read_dev(ddev,
 			DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
-		p[465 + 2 * i] = dsaf_read_dev(ddev,
+		p[469 + 2 * i] = dsaf_read_dev(ddev,
 			DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
 	}
 
-	p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
-	p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
-	p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
-	p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
-	p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
-	p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
-	p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
-	p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
-	p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
-	p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
-	p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
-	p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+	p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+	p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+	p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+	p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+	p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+	p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+	p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+	p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+	p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+	p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+	p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+	p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
 
 	/* dsaf other registers */
-	p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
-	p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
-	p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
-	p[495] = dsaf_read_dev(ddev,
+	p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+	p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+	p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+	p[499] = dsaf_read_dev(ddev,
 		DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
-	p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
-	p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+	p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+	p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
 
 	if (!is_ver1)
-		p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+		p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
 
 	/* mark end of dsaf regs */
-	for (i = 499; i < 504; i++)
+	for (i = 503; i < 504; i++)
 		p[i] = 0xdddddddd;
 }
 
@@ -2673,58 +2750,156 @@
 	return DSAF_DUMP_REGS_NUM;
 }
 
+static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
+{
+	struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
+	struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+	struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
+	struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+	struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
+	struct dsaf_drv_mac_single_dest_entry mask_entry;
+	struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+	struct hns_mac_cb *mac_cb;
+	u8 addr[ETH_ALEN] = {0};
+	u8 port_num;
+	u16 mskid;
+
+	/* promisc use vague table match with vlanid = 0 & macaddr = 0 */
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+	if (entry_index != DSAF_INVALID_ENTRY_IDX)
+		return;
+
+	/* put the promisc tcam entries at the end. */
+	/* 1. set promisc unicast vague tcam entry. */
+	entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		dev_err(dsaf_dev->dev,
+			"enable uc promisc failed (port:%#x)\n",
+			port);
+		return;
+	}
+
+	mac_cb = dsaf_dev->mac_cb[port];
+	(void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
+	tbl_tcam_ucast.tbl_ucast_out_port = port_num;
+
+	/* config uc vague table */
+	hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+				   &tbl_tcam_mask_uc, &tbl_tcam_ucast);
+
+	/* update software entry */
+	soft_mac_entry = priv->soft_mac_tbl;
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = entry_index;
+	soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+	soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+	/* step back to the start for the mc entry. */
+	soft_mac_entry = priv->soft_mac_tbl;
+
+	/* 2. set promisc multicast vague tcam entry. */
+	entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		dev_err(dsaf_dev->dev,
+			"enable mc promisc failed (port:%#x)\n",
+			port);
+		return;
+	}
+
+	memset(&mask_entry, 0x0, sizeof(mask_entry));
+	memset(&mask_key, 0x0, sizeof(mask_key));
+	memset(&temp_key, 0x0, sizeof(temp_key));
+	mask_entry.addr[0] = 0x01;
+	hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
+			     port, mask_entry.addr);
+	tbl_tcam_mcast.tbl_mcast_item_vld = 1;
+	tbl_tcam_mcast.tbl_mcast_old_en = 0;
+
+	if (port < DSAF_SERVICE_NW_NUM) {
+		mskid = port;
+	} else if (port >= DSAF_BASE_INNER_PORT_NUM) {
+		mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+	} else {
+		dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
+			dsaf_dev->ae_dev.name, port,
+			mask_key.high.val, mask_key.low.val);
+		return;
+	}
+
+	dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+		     mskid % 32, 1);
+	memcpy(&temp_key, &mask_key, sizeof(mask_key));
+	hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+				   (struct dsaf_tbl_tcam_data *)(&mask_key),
+				   &tbl_tcam_mcast);
+
+	/* update software entry */
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = entry_index;
+	soft_mac_entry->tcam_key.high.val = temp_key.high.val;
+	soft_mac_entry->tcam_key.low.val = temp_key.low.val;
+}
+
+static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
+{
+	struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+	struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
+	struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+	struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
+	struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+	u8 addr[ETH_ALEN] = {0};
+
+	/* 1. delete uc vague tcam entry. */
+	/* promisc use vague table match with vlanid = 0 & macaddr = 0 */
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+	if (entry_index == DSAF_INVALID_ENTRY_IDX)
+		return;
+
+	/* config uc vague table */
+	hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+				   &tbl_tcam_mask, &tbl_tcam_ucast);
+	/* update soft management table. */
+	soft_mac_entry = priv->soft_mac_tbl;
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+	/* step back to the start for the mc entry. */
+	soft_mac_entry = priv->soft_mac_tbl;
+
+	/* 2. delete mc vague tcam entry. */
+	addr[0] = 0x01;
+	memset(&mac_key, 0x0, sizeof(mac_key));
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+	if (entry_index == DSAF_INVALID_ENTRY_IDX)
+		return;
+
+	/* config mc vague table */
+	hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+				   &tbl_tcam_mask, &tbl_tcam_mcast);
+	/* update soft management table. */
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+}
+
 /* Reserve the last TCAM entry for promisc support */
-#define dsaf_promisc_tcam_entry(port) \
-	(DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
 void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
 			       u32 port, bool enable)
 {
-	struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
-	struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
-	u16 entry_index;
-	struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask;
-	struct dsaf_tbl_tcam_mcast_cfg mac_data = {0};
-
-	if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev))
-		return;
-
-	/* find the tcam entry index for promisc */
-	entry_index = dsaf_promisc_tcam_entry(port);
-
-	memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
-	memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
-
-	/* config key mask */
-	if (enable) {
-		dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
-			       DSAF_TBL_TCAM_KEY_PORT_M,
-			       DSAF_TBL_TCAM_KEY_PORT_S, port);
-		dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
-			       DSAF_TBL_TCAM_KEY_PORT_M,
-			       DSAF_TBL_TCAM_KEY_PORT_S, 0xf);
-
-		/* SUB_QID */
-		dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
-			     DSAF_SERVICE_NW_NUM, true);
-		mac_data.tbl_mcast_item_vld = true;	/* item_vld bit */
-	} else {
-		mac_data.tbl_mcast_item_vld = false;	/* item_vld bit */
-	}
-
-	dev_dbg(dsaf_dev->dev,
-		"set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
-		dsaf_dev->ae_dev.name, tbl_tcam_data.high.val,
-		tbl_tcam_data.low.val, entry_index);
-
-	/* config promisc entry with mask */
-	hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
-			     (struct dsaf_tbl_tcam_data *)&tbl_tcam_data,
-			     (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask,
-			     &mac_data);
-
-	/* config software entry */
-	soft_mac_entry += entry_index;
-	soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
+	if (enable)
+		set_promisc_tcam_enable(dsaf_dev, port);
+	else
+		set_promisc_tcam_disable(dsaf_dev, port);
 }
 
 int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
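
The promisc rework above replaces the single reserved multicast entry with a pair of masked ("vague") TCAM entries, one unicast and one multicast, allocated from the tail of the table by the new reverse search so they cannot collide with normal entries filled from the head. Allocation sketch:

    u16 idx = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);

    if (idx == DSAF_INVALID_ENTRY_IDX)
    	return;		/* TCAM exhausted: promisc entry not installed */
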
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 74d935d..b9733b0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -176,7 +176,7 @@
 #define DSAF_INODE_IN_DATA_STP_DISC_0_REG	0x1A50
 #define DSAF_INODE_GE_FC_EN_0_REG		0x1B00
 #define DSAF_INODE_VC0_IN_PKT_NUM_0_REG		0x1B50
-#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG		0x1C00
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG		0x103C
 #define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG	0x1C00
 #define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET	0x100
 #define DSAF_INODE_IN_PRIO_PAUSE_OFFSET		0x50
@@ -404,11 +404,11 @@
 #define RCB_ECC_ERR_ADDR4_REG			0x460
 #define RCB_ECC_ERR_ADDR5_REG			0x464
 
-#define RCB_COM_SF_CFG_INTMASK_RING		0x480
-#define RCB_COM_SF_CFG_RING_STS			0x484
-#define RCB_COM_SF_CFG_RING			0x488
-#define RCB_COM_SF_CFG_INTMASK_BD		0x48C
-#define RCB_COM_SF_CFG_BD_RINT_STS		0x470
+#define RCB_COM_SF_CFG_INTMASK_RING		0x470
+#define RCB_COM_SF_CFG_RING_STS			0x474
+#define RCB_COM_SF_CFG_RING			0x478
+#define RCB_COM_SF_CFG_INTMASK_BD		0x47C
+#define RCB_COM_SF_CFG_BD_RINT_STS		0x480
 #define RCB_COM_RCB_RD_BD_BUSY			0x490
 #define RCB_COM_RCB_FBD_CRT_EN			0x494
 #define RCB_COM_AXI_WR_ERR_INTMASK		0x498
@@ -534,6 +534,7 @@
 #define GMAC_LD_LINK_COUNTER_REG		0x01D0UL
 #define GMAC_LOOP_REG				0x01DCUL
 #define GMAC_RECV_CONTROL_REG			0x01E0UL
+#define GMAC_PCS_RX_EN_REG			0x01E4UL
 #define GMAC_VLAN_CODE_REG			0x01E8UL
 #define GMAC_RX_OVERRUN_CNT_REG			0x01ECUL
 #define GMAC_RX_LENGTHFIELD_ERR_CNT_REG		0x01F4UL
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 28e9078..b043370 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1186,6 +1186,9 @@
 	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
 		phy_dev->autoneg = false;
 
+	if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
+		phy_stop(phy_dev);
+
 	return 0;
 }
 
@@ -1281,6 +1284,22 @@
 	return cpu;
 }
 
+static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < q_num * 2; i++) {
+		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+			irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+					      NULL);
+			free_irq(priv->ring_data[i].ring->irq,
+				 &priv->ring_data[i]);
+			priv->ring_data[i].ring->irq_init_flag =
+				RCB_IRQ_NOT_INITED;
+		}
+	}
+}
+
 static int hns_nic_init_irq(struct hns_nic_priv *priv)
 {
 	struct hnae_handle *h = priv->ae_handle;
@@ -1306,7 +1325,7 @@
 		if (ret) {
 			netdev_err(priv->netdev, "request irq(%d) fail\n",
 				   rd->ring->irq);
-			return ret;
+			goto out_free_irq;
 		}
 		disable_irq(rd->ring->irq);
 
@@ -1321,6 +1340,10 @@
 	}
 
 	return 0;
+
+out_free_irq:
+	hns_nic_free_irq(h->q_num, priv);
+	return ret;
 }
 
 static int hns_nic_net_up(struct net_device *ndev)
@@ -1330,6 +1353,9 @@
 	int i, j;
 	int ret;
 
+	if (!test_bit(NIC_STATE_DOWN, &priv->state))
+		return 0;
+
 	ret = hns_nic_init_irq(priv);
 	if (ret != 0) {
 		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
@@ -1365,6 +1391,7 @@
 	for (j = i - 1; j >= 0; j--)
 		hns_nic_ring_close(ndev, j);
 
+	hns_nic_free_irq(h->q_num, priv);
 	set_bit(NIC_STATE_DOWN, &priv->state);
 
 	return ret;
@@ -1482,11 +1509,19 @@
 }
 
 static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+#define HNS_TX_TIMEO_LIMIT (40 * HZ)
 static void hns_nic_net_timeout(struct net_device *ndev)
 {
 	struct hns_nic_priv *priv = netdev_priv(ndev);
 
-	hns_tx_timeout_reset(priv);
+	if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
+		ndev->watchdog_timeo *= 2;
+		netdev_info(ndev, "watchdog_timo changed to %d.\n",
+			    ndev->watchdog_timeo);
+	} else {
+		ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+		hns_tx_timeout_reset(priv);
+	}
 }
 
 static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
@@ -2049,11 +2084,11 @@
 		= container_of(work, struct hns_nic_priv, service_task);
 	struct hnae_handle *h = priv->ae_handle;
 
+	hns_nic_reset_subtask(priv);
 	hns_nic_update_link_status(priv->netdev);
 	h->dev->ops->update_led_status(h);
 	hns_nic_update_stats(priv->netdev);
 
-	hns_nic_reset_subtask(priv);
 	hns_nic_service_event_complete(priv);
 }
 
@@ -2339,7 +2374,7 @@
 	ndev->min_mtu = MAC_MIN_MTU;
 	switch (priv->enet_ver) {
 	case AE_VERSION_2:
-		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
 		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
@@ -2384,6 +2419,8 @@
 out_notify_fail:
 	(void)cancel_work_sync(&priv->service_task);
 out_read_prop_fail:
+	/* safe for ACPI FW */
+	of_node_put(to_of_node(priv->fwnode));
 	free_netdev(ndev);
 	return ret;
 }
@@ -2413,6 +2450,9 @@
 	set_bit(NIC_STATE_REMOVING, &priv->state);
 	(void)cancel_work_sync(&priv->service_task);
 
+	/* safe for ACPI FW */
+	of_node_put(to_of_node(priv->fwnode));
+
 	free_netdev(ndev);
 	return 0;
 }
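
hns_nic_net_timeout() now backs off before taking drastic action: each watchdog firing doubles watchdog_timeo up to HNS_TX_TIMEO_LIMIT (40 * HZ), and only at the cap is the timeout restored and a real reset scheduled, apparently so transient stalls no longer trigger full hardware resets. The control flow, in isolation:

    if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
    	ndev->watchdog_timeo *= 2;	/* tolerate a transient stall */
    } else {
    	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
    	hns_tx_timeout_reset(priv);	/* genuine recovery */
    }
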
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 774beda..e2710ff 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1157,16 +1157,18 @@
  */
 static int hns_nic_nway_reset(struct net_device *netdev)
 {
-	int ret = 0;
 	struct phy_device *phy = netdev->phydev;
 
-	if (netif_running(netdev)) {
-		/* if autoneg is disabled, don't restart auto-negotiation */
-		if (phy && phy->autoneg == AUTONEG_ENABLE)
-			ret = genphy_restart_aneg(phy);
-	}
+	if (!netif_running(netdev))
+		return 0;
 
-	return ret;
+	if (!phy)
+		return -EOPNOTSUPP;
+
+	if (phy->autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+
+	return genphy_restart_aneg(phy);
 }
 
 static u32
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b7b2f82..0ccfa6a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2691,6 +2691,8 @@
 
 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
 {
+#define HNS3_VECTOR_PF_MAX_NUM		64
+
 	struct hnae3_handle *h = priv->ae_handle;
 	struct hns3_enet_tqp_vector *tqp_vector;
 	struct hnae3_vector_info *vector;
@@ -2703,6 +2705,8 @@
 	/* RSS size, cpu online and vector_num should be the same */
 	/* Should consider 2p/4p later */
 	vector_num = min_t(u16, num_online_cpus(), tqp_num);
+	vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
+
 	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
 			      GFP_KERNEL);
 	if (!vector)
@@ -2760,12 +2764,12 @@
 
 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
 
-		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
-			(void)irq_set_affinity_hint(
-				priv->tqp_vector[i].vector_irq,
-						    NULL);
-			free_irq(priv->tqp_vector[i].vector_irq,
-				 &priv->tqp_vector[i]);
+		if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
+			irq_set_affinity_notifier(tqp_vector->vector_irq,
+						  NULL);
+			irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
+			free_irq(tqp_vector->vector_irq, tqp_vector);
+			tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
 		}
 
 		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 017e084..baf5cc2 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -321,7 +321,7 @@
 		}
 
 		hns_mdio_cmd_write(mdio_dev, is_c45,
-				   MDIO_C45_WRITE_ADDR, phy_id, devad);
+				   MDIO_C45_READ, phy_id, devad);
 	}
 
 	/* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0 */
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 525d8b8..f70cb4d 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1172,11 +1172,15 @@
 
 map_failed_frags:
 	last = i+1;
-	for (i = 0; i < last; i++)
+	for (i = 1; i < last; i++)
 		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
 			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
 			       DMA_TO_DEVICE);
 
+	dma_unmap_single(&adapter->vdev->dev,
+			 descs[0].fields.address,
+			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+			 DMA_TO_DEVICE);
 map_failed:
 	if (!firmware_has_feature(FW_FEATURE_CMO))
 		netdev_err(netdev, "tx: unable to map xmit buffer\n");
@@ -1310,7 +1314,6 @@
 	unsigned long lpar_rc;
 	u16 mss = 0;
 
-restart_poll:
 	while (frames_processed < budget) {
 		if (!ibmveth_rxq_pending_buffer(adapter))
 			break;
@@ -1398,7 +1401,6 @@
 		    napi_reschedule(napi)) {
 			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 					       VIO_IRQ_DISABLE);
-			goto restart_poll;
 		}
 	}
 
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ab21a1..c8704b1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1939,8 +1939,9 @@
 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_rwi *rwi;
+	unsigned long flags;
 
-	mutex_lock(&adapter->rwi_lock);
+	spin_lock_irqsave(&adapter->rwi_lock, flags);
 
 	if (!list_empty(&adapter->rwi_list)) {
 		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
@@ -1950,7 +1951,7 @@
 		rwi = NULL;
 	}
 
-	mutex_unlock(&adapter->rwi_lock);
+	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 	return rwi;
 }
 
@@ -2025,6 +2026,7 @@
 	struct list_head *entry, *tmp_entry;
 	struct ibmvnic_rwi *rwi, *tmp;
 	struct net_device *netdev = adapter->netdev;
+	unsigned long flags;
 	int ret;
 
 	if (adapter->state == VNIC_REMOVING ||
@@ -2041,21 +2043,21 @@
 		goto err;
 	}
 
-	mutex_lock(&adapter->rwi_lock);
+	spin_lock_irqsave(&adapter->rwi_lock, flags);
 
 	list_for_each(entry, &adapter->rwi_list) {
 		tmp = list_entry(entry, struct ibmvnic_rwi, list);
 		if (tmp->reset_reason == reason) {
 			netdev_dbg(netdev, "Skipping matching reset\n");
-			mutex_unlock(&adapter->rwi_lock);
+			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 			ret = EBUSY;
 			goto err;
 		}
 	}
 
-	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
 	if (!rwi) {
-		mutex_unlock(&adapter->rwi_lock);
+		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 		ibmvnic_close(netdev);
 		ret = ENOMEM;
 		goto err;
@@ -2069,7 +2071,7 @@
 	}
 	rwi->reset_reason = reason;
 	list_add_tail(&rwi->list, &adapter->rwi_list);
-	mutex_unlock(&adapter->rwi_lock);
+	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 	adapter->resetting = true;
 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
 	schedule_work(&adapter->ibmvnic_reset);
@@ -4700,7 +4702,7 @@
 
 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
 	INIT_LIST_HEAD(&adapter->rwi_list);
-	mutex_init(&adapter->rwi_lock);
+	spin_lock_init(&adapter->rwi_lock);
 	adapter->resetting = false;
 
 	adapter->mac_change_pending = false;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 735f481..0946539 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1068,7 +1068,7 @@
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
 	enum ibmvnic_reset_reason reset_reason;
-	struct mutex rwi_lock;
+	spinlock_t rwi_lock;
 	struct list_head rwi_list;
 	struct work_struct ibmvnic_reset;
 	bool resetting;
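
/* Standalone pthread model (names invented) of the queueing rule that
 * rwi_lock protects in the ibmvnic hunks above. The lock became a
 * spinlock because the reset path can now run in contexts that must not
 * sleep, which is also why the allocation under the lock moved from
 * GFP_KERNEL to GFP_ATOMIC; calloc() merely stands in for that here.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rwi {
	int reset_reason;
	struct rwi *next;
};

static pthread_spinlock_t rwi_lock;
static struct rwi *rwi_list;

static int queue_reset(int reason)
{
	struct rwi *rwi, *tmp;

	pthread_spin_lock(&rwi_lock);
	/* skip a reset that is already queued for the same reason */
	for (tmp = rwi_list; tmp; tmp = tmp->next) {
		if (tmp->reset_reason == reason) {
			pthread_spin_unlock(&rwi_lock);
			return EBUSY;
		}
	}
	rwi = calloc(1, sizeof(*rwi));	/* kzalloc(..., GFP_ATOMIC) analogue */
	if (!rwi) {
		pthread_spin_unlock(&rwi_lock);
		return ENOMEM;
	}
	rwi->reset_reason = reason;
	rwi->next = rwi_list;
	rwi_list = rwi;
	pthread_spin_unlock(&rwi_lock);
	return 0;
}

int main(void)
{
	pthread_spin_init(&rwi_lock, PTHREAD_PROCESS_PRIVATE);
	printf("reason 1 -> %d\n", queue_reset(1));	/* 0: queued */
	printf("reason 1 -> %d\n", queue_reset(1));	/* EBUSY: duplicate */
	printf("reason 2 -> %d\n", queue_reset(2));	/* 0: queued */
	return 0;
}
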
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 37c7694..e1f821e 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -173,10 +173,14 @@
 	struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
 						     ptp_clock_info);
 	unsigned long flags;
-	u64 ns;
+	u64 cycles, ns;
 
 	spin_lock_irqsave(&adapter->systim_lock, flags);
-	ns = timecounter_read(&adapter->tc);
+
+	/* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
+	cycles = adapter->cc.read(&adapter->cc);
+	ns = timecounter_cyc2time(&adapter->tc, cycles);
+
 	spin_unlock_irqrestore(&adapter->systim_lock, flags);
 
 	*ts = ns_to_timespec64(ns);
@@ -232,9 +236,12 @@
 						     systim_overflow_work.work);
 	struct e1000_hw *hw = &adapter->hw;
 	struct timespec64 ts;
+	u64 ns;
 
-	adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
+	/* Update the timecounter */
+	ns = timecounter_read(&adapter->tc);
 
+	ts = ns_to_timespec64(ns);
 	e_dbg("SYSTIM overflow check at %lld.%09lu\n",
 	      (long long) ts.tv_sec, ts.tv_nsec);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 7a80652..f84e2c2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -122,6 +122,7 @@
 	__I40E_MDD_EVENT_PENDING,
 	__I40E_VFLR_EVENT_PENDING,
 	__I40E_RESET_RECOVERY_PENDING,
+	__I40E_TIMEOUT_RECOVERY_PENDING,
 	__I40E_MISC_IRQ_REQUESTED,
 	__I40E_RESET_INTR_RECEIVED,
 	__I40E_REINIT_REQUESTED,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 3c34270..f81ad0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -336,6 +336,10 @@
 		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
 		return;   /* don't do any new action before the next timeout */
 
+	/* don't kick off another recovery if one is already pending */
+	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
+		return;
+
 	if (tx_ring) {
 		head = i40e_get_head(tx_ring);
 		/* Read interrupt register */
@@ -420,9 +424,9 @@
 				  struct rtnl_link_stats64 *stats)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_ring *tx_ring, *rx_ring;
 	struct i40e_vsi *vsi = np->vsi;
 	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+	struct i40e_ring *ring;
 	int i;
 
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
@@ -436,24 +440,26 @@
 		u64 bytes, packets;
 		unsigned int start;
 
-		tx_ring = READ_ONCE(vsi->tx_rings[i]);
-		if (!tx_ring)
+		ring = READ_ONCE(vsi->tx_rings[i]);
+		if (!ring)
 			continue;
-		i40e_get_netdev_stats_struct_tx(tx_ring, stats);
+		i40e_get_netdev_stats_struct_tx(ring, stats);
 
-		rx_ring = &tx_ring[1];
+		if (i40e_enabled_xdp_vsi(vsi)) {
+			ring++;
+			i40e_get_netdev_stats_struct_tx(ring, stats);
+		}
 
+		ring++;
 		do {
-			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
-			packets = rx_ring->stats.packets;
-			bytes   = rx_ring->stats.bytes;
-		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+			start   = u64_stats_fetch_begin_irq(&ring->syncp);
+			packets = ring->stats.packets;
+			bytes   = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 
 		stats->rx_packets += packets;
 		stats->rx_bytes   += bytes;
 
-		if (i40e_enabled_xdp_vsi(vsi))
-			i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
 	}
 	rcu_read_unlock();
 
@@ -1539,17 +1545,17 @@
 		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
 
 	/* Copy the address first, so that we avoid a possible race with
-	 * .set_rx_mode(). If we copy after changing the address in the filter
-	 * list, we might open ourselves to a narrow race window where
-	 * .set_rx_mode could delete our dev_addr filter and prevent traffic
-	 * from passing.
+	 * .set_rx_mode().
+	 * - Remove old address from MAC filter
+	 * - Copy new address
+	 * - Add new address to MAC filter
 	 */
-	ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 	i40e_del_mac_filter(vsi, netdev->dev_addr);
-	i40e_add_mac_filter(vsi, addr->sa_data);
+	ether_addr_copy(netdev->dev_addr, addr->sa_data);
+	i40e_add_mac_filter(vsi, netdev->dev_addr);
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
 	if (vsi->type == I40E_VSI_MAIN) {
 		i40e_status ret;
 
@@ -9566,6 +9572,7 @@
 	clear_bit(__I40E_RESET_FAILED, pf->state);
 clear_recovery:
 	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
 }
 
 /**
@@ -12011,6 +12018,9 @@
 	ether_addr_copy(netdev->dev_addr, mac_addr);
 	ether_addr_copy(netdev->perm_addr, mac_addr);
 
+	/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
+	netdev->neigh_priv_len = sizeof(u32) * 4;
+
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
 	/* Setup netdev TC information */
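
/* Minimal sketch of the single-owner gate that the
 * __I40E_TIMEOUT_RECOVERY_PENDING hunks above add: test_and_set_bit()
 * returns the previous bit value, so the first TX-timeout path wins and
 * later ones bail out until the reset path clears the bit. C11's
 * atomic_flag has the same test-and-set semantics, so it stands in for
 * the kernel bit operation here.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag recovery_pending = ATOMIC_FLAG_INIT;

static void tx_timeout(int caller)
{
	/* true return: someone else already owns the recovery */
	if (atomic_flag_test_and_set(&recovery_pending)) {
		printf("caller %d: recovery already pending, skip\n", caller);
		return;
	}
	printf("caller %d: starting recovery\n", caller);
}

int main(void)
{
	tx_timeout(1);				/* wins the race */
	tx_timeout(2);				/* skips */
	atomic_flag_clear(&recovery_pending);	/* reset path finished */
	tx_timeout(3);				/* may recover again */
	return 0;
}
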
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 3f047bb..db1543b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4333,8 +4333,12 @@
 	if (!vsi->netdev)
 		return;
 
-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_enable(&vsi->q_vectors[q_idx]->napi);
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+		if (q_vector->rx.ring || q_vector->tx.ring)
+			napi_enable(&q_vector->napi);
+	}
 }
 
 /**
@@ -4817,8 +4821,12 @@
 	if (!vsi->netdev)
 		return;
 
-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_disable(&vsi->q_vectors[q_idx]->napi);
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+		if (q_vector->rx.ring || q_vector->tx.ring)
+			napi_disable(&q_vector->napi);
+	}
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 0796cef..ffaa6e0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8770,9 +8770,11 @@
 	rtnl_unlock();
 
 #ifdef CONFIG_PM
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
+	if (!runtime) {
+		retval = pci_save_state(pdev);
+		if (retval)
+			return retval;
+	}
 #endif
 
 	status = rd32(E1000_STATUS);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index add124e..b27f7a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -4,6 +4,7 @@
 #include "ixgbe.h"
 #include <net/xfrm.h>
 #include <crypto/aead.h>
+#include <linux/if_bridge.h>
 
 /**
  * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
@@ -676,7 +677,8 @@
 	} else {
 		struct tx_sa tsa;
 
-		if (adapter->num_vfs)
+		if (adapter->num_vfs &&
+		    adapter->bridge_mode != BRIDGE_MODE_VEPA)
 			return -EOPNOTSUPP;
 
 		/* find the first unused index */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index eea63a9..f6ffd9f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -699,7 +699,6 @@
 	u8 num_tcs = adapter->hw_tcs;
 	u32 reg_val;
 	u32 queue;
-	u32 word;
 
 	/* remove VLAN filters belonging to this VF */
 	ixgbe_clear_vf_vlans(adapter, vf);
@@ -754,6 +753,14 @@
 		}
 	}
 
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 word;
+
 	/* Clear VF's mailbox memory */
 	for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
@@ -827,6 +834,8 @@
 	/* reset the filters for the device */
 	ixgbe_vf_reset_event(adapter, vf);
 
+	ixgbe_vf_clear_mbx(adapter, vf);
+
 	/* set vf mac address */
 	if (!is_zero_ether_addr(vf_mac))
 		ixgbe_set_vf_mac(adapter, vf, vf_mac);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b4ed7d3..a78a392 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -406,7 +406,6 @@
 	struct mvneta_pcpu_stats __percpu	*stats;
 
 	int pkt_size;
-	unsigned int frag_size;
 	void __iomem *base;
 	struct mvneta_rx_queue *rxqs;
 	struct mvneta_tx_queue *txqs;
@@ -2905,7 +2904,9 @@
 	if (!pp->bm_priv) {
 		/* Set Offset */
 		mvneta_rxq_offset_set(pp, rxq, 0);
-		mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
+		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
+					PAGE_SIZE :
+					MVNETA_RX_BUF_SIZE(pp->pkt_size));
 		mvneta_rxq_bm_disable(pp, rxq);
 		mvneta_rxq_fill(pp, rxq, rxq->size);
 	} else {
@@ -3749,7 +3750,6 @@
 	int ret;
 
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
-	pp->frag_size = PAGE_SIZE;
 
 	ret = mvneta_setup_rxqs(pp);
 	if (ret)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6320e08..f8e4808 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4292,12 +4292,15 @@
 	case PHY_INTERFACE_MODE_10GKR:
 	case PHY_INTERFACE_MODE_XAUI:
 	case PHY_INTERFACE_MODE_NA:
-		phylink_set(mask, 10000baseCR_Full);
-		phylink_set(mask, 10000baseSR_Full);
-		phylink_set(mask, 10000baseLR_Full);
-		phylink_set(mask, 10000baseLRM_Full);
-		phylink_set(mask, 10000baseER_Full);
-		phylink_set(mask, 10000baseKR_Full);
+		if (port->gop_id == 0) {
+			phylink_set(mask, 10000baseT_Full);
+			phylink_set(mask, 10000baseCR_Full);
+			phylink_set(mask, 10000baseSR_Full);
+			phylink_set(mask, 10000baseLR_Full);
+			phylink_set(mask, 10000baseLRM_Full);
+			phylink_set(mask, 10000baseER_Full);
+			phylink_set(mask, 10000baseKR_Full);
+		}
 		/* Fall-through */
 	case PHY_INTERFACE_MODE_RGMII:
 	case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4308,7 +4311,6 @@
 		phylink_set(mask, 10baseT_Full);
 		phylink_set(mask, 100baseT_Half);
 		phylink_set(mask, 100baseT_Full);
-		phylink_set(mask, 10000baseT_Full);
 		/* Fall-through */
 	case PHY_INTERFACE_MODE_1000BASEX:
 	case PHY_INTERFACE_MODE_2500BASEX:
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9c08c36..15dea48 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -152,8 +152,10 @@
 	memset(p, 0, regs->len);
 	memcpy_fromio(p, io, B3_RAM_ADDR);
 
-	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
-		      regs->len - B3_RI_WTO_R1);
+	if (regs->len > B3_RI_WTO_R1) {
+		memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+			      regs->len - B3_RI_WTO_R1);
+	}
 }
 
 /* Wake on Lan only supported on Yukon chips with rev 1 or above */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 697d9b3..1485f66 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -46,6 +46,7 @@
 #include <linux/mii.h>
 #include <linux/of_device.h>
 #include <linux/of_net.h>
+#include <linux/dmi.h>
 
 #include <asm/irq.h>
 
@@ -93,7 +94,7 @@
 module_param(copybreak, int, 0);
 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
 
-static int disable_msi = 0;
+static int disable_msi = -1;
 module_param(disable_msi, int, 0);
 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
 
@@ -4931,6 +4932,24 @@
 	return buf;
 }
 
+static const struct dmi_system_id msi_blacklist[] = {
+	{
+		.ident = "Dell Inspiron 1545",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
+		},
+	},
+	{
+		.ident = "Gateway P-79",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
+		},
+	},
+	{}
+};
+
 static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct net_device *dev, *dev1;
@@ -5042,6 +5061,9 @@
 		goto err_out_free_pci;
 	}
 
+	if (disable_msi == -1)
+		disable_msi = !!dmi_check_system(msi_blacklist);
+
 	if (!disable_msi && pci_enable_msi(pdev) == 0) {
 		err = sky2_test_msi(hw);
 		if (err) {
@@ -5087,7 +5109,7 @@
 	INIT_WORK(&hw->restart_work, sky2_restart);
 
 	pci_set_drvdata(pdev, hw);
-	pdev->d3_delay = 200;
+	pdev->d3_delay = 300;
 
 	return 0;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e65bc3c..857588e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2645,6 +2645,8 @@
 	if (!priv->cmd.context)
 		return -ENOMEM;
 
+	if (mlx4_is_mfunc(dev))
+		mutex_lock(&priv->cmd.slave_cmd_mutex);
 	down_write(&priv->cmd.switch_sem);
 	for (i = 0; i < priv->cmd.max_cmds; ++i) {
 		priv->cmd.context[i].token = i;
@@ -2670,6 +2672,8 @@
 	down(&priv->cmd.poll_sem);
 	priv->cmd.use_events = 1;
 	up_write(&priv->cmd.switch_sem);
+	if (mlx4_is_mfunc(dev))
+		mutex_unlock(&priv->cmd.slave_cmd_mutex);
 
 	return err;
 }
@@ -2682,6 +2686,8 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
 
+	if (mlx4_is_mfunc(dev))
+		mutex_lock(&priv->cmd.slave_cmd_mutex);
 	down_write(&priv->cmd.switch_sem);
 	priv->cmd.use_events = 0;
 
@@ -2689,9 +2695,12 @@
 		down(&priv->cmd.event_sem);
 
 	kfree(priv->cmd.context);
+	priv->cmd.context = NULL;
 
 	up(&priv->cmd.poll_sem);
 	up_write(&priv->cmd.switch_sem);
+	if (mlx4_is_mfunc(dev))
+		mutex_unlock(&priv->cmd.slave_cmd_mutex);
 }
 
 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a1aeeb8..f5cd953 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -620,6 +620,8 @@
 }
 #endif
 
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
 /* We reach this function only after checking that any of
  * the (IPv4 | IPv6) bits are set in cqe->status.
  */
@@ -627,9 +629,20 @@
 		      netdev_features_t dev_features)
 {
 	__wsum hw_checksum = 0;
+	void *hdr;
 
-	void *hdr = (u8 *)va + sizeof(struct ethhdr);
+	/* The CQE checksum doesn't cover padding octets in short Ethernet
+	 * frames, and the pad bytes are appended before the FCS is
+	 * calculated and appended.
+	 *
+	 * Detecting these padded frames requires verifying and parsing the
+	 * IP headers, so we simply force all such small frames to skip
+	 * checksum complete.
+	 */
+	if (short_frame(skb->len))
+		return -EINVAL;
 
+	hdr = (u8 *)va + sizeof(struct ethhdr);
 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
 
 	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
@@ -822,6 +835,11 @@
 		skb_record_rx_queue(skb, cq_ring);
 
 		if (likely(dev->features & NETIF_F_RXCSUM)) {
+			/* TODO: For non-TCP/UDP IP packets, when checksum
+			 * complete is not an option (unsupported or any other
+			 * reason), we could check the CQE IPOK status bit and
+			 * report CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
+			 */
 			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
 						       MLX4_CQE_STATUS_UDP)) &&
 			    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
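
/* The short_frame() test above (and its mlx5 twin later in this patch)
 * gates checksum-complete on one length comparison: a frame no longer
 * than the minimum Ethernet frame (ETH_ZLEN, 60 bytes) plus the 4-byte
 * FCS may carry pad octets that the CQE checksum does not cover.
 * Standalone illustration of the threshold:
 */
#include <stdio.h>

#define ETH_ZLEN    60	/* minimum frame length, FCS excluded */
#define ETH_FCS_LEN  4

#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

int main(void)
{
	/* a tiny datagram is padded up to 60 bytes on the wire, so its
	 * length lands at or under the threshold and csum is distrusted */
	printf("len   60: short=%d -> skip csum complete\n", short_frame(60));
	printf("len 1514: short=%d -> csum complete trusted\n",
	       short_frame(1514));
	return 0;
}
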
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index babcfd9..7521304 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2064,9 +2064,11 @@
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	__be32 *outbox;
+	u64 qword_field;
 	u32 dword_field;
-	int err;
+	u16 word_field;
 	u8 byte_field;
+	int err;
 	static const u8 a0_dmfs_query_hw_steering[] =  {
 		[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
 		[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2094,19 +2096,32 @@
 
 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
 
-	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
-	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
-	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
-	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
-	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
-	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
-	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
-	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
-	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
-	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
-	MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
-	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
-	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+	MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
+	param->qpc_base = qword_field & ~((u64)0x1f);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
+	param->log_num_qps = byte_field & 0x1f;
+	MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+	param->srqc_base = qword_field & ~((u64)0x1f);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+	param->log_num_srqs = byte_field & 0x1f;
+	MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
+	param->cqc_base = qword_field & ~((u64)0x1f);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
+	param->log_num_cqs = byte_field & 0x1f;
+	MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+	param->altc_base = qword_field;
+	MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+	param->auxc_base = qword_field;
+	MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
+	param->eqc_base = qword_field & ~((u64)0x1f);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
+	param->log_num_eqs = byte_field & 0x1f;
+	MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+	param->num_sys_eqs = word_field & 0xfff;
+	MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+	param->rdmarc_base = qword_field & ~((u64)0x1f);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
+	param->log_rd_per_qp = byte_field & 0x7;
 
 	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
 	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2125,22 +2140,21 @@
 	/* steering attributes */
 	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
 		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
-		MLX4_GET(param->log_mc_entry_sz, outbox,
-			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
-		MLX4_GET(param->log_mc_table_sz, outbox,
-			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
-		MLX4_GET(byte_field, outbox,
-			 INIT_HCA_FS_A0_OFFSET);
+		MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+		param->log_mc_entry_sz = byte_field & 0x1f;
+		MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+		param->log_mc_table_sz = byte_field & 0x1f;
+		MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
 		param->dmfs_high_steer_mode =
 			a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
 	} else {
 		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
-		MLX4_GET(param->log_mc_entry_sz, outbox,
-			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
-		MLX4_GET(param->log_mc_hash_sz,  outbox,
-			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-		MLX4_GET(param->log_mc_table_sz, outbox,
-			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+		MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+		param->log_mc_entry_sz = byte_field & 0x1f;
+		MLX4_GET(byte_field,  outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+		param->log_mc_hash_sz = byte_field & 0x1f;
+		MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+		param->log_mc_table_sz = byte_field & 0x1f;
 	}
 
 	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2164,15 +2178,18 @@
 	/* TPT attributes */
 
 	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
-	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
-	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+	MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
+	param->mw_enabled = byte_field >> 7;
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+	param->log_mpt_sz = byte_field & 0x3f;
 	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
 	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
 
 	/* UAR attributes */
 
 	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
-	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+	param->log_uar_sz = byte_field & 0xf;
 
 	/* phv_check enable */
 	MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 7262c63..288fca8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -57,12 +57,12 @@
 	int i;
 
 	if (chunk->nsg > 0)
-		pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
+		pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
 			     PCI_DMA_BIDIRECTIONAL);
 
 	for (i = 0; i < chunk->npages; ++i)
-		__free_pages(sg_page(&chunk->mem[i]),
-			     get_order(chunk->mem[i].length));
+		__free_pages(sg_page(&chunk->sg[i]),
+			     get_order(chunk->sg[i].length));
 }
 
 static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -71,9 +71,9 @@
 
 	for (i = 0; i < chunk->npages; ++i)
 		dma_free_coherent(&dev->persist->pdev->dev,
-				  chunk->mem[i].length,
-				  lowmem_page_address(sg_page(&chunk->mem[i])),
-				  sg_dma_address(&chunk->mem[i]));
+				  chunk->buf[i].size,
+				  chunk->buf[i].addr,
+				  chunk->buf[i].dma_addr);
 }
 
 void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
@@ -111,22 +111,21 @@
 	return 0;
 }
 
-static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
-				    int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
+				   int order, gfp_t gfp_mask)
 {
-	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
-				       &sg_dma_address(mem), gfp_mask);
-	if (!buf)
+	buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
+				       &buf->dma_addr, gfp_mask);
+	if (!buf->addr)
 		return -ENOMEM;
 
-	if (offset_in_page(buf)) {
-		dma_free_coherent(dev, PAGE_SIZE << order,
-				  buf, sg_dma_address(mem));
+	if (offset_in_page(buf->addr)) {
+		dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
+				  buf->dma_addr);
 		return -ENOMEM;
 	}
 
-	sg_set_buf(mem, buf, PAGE_SIZE << order);
-	sg_dma_len(mem) = PAGE_SIZE << order;
+	buf->size = PAGE_SIZE << order;
 	return 0;
 }
 
@@ -159,21 +158,21 @@
 
 	while (npages > 0) {
 		if (!chunk) {
-			chunk = kmalloc_node(sizeof(*chunk),
+			chunk = kzalloc_node(sizeof(*chunk),
 					     gfp_mask & ~(__GFP_HIGHMEM |
 							  __GFP_NOWARN),
 					     dev->numa_node);
 			if (!chunk) {
-				chunk = kmalloc(sizeof(*chunk),
+				chunk = kzalloc(sizeof(*chunk),
 						gfp_mask & ~(__GFP_HIGHMEM |
 							     __GFP_NOWARN));
 				if (!chunk)
 					goto fail;
 			}
+			chunk->coherent = coherent;
 
-			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
-			chunk->npages = 0;
-			chunk->nsg    = 0;
+			if (!coherent)
+				sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
 			list_add_tail(&chunk->list, &icm->chunk_list);
 		}
 
@@ -186,10 +185,10 @@
 
 		if (coherent)
 			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
-						      &chunk->mem[chunk->npages],
-						      cur_order, mask);
+						&chunk->buf[chunk->npages],
+						cur_order, mask);
 		else
-			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
+			ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
 						   cur_order, mask,
 						   dev->numa_node);
 
@@ -205,7 +204,7 @@
 		if (coherent)
 			++chunk->nsg;
 		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
-			chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
+			chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
 						chunk->npages,
 						PCI_DMA_BIDIRECTIONAL);
 
@@ -220,7 +219,7 @@
 	}
 
 	if (!coherent && chunk) {
-		chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
+		chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
 					chunk->npages,
 					PCI_DMA_BIDIRECTIONAL);
 
@@ -320,7 +319,7 @@
 	u64 idx;
 	struct mlx4_icm_chunk *chunk;
 	struct mlx4_icm *icm;
-	struct page *page = NULL;
+	void *addr = NULL;
 
 	if (!table->lowmem)
 		return NULL;
@@ -336,28 +335,49 @@
 
 	list_for_each_entry(chunk, &icm->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i) {
-			if (dma_handle && dma_offset >= 0) {
-				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
-					*dma_handle = sg_dma_address(&chunk->mem[i]) +
-						dma_offset;
-				dma_offset -= sg_dma_len(&chunk->mem[i]);
+			dma_addr_t dma_addr;
+			size_t len;
+
+			if (table->coherent) {
+				len = chunk->buf[i].size;
+				dma_addr = chunk->buf[i].dma_addr;
+				addr = chunk->buf[i].addr;
+			} else {
+				struct page *page;
+
+				len = sg_dma_len(&chunk->sg[i]);
+				dma_addr = sg_dma_address(&chunk->sg[i]);
+
+				/* XXX: we should never do this for highmem
+				 * allocation.  This function either needs
+				 * to be split, or the kernel virtual address
+				 * return needs to be made optional.
+				 */
+				page = sg_page(&chunk->sg[i]);
+				addr = lowmem_page_address(page);
 			}
+
+			if (dma_handle && dma_offset >= 0) {
+				if (len > dma_offset)
+					*dma_handle = dma_addr + dma_offset;
+				dma_offset -= len;
+			}
+
 			/*
 			 * DMA mapping can merge pages but not split them,
 			 * so if we found the page, dma_handle has already
 			 * been assigned to.
 			 */
-			if (chunk->mem[i].length > offset) {
-				page = sg_page(&chunk->mem[i]);
+			if (len > offset)
 				goto out;
-			}
-			offset -= chunk->mem[i].length;
+			offset -= len;
 		}
 	}
 
+	addr = NULL;
 out:
 	mutex_unlock(&table->mutex);
-	return page ? lowmem_page_address(page) + offset : NULL;
+	return addr ? addr + offset : NULL;
 }
 
 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index c9169a4..d199874 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -47,11 +47,21 @@
 	MLX4_ICM_PAGE_SIZE	= 1 << MLX4_ICM_PAGE_SHIFT,
 };
 
+struct mlx4_icm_buf {
+	void			*addr;
+	size_t			size;
+	dma_addr_t		dma_addr;
+};
+
 struct mlx4_icm_chunk {
 	struct list_head	list;
 	int			npages;
 	int			nsg;
-	struct scatterlist	mem[MLX4_ICM_CHUNK_LEN];
+	bool			coherent;
+	union {
+		struct scatterlist	sg[MLX4_ICM_CHUNK_LEN];
+		struct mlx4_icm_buf	buf[MLX4_ICM_CHUNK_LEN];
+	};
 };
 
 struct mlx4_icm {
@@ -114,12 +124,18 @@
 
 static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
 {
-	return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+	if (iter->chunk->coherent)
+		return iter->chunk->buf[iter->page_idx].dma_addr;
+	else
+		return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
 }
 
 static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
 {
-	return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
+	if (iter->chunk->coherent)
+		return iter->chunk->buf[iter->page_idx].size;
+	else
+		return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
 }
 
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
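
/* Standalone shape of the discriminated union the icm.h hunk above
 * introduces: chunk->coherent, set once at allocation time, selects
 * whether the chunk carries a scatterlist or direct coherent buffers,
 * and every accessor (as in mlx4_icm_addr()) checks it. Types here are
 * toy stand-ins for the kernel ones.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_sg  { uint64_t dma_addr; size_t len; };
struct toy_buf { void *addr; size_t size; uint64_t dma_addr; };

struct toy_chunk {
	bool coherent;			/* discriminator */
	union {
		struct toy_sg  sg[4];	/* non-coherent: scatterlist */
		struct toy_buf buf[4];	/* coherent: kva + dma addr */
	};
};

static uint64_t chunk_dma_addr(const struct toy_chunk *c, int i)
{
	return c->coherent ? c->buf[i].dma_addr : c->sg[i].dma_addr;
}

int main(void)
{
	struct toy_chunk c = { .coherent = true };

	c.buf[0] = (struct toy_buf){ .addr = &c, .size = 4096,
				     .dma_addr = 0x1000 };
	printf("dma addr = 0x%llx\n",
	       (unsigned long long)chunk_dma_addr(&c, 0));
	return 0;
}
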
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 31bd567..676428a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2719,13 +2719,13 @@
 	int total_pages;
 	int total_mem;
 	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+	int tot;
 
 	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
 	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
 	total_mem = sq_size + rq_size;
-	total_pages =
-		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
-				   page_shift);
+	tot = (total_mem + (page_offset << 6)) >> page_shift;
+	total_pages = !tot ? 1 : roundup_pow_of_two(tot);
 
 	return total_pages;
 }
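
/* The resource_tracker hunk above special-cases tot == 0 because
 * rounding zero up to a power of two is undefined (roughly, the kernel
 * helper computes 1 << fls(n - 1), which misbehaves when n == 0 wraps
 * around). Portable stand-in for the helper plus the guard:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow_of_two_u32(uint32_t n)
{
	/* classic bit-smearing round-up; only defined for n >= 1 */
	n--;
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	return n + 1;
}

int main(void)
{
	for (uint32_t tot = 0; tot <= 5; tot++) {
		uint32_t total_pages = !tot ? 1 : roundup_pow_of_two_u32(tot);

		printf("tot=%u -> total_pages=%u\n", tot, total_pages);
	}
	return 0;
}
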
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 16ceeb1..da52e60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -633,6 +633,7 @@
 	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
 	MLX5E_STATE_OPENED,
 	MLX5E_STATE_DESTROYING,
+	MLX5E_STATE_XDP_TX_ENABLED,
 };
 
 struct mlx5e_rqt {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index ad6d471..4a33c9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -262,7 +262,8 @@
 	int sq_num;
 	int i;
 
-	if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
+	/* this flag is sufficient, no need to test internal sq state */
+	if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
 		return -ENETDOWN;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -275,9 +276,6 @@
 
 	sq = &priv->channels.c[sq_num]->xdpsq;
 
-	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-		return -ENETDOWN;
-
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
 		struct mlx5e_xdp_info xdpi;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 6dfab04..4d09662 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -49,6 +49,23 @@
 int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		   u32 flags);
 
+static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
+{
+	set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+}
+
+static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
+{
+	clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+	/* let other device's napi(s) see our new state */
+	synchronize_rcu();
+}
+
+static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
+{
+	return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+}
+
 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
 {
 	struct mlx5_wq_cyc *wq = &sq->wq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 98dd3e0..5e54230 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1101,11 +1101,6 @@
 			      struct ethtool_ts_info *info)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
-	int ret;
-
-	ret = ethtool_op_get_ts_info(priv->netdev, info);
-	if (ret)
-		return ret;
 
 	info->phc_index = mlx5_clock_get_ptp_index(mdev);
 
@@ -1113,9 +1108,9 @@
 	    info->phc_index == -1)
 		return 0;
 
-	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
-				 SOF_TIMESTAMPING_RX_HARDWARE |
-				 SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+				SOF_TIMESTAMPING_RX_HARDWARE |
+				SOF_TIMESTAMPING_RAW_HARDWARE;
 
 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
 			 BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index faa84b4..637d59c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -128,6 +128,8 @@
 	return !params->lro_en && frag_sz <= PAGE_SIZE;
 }
 
+#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
+					  MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
 static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
 					 struct mlx5e_params *params)
 {
@@ -138,6 +140,9 @@
 	if (!mlx5e_rx_is_linear_skb(mdev, params))
 		return false;
 
+	if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+		return false;
+
 	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
 		return true;
 
@@ -1383,6 +1388,7 @@
 	struct mlx5_core_dev *mdev = c->mdev;
 	struct mlx5_rate_limit rl = {0};
 
+	cancel_work_sync(&sq->dim.work);
 	mlx5e_destroy_sq(mdev, sq->sqn);
 	if (sq->rate_limit) {
 		rl.rate = sq->rate_limit;
@@ -1752,7 +1758,7 @@
 
 static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
 {
-	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+	return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
 }
 
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
@@ -2884,6 +2890,7 @@
 
 	mlx5e_build_tx2sq_maps(priv);
 	mlx5e_activate_channels(&priv->channels);
+	mlx5e_xdp_tx_enable(priv);
 	netif_tx_start_all_queues(priv->netdev);
 
 	if (MLX5_ESWITCH_MANAGER(priv->mdev))
@@ -2905,6 +2912,7 @@
 	 */
 	netif_tx_stop_all_queues(priv->netdev);
 	netif_tx_disable(priv->netdev);
+	mlx5e_xdp_tx_disable(priv);
 	mlx5e_deactivate_channels(&priv->channels);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index c9cc974..701624a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -144,6 +144,7 @@
 
 			s->tx_packets		+= sq_stats->packets;
 			s->tx_bytes		+= sq_stats->bytes;
+			s->tx_queue_dropped	+= sq_stats->dropped;
 		}
 	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d543a5c..d3f794d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -707,6 +707,8 @@
 	return __get_unaligned_cpu32(fcs_bytes);
 }
 
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
 				     struct mlx5_cqe64 *cqe,
 				     struct mlx5e_rq *rq,
@@ -725,6 +727,17 @@
 		return;
 	}
 
+	/* The CQE checksum doesn't cover padding octets in short Ethernet
+	 * frames, and the pad bytes are appended before the FCS is
+	 * calculated and appended.
+	 *
+	 * Detecting these padded frames requires verifying and parsing the
+	 * IP headers, so we simply force all such small frames to be
+	 * CHECKSUM_UNNECESSARY even if they are not padded.
+	 */
+	if (short_frame(skb->len))
+		goto csum_unnecessary;
+
 	if (likely(is_last_ethertype_ip(skb, &network_depth))) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
@@ -744,6 +757,7 @@
 		return;
 	}
 
+csum_unnecessary:
 	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
 		   (cqe->hds_ip_ext & CQE_L4_OK))) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1150,7 +1164,7 @@
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-	struct mlx5e_xdpsq *xdpsq;
+	struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
 	struct mlx5_cqe64 *cqe;
 	int work_done = 0;
 
@@ -1161,10 +1175,11 @@
 		work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
 
 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
-	if (!cqe)
+	if (!cqe) {
+		if (unlikely(work_done))
+			goto out;
 		return 0;
-
-	xdpsq = &rq->xdpsq;
+	}
 
 	do {
 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
@@ -1179,6 +1194,7 @@
 		rq->handle_rx_cqe(rq, cqe);
 	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
+out:
 	if (xdpsq->doorbell) {
 		mlx5e_xmit_xdp_doorbell(xdpsq);
 		xdpsq->doorbell = false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index d57d51c..7047cc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -73,7 +73,6 @@
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
@@ -194,7 +193,6 @@
 			s->tx_nop               += sq_stats->nop;
 			s->tx_queue_stopped	+= sq_stats->stopped;
 			s->tx_queue_wake	+= sq_stats->wake;
-			s->tx_udp_seg_rem	+= sq_stats->udp_seg_rem;
 			s->tx_queue_dropped	+= sq_stats->dropped;
 			s->tx_cqe_err		+= sq_stats->cqe_err;
 			s->tx_recover		+= sq_stats->recover;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index c1064af..0ad7a16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -86,7 +86,6 @@
 	u64 tx_recover;
 	u64 tx_cqes;
 	u64 tx_queue_wake;
-	u64 tx_udp_seg_rem;
 	u64 tx_cqe_err;
 	u64 tx_xdp_xmit;
 	u64 tx_xdp_full;
@@ -217,7 +216,6 @@
 	u64 csum_partial_inner;
 	u64 added_vlan_packets;
 	u64 nop;
-	u64 udp_seg_rem;
 #ifdef CONFIG_MLX5_EN_TLS
 	u64 tls_ooo;
 	u64 tls_resync_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3092c59..9f7f842 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -96,6 +96,7 @@
 	struct ip_tunnel_info tun_info;
 	struct mlx5_flow_spec spec;
 	int num_mod_hdr_actions;
+	int max_mod_hdr_actions;
 	void *mod_hdr_actions;
 	int mirred_ifindex;
 };
@@ -1742,9 +1743,9 @@
 	OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
 };
 
-/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
- * max from the SW pedit action. On success, it says how many HW actions were
- * actually parsed.
+/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
+ * max from the SW pedit action. On success, attr->num_mod_hdr_actions
+ * says how many HW actions were actually parsed.
  */
 static int offload_pedit_fields(struct pedit_headers *masks,
 				struct pedit_headers *vals,
@@ -1767,9 +1768,11 @@
 	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
 
 	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
-	action = parse_attr->mod_hdr_actions;
-	max_actions = parse_attr->num_mod_hdr_actions;
-	nactions = 0;
+	action = parse_attr->mod_hdr_actions +
+		 parse_attr->num_mod_hdr_actions * action_size;
+
+	max_actions = parse_attr->max_mod_hdr_actions;
+	nactions = parse_attr->num_mod_hdr_actions;
 
 	for (i = 0; i < ARRAY_SIZE(fields); i++) {
 		f = &fields[i];
@@ -1874,7 +1877,7 @@
 	if (!parse_attr->mod_hdr_actions)
 		return -ENOMEM;
 
-	parse_attr->num_mod_hdr_actions = max_actions;
+	parse_attr->max_mod_hdr_actions = max_actions;
 	return 0;
 }
 
@@ -1918,9 +1921,11 @@
 			goto out_err;
 	}
 
-	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
-	if (err)
-		goto out_err;
+	if (!parse_attr->mod_hdr_actions) {
+		err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
+		if (err)
+			goto out_err;
+	}
 
 	err = offload_pedit_fields(masks, vals, parse_attr);
 	if (err < 0)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 6dacaeb..0b03d654 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -387,8 +387,14 @@
 	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
 	if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+#ifdef CONFIG_MLX5_EN_IPSEC
+		struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
+#endif
 		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 		mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
+#ifdef CONFIG_MLX5_EN_IPSEC
+		wqe->eth = cur_eth;
+#endif
 	}
 
 	/* fill wqe */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ea7dedc..d670647 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1133,13 +1133,6 @@
 	int err = 0;
 	u8 *smac_v;
 
-	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
-		mlx5_core_warn(esw->dev,
-			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
-			       vport->vport);
-		return -EPERM;
-	}
-
 	esw_vport_cleanup_ingress_rules(esw, vport);
 
 	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1696,7 +1689,7 @@
 	int vport_num;
 	int err;
 
-	if (!MLX5_ESWITCH_MANAGER(dev))
+	if (!MLX5_VPORT_MANAGER(dev))
 		return 0;
 
 	esw_info(dev,
@@ -1765,7 +1758,7 @@
 
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 {
-	if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
+	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
 		return;
 
 	esw_info(esw->dev, "cleanup\n");
@@ -1812,13 +1805,10 @@
 	mutex_lock(&esw->state_lock);
 	evport = &esw->vports[vport];
 
-	if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
+	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
 		mlx5_core_warn(esw->dev,
-			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
 			       vport);
-		err = -EPERM;
-		goto unlock;
-	}
 
 	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
 	if (err) {
@@ -1964,6 +1954,10 @@
 	evport = &esw->vports[vport];
 	pschk = evport->info.spoofchk;
 	evport->info.spoofchk = spoofchk;
+	if (pschk && !is_valid_ether_addr(evport->info.mac))
+		mlx5_core_warn(esw->dev,
+			       "Spoofchk is set while MAC is invalid, vport(%d)\n",
+			       evport->vport);
 	if (evport->enabled && esw->mode == SRIOV_LEGACY)
 		err = esw_vport_ingress_config(esw, evport);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 37d114c..d181645 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -432,7 +432,7 @@
 
 	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
 	    --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
+		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
 		update_fte = true;
 	}
 out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 3f767cd..54f1a40 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -511,14 +511,14 @@
 			 ktime_to_ns(ktime_get_real()));
 
 	/* Calculate period in seconds to call the overflow watchdog - to make
-	 * sure counter is checked at least once every wrap around.
+	 * sure counter is checked at least twice every wrap around.
 	 * The period is calculated as the minimum between max HW cycles count
 	 * (The clock source mask) and max amount of cycles that can be
 	 * multiplied by clock multiplier where the result doesn't exceed
 	 * 64bits.
 	 */
 	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
-	overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+	overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
 
 	ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
 				 frac, &frac);
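
/* Standalone version of the overflow-watchdog arithmetic in the clock.c
 * hunk above: the rearm period is the smaller of the cycle count whose
 * ns product still fits in 63 bits and a third of the counter mask, so
 * the timecounter is folded at least twice per wrap even if the work
 * item is delayed. The mask and mult values below are made up.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = (1ULL << 41) - 1;	/* hypothetical counter width */
	uint32_t mult = 1U << 21;		/* hypothetical multiplier */

	/* max cycles whose ns product still fits in 63 bits ... */
	uint64_t overflow_cycles = (~0ULL >> 1) / mult;

	/* ... clamped so the counter is read >= twice per wrap-around */
	if (overflow_cycles > mask / 3)
		overflow_cycles = mask / 3;

	printf("overflow_cycles = %" PRIu64 " (mask / 3 = %" PRIu64 ")\n",
	       overflow_cycles, mask / 3);
	return 0;
}
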
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b5e9f66..563ce3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -640,18 +640,19 @@
 static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 {
 	struct mlx5_priv *priv  = &mdev->priv;
-	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+	int irq = pci_irq_vector(mdev->pdev, vecidx);
 
-	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&priv->irq_info[vecidx].mask, GFP_KERNEL)) {
 		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
 		return -ENOMEM;
 	}
 
 	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
-			priv->irq_info[i].mask);
+			priv->irq_info[vecidx].mask);
 
 	if (IS_ENABLED(CONFIG_SMP) &&
-	    irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+	    irq_set_affinity_hint(irq, priv->irq_info[vecidx].mask))
 		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
 
 	return 0;
@@ -659,11 +660,12 @@
 
 static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
 {
+	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
 	struct mlx5_priv *priv  = &mdev->priv;
-	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+	int irq = pci_irq_vector(mdev->pdev, vecidx);
 
 	irq_set_affinity_hint(irq, NULL);
-	free_cpumask_var(priv->irq_info[i].mask);
+	free_cpumask_var(priv->irq_info[vecidx].mask);
 }
 
 static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 30f751e..f7154f35 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -81,6 +81,7 @@
 	struct mlxsw_core_port *ports;
 	unsigned int max_ports;
 	bool reload_fail;
+	bool fw_flash_in_progress;
 	unsigned long driver_priv[0];
 	/* driver_priv has to be always the last item */
 };
@@ -428,12 +429,16 @@
 	struct rcu_head rcu;
 };
 
-#define MLXSW_EMAD_TIMEOUT_MS 200
+#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS	3000
+#define MLXSW_EMAD_TIMEOUT_MS			200
 
 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
 {
 	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
 
+	if (trans->core->fw_flash_in_progress)
+		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
+
 	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
 }
 
@@ -1854,6 +1859,18 @@
 }
 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
 
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
+{
+	mlxsw_core->fw_flash_in_progress = true;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
+
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
+{
+	mlxsw_core->fw_flash_in_progress = false;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
+
 static int __init mlxsw_core_module_init(void)
 {
 	int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c35be47..c4e4971 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -292,6 +292,9 @@
 			     u64 *p_single_size, u64 *p_double_size,
 			     u64 *p_linear_size);
 
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
+
 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
 			  enum mlxsw_res_id res_id);
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 5890fdf..a903e97 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -604,29 +604,31 @@
 		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
 		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
 		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+
+		memcpy(ncqe, cqe, q->elem_size);
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
 
 		if (sendq) {
 			struct mlxsw_pci_queue *sdq;
 
 			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
 			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
-						 wqe_counter, cqe);
+						 wqe_counter, ncqe);
 			q->u.cq.comp_sdq_count++;
 		} else {
 			struct mlxsw_pci_queue *rdq;
 
 			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
 			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
-						 wqe_counter, q->u.cq.v, cqe);
+						 wqe_counter, q->u.cq.v, ncqe);
 			q->u.cq.comp_rdq_count++;
 		}
 		if (++items == credits)
 			break;
 	}
-	if (items) {
-		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+	if (items)
 		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
-	}
 }
 
 static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
@@ -1365,10 +1367,10 @@
 		u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
 
 		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
-			break;
+			return 0;
 		cond_resched();
 	} while (time_before(jiffies, end));
-	return 0;
+	return -EBUSY;
 }
 
 static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
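
/* Single-threaded model of the ownership rule behind the mlxsw_pci hunk
 * above: the consumer doorbell returns the queue element to the device,
 * which may overwrite it immediately, so the CQE must be copied to a
 * local buffer before the doorbell is rung. Names are invented.
 */
#include <stdio.h>
#include <string.h>

#define CQE_SIZE_MAX 32

static char hw_slot[CQE_SIZE_MAX];	/* stands in for the DMA'd element */

static void doorbell_consumer_ring(void)
{
	/* the device owns the slot again and may scribble on it */
	memset(hw_slot, 0xff, sizeof(hw_slot));
}

int main(void)
{
	char ncqe[CQE_SIZE_MAX];

	memcpy(hw_slot, "completion-payload", 19);

	memcpy(ncqe, hw_slot, sizeof(ncqe));	/* snapshot first ... */
	doorbell_consumer_ring();		/* ... then release the slot */

	printf("processed: %s\n", ncqe);	/* safe: local copy */
	return 0;
}
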
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 83f452b..72cdaa0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
 
 #define MLXSW_PCI_SW_RESET			0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	5000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	13000
 #define MLXSW_PCI_SW_RESET_WAIT_MSECS		100
 #define MLXSW_PCI_FW_READY			0xA1844
 #define MLXSW_PCI_FW_READY_MASK			0xFFFF
@@ -53,6 +53,7 @@
 #define MLXSW_PCI_WQE_SIZE	32 /* 32 bytes per element */
 #define MLXSW_PCI_CQE01_SIZE	16 /* 16 bytes per element */
 #define MLXSW_PCI_CQE2_SIZE	32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE_MAX	MLXSW_PCI_CQE2_SIZE
 #define MLXSW_PCI_EQE_SIZE	16 /* 16 bytes per element */
 #define MLXSW_PCI_WQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
 #define MLXSW_PCI_CQE01_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ada644d..a12b571 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -308,8 +308,13 @@
 		},
 		.mlxsw_sp = mlxsw_sp
 	};
+	int err;
 
-	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+	mlxsw_core_fw_flash_start(mlxsw_sp->core);
+	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+	mlxsw_core_fw_flash_end(mlxsw_sp->core);
+
+	return err;
 }
 
 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
@@ -836,8 +841,9 @@
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 		bool configure = false;
 		bool pfc = false;
+		u16 thres_cells;
+		u16 delay_cells;
 		bool lossy;
-		u16 thres;
 
 		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
 			if (prio_tc[j] == i) {
@@ -851,10 +857,11 @@
 			continue;
 
 		lossy = !(pfc || pause_en);
-		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
-		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
-						  pause_en);
-		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
+		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
+							pfc, pause_en);
+		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
+				     thres_cells, lossy);
 	}
 
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
@@ -4230,6 +4237,25 @@
 	dev_put(mlxsw_sp_port->dev);
 }
 
+static void
+mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
+				 struct net_device *lag_dev)
+{
+	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
+	struct net_device *upper_dev;
+	struct list_head *iter;
+
+	if (netif_is_bridge_port(lag_dev))
+		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
+
+	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+		if (!netif_is_bridge_port(upper_dev))
+			continue;
+		br_dev = netdev_master_upper_dev_get(upper_dev);
+		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
+	}
+}
+
 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
 {
 	char sldr_pl[MLXSW_REG_SLDR_LEN];
@@ -4422,6 +4448,10 @@
 
 	/* Any VLANs configured on the port are no longer valid */
 	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+	/* Make the LAG and its directly linked uppers leave bridges they
+	 * are member of
+	 */
+	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
 
 	if (lag->ref_count == 1)
 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -4630,12 +4660,15 @@
 							   lower_dev,
 							   upper_dev);
 		} else if (netif_is_lag_master(upper_dev)) {
-			if (info->linking)
+			if (info->linking) {
 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
 							     upper_dev);
-			else
+			} else {
+				mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
+							    false);
 				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
 							upper_dev);
+			}
 		} else if (netif_is_ovs_master(upper_dev)) {
 			if (info->linking)
 				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
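
The fw_flash change in this file brackets the flash operation with start/end notifications while still returning the operation's own status. A generic sketch of that bracket pattern, with invented names:

#include <errno.h>
#include <stdio.h>

static void flash_start(void) { puts("pause health checks"); }
static void flash_end(void)   { puts("resume health checks"); }
static int  do_flash(void)    { return -EIO; /* simulated failure */ }

static int fw_flash(void)
{
	int err;

	flash_start();
	err = do_flash();
	flash_end();          /* always runs, even when do_flash() fails */

	return err;           /* the operation's own status is preserved */
}

int main(void)
{
	printf("fw_flash: %d\n", fw_flash());
	return 0;
}
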
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index e3c6fe8..1dcf152 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -75,7 +75,15 @@
 	act_set = mlxsw_afa_block_first_set(rulei->act_block);
 	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
 
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+	if (err)
+		goto err_ptce2_write;
+
+	return 0;
+
+err_ptce2_write:
+	cregion->ops->entry_remove(cregion, centry);
+	return err;
 }
 
 static void
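
The ctcam hunk above converts a tail call into a checked register write that rolls back the just-inserted entry on failure. This is the kernel's usual goto-unwind error style; a compact, self-contained sketch with invented names:

#include <errno.h>
#include <stdio.h>

static int  entry_insert(void) { return 0; }
static void entry_remove(void) { puts("rolled back insert"); }
static int  reg_write(void)    { return -EIO; /* simulated failed write */ }

static int entry_create(void)
{
	int err;

	err = entry_insert();
	if (err)
		return err;

	err = reg_write();
	if (err)
		goto err_reg_write;   /* undo in reverse order of setup */

	return 0;

err_reg_write:
	entry_remove();
	return err;
}

int main(void)
{
	printf("entry_create: %d\n", entry_create());
	return 0;
}
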
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index e171513..30931a2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -95,8 +95,9 @@
 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
 		return -EIO;
 
-	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
-	if (rulei->priority > max_priority)
+	/* Priority range is 1..cap_kvd_size-1. */
+	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
+	if (rulei->priority >= max_priority)
 		return -EINVAL;
 
 	/* Unlike in TC, in HW, higher number means higher priority. */
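
The priority check above now validates against an exclusive upper bound: max_priority is cap_kvd_size - 1 and anything greater than or equal to it is rejected, matching the stated 1..cap_kvd_size-1 range. A tiny sketch with an assumed capability value:

#include <errno.h>
#include <stdio.h>

#define CAP_KVD_SIZE 0x8000u	/* illustrative capability, not the real one */

static int check_priority(unsigned int prio)
{
	unsigned int max_priority = CAP_KVD_SIZE - 1;

	/* Mirror the hunk: reject with >= so the cap itself is excluded. */
	if (prio >= max_priority)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d %d\n", check_priority(1), check_priority(CAP_KVD_SIZE - 1));
	return 0;
}
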
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 715d24f..562c442 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -696,8 +696,8 @@
 static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
 	.type			= MLXSW_SP_FID_TYPE_DUMMY,
 	.fid_size		= sizeof(struct mlxsw_sp_fid),
-	.start_index		= MLXSW_SP_RFID_BASE - 1,
-	.end_index		= MLXSW_SP_RFID_BASE - 1,
+	.start_index		= VLAN_N_VID - 1,
+	.end_index		= VLAN_N_VID - 1,
 	.ops			= &mlxsw_sp_fid_dummy_ops,
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 4eb64cb..af673ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -282,30 +282,6 @@
 	kfree(bridge_port);
 }
 
-static bool
-mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
-				    bridge_port)
-{
-	struct net_device *dev = bridge_port->dev;
-	struct mlxsw_sp *mlxsw_sp;
-
-	if (is_vlan_dev(dev))
-		mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
-	else
-		mlxsw_sp = mlxsw_sp_lower_get(dev);
-
-	/* In case ports were pulled from out of a bridged LAG, then
-	 * it's possible the reference count isn't zero, yet the bridge
-	 * port should be destroyed, as it's no longer an upper of ours.
-	 */
-	if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
-		return true;
-	else if (bridge_port->ref_count == 0)
-		return true;
-	else
-		return false;
-}
-
 static struct mlxsw_sp_bridge_port *
 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
 			 struct net_device *brport_dev)
@@ -343,8 +319,7 @@
 {
 	struct mlxsw_sp_bridge_device *bridge_device;
 
-	bridge_port->ref_count--;
-	if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
+	if (--bridge_port->ref_count != 0)
 		return;
 	bridge_device = bridge_port->bridge_device;
 	mlxsw_sp_bridge_port_destroy(bridge_port);
@@ -1234,7 +1209,7 @@
 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
 {
 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
-			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
 }
 
 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
@@ -1246,7 +1221,7 @@
 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				     const char *mac, u16 fid, bool adding,
 				     enum mlxsw_reg_sfd_rec_action action,
-				     bool dynamic)
+				     enum mlxsw_reg_sfd_rec_policy policy)
 {
 	char *sfd_pl;
 	u8 num_rec;
@@ -1257,8 +1232,7 @@
 		return -ENOMEM;
 
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
-	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
-			      mac, fid, action, local_port);
+	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
 	if (err)
@@ -1277,7 +1251,8 @@
 				   bool dynamic)
 {
 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
-					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
+					 MLXSW_REG_SFD_REC_ACTION_NOP,
+					 mlxsw_sp_sfd_rec_policy(dynamic));
 }
 
 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
@@ -1285,7 +1260,7 @@
 {
 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
-					 false);
+					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
 }
 
 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
@@ -1761,7 +1736,7 @@
 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
 {
-	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
+	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
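
The switchdev hunks above replace a should-destroy heuristic with plain reference counting: decrement, and only the caller dropping the last reference destroys the object. A minimal sketch of that put-style release, with invented types:

#include <stdio.h>
#include <stdlib.h>

struct bridge_port {
	int ref_count;
};

static void bridge_port_destroy(struct bridge_port *port)
{
	puts("destroying bridge port");
	free(port);
}

static void bridge_port_put(struct bridge_port *port)
{
	/* Only the last put tears the object down. */
	if (--port->ref_count != 0)
		return;
	bridge_port_destroy(port);
}

int main(void)
{
	struct bridge_port *port = malloc(sizeof(*port));

	port->ref_count = 2;
	bridge_port_put(port);  /* still referenced, no-op */
	bridge_port_put(port);  /* last reference, destroys */
	return 0;
}
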
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index aaedf10..2083415 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -585,8 +585,7 @@
 
 		if (adapter->csr.flags &
 		   LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
-			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
-				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
+			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
@@ -599,12 +598,6 @@
 			/* map TX interrupt to vector */
 			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
 			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
-			if (flags &
-			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
-				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
-				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
-						  int_vec_en_auto_clr);
-			}
 
 			/* Remove TX interrupt from shared mask */
 			intr->vector_list[0].int_mask &= ~int_bit;
@@ -802,14 +795,8 @@
 	u32 mac_addr_hi = 0;
 	u32 mac_addr_lo = 0;
 	u32 data;
-	int ret;
 
 	netdev = adapter->netdev;
-	lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
-	ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
-				       0, 1000, 20000, 100);
-	if (ret)
-		return ret;
 
 	/* setup auto duplex, and speed detection */
 	data = lan743x_csr_read(adapter, MAC_CR);
@@ -968,13 +955,10 @@
 
 		memset(&ksettings, 0, sizeof(ksettings));
 		phy_ethtool_get_link_ksettings(netdev, &ksettings);
-		local_advertisement = phy_read(phydev, MII_ADVERTISE);
-		if (local_advertisement < 0)
-			return;
-
-		remote_advertisement = phy_read(phydev, MII_LPA);
-		if (remote_advertisement < 0)
-			return;
+		local_advertisement =
+			ethtool_adv_to_mii_adv_t(phydev->advertising);
+		remote_advertisement =
+			ethtool_adv_to_mii_adv_t(phydev->lp_advertising);
 
 		lan743x_phy_update_flowcontrol(adapter,
 					       ksettings.base.duplex,
@@ -1412,7 +1396,8 @@
 }
 
 static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
-				     unsigned int frame_length)
+				     unsigned int frame_length,
+				     int nr_frags)
 {
 	/* called only from within lan743x_tx_xmit_frame.
 	 * assuming tx->ring_lock has already been acquired.
@@ -1422,6 +1407,10 @@
 
 	/* wrap up previous descriptor */
 	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
+	if (nr_frags <= 0) {
+		tx->frame_data0 |= TX_DESC_DATA0_LS_;
+		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+	}
 	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
 	tx_descriptor->data0 = tx->frame_data0;
 
@@ -1526,8 +1515,11 @@
 	u32 tx_tail_flags = 0;
 
 	/* wrap up previous descriptor */
-	tx->frame_data0 |= TX_DESC_DATA0_LS_;
-	tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
+	    TX_DESC_DATA0_DTYPE_DATA_) {
+		tx->frame_data0 |= TX_DESC_DATA0_LS_;
+		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+	}
 
 	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
 	buffer_info = &tx->buffer_info[tx->frame_tail];
@@ -1612,7 +1604,7 @@
 	}
 
 	if (gso)
-		lan743x_tx_frame_add_lso(tx, frame_length);
+		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
 
 	if (nr_frags <= 0)
 		goto finish;
@@ -1906,7 +1898,17 @@
 	return ((++index) % rx->ring_size);
 }
 
-static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
+static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
+{
+	int length = 0;
+
+	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
+	return __netdev_alloc_skb(rx->adapter->netdev,
+				  length, GFP_ATOMIC | GFP_DMA);
+}
+
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+					struct sk_buff *skb)
 {
 	struct lan743x_rx_buffer_info *buffer_info;
 	struct lan743x_rx_descriptor *descriptor;
@@ -1915,9 +1917,7 @@
 	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
 	descriptor = &rx->ring_cpu_ptr[index];
 	buffer_info = &rx->buffer_info[index];
-	buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
-					      length,
-					      GFP_ATOMIC | GFP_DMA);
+	buffer_info->skb = skb;
 	if (!(buffer_info->skb))
 		return -ENOMEM;
 	buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
@@ -2064,8 +2064,19 @@
 		/* packet is available */
 		if (first_index == last_index) {
 			/* single buffer packet */
+			struct sk_buff *new_skb = NULL;
 			int packet_length;
 
+			new_skb = lan743x_rx_allocate_skb(rx);
+			if (!new_skb) {
+				/* failed to allocate next skb.
+				 * Memory is very low.
+				 * Drop this packet and reuse buffer.
+				 */
+				lan743x_rx_reuse_ring_element(rx, first_index);
+				goto process_extension;
+			}
+
 			buffer_info = &rx->buffer_info[first_index];
 			skb = buffer_info->skb;
 			descriptor = &rx->ring_cpu_ptr[first_index];
@@ -2085,7 +2096,7 @@
 			skb_put(skb, packet_length - 4);
 			skb->protocol = eth_type_trans(skb,
 						       rx->adapter->netdev);
-			lan743x_rx_allocate_ring_element(rx, first_index);
+			lan743x_rx_init_ring_element(rx, first_index, new_skb);
 		} else {
 			int index = first_index;
 
@@ -2098,26 +2109,23 @@
 			if (first_index <= last_index) {
 				while ((index >= first_index) &&
 				       (index <= last_index)) {
-					lan743x_rx_release_ring_element(rx,
-									index);
-					lan743x_rx_allocate_ring_element(rx,
-									 index);
+					lan743x_rx_reuse_ring_element(rx,
+								      index);
 					index = lan743x_rx_next_index(rx,
 								      index);
 				}
 			} else {
 				while ((index >= first_index) ||
 				       (index <= last_index)) {
-					lan743x_rx_release_ring_element(rx,
-									index);
-					lan743x_rx_allocate_ring_element(rx,
-									 index);
+					lan743x_rx_reuse_ring_element(rx,
+								      index);
 					index = lan743x_rx_next_index(rx,
 								      index);
 				}
 			}
 		}
 
+process_extension:
 		if (extension_index >= 0) {
 			descriptor = &rx->ring_cpu_ptr[extension_index];
 			buffer_info = &rx->buffer_info[extension_index];
@@ -2294,7 +2302,9 @@
 
 	rx->last_head = 0;
 	for (index = 0; index < rx->ring_size; index++) {
-		ret = lan743x_rx_allocate_ring_element(rx, index);
+		struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
+
+		ret = lan743x_rx_init_ring_element(rx, index, new_skb);
 		if (ret)
 			goto cleanup;
 	}
@@ -2722,8 +2732,9 @@
 	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
 		 "pci-%s", pci_name(adapter->pdev));
 
-	/* set to internal PHY id */
-	adapter->mdiobus->phy_mask = ~(u32)BIT(1);
+	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
+		/* LAN7430 uses internal phy at address 1 */
+		adapter->mdiobus->phy_mask = ~(u32)BIT(1);
 
 	/* register mdiobus */
 	ret = mdiobus_register(adapter->mdiobus);
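
The lan743x RX rework above separates skb allocation from ring-element init so the driver can allocate the replacement buffer before consuming the old one; if allocation fails, the frame is dropped and the existing buffer is recycled, so a ring slot is never left empty. A rough userspace sketch of that allocate-before-consume refill (buffer handling simplified, names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RING_SIZE 4

static char *ring[RING_SIZE];  /* stand-in for the RX buffer ring */

/* Deliver and free the old buffer only once a replacement exists. */
static void rx_process(int index)
{
	char *new_buf = malloc(64);

	if (!new_buf) {
		/* Allocation failed: drop the frame, reuse the old buffer. */
		memset(ring[index], 0, 64);
		return;
	}
	printf("delivering buffer %d\n", index);
	free(ring[index]);
	ring[index] = new_buf;   /* slot refilled, ring stays populated */
}

int main(void)
{
	for (int i = 0; i < RING_SIZE; i++)
		ring[i] = malloc(64);
	rx_process(0);
	for (int i = 0; i < RING_SIZE; i++)
		free(ring[i]);
	return 0;
}
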
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index ed4e298..0bdd3c4 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -733,7 +733,7 @@
 	}
 
 	return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
-				 ENTRYTYPE_NORMAL);
+				 ENTRYTYPE_LOCKED);
 }
 
 static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 398011c..bf4302e 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -807,7 +807,7 @@
 	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
 	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
 	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
-	u64 data0, data1 = 0, steer_ctrl = 0;
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
 	enum vxge_hw_status status;
 
 	status = vxge_hw_vpath_fw_api(vpath,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index bd19624..90148db 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -375,13 +375,29 @@
 		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
 			return -EOPNOTSUPP;
 
-		/* We need to store TCP flags in the IPv4 key space, thus
-		 * we need to ensure we include a IPv4 key layer if we have
-		 * not done so already.
+		/* We need to store TCP flags in either the IPv4 or IPv6 key
+		 * space, thus we need to ensure we include an IPv4/IPv6 key
+		 * layer if we have not done so already.
 		 */
-		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
-			key_layer |= NFP_FLOWER_LAYER_IPV4;
-			key_size += sizeof(struct nfp_flower_ipv4);
+		if (!key_basic)
+			return -EOPNOTSUPP;
+
+		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
+		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
+			switch (key_basic->n_proto) {
+			case cpu_to_be16(ETH_P_IP):
+				key_layer |= NFP_FLOWER_LAYER_IPV4;
+				key_size += sizeof(struct nfp_flower_ipv4);
+				break;
+
+			case cpu_to_be16(ETH_P_IPV6):
+				key_layer |= NFP_FLOWER_LAYER_IPV6;
+				key_size += sizeof(struct nfp_flower_ipv6);
+				break;
+
+			default:
+				return -EOPNOTSUPP;
+			}
 		}
 	}
 
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 052b3d2..c662c6f 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -912,7 +912,7 @@
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-static void __init get_mac_address(struct net_device *dev)
+static void get_mac_address(struct net_device *dev)
 {
 	struct w90p910_ether *ether = netdev_priv(dev);
 	struct platform_device *pdev;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 0ea141e..6547a9d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1125,7 +1125,8 @@
 		return -EINVAL;
 	}
 	val = nx_get_bios_version(adapter);
-	netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
+	if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios))
+		return -EIO;
 	if ((__force u32)val != bios) {
 		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
 				fw_name[fw_type]);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 2f69ee9..4dd82a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -473,19 +473,19 @@
 
 /* get pq index according to PQ_FLAGS */
 static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
-					   u32 pq_flags)
+					   unsigned long pq_flags)
 {
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 
 	/* Can't have multiple flags set here */
-	if (bitmap_weight((unsigned long *)&pq_flags,
+	if (bitmap_weight(&pq_flags,
 			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
-		DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+		DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
 		goto err;
 	}
 
 	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
-		DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
+		DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
 		goto err;
 	}
 
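The qed_dev hunk above widens pq_flags to unsigned long before passing its address to bitmap_weight(); the old cast of a u32's address to unsigned long * let a long-granular reader touch bytes past the variable on 64-bit builds. A sketch of doing the width conversion at the call boundary instead (popcount stands in for bitmap_weight()):

#include <stdio.h>

/* Count set bits over a native word, as bitmap_weight() does over longs. */
static int weight(unsigned long flags)
{
	return __builtin_popcountl(flags);
}

int main(void)
{
	/* Promote the 32-bit flag word to unsigned long at the boundary;
	 * casting a u32's address to unsigned long * would make a
	 * long-granular reader touch bytes beyond the variable on 64-bit.
	 */
	unsigned int pq_flags32 = 0x5;
	unsigned long pq_flags = pq_flags32;

	if (weight(pq_flags) > 1)
		printf("multiple pq flags set: 0x%lx\n", pq_flags);
	return 0;
}
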
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index a713826..bed8f48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12669,8 +12669,9 @@
 	MFW_DRV_MSG_BW_UPDATE10,
 	MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
 	MFW_DRV_MSG_BW_UPDATE11,
-	MFW_DRV_MSG_OEM_CFG_UPDATE,
+	MFW_DRV_MSG_RESERVED,
 	MFW_DRV_MSG_GET_TLV_REQ,
+	MFW_DRV_MSG_OEM_CFG_UPDATE,
 	MFW_DRV_MSG_MAX
 };
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 67c02ea93..64ac95c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -609,6 +609,10 @@
 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
 
+		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
 		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
 			  !!(accept_filter & QED_ACCEPT_BCAST));
 
@@ -744,6 +748,11 @@
 		return rc;
 	}
 
+	if (p_params->update_ctl_frame_check) {
+		p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
+		p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
+	}
+
 	/* Update mcast bins for VFs, PF doesn't use this functionality */
 	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
 
@@ -2207,7 +2216,7 @@
 			u16 num_queues = 0;
 
 			/* Since the feature controls only queue-zones,
-			 * make sure we have the contexts [rx, tx, xdp] to
+			 * make sure we have the contexts [rx, xdp, tcs] to
 			 * match.
 			 */
 			for_each_hwfn(cdev, i) {
@@ -2217,7 +2226,8 @@
 				u16 cids;
 
 				cids = hwfn->pf_params.eth_pf_params.num_cons;
-				num_queues += min_t(u16, l2_queues, cids / 3);
+				cids /= (2 + info->num_tc);
+				num_queues += min_t(u16, l2_queues, cids);
 			}
 
 			/* queues might theoretically be >256, but interrupts'
@@ -2688,7 +2698,8 @@
 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
 						 QED_ACCEPT_MCAST_UNMATCHED;
-		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+		accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+						 QED_ACCEPT_MCAST_UNMATCHED;
 	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
 		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8d80f10..7127d5a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -219,6 +219,9 @@
 	struct qed_rss_params		*rss_params;
 	struct qed_filter_accept_flags	accept_flags;
 	struct qed_sge_tpa_params	*sge_tpa_params;
+	u8				update_ctl_frame_check;
+	u8				mac_chk_en;
+	u8				ethtype_chk_en;
 };
 
 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 14ac9ca..015de1e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1592,6 +1592,10 @@
 	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
 	rx_prod.bd_prod = cpu_to_le16(bd_prod);
 	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+
+	/* Make sure chain element is updated before ringing the doorbell */
+	dma_wmb();
+
 	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
 }
 
@@ -2426,19 +2430,24 @@
 {
 	struct qed_ll2_tx_pkt_info pkt;
 	const skb_frag_t *frag;
+	u8 flags = 0, nr_frags;
 	int rc = -EINVAL, i;
 	dma_addr_t mapping;
 	u16 vlan = 0;
-	u8 flags = 0;
 
 	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
 		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
 		return -EINVAL;
 	}
 
-	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+	/* Cache number of fragments from SKB since SKB may be freed by
+	 * the completion routine after calling qed_ll2_prepare_tx_packet()
+	 */
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
+	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
 		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
-		       1 + skb_shinfo(skb)->nr_frags);
+		       1 + nr_frags);
 		return -EINVAL;
 	}
 
@@ -2460,7 +2469,7 @@
 	}
 
 	memset(&pkt, 0, sizeof(pkt));
-	pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
+	pkt.num_of_bds = 1 + nr_frags;
 	pkt.vlan = vlan;
 	pkt.bd_flags = flags;
 	pkt.tx_dest = QED_LL2_TX_DEST_NW;
@@ -2471,12 +2480,17 @@
 	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
 		pkt.remove_stag = true;
 
+	/* qed_ll2_prepare_tx_packet() may actually send the packet if
+	 * there are no fragments in the skb and subsequently the completion
+	 * routine may run and free the SKB, so do not dereference the SKB
+	 * beyond this point unless it has fragments.
+	 */
 	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
 				       &pkt, 1);
 	if (rc)
 		goto err;
 
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < nr_frags; i++) {
 		frag = &skb_shinfo(skb)->frags[i];
 
 		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
@@ -2485,6 +2499,7 @@
 		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
 			DP_NOTICE(cdev,
 				  "Unable to map frag - dropping packet\n");
+			rc = -ENOMEM;
 			goto err;
 		}
 
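The comments added above spell out the hazard: qed_ll2_prepare_tx_packet() may transmit immediately, the completion may free the SKB, and every later use must read a cached copy of nr_frags. A distilled sketch of snapshotting a field before a call that may free its owner (struct and functions invented; the free is unconditional here purely to make the hazard visible):

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int nr_frags;
};

/* May consume and free the packet, as qed_ll2_prepare_tx_packet() can
 * when the completion runs right away.
 */
static int prepare_tx(struct pkt *p)
{
	free(p);
	return 0;
}

static int xmit(struct pkt *p)
{
	/* Snapshot first: p must not be dereferenced after prepare_tx(). */
	int nr_frags = p->nr_frags;
	int rc = prepare_tx(p);

	if (rc)
		return rc;
	for (int i = 0; i < nr_frags; i++)
		printf("mapping frag %d\n", i);  /* cached count only */
	return 0;
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	p->nr_frags = 2;
	return xmit(p);
}
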
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 3157c0d..dae2896e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -380,6 +380,7 @@
  * @param p_hwfn
  */
 void qed_consq_free(struct qed_hwfn *p_hwfn);
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
 
 /**
  * @file
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 7106ad1..a0ee847 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -402,6 +402,11 @@
 
 	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
 
+	/* Attempt to post pending requests */
+	spin_lock_bh(&p_hwfn->p_spq->lock);
+	rc = qed_spq_pend_post(p_hwfn);
+	spin_unlock_bh(&p_hwfn->p_spq->lock);
+
 	return rc;
 }
 
@@ -745,7 +750,7 @@
 	return 0;
 }
 
-static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 {
 	struct qed_spq *p_spq = p_hwfn->p_spq;
 	struct qed_spq_entry *p_ent = NULL;
@@ -883,7 +888,6 @@
 	struct qed_spq_entry	*p_ent = NULL;
 	struct qed_spq_entry	*tmp;
 	struct qed_spq_entry	*found = NULL;
-	int			rc;
 
 	if (!p_hwfn)
 		return -EINVAL;
@@ -941,12 +945,7 @@
 		 */
 		qed_spq_return_entry(p_hwfn, found);
 
-	/* Attempt to post pending requests */
-	spin_lock_bh(&p_spq->lock);
-	rc = qed_spq_pend_post(p_hwfn);
-	spin_unlock_bh(&p_spq->lock);
-
-	return rc;
+	return 0;
 }
 
 int qed_consq_alloc(struct qed_hwfn *p_hwfn)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ca6290f..71a7af1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1969,7 +1969,9 @@
 	params.vport_id = vf->vport_id;
 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
 	params.mtu = vf->mtu;
-	params.check_mac = true;
+
+	/* Non-trusted VFs should enable control frame filtering */
+	params.check_mac = !vf->p_vf_info.is_trusted_configured;
 
 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
 	if (rc) {
@@ -5130,6 +5132,9 @@
 		params.opaque_fid = vf->opaque_fid;
 		params.vport_id = vf->vport_id;
 
+		params.update_ctl_frame_check = 1;
+		params.mac_chk_en = !vf_info->is_trusted_configured;
+
 		if (vf_info->rx_accept_mode & mask) {
 			flags->update_rx_mode_config = 1;
 			flags->rx_accept_filter = vf_info->rx_accept_mode;
@@ -5147,7 +5152,8 @@
 		}
 
 		if (flags->update_rx_mode_config ||
-		    flags->update_tx_mode_config)
+		    flags->update_tx_mode_config ||
+		    params.update_ctl_frame_check)
 			qed_sp_vport_update(hwfn, &params,
 					    QED_SPQ_MODE_EBLOCK, NULL);
 	}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index be118d0..6ab3fb0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -261,6 +261,7 @@
 	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
 	struct vf_pf_resc_request *p_resc;
+	u8 retry_cnt = VF_ACQUIRE_THRESH;
 	bool resources_acquired = false;
 	struct vfpf_acquire_tlv *req;
 	int rc = 0, attempts = 0;
@@ -314,6 +315,15 @@
 
 		/* send acquire request */
 		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+		/* Retry the acquire in case of a VF-PF HW channel timeout */
+		if (retry_cnt && rc == -EBUSY) {
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "VF retrying to acquire due to VPC timeout\n");
+			retry_cnt--;
+			continue;
+		}
+
 		if (rc)
 			goto exit;
 
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 6a4d266..d242a57 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -489,6 +489,9 @@
 
 /* Datapath functions definition */
 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+		      struct net_device *sb_dev,
+		      select_queue_fallback_t fallback);
 netdev_features_t qede_features_check(struct sk_buff *skb,
 				      struct net_device *dev,
 				      netdev_features_t features);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1a78027..a96da16 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1695,6 +1695,19 @@
 	return NETDEV_TX_OK;
 }
 
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+		      struct net_device *sb_dev,
+		      select_queue_fallback_t fallback)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	int total_txq;
+
+	total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
+
+	return QEDE_TSS_COUNT(edev) ?
+		fallback(dev, skb, NULL) % total_txq :  0;
+}
+
 /* 8B udp header + 8B base tunnel header + 32B option length */
 #define QEDE_MAX_TUN_HDR_LEN 48
 
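qede_select_queue() above folds the stack fallback's pick into the device's real TX queue count, QEDE_TSS_COUNT * num_tc, so multi-TC configurations never index past the allocated queues. A sketch of that clamp with assumed counts:

#include <stdio.h>

#define TSS_COUNT 8   /* illustrative per-TC queue count */
#define NUM_TC    3   /* illustrative traffic-class count */

/* Hash-based pick a stack fallback might make; may exceed total queues. */
static unsigned int fallback_pick(unsigned int hash)
{
	return hash;
}

static unsigned int select_queue(unsigned int hash)
{
	unsigned int total_txq = TSS_COUNT * NUM_TC;

	/* Clamp the fallback's choice into the device's real queue range. */
	return TSS_COUNT ? fallback_pick(hash) % total_txq : 0;
}

int main(void)
{
	printf("queue for hash 100: %u\n", select_queue(100));
	return 0;
}
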
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 46d0f2e..f3d9c40 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -631,6 +631,7 @@
 	.ndo_open = qede_open,
 	.ndo_stop = qede_close,
 	.ndo_start_xmit = qede_start_xmit,
+	.ndo_select_queue = qede_select_queue,
 	.ndo_set_rx_mode = qede_set_rx_mode,
 	.ndo_set_mac_address = qede_set_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
@@ -666,6 +667,7 @@
 	.ndo_open = qede_open,
 	.ndo_stop = qede_close,
 	.ndo_start_xmit = qede_start_xmit,
+	.ndo_select_queue = qede_select_queue,
 	.ndo_set_rx_mode = qede_set_rx_mode,
 	.ndo_set_mac_address = qede_set_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
@@ -684,6 +686,7 @@
 	.ndo_open = qede_open,
 	.ndo_stop = qede_close,
 	.ndo_start_xmit = qede_start_xmit,
+	.ndo_select_queue = qede_select_queue,
 	.ndo_set_rx_mode = qede_set_rx_mode,
 	.ndo_set_mac_address = qede_set_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 3075bfa..4d47bd1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * RMNET configuration engine
  *
@@ -216,6 +216,10 @@
 		synchronize_rcu();
 		kfree(ep);
 	}
+
+	if (!port->nr_rmnet_devs)
+		qmi_rmnet_qmi_exit(port->qmi_info, port);
+
 	rmnet_unregister_real_device(real_dev, port);
 
 	unregister_netdevice_queue(dev, head);
@@ -236,6 +240,7 @@
 	ASSERT_RTNL();
 
 	port = rmnet_get_port_rtnl(dev);
+	qmi_rmnet_qmi_exit(port->qmi_info, port);
 
 	rmnet_unregister_bridge(dev, port);
 
@@ -250,8 +255,6 @@
 
 	unregister_netdevice_many(&list);
 
-	qmi_rmnet_qmi_exit(port->qmi_info, port);
-
 	rmnet_unregister_real_device(real_dev, port);
 }
 
@@ -554,6 +557,7 @@
 
 	*tx = 0;
 	*rx = 0;
+	rcu_read_lock();
 	hash_for_each(((struct rmnet_port *)port)->muxed_ep, bkt, ep, hlnode) {
 		priv = netdev_priv(ep->egress_dev);
 		for_each_possible_cpu(cpu) {
@@ -565,6 +569,7 @@
 			} while (u64_stats_fetch_retry_irq(&ps->syncp, start));
 		}
 	}
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(rmnet_get_packets);
 
@@ -601,6 +606,30 @@
 }
 EXPORT_SYMBOL(rmnet_enable_all_flows);
 
+bool rmnet_all_flows_enabled(void *port)
+{
+	struct rmnet_endpoint *ep;
+	unsigned long bkt;
+	bool ret = true;
+
+	if (unlikely(!port))
+		return true;
+
+	rcu_read_lock();
+	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
+			  bkt, ep, hlnode) {
+		if (!qmi_rmnet_all_flows_enabled(ep->egress_dev)) {
+			ret = false;
+			goto out;
+		}
+	}
+out:
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(rmnet_all_flows_enabled);
+
 int rmnet_get_powersave_notif(void *port)
 {
 	if (!port)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 9901c13..d2a667d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -28,8 +28,6 @@
 	u64 dl_hdr_count;
 	u64 dl_hdr_total_bytes;
 	u64 dl_hdr_total_pkts;
-	u64 dl_hdr_avg_bytes;
-	u64 dl_hdr_avg_pkts;
 	u64 dl_trl_last_seq;
 	u64 dl_trl_count;
 };
@@ -58,6 +56,7 @@
 	struct timespec agg_time;
 	struct timespec agg_last;
 	struct hrtimer hrtimer;
+	struct work_struct agg_wq;
 
 	void *qmi_info;
 
@@ -130,6 +129,11 @@
 	void __rcu *qos_info;
 };
 
+enum rmnet_dl_marker_prio {
+	RMNET_PERF,
+	RMNET_SHS,
+};
+
 enum rmnet_trace_func {
 	RMNET_MODULE,
 	NW_STACK_MODULE,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 063289d..b606760 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -83,6 +83,11 @@
 			   struct rmnet_port *port) __rcu __read_mostly;
 EXPORT_SYMBOL(rmnet_shs_skb_entry);
 
+/* SHS hook handler for work queue */
+int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb,
+			      struct rmnet_port *port) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_shs_skb_entry_wq);
+
 /* Generic handler */
 
 void
@@ -125,6 +130,59 @@
 }
 EXPORT_SYMBOL(rmnet_deliver_skb);
 
+/* Important to note, port cannot be used here if it has gone stale */
+void
+rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
+		     enum rmnet_packet_context ctx)
+{
+	int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port);
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+
+	trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
+			0xDEF, 0xDEF, (void *)skb, NULL);
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	rmnet_vnd_rx_fixup(skb->dev, skb->len);
+
+	skb->pkt_type = PACKET_HOST;
+	skb_set_mac_header(skb, 0);
+
+	/* packets coming from work queue context due to packet flush timer
+	 * must go through the special workqueue path in the SHS driver
+	 */
+	rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
+				   rcu_dereference(rmnet_shs_skb_entry_wq);
+	if (rmnet_shs_stamp) {
+		rmnet_shs_stamp(skb, port);
+		return;
+	}
+
+	if (ctx == RMNET_NET_RX_CTX) {
+		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
+			if (!rmnet_check_skb_can_gro(skb) &&
+			    port->dl_marker_flush >= 0) {
+				struct napi_struct *napi =
+					get_current_napi_context();
+				napi_gro_receive(napi, skb);
+				port->dl_marker_flush++;
+			} else {
+				netif_receive_skb(skb);
+			}
+		} else {
+			if (!rmnet_check_skb_can_gro(skb))
+				gro_cells_receive(&priv->gro_cells, skb);
+			else
+				netif_receive_skb(skb);
+		}
+	} else {
+		if ((port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) &&
+		    port->dl_marker_flush >= 0)
+			port->dl_marker_flush++;
+		gro_cells_receive(&priv->gro_cells, skb);
+	}
+}
+EXPORT_SYMBOL(rmnet_deliver_skb_wq);
+
 /* Deliver a list of skbs after undoing coalescing */
 static void rmnet_deliver_skb_list(struct sk_buff_head *head,
 				   struct rmnet_port *port)
@@ -154,6 +212,7 @@
 
 	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
 	if (qmap->cd_bit) {
+		qmi_rmnet_set_dl_msg_active(port);
 		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
 			if (!rmnet_map_flow_command(skb, port, false))
 				return;
@@ -182,7 +241,7 @@
 	if (qmap->next_hdr &&
 	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
 				  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
-		if (rmnet_map_process_next_hdr_packet(skb, &list))
+		if (rmnet_map_process_next_hdr_packet(skb, &list, len))
 			goto free_skb;
 	} else {
 		/* We only have the main QMAP header to worry about */
@@ -252,10 +311,15 @@
 		struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
 
 		skb_shinfo(skb)->frag_list = NULL;
-		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
+		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
 			__rmnet_map_ingress_handler(skbn, port);
 
+			if (skbn == skb)
+				goto next_skb;
+		}
+
 		consume_skb(skb);
+next_skb:
 		skb = skb_frag;
 	}
 }
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
index 8ce6fe6c..09a2954 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013, 2016-2019 The Linux Foundation. All rights reserved.
  *
  * RMNET Data ingress/egress handler
  *
@@ -10,8 +10,15 @@
 
 #include "rmnet_config.h"
 
+enum rmnet_packet_context {
+	RMNET_NET_RX_CTX,
+	RMNET_WQ_CTX,
+};
+
 void rmnet_egress_handler(struct sk_buff *skb);
 void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
+void rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
+			  enum rmnet_packet_context ctx);
 void rmnet_set_skb_proto(struct sk_buff *skb);
 rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
 					       struct rmnet_port *port);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 428423a..03ce4f3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -166,6 +166,7 @@
 } __aligned(1);
 
 struct rmnet_map_dl_ind {
+	u8 priority;
 	void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *dlhdr);
 	void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *dltrl);
 	struct list_head list;
@@ -238,7 +239,8 @@
 				      struct net_device *orig_dev,
 				      int csum_type);
 int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
-				      struct sk_buff_head *list);
+				      struct sk_buff_head *list,
+				      u16 len);
 int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
 void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
 void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index cb8bdf5..33da4bf 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -136,15 +136,6 @@
 	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
 	port->stats.dl_hdr_count++;
 
-	if (unlikely(!(port->stats.dl_hdr_count)))
-		port->stats.dl_hdr_count = 1;
-
-	port->stats.dl_hdr_avg_bytes = port->stats.dl_hdr_total_bytes /
-				       port->stats.dl_hdr_count;
-
-	port->stats.dl_hdr_avg_pkts = port->stats.dl_hdr_total_pkts /
-				      port->stats.dl_hdr_count;
-
 	rmnet_map_dl_hdr_notify(port, dlhdr);
 	if (rmnet_perf) {
 		unsigned int pull_size;
@@ -261,11 +252,38 @@
 int rmnet_map_dl_ind_register(struct rmnet_port *port,
 			      struct rmnet_map_dl_ind *dl_ind)
 {
+	struct rmnet_map_dl_ind *dl_ind_iterator;
+	bool empty_ind_list = true;
+
 	if (!port || !dl_ind || !dl_ind->dl_hdr_handler ||
 	    !dl_ind->dl_trl_handler)
 		return -EINVAL;
 
-	list_add_rcu(&dl_ind->list, &port->dl_list);
+	list_for_each_entry_rcu(dl_ind_iterator, &port->dl_list, list) {
+		empty_ind_list = false;
+		if (dl_ind_iterator->priority < dl_ind->priority) {
+			if (dl_ind_iterator->list.next) {
+				if (dl_ind->priority
+				    < list_entry_rcu(dl_ind_iterator->list.next,
+				    typeof(*dl_ind_iterator), list)->priority) {
+					list_add_rcu(&dl_ind->list,
+						     &dl_ind_iterator->list);
+					break;
+				}
+			} else {
+				list_add_rcu(&dl_ind->list,
+					     &dl_ind_iterator->list);
+				break;
+			}
+		} else {
+			list_add_tail_rcu(&dl_ind->list,
+					  &dl_ind_iterator->list);
+			break;
+		}
+	}
+
+	if (empty_ind_list)
+		list_add_rcu(&dl_ind->list, &port->dl_list);
 
 	return 0;
 }
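
The registration path above keeps the DL-indication handler list ordered by the new priority field, so higher-priority consumers (lower values, per the rmnet_dl_marker_prio enum) are walked first. A plain singly-linked sorted insert conveys the idea; the kernel version additionally uses RCU list primitives:

#include <stdio.h>
#include <stdlib.h>

struct handler {
	int priority;            /* lower value runs earlier */
	struct handler *next;
};

/* Insert keeping the list sorted by ascending priority value. */
static void handler_register(struct handler **head, struct handler *h)
{
	struct handler **pos = head;

	while (*pos && (*pos)->priority < h->priority)
		pos = &(*pos)->next;
	h->next = *pos;
	*pos = h;
}

int main(void)
{
	struct handler *head = NULL;
	struct handler a = { .priority = 1 }, b = { .priority = 0 };

	handler_register(&head, &a);
	handler_register(&head, &b);    /* lands ahead of a */
	for (struct handler *h = head; h; h = h->next)
		printf("priority %d\n", h->priority);
	return 0;
}
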
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 4bbefd9..e7b25ad 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -316,7 +316,7 @@
 {
 	struct rmnet_map_header *maph;
 	struct sk_buff *skbn;
-	unsigned char *data = rmnet_map_data_ptr(skb);
+	unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL;
 	u32 packet_len;
 
 	if (skb->len == 0)
@@ -327,8 +327,14 @@
 
 	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
 		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
-	else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5)
-		packet_len += sizeof(struct rmnet_map_v5_csum_header);
+	else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
+		if (!maph->cd_bit) {
+			packet_len += sizeof(struct rmnet_map_v5_csum_header);
+
+			/* Coalescing headers require MAPv5 */
+			next_hdr = data + sizeof(*maph);
+		}
+	}
 
 	if (((int)skb->len - (int)packet_len) < 0)
 		return NULL;
@@ -337,6 +343,11 @@
 	if (ntohs(maph->pkt_len) == 0)
 		return NULL;
 
+	if (next_hdr &&
+	    ((struct rmnet_map_v5_coal_header *)next_hdr)->header_type ==
+	     RMNET_MAP_HEADER_TYPE_COALESCING)
+		return skb;
+
 	if (skb_is_nonlinear(skb)) {
 		skb_frag_t *frag0 = skb_shinfo(skb)->frags;
 		struct page *page = skb_frag_page(frag0);
@@ -779,9 +790,30 @@
 	if (iph->version == 4) {
 		protocol = iph->protocol;
 		ip_len = iph->ihl * 4;
+
+		/* Don't allow coalescing of any packets with IP options */
+		if (iph->ihl != 5)
+			gro = false;
 	} else if (iph->version == 6) {
+		__be16 frag_off;
+
 		protocol = ((struct ipv6hdr *)iph)->nexthdr;
-		ip_len = sizeof(struct ipv6hdr);
+		ip_len = ipv6_skip_exthdr(coal_skb, sizeof(struct ipv6hdr),
+					  &protocol, &frag_off);
+
+		/* If we run into a problem, or this has a fragment header
+		 * (which should technically not be possible, if the HW
+		 * works as intended...), bail.
+		 */
+		if (ip_len < 0 || frag_off) {
+			priv->stats.coal.coal_ip_invalid++;
+			return;
+		} else if (ip_len > sizeof(struct ipv6hdr)) {
+			/* Don't allow coalescing of any packets with IPv6
+			 * extension headers.
+			 */
+			gro = false;
+		}
 	} else {
 		priv->stats.coal.coal_ip_invalid++;
 		return;
@@ -818,6 +850,7 @@
 						return;
 
 					__skb_queue_tail(list, new_skb);
+					start += pkt_len * gro_count;
 					gro_count = 0;
 				}
 
@@ -858,7 +891,7 @@
 
 			__skb_queue_tail(list, new_skb);
 
-			start += pkt_len;
+			start += pkt_len * gro_count;
 			start_pkt_num = total_pkt + 1;
 			gro_count = 0;
 		}
@@ -968,7 +1001,8 @@
 
 /* Process a QMAPv5 packet header */
 int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
-				      struct sk_buff_head *list)
+				      struct sk_buff_head *list,
+				      u16 len)
 {
 	struct rmnet_priv *priv = netdev_priv(skb->dev);
 	u64 nlo_err_mask;
@@ -995,6 +1029,11 @@
 		pskb_pull(skb,
 			  (sizeof(struct rmnet_map_header) +
 			   sizeof(struct rmnet_map_v5_csum_header)));
+
+		/* Remove padding only for csum offload packets.
+		 * Coalesced packets should never have padding.
+		 */
+		pskb_trim(skb, len);
 		__skb_queue_tail(list, skb);
 		break;
 	default:
@@ -1005,11 +1044,6 @@
 	return rc;
 }
 
-struct rmnet_agg_work {
-	struct work_struct work;
-	struct rmnet_port *port;
-};
-
 long rmnet_agg_time_limit __read_mostly = 1000000L;
 long rmnet_agg_bypass_time __read_mostly = 10000000L;
 
@@ -1043,22 +1077,17 @@
 
 static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
 {
-	struct rmnet_agg_work *real_work;
+	struct sk_buff *skb = NULL;
 	struct rmnet_port *port;
 	unsigned long flags;
-	struct sk_buff *skb;
-	int agg_count = 0;
 
-	real_work = (struct rmnet_agg_work *)work;
-	port = real_work->port;
-	skb = NULL;
+	port = container_of(work, struct rmnet_port, agg_wq);
 
 	spin_lock_irqsave(&port->agg_lock, flags);
 	if (likely(port->agg_state == -EINPROGRESS)) {
 		/* Buffer may have already been shipped out */
 		if (likely(port->agg_skb)) {
 			skb = port->agg_skb;
-			agg_count = port->agg_count;
 			port->agg_skb = NULL;
 			port->agg_count = 0;
 			memset(&port->agg_time, 0, sizeof(struct timespec));
@@ -1069,27 +1098,15 @@
 	spin_unlock_irqrestore(&port->agg_lock, flags);
 	if (skb)
 		dev_queue_xmit(skb);
-
-	kfree(work);
 }
 
 enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
 {
-	struct rmnet_agg_work *work;
 	struct rmnet_port *port;
 
 	port = container_of(t, struct rmnet_port, hrtimer);
 
-	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		port->agg_state = 0;
-
-		return HRTIMER_NORESTART;
-	}
-
-	INIT_WORK(&work->work, rmnet_map_flush_tx_packet_work);
-	work->port = port;
-	schedule_work((struct work_struct *)work);
+	schedule_work(&port->agg_wq);
 	return HRTIMER_NORESTART;
 }
 
@@ -1111,15 +1128,16 @@
 		 * sparse, don't aggregate. We will need to tune this later
 		 */
 		diff = timespec_sub(port->agg_last, last);
+		size = port->egress_agg_size - skb->len;
 
-		if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time) {
+		if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
+		    size <= 0) {
 			spin_unlock_irqrestore(&port->agg_lock, flags);
 			skb->protocol = htons(ETH_P_MAP);
 			dev_queue_xmit(skb);
 			return;
 		}
 
-		size = port->egress_agg_size - skb->len;
 		port->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
 		if (!port->agg_skb) {
 			port->agg_skb = 0;
@@ -1174,6 +1192,8 @@
 	port->egress_agg_size = 8192;
 	port->egress_agg_count = 20;
 	spin_lock_init(&port->agg_lock);
+
+	INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
 }
 
 void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
@@ -1181,6 +1201,8 @@
 	unsigned long flags;
 
 	hrtimer_cancel(&port->hrtimer);
+	cancel_work_sync(&port->agg_wq);
+
 	spin_lock_irqsave(&port->agg_lock, flags);
 	if (port->agg_state == -EINPROGRESS) {
 		if (port->agg_skb) {
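
The aggregation rework above drops the per-fire kmalloc of a wrapper struct — which could fail in the hrtimer's atomic context — by embedding the work item in rmnet_port and recovering the owner with container_of() in the handler. A self-contained sketch of that idiom:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct port {
	int agg_count;
	struct work agg_wq;   /* embedded, so no per-fire allocation */
};

/* The handler only receives the work pointer; recover the owning port. */
static void flush_work_fn(struct work *w)
{
	struct port *port = container_of(w, struct port, agg_wq);

	printf("flushing port with agg_count=%d\n", port->agg_count);
}

int main(void)
{
	struct port p = { .agg_count = 5 };

	flush_work_fn(&p.agg_wq);
	return 0;
}
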
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 2c4c825..e6bba00 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -16,6 +16,7 @@
 #include "rmnet_vnd.h"
 
 #include <soc/qcom/qmi_rmnet.h>
+#include <soc/qcom/rmnet_qmi.h>
 #include <trace/events/rmnet.h>
 
 /* RX/TX Fixup */
@@ -65,6 +66,7 @@
 		trace_rmnet_xmit_skb(skb);
 		rmnet_egress_handler(skb);
 		qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
+		qmi_rmnet_work_maybe_restart(rmnet_get_rmnet_port(dev));
 	} else {
 		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
 		kfree_skb(skb);
@@ -219,8 +221,6 @@
 	"DL header pkts received",
 	"DL header total bytes received",
 	"DL header total pkts received",
-	"DL header average bytes",
-	"DL header average packets",
 	"DL trailer last seen sequence",
 	"DL trailer pkts received",
 };
@@ -311,10 +311,6 @@
 
 	rmnet_dev->needs_free_netdev = true;
 	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
-
-	/* This perm addr will be used as interface identifier by IPv6 */
-	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
-	eth_random_addr(rmnet_dev->perm_addr);
 }
 
 /* Exposed API */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4930e03..5f45ffe 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -214,6 +214,8 @@
 };
 
 static const struct pci_device_id rtl8169_pci_tbl[] = {
+	{ PCI_VDEVICE(REALTEK,	0x2502), RTL_CFG_1 },
+	{ PCI_VDEVICE(REALTEK,	0x2600), RTL_CFG_1 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8161), 0, 0, RTL_CFG_1 },
@@ -717,6 +719,7 @@
 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_SOFTDEP("pre: realtek");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FIRMWARE_8168D_1);
 MODULE_FIRMWARE(FIRMWARE_8168D_2);
@@ -1528,6 +1531,8 @@
 	}
 
 	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+
+	device_set_wakeup_enable(tp_to_dev(tp), wolopts);
 }
 
 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1549,8 +1554,6 @@
 
 	rtl_unlock_work(tp);
 
-	device_set_wakeup_enable(d, tp->saved_wolopts);
-
 	pm_runtime_put_noidle(d);
 
 	return 0;
@@ -1730,11 +1733,13 @@
 
 static bool rtl8169_update_counters(struct rtl8169_private *tp)
 {
+	u8 val = RTL_R8(tp, ChipCmd);
+
 	/*
 	 * Some chips are unable to dump tally counters when the receiver
-	 * is disabled.
+	 * is disabled. If 0xff, the chip may be in a PCI power-save state.
 	 */
-	if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
+	if (!(val & CmdRxEnb) || val == 0xff)
 		return true;
 
 	return rtl8169_do_counters(tp, CounterDump);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index d6f7539..5f092bb 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -344,7 +344,7 @@
 	int i;
 
 	priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
-		ETH_HLEN + VLAN_HLEN;
+		ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -459,7 +459,7 @@
 		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
 
 	/* Set FIFO size */
-	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
 
 	/* Timestamp enable */
 	ravb_write(ndev, TCCR_TFEN, TCCR);
@@ -525,13 +525,15 @@
 {
 	u8 *hw_csum;
 
-	/* The hardware checksum is 2 bytes appended to packet data */
-	if (unlikely(skb->len < 2))
+	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
+	 * appended to packet data
+	 */
+	if (unlikely(skb->len < sizeof(__sum16)))
 		return;
-	hw_csum = skb_tail_pointer(skb) - 2;
+	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
 	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
 	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb_trim(skb, skb->len - 2);
+	skb_trim(skb, skb->len - sizeof(__sum16));
 }
 
 /* Packet receive function for Ethernet AVB */
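
The ravb change above sizes the RX buffers and the trailer handling off sizeof(__sum16): the hardware appends a 2-byte checksum after the payload, which is read unaligned-safely and then trimmed. A userspace sketch of pulling a little-endian 2-byte trailer off a frame:

#include <stdint.h>
#include <stdio.h>

/* The hardware appends a 2-byte checksum after the frame payload. */
static int rx_csum(const uint8_t *data, size_t *len, uint16_t *csum)
{
	const uint8_t *p;

	if (*len < 2)
		return -1;
	p = data + *len - 2;
	/* Little-endian, unaligned-safe read, like get_unaligned_le16(). */
	*csum = (uint16_t)p[0] | ((uint16_t)p[1] << 8);
	*len -= 2;                      /* trim the trailer off the frame */
	return 0;
}

int main(void)
{
	uint8_t frame[] = { 1, 2, 3, 0x34, 0x12 };
	size_t len = sizeof(frame);
	uint16_t csum;

	if (!rx_csum(frame, &len, &csum))
		printf("payload %zu bytes, csum 0x%04x\n", len, csum);
	return 0;
}
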
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 7b92336..3b174ea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1342,8 +1342,10 @@
 	}
 
 	ret = phy_power_on(bsp_priv, true);
-	if (ret)
+	if (ret) {
+		gmac_clk_enable(bsp_priv, false);
 		return ret;
+	}
 
 	pm_runtime_enable(dev);
 	pm_runtime_get_sync(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 20299f6..736e296 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -241,15 +241,18 @@
 static int dwmac4_rx_check_timestamp(void *desc)
 {
 	struct dma_desc *p = (struct dma_desc *)desc;
+	unsigned int rdes0 = le32_to_cpu(p->des0);
+	unsigned int rdes1 = le32_to_cpu(p->des1);
+	unsigned int rdes3 = le32_to_cpu(p->des3);
 	u32 own, ctxt;
 	int ret = 1;
 
-	own = p->des3 & RDES3_OWN;
-	ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+	own = rdes3 & RDES3_OWN;
+	ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
 		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
 
 	if (likely(!own && ctxt)) {
-		if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+		if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
 			/* Corrupted value */
 			ret = -EINVAL;
 		else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 2090903..1c39305 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -260,6 +260,7 @@
 				  struct stmmac_extra_stats *x, u32 chan)
 {
 	u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
+	u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
 	int ret = 0;
 
 	/* ABNORMAL interrupts */
@@ -279,8 +280,7 @@
 		x->normal_irq_n++;
 
 		if (likely(intr_status & XGMAC_RI)) {
-			u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
-			if (likely(value & XGMAC_RIE)) {
+			if (likely(intr_en & XGMAC_RIE)) {
 				x->rx_normal_irq_n++;
 				ret |= handle_rx;
 			}
@@ -292,7 +292,7 @@
 	}
 
 	/* Clear interrupts */
-	writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
+	writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
 
 	return ret;
 }
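
The interrupt handler above stops acking every status bit (~0x0) and instead writes back intr_en & intr_status, so events that fired while disabled stay latched for whichever path is meant to consume them. A sketch against a write-1-to-clear register model (values invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t ch_status = 0x00000041;  /* RI plus an unhandled event */
static uint32_t ch_int_en = 0x00000001;  /* only RI enabled */

/* Write-1-to-clear register model. */
static void clear_status(uint32_t mask)
{
	ch_status &= ~mask;
}

int main(void)
{
	uint32_t status = ch_status;
	uint32_t en = ch_int_en;

	/* Ack only what we were meant to handle, not everything raised. */
	clear_status(en & status);
	printf("status after ack: 0x%08x\n", ch_status);  /* 0x40 survives */
	return 0;
}
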
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 5710864..4d5fb4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -692,33 +692,38 @@
 				     struct ethtool_eee *edata)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
 
-	priv->eee_enabled = edata->eee_enabled;
-
-	if (!priv->eee_enabled)
+	if (!edata->eee_enabled) {
 		stmmac_disable_eee_mode(priv);
-	else {
+	} else {
 		/* We are asking for enabling the EEE but it is safe
 		 * to verify all by invoking the eee_init function.
 		 * In case of failure it will return an error.
 		 */
-		priv->eee_enabled = stmmac_eee_init(priv);
-		if (!priv->eee_enabled)
+		edata->eee_enabled = stmmac_eee_init(priv);
+		if (!edata->eee_enabled)
 			return -EOPNOTSUPP;
-
-		/* Do not change tx_lpi_timer in case of failure */
-		priv->tx_lpi_timer = edata->tx_lpi_timer;
 	}
 
-	return phy_ethtool_set_eee(dev->phydev, edata);
+	ret = phy_ethtool_set_eee(dev->phydev, edata);
+	if (ret)
+		return ret;
+
+	priv->eee_enabled = edata->eee_enabled;
+	priv->tx_lpi_timer = edata->tx_lpi_timer;
+	return 0;
 }
 
 static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
 {
 	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 
-	if (!clk)
-		return 0;
+	if (!clk) {
+		clk = priv->plat->clk_ref_rate;
+		if (!clk)
+			return 0;
+	}
 
 	return (usec * (clk / 1000000)) / 256;
 }
@@ -727,8 +732,11 @@
 {
 	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 
-	if (!clk)
-		return 0;
+	if (!clk) {
+		clk = priv->plat->clk_ref_rate;
+		if (!clk)
+			return 0;
+	}
 
 	return (riwt * 256) / (clk / 1000000);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 99ea5c4..43ab9e9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3028,10 +3028,22 @@
 
 	tx_q = &priv->tx_queue[queue];
 
+	if (priv->tx_path_in_lpi_mode)
+		stmmac_disable_eee_mode(priv);
+
 	/* Manage oversized TCP frames for GMAC4 device */
 	if (skb_is_gso(skb) && priv->tso) {
-		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+			/*
+			 * There is no way to determine the number of
+			 * TSO-capable queues. Always use queue 0, because
+			 * if TSO is supported then at least this one
+			 * will be capable.
+			 */
+			skb_set_queue_mapping(skb, 0);
+
 			return stmmac_tso_xmit(skb, dev);
+		}
 	}
 
 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3046,9 +3058,6 @@
 		return NETDEV_TX_BUSY;
 	}
 
-	if (priv->tx_path_in_lpi_mode)
-		stmmac_disable_eee_mode(priv);
-
 	entry = tx_q->cur_tx;
 	first_entry = entry;
 	WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -3522,27 +3531,28 @@
 	struct stmmac_channel *ch =
 		container_of(napi, struct stmmac_channel, napi);
 	struct stmmac_priv *priv = ch->priv_data;
-	int work_done = 0, work_rem = budget;
+	int work_done, rx_done = 0, tx_done = 0;
 	u32 chan = ch->index;
 
 	priv->xstats.napi_poll++;
 
-	if (ch->has_tx) {
-		int done = stmmac_tx_clean(priv, work_rem, chan);
+	if (ch->has_tx)
+		tx_done = stmmac_tx_clean(priv, budget, chan);
+	if (ch->has_rx)
+		rx_done = stmmac_rx(priv, budget, chan);
 
-		work_done += done;
-		work_rem -= done;
-	}
+	work_done = max(rx_done, tx_done);
+	work_done = min(work_done, budget);
 
-	if (ch->has_rx) {
-		int done = stmmac_rx(priv, work_rem, chan);
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
+		int stat;
 
-		work_done += done;
-		work_rem -= done;
-	}
-
-	if (work_done < budget && napi_complete_done(napi, work_done))
 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+		stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+						   &priv->xstats, chan);
+		if (stat && napi_reschedule(napi))
+			stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+	}
 
 	return work_done;
 }
@@ -4191,6 +4201,18 @@
 			return ret;
 	}
 
+	/* Rx Watchdog is available in cores newer than 3.40.
+	 * In some cases, for example on buggy HW, this feature
+	 * has to be disabled; this can be done by passing the
+	 * riwt_off field from the platform.
+	 */
+	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+		priv->use_riwt = 1;
+		dev_info(priv->device,
+			 "Enable RX Mitigation via HW Watchdog Timer\n");
+	}
+
 	return 0;
 }
 
@@ -4247,6 +4269,7 @@
 	priv->wq = create_singlethread_workqueue("stmmac_wq");
 	if (!priv->wq) {
 		dev_err(priv->device, "failed to create workqueue\n");
+		ret = -ENOMEM;
 		goto error_wq;
 	}
 
@@ -4322,18 +4345,6 @@
 	if (flow_ctrl)
 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
 
-	/* Rx Watchdog is available in the COREs newer than the 3.40.
-	 * In some case, for example on bugged HW this feature
-	 * has to be disable and this can be done by passing the
-	 * riwt_off field from the platform.
-	 */
-	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
-	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
-		priv->use_riwt = 1;
-		dev_info(priv->device,
-			 "Enable RX Mitigation via HW Watchdog Timer\n");
-	}
-
 	/* Setup channels NAPI */
 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index c54a50d..d819e8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -299,7 +299,17 @@
  */
 static void stmmac_pci_remove(struct pci_dev *pdev)
 {
+	int i;
+
 	stmmac_dvr_remove(&pdev->dev);
+
+	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+		if (pci_resource_len(pdev, i) == 0)
+			continue;
+		pcim_iounmap_regions(pdev, BIT(i));
+		break;
+	}
+
 	pci_disable_device(pdev);
 }
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 531294f..58ea18a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -301,6 +301,8 @@
 	/* Queue 0 is not AVB capable */
 	if (queue <= 0 || queue >= tx_queues_count)
 		return -EINVAL;
+	if (!priv->dma_cap.av)
+		return -EOPNOTSUPP;
 	if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
 		return -EOPNOTSUPP;
 
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 9020b08..7ec4eb7 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1,22 +1,9 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
 /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
  *
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
  * This driver uses the sungem driver (c) David Miller
  * (davem@redhat.com) as its basis.
  *
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index 13f3860..ae5f05f 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -1,23 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
 /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
  * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
  *
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
  * vendor id: 0x108E (Sun Microsystems, Inc.)
  * device id: 0xabba (Cassini)
  * revision ids: 0x01 = Cassini
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 9319d84..d845014 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8100,6 +8100,8 @@
 		start += 3;
 
 		prop_len = niu_pci_eeprom_read(np, start + 4);
+		if (prop_len < 0)
+			return prop_len;
 		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
 		if (err < 0)
 			return err;
@@ -8144,8 +8146,12 @@
 			netif_printk(np, probe, KERN_DEBUG, np->dev,
 				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
 				     namebuf, prop_len);
-			for (i = 0; i < prop_len; i++)
-				*prop_buf++ = niu_pci_eeprom_read(np, off + i);
+			for (i = 0; i < prop_len; i++) {
+				err = niu_pci_eeprom_read(np, off + i);
+				if (err >= 0)
+					*prop_buf = err;
+				++prop_buf;
+			}
 		}
 
 		start += len;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 493cd38..e1427b5 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -636,15 +636,20 @@
 static int geneve_open(struct net_device *dev)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
-	bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
 	bool metadata = geneve->collect_md;
+	bool ipv4, ipv6;
 	int ret = 0;
 
+	ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
+	ipv4 = !ipv6 || metadata;
 #if IS_ENABLED(CONFIG_IPV6)
-	if (ipv6 || metadata)
+	if (ipv6) {
 		ret = geneve_sock_add(geneve, true);
+		if (ret < 0 && ret != -EAFNOSUPPORT)
+			ipv4 = false;
+	}
 #endif
-	if (!ret && (!ipv6 || metadata))
+	if (ipv4)
 		ret = geneve_sock_add(geneve, false);
 	if (ret < 0)
 		geneve_sock_release(geneve);
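
The rewrite computes both booleans up front: in collect_md (metadata) mode the device needs sockets for both address families, otherwise only for the configured family, and an IPv6 socket failure other than -EAFNOSUPPORT now also suppresses the IPv4 socket. A small truth-table sketch of that decision (plain C, toy function name):

#include <stdbool.h>
#include <stdio.h>

/* Which sockets geneve_open() will try to create. */
static void socks_needed(bool mode_ipv6, bool metadata,
			 bool *want_v6, bool *want_v4)
{
	*want_v6 = mode_ipv6 || metadata;
	*want_v4 = !*want_v6 || metadata;
}

int main(void)
{
	bool v6, v4;
	int m6, md;

	for (m6 = 0; m6 <= 1; m6++)
		for (md = 0; md <= 1; md++) {
			socks_needed(m6, md, &v6, &v4);
			printf("ipv6_mode=%d metadata=%d -> v6=%d v4=%d\n",
			       m6, md, v6, v4);
		}
	return 0;
}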
@@ -1406,9 +1411,13 @@
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6: {
-		struct rt6_info *rt = rt6_lookup(geneve->net,
-						 &info->key.u.ipv6.dst, NULL, 0,
-						 NULL, 0);
+		struct rt6_info *rt;
+
+		if (!__in6_dev_get(dev))
+			break;
+
+		rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
+				NULL, 0);
 
 		if (rt && rt->dst.dev)
 			ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index d79a69d..54e63ec 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -524,10 +524,7 @@
 
 
 	/* Start resync timer again -- the TNC might still be absent */
-
-	del_timer(&sp->resync_t);
-	sp->resync_t.expires	= jiffies + SIXP_RESYNC_TIMEOUT;
-	add_timer(&sp->resync_t);
+	mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
 }
 
 static inline int tnc_init(struct sixpack *sp)
@@ -538,9 +535,7 @@
 
 	sp->tty->ops->write(sp->tty, &inbyte, 1);
 
-	del_timer(&sp->resync_t);
-	sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
-	add_timer(&sp->resync_t);
+	mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
 
 	return 0;
 }
@@ -918,11 +913,8 @@
         /* if the state byte has been received, the TNC is present,
            so the resync timer can be reset. */
 
-	if (sp->tnc_state == TNC_IN_SYNC) {
-		del_timer(&sp->resync_t);
-		sp->resync_t.expires	= jiffies + SIXP_INIT_RESYNC_TIMEOUT;
-		add_timer(&sp->resync_t);
-	}
+	if (sp->tnc_state == TNC_IN_SYNC)
+		mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT);
 
 	sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
 }
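
All three hunks above are the same mechanical substitution: the open-coded del_timer() / set ->expires / add_timer() sequence collapses into a single mod_timer() call, which also behaves correctly when the timer is already pending. A toy model of the two patterns (illustrative struct, not the kernel timer API):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct timer_list. */
struct toy_timer {
	bool pending;
	unsigned long expires;
};

/* The open-coded pattern the patch removes. */
static void rearm_open_coded(struct toy_timer *t, unsigned long when)
{
	t->pending = false;	/* del_timer() */
	t->expires = when;
	t->pending = true;	/* add_timer() */
}

/* mod_timer(): one call, pending or not. */
static void toy_mod_timer(struct toy_timer *t, unsigned long when)
{
	t->expires = when;
	t->pending = true;
}

int main(void)
{
	struct toy_timer t = { false, 0 };

	rearm_open_coded(&t, 100);
	toy_mod_timer(&t, 200);
	printf("pending=%d expires=%lu\n", t.pending, t.expires);
	return 0;
}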
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a32ded5..42d2846 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -144,6 +144,8 @@
 	u32 total_data_buflen;
 };
 
+#define NETVSC_HASH_KEYLEN 40
+
 struct netvsc_device_info {
 	unsigned char mac_adr[ETH_ALEN];
 	u32  num_chn;
@@ -151,6 +153,8 @@
 	u32  recv_sections;
 	u32  send_section_size;
 	u32  recv_section_size;
+
+	u8 rss_key[NETVSC_HASH_KEYLEN];
 };
 
 enum rndis_device_state {
@@ -160,8 +164,6 @@
 	RNDIS_DEV_DATAINITIALIZED,
 };
 
-#define NETVSC_HASH_KEYLEN 40
-
 struct rndis_device {
 	struct net_device *ndev;
 
@@ -210,7 +212,9 @@
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
+int rndis_set_subchannel(struct net_device *ndev,
+			 struct netvsc_device *nvdev,
+			 struct netvsc_device_info *dev_info);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index fe01e14..1a942fe 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -84,7 +84,7 @@
 
 	rdev = nvdev->extension;
 	if (rdev) {
-		ret = rndis_set_subchannel(rdev->ndev, nvdev);
+		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
 		if (ret == 0) {
 			netif_device_attach(rdev->ndev);
 		} else {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1c37a82..c832040 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -743,6 +743,14 @@
 	schedule_delayed_work(&ndev_ctx->dwork, 0);
 }
 
+static void netvsc_comp_ipcsum(struct sk_buff *skb)
+{
+	struct iphdr *iph = (struct iphdr *)skb->data;
+
+	iph->check = 0;
+	iph->check = ip_fast_csum(iph, iph->ihl);
+}
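+
+ip_fast_csum() is the kernel's optimized form of the standard RFC 1071 one's-complement sum over the IPv4 header; the helper zeroes the checksum field first because the field is defined as the checksum of the header with itself set to zero. A portable sketch of the same computation, checked against the well-known example header:
+
+#include <stdint.h>
+#include <stdio.h>
+
+/* RFC 1071: sum the header as 16-bit big-endian words, fold the carries,
+ * and take the one's complement. ihl_words is the header length in
+ * 32-bit words (iph->ihl). */
+static uint16_t ip_header_csum(const uint8_t *hdr, unsigned int ihl_words)
+{
+	uint32_t sum = 0;
+	unsigned int i;
+
+	for (i = 0; i < ihl_words * 2; i++)
+		sum += (uint32_t)hdr[2 * i] << 8 | hdr[2 * i + 1];
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (uint16_t)~sum;
+}
+
+int main(void)
+{
+	/* 20-byte IPv4 header with the checksum field (bytes 10-11)
+	 * zeroed, as netvsc_comp_ipcsum() does before recomputing. */
+	uint8_t hdr[20] = {
+		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
+		0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
+		0xac, 0x10, 0x0a, 0x0c,
+	};
+
+	printf("checksum=0x%04x\n", ip_header_csum(hdr, 5)); /* 0xb1e6 */
+	return 0;
+}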
+
 static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
 					     struct napi_struct *napi,
 					     const struct ndis_tcp_ip_checksum_info *csum_info,
@@ -766,9 +774,17 @@
 	/* skb is already created with CHECKSUM_NONE */
 	skb_checksum_none_assert(skb);
 
-	/*
-	 * In Linux, the IP checksum is always checked.
-	 * Do L4 checksum offload if enabled and present.
+	/* Incoming packets may have the IP header checksum verified by the
+	 * host, but not recomputed after coalescing. We compute it here if
+	 * the flags are set, because on Linux the IP checksum is always
+	 * checked.
+	 */
+	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+	    csum_info->receive.ip_checksum_succeeded &&
+	    skb->protocol == htons(ETH_P_IP))
+		netvsc_comp_ipcsum(skb);
+
+	/* Do L4 checksum offload if enabled and present.
 	 */
 	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
 		if (csum_info->receive.tcp_checksum_succeeded ||
@@ -856,6 +872,39 @@
 	}
 }
 
+/* Alloc struct netvsc_device_info and initialize it either from an existing
+ * struct netvsc_device or from default values.
+ */
+static struct netvsc_device_info *netvsc_devinfo_get
+			(struct netvsc_device *nvdev)
+{
+	struct netvsc_device_info *dev_info;
+
+	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
+
+	if (!dev_info)
+		return NULL;
+
+	if (nvdev) {
+		dev_info->num_chn = nvdev->num_chn;
+		dev_info->send_sections = nvdev->send_section_cnt;
+		dev_info->send_section_size = nvdev->send_section_size;
+		dev_info->recv_sections = nvdev->recv_section_cnt;
+		dev_info->recv_section_size = nvdev->recv_section_size;
+
+		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
+		       NETVSC_HASH_KEYLEN);
+	} else {
+		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+		dev_info->send_sections = NETVSC_DEFAULT_TX;
+		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
+		dev_info->recv_sections = NETVSC_DEFAULT_RX;
+		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
+	}
+
+	return dev_info;
+}
+
 static int netvsc_detach(struct net_device *ndev,
 			 struct netvsc_device *nvdev)
 {
@@ -907,7 +956,7 @@
 		return PTR_ERR(nvdev);
 
 	if (nvdev->num_chn > 1) {
-		ret = rndis_set_subchannel(ndev, nvdev);
+		ret = rndis_set_subchannel(ndev, nvdev, dev_info);
 
 		/* if unavailable, just proceed with one queue */
 		if (ret) {
@@ -941,7 +990,7 @@
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 	unsigned int orig, count = channels->combined_count;
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info;
 	int ret;
 
 	/* We do not support separate count for rx, tx, or other */
@@ -960,24 +1009,26 @@
 
 	orig = nvdev->num_chn;
 
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = count;
-	device_info.send_sections = nvdev->send_section_cnt;
-	device_info.send_section_size = nvdev->send_section_size;
-	device_info.recv_sections = nvdev->recv_section_cnt;
-	device_info.recv_section_size = nvdev->recv_section_size;
+	device_info = netvsc_devinfo_get(nvdev);
+
+	if (!device_info)
+		return -ENOMEM;
+
+	device_info->num_chn = count;
 
 	ret = netvsc_detach(net, nvdev);
 	if (ret)
-		return ret;
+		goto out;
 
-	ret = netvsc_attach(net, &device_info);
+	ret = netvsc_attach(net, device_info);
 	if (ret) {
-		device_info.num_chn = orig;
-		if (netvsc_attach(net, &device_info))
+		device_info->num_chn = orig;
+		if (netvsc_attach(net, device_info))
 			netdev_err(net, "restoring channel setting failed\n");
 	}
 
+out:
+	kfree(device_info);
 	return ret;
 }
 
@@ -1044,48 +1095,45 @@
 	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
 	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
 	int orig_mtu = ndev->mtu;
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info;
 	int ret = 0;
 
 	if (!nvdev || nvdev->destroy)
 		return -ENODEV;
 
+	device_info = netvsc_devinfo_get(nvdev);
+
+	if (!device_info)
+		return -ENOMEM;
+
 	/* Change MTU of underlying VF netdev first. */
 	if (vf_netdev) {
 		ret = dev_set_mtu(vf_netdev, mtu);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = nvdev->num_chn;
-	device_info.send_sections = nvdev->send_section_cnt;
-	device_info.send_section_size = nvdev->send_section_size;
-	device_info.recv_sections = nvdev->recv_section_cnt;
-	device_info.recv_section_size = nvdev->recv_section_size;
-
 	ret = netvsc_detach(ndev, nvdev);
 	if (ret)
 		goto rollback_vf;
 
 	ndev->mtu = mtu;
 
-	ret = netvsc_attach(ndev, &device_info);
-	if (ret)
-		goto rollback;
+	ret = netvsc_attach(ndev, device_info);
+	if (!ret)
+		goto out;
 
-	return 0;
-
-rollback:
 	/* Attempt rollback to original MTU */
 	ndev->mtu = orig_mtu;
 
-	if (netvsc_attach(ndev, &device_info))
+	if (netvsc_attach(ndev, device_info))
 		netdev_err(ndev, "restoring mtu failed\n");
 rollback_vf:
 	if (vf_netdev)
 		dev_set_mtu(vf_netdev, orig_mtu);
 
+out:
+	kfree(device_info);
 	return ret;
 }
 
@@ -1690,7 +1738,7 @@
 {
 	struct net_device_context *ndevctx = netdev_priv(ndev);
 	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info;
 	struct ethtool_ringparam orig;
 	u32 new_tx, new_rx;
 	int ret = 0;
@@ -1710,26 +1758,29 @@
 	    new_rx == orig.rx_pending)
 		return 0;	 /* no change */
 
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = nvdev->num_chn;
-	device_info.send_sections = new_tx;
-	device_info.send_section_size = nvdev->send_section_size;
-	device_info.recv_sections = new_rx;
-	device_info.recv_section_size = nvdev->recv_section_size;
+	device_info = netvsc_devinfo_get(nvdev);
+
+	if (!device_info)
+		return -ENOMEM;
+
+	device_info->send_sections = new_tx;
+	device_info->recv_sections = new_rx;
 
 	ret = netvsc_detach(ndev, nvdev);
 	if (ret)
-		return ret;
+		goto out;
 
-	ret = netvsc_attach(ndev, &device_info);
+	ret = netvsc_attach(ndev, device_info);
 	if (ret) {
-		device_info.send_sections = orig.tx_pending;
-		device_info.recv_sections = orig.rx_pending;
+		device_info->send_sections = orig.tx_pending;
+		device_info->recv_sections = orig.rx_pending;
 
-		if (netvsc_attach(ndev, &device_info))
+		if (netvsc_attach(ndev, device_info))
 			netdev_err(ndev, "restoring ringparam failed");
 	}
 
+out:
+	kfree(device_info);
 	return ret;
 }
 
@@ -2158,7 +2209,7 @@
 {
 	struct net_device *net = NULL;
 	struct net_device_context *net_device_ctx;
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info = NULL;
 	struct netvsc_device *nvdev;
 	int ret = -ENOMEM;
 
@@ -2205,21 +2256,21 @@
 	netif_set_real_num_rx_queues(net, 1);
 
 	/* Notify the netvsc driver of the new device */
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
-	device_info.send_sections = NETVSC_DEFAULT_TX;
-	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
-	device_info.recv_sections = NETVSC_DEFAULT_RX;
-	device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
+	device_info = netvsc_devinfo_get(NULL);
 
-	nvdev = rndis_filter_device_add(dev, &device_info);
+	if (!device_info) {
+		ret = -ENOMEM;
+		goto devinfo_failed;
+	}
+
+	nvdev = rndis_filter_device_add(dev, device_info);
 	if (IS_ERR(nvdev)) {
 		ret = PTR_ERR(nvdev);
 		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
 		goto rndis_failed;
 	}
 
-	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
 
 	/* We must get rtnl lock before scheduling nvdev->subchan_work,
 	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2257,12 +2308,16 @@
 
 	list_add(&net_device_ctx->list, &netvsc_dev_list);
 	rtnl_unlock();
+
+	kfree(device_info);
 	return 0;
 
 register_failed:
 	rtnl_unlock();
 	rndis_filter_device_remove(dev, nvdev);
 rndis_failed:
+	kfree(device_info);
+devinfo_failed:
 	free_percpu(net_device_ctx->vf_stats);
 no_stats:
 	hv_set_drvdata(dev, NULL);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2a5209f..53c6039 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -715,8 +715,8 @@
 	return ret;
 }
 
-int rndis_filter_set_rss_param(struct rndis_device *rdev,
-			       const u8 *rss_key)
+static int rndis_set_rss_param_msg(struct rndis_device *rdev,
+				   const u8 *rss_key, u16 flag)
 {
 	struct net_device *ndev = rdev->ndev;
 	struct rndis_request *request;
@@ -745,7 +745,7 @@
 	rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
 	rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
 	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
-	rssp->flag = 0;
+	rssp->flag = flag;
 	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
 			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
 			 NDIS_HASH_TCP_IPV6;
@@ -770,9 +770,12 @@
 
 	wait_for_completion(&request->wait_event);
 	set_complete = &request->response_msg.msg.set_complete;
-	if (set_complete->status == RNDIS_STATUS_SUCCESS)
-		memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
-	else {
+	if (set_complete->status == RNDIS_STATUS_SUCCESS) {
+		if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
+		    !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
+			memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
+
+	} else {
 		netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
 			   set_complete->status);
 		ret = -EINVAL;
@@ -783,6 +786,16 @@
 	return ret;
 }
 
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+			       const u8 *rss_key)
+{
+	/* Disable RSS before change */
+	rndis_set_rss_param_msg(rdev, rss_key,
+				NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
+
+	return rndis_set_rss_param_msg(rdev, rss_key, 0);
+}
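+
+Splitting the update into "disable RSS" followed by "set new parameters" ensures the host never hashes traffic with a half-written key. The hashinfo field requests the Toeplitz function over the IPv4/IPv6 TCP tuples; a standalone sketch of that hash as commonly published (the key and tuple below are placeholders, not the driver's netvsc_hash_key):
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+/* Toeplitz hash: for every set bit of the input, XOR in the 32-bit
+ * window of the key starting at that bit position. Handles inputs up
+ * to 36 bytes with a 40-byte key (the IPv6 4-tuple case). */
+static uint32_t toeplitz_hash(const uint8_t key[40],
+			      const uint8_t *data, size_t len)
+{
+	uint32_t hash = 0;
+	uint64_t window = 0;
+	size_t i, bit;
+
+	for (i = 0; i < 8; i++)		/* prime with key[0..7] */
+		window = window << 8 | key[i];
+
+	for (i = 0; i < len; i++) {
+		for (bit = 0; bit < 8; bit++) {
+			if (data[i] & (0x80 >> bit))
+				hash ^= (uint32_t)(window >> 32);
+			window <<= 1;
+		}
+		if (i + 8 < 40)		/* shift in the next key byte */
+			window |= key[i + 8];
+	}
+	return hash;
+}
+
+int main(void)
+{
+	/* Placeholder key and IPv4 4-tuple (src, dst, sport, dport). */
+	uint8_t key[40];
+	uint8_t tuple[12] = { 192, 168, 1, 10, 10, 0, 0, 1,
+			      0x1f, 0x90, 0xc3, 0x50 };
+	size_t i;
+
+	for (i = 0; i < sizeof(key); i++)
+		key[i] = (uint8_t)(i * 37 + 11);
+
+	printf("hash=0x%08x\n", toeplitz_hash(key, tuple, sizeof(tuple)));
+	return 0;
+}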
+
 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
 						 struct netvsc_device *net_device)
 {
@@ -1062,7 +1075,9 @@
  * This breaks overlap of processing the host message for the
  * new primary channel with the initialization of sub-channels.
  */
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
+int rndis_set_subchannel(struct net_device *ndev,
+			 struct netvsc_device *nvdev,
+			 struct netvsc_device_info *dev_info)
 {
 	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1103,7 +1118,10 @@
 		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
 
 	/* ignore failures from setting rss parameters, still have channels */
-	rndis_filter_set_rss_param(rdev, netvsc_hash_key);
+	if (dev_info)
+		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
+	else
+		rndis_filter_set_rss_param(rdev, netvsc_hash_key);
 
 	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
 	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 0ff5a40..b2ff903 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -721,7 +721,7 @@
 static void ca8210_rx_done(struct cas_control *cas_ctl)
 {
 	u8 *buf;
-	u8 len;
+	unsigned int len;
 	struct work_priv_container *mlme_reset_wpc;
 	struct ca8210_priv *priv = cas_ctl->priv;
 
@@ -730,7 +730,7 @@
 	if (len > CA8210_SPI_BUF_SIZE) {
 		dev_crit(
 			&priv->spi->dev,
-			"Received packet len (%d) erroneously long\n",
+			"Received packet len (%u) erroneously long\n",
 			len
 		);
 		goto finish;
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index bf70ab8..624bff4 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -500,7 +500,7 @@
 	    !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
 		return -EINVAL;
 
-	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
 			     info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
 			     hwsim_edge_policy, NULL))
 		return -EINVAL;
@@ -550,7 +550,7 @@
 	    !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
 		return -EINVAL;
 
-	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
 			     info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
 			     hwsim_edge_policy, NULL))
 		return -EINVAL;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4a94956..68b8007 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -97,12 +97,12 @@
 			err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
 			if (!err) {
 				mdev->l3mdev_ops = &ipvl_l3mdev_ops;
-				mdev->priv_flags |= IFF_L3MDEV_MASTER;
+				mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
 			} else
 				goto fail;
 		} else if (port->mode == IPVLAN_MODE_L3S) {
 			/* Old mode was L3S */
-			mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+			mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
 			ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
 			mdev->l3mdev_ops = NULL;
 		}
@@ -162,7 +162,7 @@
 	struct sk_buff *skb;
 
 	if (port->mode == IPVLAN_MODE_L3S) {
-		dev->priv_flags &= ~IFF_L3MDEV_MASTER;
+		dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
 		ipvlan_unregister_nf_hook(dev_net(dev));
 		dev->l3mdev_ops = NULL;
 	}
@@ -494,6 +494,8 @@
 
 	if (!data)
 		return 0;
+	if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
+		return -EPERM;
 
 	if (data[IFLA_IPVLAN_MODE]) {
 		u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -596,6 +598,8 @@
 		struct ipvl_dev *tmp = netdev_priv(phy_dev);
 
 		phy_dev = tmp->phy_dev;
+		if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
 	} else if (!netif_is_ipvlan_port(phy_dev)) {
 		/* Exit early if the underlying link is invalid or busy */
 		if (phy_dev->type != ARPHRD_ETHER ||
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 29aa8d7..59b3f1f 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -896,14 +896,14 @@
 			struct phy_txts *phy_txts)
 {
 	struct skb_shared_hwtstamps shhwtstamps;
+	struct dp83640_skb_info *skb_info;
 	struct sk_buff *skb;
-	u64 ns;
 	u8 overflow;
+	u64 ns;
 
 	/* We must already have the skb that triggered this. */
-
+again:
 	skb = skb_dequeue(&dp83640->tx_queue);
-
 	if (!skb) {
 		pr_debug("have timestamp but tx_queue empty\n");
 		return;
@@ -918,6 +918,11 @@
 		}
 		return;
 	}
+	skb_info = (struct dp83640_skb_info *)skb->cb;
+	if (time_after(jiffies, skb_info->tmo)) {
+		kfree_skb(skb);
+		goto again;
+	}
 
 	ns = phy2txts(phy_txts);
 	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -1470,6 +1475,7 @@
 static void dp83640_txtstamp(struct phy_device *phydev,
 			     struct sk_buff *skb, int type)
 {
+	struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
 	struct dp83640_private *dp83640 = phydev->priv;
 
 	switch (dp83640->hwts_tx_en) {
@@ -1482,6 +1488,7 @@
 		/* fall through */
 	case HWTSTAMP_TX_ON:
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
 		skb_queue_tail(&dp83640->tx_queue, skb);
 		break;
 
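
The timeout stamped into skb->cb above is later compared with time_after(jiffies, skb_info->tmo), which is wraparound-safe because it subtracts and checks the sign rather than comparing magnitudes. A minimal model of that comparison:

#include <stdio.h>

/* Wraparound-safe "a is after b", as in the kernel's time_after(). */
static int toy_time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long now = (unsigned long)-5;	/* jiffies close to wrap */
	unsigned long deadline = now + 10;	/* wraps past zero */

	printf("%d\n", toy_time_after(now, deadline));	    /* 0: not expired */
	printf("%d\n", toy_time_after(now + 20, deadline)); /* 1: expired */
	return 0;
}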
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f7c69ca..73813c7 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -868,8 +868,6 @@
 
 	/* SGMII-to-Copper mode initialization */
 	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
-		u32 pause;
-
 		/* Select page 18 */
 		err = marvell_set_page(phydev, 18);
 		if (err < 0)
@@ -892,16 +890,6 @@
 		err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
 		if (err < 0)
 			return err;
-
-		/* There appears to be a bug in the 88e1512 when used in
-		 * SGMII to copper mode, where the AN advertisement register
-		 * clears the pause bits each time a negotiation occurs.
-		 * This means we can never be truely sure what was advertised,
-		 * so disable Pause support.
-		 */
-		pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-		phydev->supported &= ~pause;
-		phydev->advertising &= ~pause;
 	}
 
 	return m88e1318_config_init(phydev);
@@ -1063,6 +1051,39 @@
 	return 0;
 }
 
+/* The VOD can be out of specification on link up. Poke an
+ * undocumented register, in an undocumented page, with a magic value
+ * to fix this.
+ */
+static int m88e6390_errata(struct phy_device *phydev)
+{
+	int err;
+
+	err = phy_write(phydev, MII_BMCR,
+			BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
+	if (err)
+		return err;
+
+	usleep_range(300, 400);
+
+	err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
+	if (err)
+		return err;
+
+	return genphy_soft_reset(phydev);
+}
+
+static int m88e6390_config_aneg(struct phy_device *phydev)
+{
+	int err;
+
+	err = m88e6390_errata(phydev);
+	if (err)
+		return err;
+
+	return m88e1510_config_aneg(phydev);
+}
+
 /**
  * fiber_lpa_to_ethtool_lpa_t
  * @lpa: value of the MII_LPA register for fiber link
@@ -1418,7 +1439,7 @@
 		 * before enabling it if !phy_interrupt_is_valid()
 		 */
 		if (!phy_interrupt_is_valid(phydev))
-			phy_read(phydev, MII_M1011_IEVENT);
+			__phy_read(phydev, MII_M1011_IEVENT);
 
 		/* Enable the WOL interrupt */
 		err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
@@ -2313,7 +2334,7 @@
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = m88e6390_probe,
 		.config_init = &marvell_config_init,
-		.config_aneg = &m88e1510_config_aneg,
+		.config_aneg = &m88e6390_config_aneg,
 		.read_status = &marvell_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 98f4b1f..c5588d4 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -380,7 +380,6 @@
 	err = device_register(&bus->dev);
 	if (err) {
 		pr_err("mii_bus %s failed to register\n", bus->id);
-		put_device(&bus->dev);
 		return -EINVAL;
 	}
 
@@ -391,6 +390,7 @@
 	if (IS_ERR(gpiod)) {
 		dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
 			bus->id);
+		device_del(&bus->dev);
 		return PTR_ERR(gpiod);
 	} else	if (gpiod) {
 		bus->reset_gpiod = gpiod;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3db06b4..05a6ae3 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -339,6 +339,17 @@
 	return genphy_config_aneg(phydev);
 }
 
+static int ksz8061_config_init(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+	if (ret)
+		return ret;
+
+	return kszphy_config_init(phydev);
+}
+
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
 				       const struct device_node *of_node,
 				       u16 reg,
@@ -934,7 +945,7 @@
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
-	.config_init	= kszphy_config_init,
+	.config_init	= ksz8061_config_init,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.suspend	= genphy_suspend,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 20d1be2..2c32c79 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -164,11 +164,8 @@
 	if (ret < 0)
 		return ret;
 
-	/* The PHY needs to renegotiate. */
-	phydev->link = 0;
-	phydev->state = PHY_UP;
-
-	phy_start_machine(phydev);
+	if (phydev->attached_dev && phydev->adjust_link)
+		phy_start_machine(phydev);
 
 	return 0;
 }
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 70f3f90..f6e70f2 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -348,6 +348,10 @@
 	linkmode_zero(state->lp_advertising);
 	state->interface = pl->link_config.interface;
 	state->an_enabled = pl->link_config.an_enabled;
+	state->speed = SPEED_UNKNOWN;
+	state->duplex = DUPLEX_UNKNOWN;
+	state->pause = MLO_PAUSE_NONE;
+	state->an_complete = 0;
 	state->link = 1;
 
 	return pl->ops->mac_link_state(ndev, state);
@@ -502,6 +506,17 @@
 		queue_work(system_power_efficient_wq, &pl->resolve);
 }
 
+static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
+{
+	unsigned long state = pl->phylink_disable_state;
+
+	set_bit(bit, &pl->phylink_disable_state);
+	if (state == 0) {
+		queue_work(system_power_efficient_wq, &pl->resolve);
+		flush_work(&pl->resolve);
+	}
+}
+
 static void phylink_fixed_poll(struct timer_list *t)
 {
 	struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -955,9 +970,7 @@
 	if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
 		del_timer_sync(&pl->link_poll);
 
-	set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
-	queue_work(system_power_efficient_wq, &pl->resolve);
-	flush_work(&pl->resolve);
+	phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
 }
 EXPORT_SYMBOL_GPL(phylink_stop);
 
@@ -1664,9 +1677,7 @@
 
 	ASSERT_RTNL();
 
-	set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
-	queue_work(system_power_efficient_wq, &pl->resolve);
-	flush_work(&pl->resolve);
+	phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
 }
 
 static void phylink_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index ad9db65..fef701b 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -347,6 +347,7 @@
 				return ret;
 		}
 	}
+	bus->socket_ops->attach(bus->sfp);
 	if (bus->started)
 		bus->socket_ops->start(bus->sfp);
 	bus->netdev->sfp_bus = bus;
@@ -362,6 +363,7 @@
 	if (bus->registered) {
 		if (bus->started)
 			bus->socket_ops->stop(bus->sfp);
+		bus->socket_ops->detach(bus->sfp);
 		if (bus->phydev && ops && ops->disconnect_phy)
 			ops->disconnect_phy(bus->upstream);
 	}
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index fd8bb99..68c8fbf 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -184,6 +184,7 @@
 
 	struct gpio_desc *gpio[GPIO_MAX];
 
+	bool attached;
 	unsigned int state;
 	struct delayed_work poll;
 	struct delayed_work timeout;
@@ -1475,7 +1476,7 @@
 	 */
 	switch (sfp->sm_mod_state) {
 	default:
-		if (event == SFP_E_INSERT) {
+		if (event == SFP_E_INSERT && sfp->attached) {
 			sfp_module_tx_disable(sfp);
 			sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
 		}
@@ -1607,6 +1608,19 @@
 	mutex_unlock(&sfp->sm_mutex);
 }
 
+static void sfp_attach(struct sfp *sfp)
+{
+	sfp->attached = true;
+	if (sfp->state & SFP_F_PRESENT)
+		sfp_sm_event(sfp, SFP_E_INSERT);
+}
+
+static void sfp_detach(struct sfp *sfp)
+{
+	sfp->attached = false;
+	sfp_sm_event(sfp, SFP_E_REMOVE);
+}
+
 static void sfp_start(struct sfp *sfp)
 {
 	sfp_sm_event(sfp, SFP_E_DEV_UP);
@@ -1667,6 +1681,8 @@
 }
 
 static const struct sfp_socket_ops sfp_module_ops = {
+	.attach = sfp_attach,
+	.detach = sfp_detach,
 	.start = sfp_start,
 	.stop = sfp_stop,
 	.module_info = sfp_module_info,
@@ -1834,10 +1850,6 @@
 	dev_info(sfp->dev, "Host maximum power %u.%uW\n",
 		 sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
 
-	sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
-	if (!sfp->sfp_bus)
-		return -ENOMEM;
-
 	/* Get the initial state, and always signal TX disable,
 	 * since the network interface will not be up.
 	 */
@@ -1848,10 +1860,6 @@
 		sfp->state |= SFP_F_RATE_SELECT;
 	sfp_set_state(sfp, sfp->state);
 	sfp_module_tx_disable(sfp);
-	rtnl_lock();
-	if (sfp->state & SFP_F_PRESENT)
-		sfp_sm_event(sfp, SFP_E_INSERT);
-	rtnl_unlock();
 
 	for (i = 0; i < GPIO_MAX; i++) {
 		if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
@@ -1884,6 +1892,10 @@
 		dev_warn(sfp->dev,
 			 "No tx_disable pin: SFP modules will always be emitting.\n");
 
+	sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
+	if (!sfp->sfp_bus)
+		return -ENOMEM;
+
 	return 0;
 }
 
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 31b0acf..64f54b0 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -7,6 +7,8 @@
 struct sfp;
 
 struct sfp_socket_ops {
+	void (*attach)(struct sfp *sfp);
+	void (*detach)(struct sfp *sfp);
 	void (*start)(struct sfp *sfp);
 	void (*stop)(struct sfp *sfp);
 	int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 74a8782..bd6084e 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -44,7 +44,10 @@
 	u16 val = 0;
 	int err;
 
-	err = priv->phy_drv->read_status(phydev);
+	if (priv->phy_drv->read_status)
+		err = priv->phy_drv->read_status(phydev);
+	else
+		err = genphy_read_status(phydev);
 	if (err < 0)
 		return err;
 
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index bdc4d23..7eae088 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -770,7 +770,7 @@
 {
 	struct sk_buff *skb;
 	unsigned char *p;
-	unsigned int len, fcs, proto;
+	unsigned int len, fcs;
 
 	skb = ap->rpkt;
 	if (ap->state & (SC_TOSS | SC_ESCAPE))
@@ -799,14 +799,14 @@
 			goto err;
 		p = skb_pull(skb, 2);
 	}
-	proto = p[0];
-	if (proto & 1) {
-		/* protocol is compressed */
-		*(u8 *)skb_push(skb, 1) = 0;
-	} else {
+
+	/* If protocol field is not compressed, it can be LCP packet */
+	if (!(p[0] & 0x01)) {
+		unsigned int proto;
+
 		if (skb->len < 2)
 			goto err;
-		proto = (proto << 8) + p[1];
+		proto = (p[0] << 8) + p[1];
 		if (proto == PPP_LCP)
 			async_lcp_peek(ap, p, skb->len, 1);
 	}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 02ad03a..8b1ef1b 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1965,6 +1965,46 @@
 	ppp_recv_unlock(ppp);
 }
 
+/**
+ * __ppp_decompress_proto - Decompress protocol field, slim version.
+ * @skb: Socket buffer where protocol field should be decompressed. It must have
+ *	 at least 1 byte of head room and 1 byte of linear data. First byte of
+ *	 data must be a protocol field byte.
+ *
+ * Decompress protocol field in PPP header if it's compressed, e.g. when
+ * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data
+ * length are done in this function.
+ */
+static void __ppp_decompress_proto(struct sk_buff *skb)
+{
+	if (skb->data[0] & 0x01)
+		*(u8 *)skb_push(skb, 1) = 0x00;
+}
+
+/**
+ * ppp_decompress_proto - Check skb data room and decompress protocol field.
+ * @skb: Socket buffer where protocol field should be decompressed. First byte
+ *	 of data must be a protocol field byte.
+ *
+ * Decompress protocol field in PPP header if it's compressed, e.g. when
+ * Protocol-Field-Compression (PFC) was negotiated. This function also makes
+ * sure that skb data room is sufficient for Protocol field, before and after
+ * decompression.
+ *
+ * Return: true - decompressed successfully, false - not enough room in skb.
+ */
+static bool ppp_decompress_proto(struct sk_buff *skb)
+{
+	/* At least one byte should be present (if protocol is compressed) */
+	if (!pskb_may_pull(skb, 1))
+		return false;
+
+	__ppp_decompress_proto(skb);
+
+	/* Protocol field should occupy 2 bytes when not compressed */
+	return pskb_may_pull(skb, 2);
+}
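+
+Protocol-Field-Compression (RFC 1661) lets a peer omit the leading 0x00 of any protocol number whose low byte is odd, so "odd first byte" means "compressed". A userspace sketch of the same re-expansion on a flat buffer (shifting right instead of skb_push(), purely illustrative):
+
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+
+/* Expand a possibly PFC-compressed protocol field at buf[0]. The buffer
+ * must have one spare byte of trailing space (the kernel instead keeps
+ * headroom and prepends). Returns the new length. */
+static size_t decompress_proto(unsigned char *buf, size_t len)
+{
+	if (len >= 1 && (buf[0] & 0x01)) {
+		/* Odd first byte: compressed, reinsert the 0x00 high byte. */
+		memmove(buf + 1, buf, len);
+		buf[0] = 0x00;
+		return len + 1;
+	}
+	return len;
+}
+
+int main(void)
+{
+	/* 0x21 is IP with PFC applied; it expands to 0x00 0x21. */
+	unsigned char buf[8] = { 0x21, 0x45, 0x00 };
+	size_t len = decompress_proto(buf, 3);
+
+	printf("len=%zu proto=%02x%02x\n", len, buf[0], buf[1]);
+	return 0;
+}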
+
 void
 ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
 {
@@ -1977,7 +2017,7 @@
 	}
 
 	read_lock_bh(&pch->upl);
-	if (!pskb_may_pull(skb, 2)) {
+	if (!ppp_decompress_proto(skb)) {
 		kfree_skb(skb);
 		if (pch->ppp) {
 			++pch->ppp->dev->stats.rx_length_errors;
@@ -2074,6 +2114,9 @@
 	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
 		goto err;
 
+	/* At this point the "Protocol" field MUST have been decompressed,
+	 * either in ppp_input(), ppp_decompress_frame() or in
+	 * ppp_receive_mp_frame().
+	 */
 	proto = PPP_PROTO(skb);
 	switch (proto) {
 	case PPP_VJC_COMP:
@@ -2245,6 +2288,9 @@
 		skb_put(skb, len);
 		skb_pull(skb, 2);	/* pull off the A/C bytes */
 
+		/* Don't call __ppp_decompress_proto() here, but instead rely on
+		 * the corresponding algo (mppe/bsd/deflate) to decompress it.
+		 */
 	} else {
 		/* Uncompressed frame - pass to decompressor so it
 		   can update its dictionary if necessary. */
@@ -2290,9 +2336,11 @@
 
 	/*
 	 * Do protocol ID decompression on the first fragment of each packet.
+	 * We have to do that here, because ppp_receive_nonmp_frame() expects
+	 * a decompressed protocol field.
 	 */
-	if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
-		*(u8 *)skb_push(skb, 1) = 0;
+	if (PPP_MP_CB(skb)->BEbits & B)
+		__ppp_decompress_proto(skb);
 
 	/*
 	 * Expand sequence number to 32 bits, making it as close
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 047f6c6..d02ba24 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -709,11 +709,10 @@
 		p = skb_pull(skb, 2);
 	}
 
-	/* decompress protocol field if compressed */
-	if (p[0] & 1) {
-		/* protocol is compressed */
-		*(u8 *)skb_push(skb, 1) = 0;
-	} else if (skb->len < 2)
+	/* PPP packet length should be >= 2 bytes when the protocol field is
+	 * not compressed.
+	 */
+	if (!(p[0] & 0x01) && skb->len < 2)
 		goto err;
 
 	/* queue the frame to be processed */
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 62dc564..f22639f 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -445,6 +445,7 @@
 	if (pskb_trim_rcsum(skb, len))
 		goto drop;
 
+	ph = pppoe_hdr(skb);
 	pn = pppoe_pernet(dev_net(dev));
 
 	/* Note that get_item does a sock_hold(), so sk_pppox(po)
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 67ffe74..50c6055 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -325,11 +325,6 @@
 			skb_pull(skb, 2);
 		}
 
-		if ((*skb->data) & 1) {
-			/* protocol is compressed */
-			*(u8 *)skb_push(skb, 1) = 0;
-		}
-
 		skb->ip_summed = CHECKSUM_NONE;
 		skb_set_network_header(skb, skb->head-skb->data);
 		ppp_input(&po->chan, skb);
@@ -537,6 +532,7 @@
 		pppox_unbind_sock(sk);
 	}
 	skb_queue_purge(&sk->sk_receive_queue);
+	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
 }
 
 static int pptp_create(struct net *net, struct socket *sock, int kern)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 4b6572f..95ee9d8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -256,17 +256,6 @@
 	}
 }
 
-static bool __team_option_inst_tmp_find(const struct list_head *opts,
-					const struct team_option_inst *needle)
-{
-	struct team_option_inst *opt_inst;
-
-	list_for_each_entry(opt_inst, opts, tmp_list)
-		if (opt_inst == needle)
-			return true;
-	return false;
-}
-
 static int __team_options_register(struct team *team,
 				   const struct team_option *option,
 				   size_t option_count)
@@ -1270,7 +1259,7 @@
 	list_add_tail_rcu(&port->list, &team->port_list);
 	team_port_enable(team, port);
 	__team_compute_features(team);
-	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
+	__team_port_change_port_added(port, !!netif_oper_up(port_dev));
 	__team_options_change_check(team);
 
 	netdev_info(dev, "Port device %s added\n", portname);
@@ -2463,7 +2452,6 @@
 	int err = 0;
 	int i;
 	struct nlattr *nl_option;
-	LIST_HEAD(opt_inst_list);
 
 	rtnl_lock();
 
@@ -2483,6 +2471,7 @@
 		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
 		struct nlattr *attr;
 		struct nlattr *attr_data;
+		LIST_HEAD(opt_inst_list);
 		enum team_option_type opt_type;
 		int opt_port_ifindex = 0; /* != 0 for per-port options */
 		u32 opt_array_index = 0;
@@ -2587,23 +2576,17 @@
 			if (err)
 				goto team_put;
 			opt_inst->changed = true;
-
-			/* dumb/evil user-space can send us duplicate opt,
-			 * keep only the last one
-			 */
-			if (__team_option_inst_tmp_find(&opt_inst_list,
-							opt_inst))
-				continue;
-
 			list_add(&opt_inst->tmp_list, &opt_inst_list);
 		}
 		if (!opt_found) {
 			err = -ENOENT;
 			goto team_put;
 		}
-	}
 
-	err = team_nl_send_event_options_get(team, &opt_inst_list);
+		err = team_nl_send_event_options_get(team, &opt_inst_list);
+		if (err)
+			break;
+	}
 
 team_put:
 	team_nl_team_put(team);
@@ -2935,7 +2918,7 @@
 
 	switch (event) {
 	case NETDEV_UP:
-		if (netif_carrier_ok(dev))
+		if (netif_oper_up(dev))
 			team_port_change_check(port, true);
 		break;
 	case NETDEV_DOWN:
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index a5ef970..5541e1c 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -325,6 +325,20 @@
 	return 0;
 }
 
+static void lb_bpf_func_free(struct team *team)
+{
+	struct lb_priv *lb_priv = get_lb_priv(team);
+	struct bpf_prog *fp;
+
+	if (!lb_priv->ex->orig_fprog)
+		return;
+
+	__fprog_destroy(lb_priv->ex->orig_fprog);
+	fp = rcu_dereference_protected(lb_priv->fp,
+				       lockdep_is_held(&team->lock));
+	bpf_prog_destroy(fp);
+}
+
 static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
 {
 	struct lb_priv *lb_priv = get_lb_priv(team);
@@ -639,6 +653,7 @@
 
 	team_options_unregister(team, lb_options,
 				ARRAY_SIZE(lb_options));
+	lb_bpf_func_free(team);
 	cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
 	free_percpu(lb_priv->pcpu_stats);
 	kfree(lb_priv->ex);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1cd8728..ee4f901 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -859,10 +859,6 @@
 		err = 0;
 	}
 
-	rcu_assign_pointer(tfile->tun, tun);
-	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
-	tun->numqueues++;
-
 	if (tfile->detached) {
 		tun_enable_queue(tfile);
 	} else {
@@ -870,12 +866,18 @@
 		tun_napi_init(tun, tfile, napi, napi_frags);
 	}
 
-	tun_set_real_num_queues(tun);
-
 	/* device is allowed to go away first, so no need to hold extra
 	 * refcnt.
 	 */
 
+	/* Publish tfile->tun and tun->tfiles only after we've fully
+	 * initialized tfile; otherwise we risk using a half-initialized
+	 * object.
+	 */
+	rcu_assign_pointer(tfile->tun, tun);
+	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+	tun->numqueues++;
+	tun_set_real_num_queues(tun);
 out:
 	return err;
 }
@@ -2124,9 +2126,9 @@
 	}
 
 	add_wait_queue(&tfile->wq.wait, &wait);
-	current->state = TASK_INTERRUPTIBLE;
 
 	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
 		ptr = ptr_ring_consume(&tfile->tx_ring);
 		if (ptr)
 			break;
@@ -2142,7 +2144,7 @@
 		schedule();
 	}
 
-	current->state = TASK_RUNNING;
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&tfile->wq.wait, &wait);
 
 out:
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b654f05..3d93993 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -739,8 +739,13 @@
 	asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
 	chipcode &= AX_CHIPCODE_MASK;
 
-	(chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
-					    ax88772a_hw_reset(dev, 0);
+	ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
+						  ax88772a_hw_reset(dev, 0);
+
+	if (ret < 0) {
+		netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
+		return ret;
+	}
 
 	/* Read PHYID register *AFTER* the PHY was reset properly */
 	phyid = asix_get_phyid(dev);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 184c24b..d6916f7 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2807,6 +2807,12 @@
 		return -EIO;
 	}
 
+	/* check if we have a valid interface */
+	if (if_num > 16) {
+		kfree(config_data);
+		return -EINVAL;
+	}
+
 	switch (config_data[if_num]) {
 	case 0x0:
 		result = 0;
@@ -2877,10 +2883,18 @@
 
 	/* Get the interface/port specification from either driver_info or from
 	 * the device itself */
-	if (id->driver_info)
+	if (id->driver_info) {
+		/* if_num is controlled by the device, driver_info is a
+		 * 0-terminated array. Make sure the access is in bounds! */
+		for (i = 0; i <= if_num; ++i)
+			if (((u32 *)(id->driver_info))[i] == 0)
+				goto exit;
 		port_spec = ((u32 *)(id->driver_info))[if_num];
-	else
+	} else {
 		port_spec = hso_get_config_data(interface);
+		if (port_spec < 0)
+			goto exit;
+	}
 
 	/* Check if we need to switch to alt interfaces prior to port
 	 * configuration */
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 8c5949c..d2f94ea 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2320,6 +2320,10 @@
 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
 
+	/* Added to support MAC address changes */
+	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
 	return 0;
 }
 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 72a55b6..6e38135 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -123,6 +123,7 @@
 	dev->addr_len        = 0;
 	dev->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
 	dev->netdev_ops      = &qmimux_netdev_ops;
+	dev->mtu             = 1500;
 	dev->needs_free_netdev = true;
 }
 
@@ -151,17 +152,18 @@
 
 static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
-	unsigned int len, offset = sizeof(struct qmimux_hdr);
+	unsigned int len, offset = 0;
 	struct qmimux_hdr *hdr;
 	struct net_device *net;
 	struct sk_buff *skbn;
+	u8 qmimux_hdr_sz = sizeof(*hdr);
 
-	while (offset < skb->len) {
-		hdr = (struct qmimux_hdr *)skb->data;
+	while (offset + qmimux_hdr_sz < skb->len) {
+		hdr = (struct qmimux_hdr *)(skb->data + offset);
 		len = be16_to_cpu(hdr->pkt_len);
 
 		/* drop the packet, bogus length */
-		if (offset + len > skb->len)
+		if (offset + len + qmimux_hdr_sz > skb->len)
 			return 0;
 
 		/* control packet, we do not know what to do */
@@ -176,7 +178,7 @@
 			return 0;
 		skbn->dev = net;
 
-		switch (skb->data[offset] & 0xf0) {
+		switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
 		case 0x40:
 			skbn->protocol = htons(ETH_P_IP);
 			break;
@@ -188,12 +190,12 @@
 			goto skip;
 		}
 
-		skb_put_data(skbn, skb->data + offset, len);
+		skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, len);
 		if (netif_rx(skbn) != NET_RX_SUCCESS)
 			return 0;
 
 skip:
-		offset += len + sizeof(struct qmimux_hdr);
+		offset += len + qmimux_hdr_sz;
 	}
 	return 1;
 }
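
The corrected walk keeps offset at the start of each mux header and checks that both the header and the advertised payload fit inside the skb before dereferencing either -- the usual recipe for parsing length-prefixed frames from an untrusted buffer. The same loop shape on a plain buffer (the struct below is a simplified stand-in for struct qmimux_hdr):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct qmimux_hdr: pad, mux_id, big-endian len. */
struct mux_hdr {
	uint8_t pad;
	uint8_t mux_id;
	uint8_t len_be[2];
};

static void parse(const uint8_t *buf, size_t buflen)
{
	const size_t hdr_sz = sizeof(struct mux_hdr);
	size_t offset = 0;

	while (offset + hdr_sz < buflen) {
		struct mux_hdr hdr;
		size_t len;

		memcpy(&hdr, buf + offset, hdr_sz);
		len = (size_t)hdr.len_be[0] << 8 | hdr.len_be[1];

		/* Header and payload must both fit; otherwise the length
		 * is bogus and we stop, as the driver does. */
		if (offset + hdr_sz + len > buflen)
			return;

		printf("mux_id=%u payload_len=%zu\n", hdr.mux_id, len);
		offset += hdr_sz + len;
	}
}

int main(void)
{
	/* Two frames: mux 1 with 2 bytes, mux 2 with 1 byte. */
	const uint8_t buf[] = { 0, 1, 0, 2, 0xaa, 0xbb,
				0, 2, 0, 1, 0xcc };

	parse(buf, sizeof(buf));
	return 0;
}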
@@ -974,6 +976,13 @@
 					      0xff),
 		.driver_info	    = (unsigned long)&qmi_wwan_info_quirk_dtr,
 	},
+	{	/* Quectel EG12/EM12 */
+		USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
+					      USB_CLASS_VENDOR_SPEC,
+					      USB_SUBCLASS_VENDOR_SPEC,
+					      0xff),
+		.driver_info	    = (unsigned long)&qmi_wwan_info_quirk_dtr,
+	},
 
 	/* 3. Combined interface devices matching on interface number */
 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
@@ -1117,6 +1126,7 @@
 	{QMI_FIXED_INTF(0x1435, 0xd181, 4)},	/* Wistron NeWeb D18Q1 */
 	{QMI_FIXED_INTF(0x1435, 0xd181, 5)},	/* Wistron NeWeb D18Q1 */
 	{QMI_FIXED_INTF(0x1435, 0xd191, 4)},	/* Wistron NeWeb D19Q1 */
+	{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)},	/* Fibocom NL668 series */
 	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
 	{QMI_FIXED_INTF(0x16d8, 0x6007, 0)},	/* CMOTech CHE-628S */
 	{QMI_FIXED_INTF(0x16d8, 0x6008, 0)},	/* CMOTech CMU-301 */
@@ -1229,6 +1239,7 @@
 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)},	/* Telit LN940 series */
 	{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */
 	{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},	/* Telewell TW-3G HSPA+ */
 	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
@@ -1263,6 +1274,7 @@
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)},	/* Quectel EC21 Mini PCIe */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)},	/* Quectel EG91 */
 	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
+	{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)},	/* Fibocom NL678 series */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
@@ -1338,17 +1350,20 @@
 	return false;
 }
 
-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+static bool quectel_diag_detected(struct usb_interface *intf)
 {
 	struct usb_device *dev = interface_to_usbdev(intf);
 	struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+	u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
+	u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
 
-	if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
-	    le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
-	    intf_desc.bNumEndpoints == 2)
+	if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
+		return false;
+
+	if (id_product == 0x0306 || id_product == 0x0512)
 		return true;
-
-	return false;
+	else
+		return false;
 }
 
 static int qmi_wwan_probe(struct usb_interface *intf,
@@ -1385,13 +1400,13 @@
 		return -ENODEV;
 	}
 
-	/* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+	/* Several Quectel modems support dynamic interface configuration, so
 	 * we need to match on class/subclass/protocol. These values are
 	 * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
 	 * different. Ignore the current interface if the number of endpoints
 	 * matches the number for the diag interface (two).
 	 */
-	if (quectel_ep06_diag_detected(intf))
+	if (quectel_diag_detected(intf))
 		return -ENODEV;
 
 	return usbnet_probe(intf, id);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index f2d01cb..6e97162 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1295,6 +1295,7 @@
 		dev->net->features |= NETIF_F_RXCSUM;
 
 	dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+	set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
 
 	smsc95xx_init_mac_address(dev);
 
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ad14fbf..c88ee37 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -57,6 +57,8 @@
 #define VIRTIO_XDP_TX		BIT(0)
 #define VIRTIO_XDP_REDIR	BIT(1)
 
+#define VIRTIO_XDP_FLAG	BIT(0)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -251,6 +253,21 @@
 	char padding[4];
 };
 
+static bool is_xdp_frame(void *ptr)
+{
+	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
+}
+
+static void *xdp_to_ptr(struct xdp_frame *ptr)
+{
+	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
+}
+
+static struct xdp_frame *ptr_to_xdp(void *ptr)
+{
+	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
+}
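+
+One virtqueue token stream now carries both sk_buffs and xdp_frames, told apart by the pointer's least-significant bit; this is sound because both objects are at least 2-byte aligned, so bit 0 of a valid pointer is always clear. A standalone sketch of the same tagging trick (toy names):
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define TAG_XDP 0x1UL
+
+static int is_tagged(void *ptr)
+{
+	return ((uintptr_t)ptr & TAG_XDP) != 0;
+}
+
+static void *tag(void *frame)
+{
+	/* Requires bit 0 to be free, i.e. >= 2-byte alignment. */
+	assert(((uintptr_t)frame & TAG_XDP) == 0);
+	return (void *)((uintptr_t)frame | TAG_XDP);
+}
+
+static void *untag(void *ptr)
+{
+	return (void *)((uintptr_t)ptr & ~TAG_XDP);
+}
+
+int main(void)
+{
+	int *frame = malloc(sizeof(*frame));	/* malloc is suitably aligned */
+	void *token = tag(frame);
+
+	printf("tagged=%d roundtrip_ok=%d\n",
+	       is_tagged(token), untag(token) == (void *)frame);
+	free(frame);
+	return 0;
+}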
+
 /* Converting between virtqueue no. and kernel tx/rx queue no.
  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
  */
@@ -461,7 +478,8 @@
 
 	sg_init_one(sq->sg, xdpf->data, xdpf->len);
 
-	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
+	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
+				   GFP_ATOMIC);
 	if (unlikely(err))
 		return -ENOSPC; /* Caller handle free/refcnt */
 
@@ -481,15 +499,24 @@
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct receive_queue *rq = vi->rq;
-	struct xdp_frame *xdpf_sent;
 	struct bpf_prog *xdp_prog;
 	struct send_queue *sq;
 	unsigned int len;
+	int packets = 0;
+	int bytes = 0;
 	int drops = 0;
 	int kicks = 0;
 	int ret, err;
+	void *ptr;
 	int i;
 
+	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
+	 * indicates that XDP resources have been successfully allocated.
+	 */
+	xdp_prog = rcu_dereference(rq->xdp_prog);
+	if (!xdp_prog)
+		return -ENXIO;
+
 	sq = virtnet_xdp_sq(vi);
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
@@ -498,19 +525,21 @@
 		goto out;
 	}
 
-	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
-	 * indicate XDP resources have been successfully allocated.
-	 */
-	xdp_prog = rcu_dereference(rq->xdp_prog);
-	if (!xdp_prog) {
-		ret = -ENXIO;
-		drops = n;
-		goto out;
-	}
-
 	/* Free up any pending old buffers before queueing new ones. */
-	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
-		xdp_return_frame(xdpf_sent);
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (likely(is_xdp_frame(ptr))) {
+			struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+			bytes += frame->len;
+			xdp_return_frame(frame);
+		} else {
+			struct sk_buff *skb = ptr;
+
+			bytes += skb->len;
+			napi_consume_skb(skb, false);
+		}
+		packets++;
+	}
 
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
@@ -529,6 +558,8 @@
 	}
 out:
 	u64_stats_update_begin(&sq->stats.syncp);
+	sq->stats.bytes += bytes;
+	sq->stats.packets += packets;
 	sq->stats.xdp_tx += n;
 	sq->stats.xdp_tx_drops += drops;
 	sq->stats.kicks += kicks;
@@ -1329,20 +1360,28 @@
 	return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
-	struct sk_buff *skb;
 	unsigned int len;
 	unsigned int packets = 0;
 	unsigned int bytes = 0;
+	void *ptr;
 
-	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		pr_debug("Sent skb %p\n", skb);
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (likely(!is_xdp_frame(ptr))) {
+			struct sk_buff *skb = ptr;
 
-		bytes += skb->len;
+			pr_debug("Sent skb %p\n", skb);
+
+			bytes += skb->len;
+			napi_consume_skb(skb, in_napi);
+		} else {
+			struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+			bytes += frame->len;
+			xdp_return_frame(frame);
+		}
 		packets++;
-
-		dev_consume_skb_any(skb);
 	}
 
 	/* Avoid overhead when no packets have been processed
@@ -1357,6 +1396,16 @@
 	u64_stats_update_end(&sq->stats.syncp);
 }
 
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+		return false;
+	else if (q < vi->curr_queue_pairs)
+		return true;
+	else
+		return false;
+}
+
 static void virtnet_poll_cleantx(struct receive_queue *rq)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1364,11 +1413,11 @@
 	struct send_queue *sq = &vi->sq[index];
 	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
 
-	if (!sq->napi.weight)
+	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
 		return;
 
 	if (__netif_tx_trylock(txq)) {
-		free_old_xmit_skbs(sq);
+		free_old_xmit_skbs(sq, true);
 		__netif_tx_unlock(txq);
 	}
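
With XDP attached, virtio-net reserves the last xdp_queue_pairs of the active queue pairs for XDP transmit, and is_xdp_raw_buffer_queue() above is just a range check against that split. The same arithmetic standalone:

#include <stdio.h>

/* Queues [curr - xdp, curr) are XDP TX queues; below that range they are
 * normal skb queues, at or above curr they are inactive. */
static int is_xdp_queue(int q, int curr_queue_pairs, int xdp_queue_pairs)
{
	return q >= curr_queue_pairs - xdp_queue_pairs &&
	       q < curr_queue_pairs;
}

int main(void)
{
	int q;

	/* 4 active queue pairs, 2 reserved for XDP: queues 2 and 3. */
	for (q = 0; q < 5; q++)
		printf("q=%d xdp=%d\n", q, is_xdp_queue(q, 4, 2));
	return 0;
}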
 
@@ -1441,10 +1490,18 @@
 {
 	struct send_queue *sq = container_of(napi, struct send_queue, napi);
 	struct virtnet_info *vi = sq->vq->vdev->priv;
-	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+	unsigned int index = vq2txq(sq->vq);
+	struct netdev_queue *txq;
 
+	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
+		/* We don't need to enable cb for XDP */
+		napi_complete_done(napi, 0);
+		return 0;
+	}
+
+	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
-	free_old_xmit_skbs(sq);
+	free_old_xmit_skbs(sq, true);
 	__netif_tx_unlock(txq);
 
 	virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1513,7 +1570,7 @@
 	bool use_napi = sq->napi.weight;
 
 	/* Free up any pending old buffers before queueing new ones. */
-	free_old_xmit_skbs(sq);
+	free_old_xmit_skbs(sq, false);
 
 	if (use_napi && kick)
 		virtqueue_enable_cb_delayed(sq->vq);
@@ -1556,7 +1613,7 @@
 		if (!use_napi &&
 		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 			/* More just got used, free them then recheck. */
-			free_old_xmit_skbs(sq);
+			free_old_xmit_skbs(sq, false);
 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 				netif_start_subqueue(dev, qnum);
 				virtqueue_disable_cb(sq->vq);
@@ -2345,6 +2402,10 @@
 		return -ENOMEM;
 	}
 
+	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
+	if (!prog && !old_prog)
+		return 0;
+
 	if (prog) {
 		prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
 		if (IS_ERR(prog))
@@ -2352,36 +2413,62 @@
 	}
 
 	/* Make sure NAPI is not using any XDP TX queues for RX. */
-	if (netif_running(dev))
-		for (i = 0; i < vi->max_queue_pairs; i++)
+	if (netif_running(dev)) {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
 			napi_disable(&vi->rq[i].napi);
+			virtnet_napi_tx_disable(&vi->sq[i].napi);
+		}
+	}
 
-	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
+	if (!prog) {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+			if (i == 0)
+				virtnet_restore_guest_offloads(vi);
+		}
+		synchronize_net();
+	}
+
 	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
 	if (err)
 		goto err;
+	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
 	vi->xdp_queue_pairs = xdp_qp;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
-		rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
-		if (i == 0) {
-			if (!old_prog)
+	if (prog) {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+			if (i == 0 && !old_prog)
 				virtnet_clear_guest_offloads(vi);
-			if (!prog)
-				virtnet_restore_guest_offloads(vi);
 		}
+	}
+
+	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (old_prog)
 			bpf_prog_put(old_prog);
-		if (netif_running(dev))
+		if (netif_running(dev)) {
 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+					       &vi->sq[i].napi);
+		}
 	}
 
 	return 0;
 
 err:
-	for (i = 0; i < vi->max_queue_pairs; i++)
-		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+	if (!prog) {
+		virtnet_clear_guest_offloads(vi);
+		for (i = 0; i < vi->max_queue_pairs; i++)
+			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
+	}
+
+	if (netif_running(dev)) {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+					       &vi->sq[i].napi);
+		}
+	}
 	if (prog)
 		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
 	return err;
@@ -2537,16 +2624,6 @@
 			put_page(vi->rq[i].alloc_frag.page);
 }
 
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-		return false;
-	else if (q < vi->curr_queue_pairs)
-		return true;
-	else
-		return false;
-}
-
 static void free_unused_bufs(struct virtnet_info *vi)
 {
 	void *buf;
@@ -2555,10 +2632,10 @@
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		struct virtqueue *vq = vi->sq[i].vq;
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (!is_xdp_raw_buffer_queue(vi, i))
+			if (!is_xdp_frame(buf))
 				dev_kfree_skb(buf);
 			else
-				put_page(virt_to_head_page(buf));
+				xdp_return_frame(ptr_to_xdp(buf));
 		}
 	}
 
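The virtio_net changes above let XDP frames and skbs share the same send virtqueue, so the completion paths must distinguish the two object types from one opaque token. The driver does this with the is_xdp_frame()/ptr_to_xdp() helpers (defined elsewhere in the driver), which tag the pointer's low bit; that bit is free because both structures are word-aligned. A minimal, self-contained sketch of the same low-bit tagging idea (names here are illustrative, not the driver's):

    #include <assert.h>
    #include <stdint.h>

    #define XDP_FLAG 0x1UL /* low bit is free: allocations are word-aligned */

    static void *xdp_to_ptr(void *frame)
    {
            return (void *)((uintptr_t)frame | XDP_FLAG);
    }

    static int is_xdp(void *ptr)
    {
            return !!((uintptr_t)ptr & XDP_FLAG);
    }

    static void *ptr_to_xdp(void *ptr)
    {
            return (void *)((uintptr_t)ptr & ~XDP_FLAG);
    }

    int main(void)
    {
            long frame; /* stand-in for struct xdp_frame */
            void *tagged = xdp_to_ptr(&frame);

            assert(is_xdp(tagged));
            assert(ptr_to_xdp(tagged) == (void *)&frame);
            return 0;
    }
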
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 27bd586..52387f7 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1469,6 +1469,14 @@
 		goto drop;
 	}
 
+	rcu_read_lock();
+
+	if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
+		rcu_read_unlock();
+		atomic_long_inc(&vxlan->dev->rx_dropped);
+		goto drop;
+	}
+
 	stats = this_cpu_ptr(vxlan->dev->tstats);
 	u64_stats_update_begin(&stats->syncp);
 	stats->rx_packets++;
@@ -1476,6 +1484,9 @@
 	u64_stats_update_end(&stats->syncp);
 
 	gro_cells_receive(&vxlan->gro_cells, skb);
+
+	rcu_read_unlock();
+
 	return 0;
 
 drop:
@@ -2003,7 +2014,7 @@
 	struct pcpu_sw_netstats *tx_stats, *rx_stats;
 	union vxlan_addr loopback;
 	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
-	struct net_device *dev = skb->dev;
+	struct net_device *dev;
 	int len = skb->len;
 
 	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
@@ -2023,9 +2034,15 @@
 #endif
 	}
 
+	rcu_read_lock();
+	dev = skb->dev;
+	if (unlikely(!(dev->flags & IFF_UP))) {
+		kfree_skb(skb);
+		goto drop;
+	}
+
 	if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
-		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
-			    vni);
+		vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
 
 	u64_stats_update_begin(&tx_stats->syncp);
 	tx_stats->tx_packets++;
@@ -2038,8 +2055,10 @@
 		rx_stats->rx_bytes += len;
 		u64_stats_update_end(&rx_stats->syncp);
 	} else {
+drop:
 		dev->stats.rx_dropped++;
 	}
+	rcu_read_unlock();
 }
 
 static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
@@ -2452,6 +2471,8 @@
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 
+	gro_cells_destroy(&vxlan->gro_cells);
+
 	vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
 
 	free_percpu(dev->tstats);
@@ -3518,7 +3539,6 @@
 
 	vxlan_flush(vxlan, true);
 
-	gro_cells_destroy(&vxlan->gro_cells);
 	list_del(&vxlan->next);
 	unregister_netdevice_queue(dev, head);
 }
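
The vxlan hunks close a race where the device can be unregistered while a packet is still being delivered: skb->dev is now only dereferenced inside an RCU read-side section, with IFF_UP re-checked there, and gro_cells_destroy() moves from the dellink path to the uninit path so the gro_cells outlive any such reader. A hedged sketch of the receive-side pattern under simplified assumptions (netif_rx() stands in for the driver's gro_cells_receive()):

    static void demo_deliver(struct sk_buff *skb)
    {
            struct net_device *dev;

            rcu_read_lock();
            dev = skb->dev; /* stable for the duration of the RCU section */
            if (unlikely(!(dev->flags & IFF_UP))) {
                    /* device is going down: drop instead of touching it */
                    atomic_long_inc(&dev->rx_dropped);
                    kfree_skb(skb);
                    rcu_read_unlock();
                    return;
            }
            netif_rx(skb); /* placeholder for gro_cells_receive() */
            rcu_read_unlock();
    }
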
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 74c06a5..4f25c2d 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -486,8 +486,10 @@
 
 	/* Cleanup */
 	kfree(sl->xbuff);
+	sl->xbuff = NULL;
 noxbuff:
 	kfree(sl->rbuff);
+	sl->rbuff = NULL;
 norbuff:
 	return -ENOMEM;
 }
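
The x25_asy error path previously left sl->xbuff and sl->rbuff pointing at freed memory, so a later teardown that frees the same fields again would double-free; NULLing each pointer right after kfree() turns that second free into a harmless kfree(NULL). A runnable userspace reduction of the idiom (free(NULL) gives the same guarantee):

    #include <stdlib.h>

    struct chan {
            char *xbuff;
            char *rbuff;
    };

    static void free_bufs(struct chan *sl)
    {
            free(sl->xbuff);
            sl->xbuff = NULL; /* a second free_bufs() is now harmless */
            free(sl->rbuff);
            sl->rbuff = NULL;
    }

    int main(void)
    {
            struct chan c = { malloc(8), malloc(8) };

            free_bufs(&c);
            free_bufs(&c); /* no double free: free(NULL) is a no-op */
            return 0;
    }
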
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index c40cd12..5210cff 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -532,6 +532,7 @@
 		.hw_ops = &wcn3990_ops,
 		.decap_align_bytes = 1,
 		.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
+		.n_cipher_suites = 8,
 		.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
 		.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
 		.target_64bit = true,
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index a63c97e..6f10331 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -71,7 +71,7 @@
 	spin_lock_bh(&ar->data_lock);
 
 	peer = ath10k_peer_find_by_id(ar, peer_id);
-	if (!peer)
+	if (!peer || !peer->sta)
 		goto out;
 
 	arsta = (struct ath10k_sta *)peer->sta->drv_priv;
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 4d1cd90..03d4cc6 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2589,7 +2589,7 @@
 	rcu_read_lock();
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find_by_id(ar, peer_id);
-	if (!peer) {
+	if (!peer || !peer->sta) {
 		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
 			    peer_id);
 		goto out;
@@ -2642,7 +2642,7 @@
 	rcu_read_lock();
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find_by_id(ar, peer_id);
-	if (!peer) {
+	if (!peer || !peer->sta) {
 		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
 			    peer_id);
 		goto out;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 21ba209..0fca44e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -272,7 +272,7 @@
 #endif
 	u8 key_idx[4];
 
-	u32 ackto;
+	int ackto;
 	struct list_head list;
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
index 7334c9b0..6e236a4 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.c
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -29,9 +29,13 @@
  * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
  *
  */
-static inline u32 ath_dynack_ewma(u32 old, u32 new)
+static inline int ath_dynack_ewma(int old, int new)
 {
-	return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV;
+	if (old > 0)
+		return (new * (EWMA_DIV - EWMA_LEVEL) +
+			old * EWMA_LEVEL) / EWMA_DIV;
+	else
+		return new;
 }
 
 /**
@@ -82,10 +86,10 @@
  */
 static void ath_dynack_compute_ackto(struct ath_hw *ah)
 {
-	struct ath_node *an;
-	u32 to = 0;
-	struct ath_dynack *da = &ah->dynack;
 	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_dynack *da = &ah->dynack;
+	struct ath_node *an;
+	int to = 0;
 
 	list_for_each_entry(an, &da->nodes, list)
 		if (an->ackto > to)
@@ -144,7 +148,8 @@
 					an->ackto = ath_dynack_ewma(an->ackto,
 								    ackto);
 					ath_dbg(ath9k_hw_common(ah), DYNACK,
-						"%pM to %u\n", dst, an->ackto);
+						"%pM to %d [%u]\n", dst,
+						an->ackto, ackto);
 					if (time_is_before_jiffies(da->lto)) {
 						ath_dynack_compute_ackto(ah);
 						da->lto = jiffies + COMPUTE_TO;
@@ -166,10 +171,12 @@
  * @ah: ath hw
  * @skb: socket buffer
  * @ts: tx status info
+ * @sta: station pointer
  *
  */
 void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
-			     struct ath_tx_status *ts)
+			     struct ath_tx_status *ts,
+			     struct ieee80211_sta *sta)
 {
 	u8 ridx;
 	struct ieee80211_hdr *hdr;
@@ -177,7 +184,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
-	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled)
+	if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK))
 		return;
 
 	spin_lock_bh(&da->qlock);
@@ -187,11 +194,19 @@
 	/* late ACK */
 	if (ts->ts_status & ATH9K_TXERR_XRETRY) {
 		if (ieee80211_is_assoc_req(hdr->frame_control) ||
-		    ieee80211_is_assoc_resp(hdr->frame_control)) {
+		    ieee80211_is_assoc_resp(hdr->frame_control) ||
+		    ieee80211_is_auth(hdr->frame_control)) {
 			ath_dbg(common, DYNACK, "late ack\n");
+
 			ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2);
 			ath9k_hw_set_ack_timeout(ah, LATEACK_TO);
 			ath9k_hw_set_cts_timeout(ah, LATEACK_TO);
+			if (sta) {
+				struct ath_node *an;
+
+				an = (struct ath_node *)sta->drv_priv;
+				an->ackto = -1;
+			}
 			da->lto = jiffies + LATEACK_DELAY;
 		}
 
@@ -251,7 +266,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
-	if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled)
+	if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1))
 		return;
 
 	spin_lock_bh(&da->qlock);
diff --git a/drivers/net/wireless/ath/ath9k/dynack.h b/drivers/net/wireless/ath/ath9k/dynack.h
index 6d7bef9..cf60224 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.h
+++ b/drivers/net/wireless/ath/ath9k/dynack.h
@@ -86,7 +86,8 @@
 void ath_dynack_init(struct ath_hw *ah);
 void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts);
 void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
-			     struct ath_tx_status *ts);
+			     struct ath_tx_status *ts,
+			     struct ieee80211_sta *sta);
 #else
 static inline void ath_dynack_init(struct ath_hw *ah) {}
 static inline void ath_dynack_node_init(struct ath_hw *ah,
@@ -97,7 +98,8 @@
 					    struct sk_buff *skb, u32 ts) {}
 static inline void ath_dynack_sample_tx_ts(struct ath_hw *ah,
 					   struct sk_buff *skb,
-					   struct ath_tx_status *ts) {}
+					   struct ath_tx_status *ts,
+					   struct ieee80211_sta *sta) {}
 #endif
 
 #endif /* DYNACK_H */
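
The ath9k dynack changes make ackto signed so the late-ACK path can poison a station's estimate with -1, and ath_dynack_ewma() then restarts from the fresh sample instead of blending it with the sentinel. A runnable sketch of the arithmetic; EWMA_LEVEL=96 and EWMA_DIV=128 are assumed values for the driver's smoothing constants:

    #include <stdio.h>

    #define EWMA_LEVEL 96 /* assumed driver constants */
    #define EWMA_DIV 128

    static int ewma(int old, int new)
    {
            if (old > 0)
                    return (new * (EWMA_DIV - EWMA_LEVEL) +
                            old * EWMA_LEVEL) / EWMA_DIV;
            return new; /* old <= 0: late-ACK sentinel, restart */
    }

    int main(void)
    {
            int ackto = -1; /* poisoned by the late-ACK path */

            ackto = ewma(ackto, 300); /* -> 300, no blending with -1 */
            ackto = ewma(ackto, 100); /* -> (100*32 + 300*96)/128 = 250 */
            printf("ackto = %d\n", ackto);
            return 0;
    }
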
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c070a9e..fae572b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -636,15 +636,15 @@
 		ret = ath9k_eeprom_request(sc, eeprom_name);
 		if (ret)
 			return ret;
+
+		ah->ah_flags &= ~AH_USE_EEPROM;
+		ah->ah_flags |= AH_NO_EEP_SWAP;
 	}
 
 	mac = of_get_mac_address(np);
 	if (mac)
 		ether_addr_copy(common->macaddr, mac);
 
-	ah->ah_flags &= ~AH_USE_EEPROM;
-	ah->ah_flags |= AH_NO_EEP_SWAP;
-
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 43b6c85..4b7a7fc 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -629,7 +629,7 @@
 				if (bf == bf->bf_lastbf)
 					ath_dynack_sample_tx_ts(sc->sc_ah,
 								bf->bf_mpdu,
-								ts);
+								ts, sta);
 			}
 
 			ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
@@ -773,7 +773,8 @@
 			memcpy(info->control.rates, bf->rates,
 			       sizeof(info->control.rates));
 			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
-			ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
+			ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts,
+						sta);
 		}
 		ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
 	} else
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 7d72b64..760992f 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -64,3 +64,14 @@
 	  option if you are interested in debugging the driver.
 
 	  If unsure, say Y to make it easier to debug problems.
+
+config WIL6210_WRITE_IOCTL
+	bool "wil6210 write ioctl to the device"
+	depends on WIL6210
+	default y
+	help
+	  Say Y here to allow write access from user space to
+	  the device memory through ioctl. This is useful for
+	  debugging purposes only.
+
+	  If unsure, say N.
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 9dcdded..2bca87b 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -13,6 +13,7 @@
 wil6210-y += txrx_edma.o
 wil6210-y += debug.o
 wil6210-y += rx_reorder.o
+wil6210-y += ioctl.o
 wil6210-y += fw.o
 wil6210-y += pm.o
 wil6210-y += pmc.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 4da9a81..0ad83e5 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1556,7 +1556,12 @@
 			     params->wait);
 
 out:
+	/* When the sent packet was not acked by the receiver (ACK=0), rc
+	 * will be -EAGAIN. In this case this function needs to return
+	 * success; the ACK=0 will be reflected in tx_status.
+	 */
 	tx_status = (rc == 0);
+	rc = (rc == -EAGAIN) ? 0 : rc;
 	cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
 				tx_status, GFP_KERNEL);
 
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 2a2ebaa..dbd65e1 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -207,6 +207,8 @@
 		seq_puts(s, "???\n");
 	}
 	seq_printf(s, "  desc_rdy_pol   = %d\n", sring->desc_rdy_pol);
+	seq_printf(s, "  invalid_buff_id_cnt   = %d\n",
+		   sring->invalid_buff_id_cnt);
 
 	if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
 		uint i;
@@ -269,6 +271,11 @@
 
 	wil_halp_vote(wil);
 
+	if (wil_mem_access_lock(wil)) {
+		wil_halp_unvote(wil);
+		return;
+	}
+
 	wil_memcpy_fromio_32(&r, off, sizeof(r));
 	wil_mbox_ring_le2cpus(&r);
 	/*
@@ -334,6 +341,7 @@
 	}
  out:
 	seq_puts(s, "}\n");
+	wil_mem_access_unlock(wil);
 	wil_halp_unvote(wil);
 }
 
@@ -622,6 +630,12 @@
 	if (ret < 0)
 		return ret;
 
+	ret = wil_mem_access_lock(wil);
+	if (ret) {
+		wil_pm_runtime_put(wil);
+		return ret;
+	}
+
 	a = wmi_buffer(wil, cpu_to_le32(mem_addr));
 
 	if (a)
@@ -629,6 +643,8 @@
 	else
 		seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
 
+	wil_mem_access_unlock(wil);
+
 	wil_pm_runtime_put(wil);
 
 	return 0;
@@ -658,10 +674,6 @@
 	size_t unaligned_bytes, aligned_count, ret;
 	int rc;
 
-	if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
-	    test_bit(wil_status_suspended, wil_blob->wil->status))
-		return 0;
-
 	if (pos < 0)
 		return -EINVAL;
 
@@ -688,11 +700,19 @@
 		return rc;
 	}
 
+	rc = wil_mem_access_lock(wil);
+	if (rc) {
+		kfree(buf);
+		wil_pm_runtime_put(wil);
+		return rc;
+	}
+
 	wil_memcpy_fromio_32(buf, (const void __iomem *)
 			     wil_blob->blob.data + aligned_pos, aligned_count);
 
 	ret = copy_to_user(user_buf, buf + unaligned_bytes, count);
 
+	wil_mem_access_unlock(wil);
 	wil_pm_runtime_put(wil);
 
 	kfree(buf);
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 7969579..37c9647 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -285,21 +285,24 @@
 static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
-	u32 isr = wil_ioread32_and_clear(wil->csr +
-					 HOSTADDR(RGF_DMA_EP_RX_ICR) +
-					 offsetof(struct RGF_ICR, ICR));
+	u32 isr;
 	bool need_unmask = true;
 
+	wil6210_mask_irq_rx(wil);
+
+	isr = wil_ioread32_and_clear(wil->csr +
+				     HOSTADDR(RGF_DMA_EP_RX_ICR) +
+				     offsetof(struct RGF_ICR, ICR));
+
 	trace_wil6210_irq_rx(isr);
 	wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
 
 	if (unlikely(!isr)) {
 		wil_err_ratelimited(wil, "spurious IRQ: RX\n");
+		wil6210_unmask_irq_rx(wil);
 		return IRQ_NONE;
 	}
 
-	wil6210_mask_irq_rx(wil);
-
 	/* RX_DONE and RX_HTRSH interrupts are the same if interrupt
 	 * moderation is not used. Interrupt moderation may cause RX
 	 * buffer overflow while RX_DONE is delayed. The required
@@ -344,21 +347,24 @@
 static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
-	u32 isr = wil_ioread32_and_clear(wil->csr +
-					 HOSTADDR(RGF_INT_GEN_RX_ICR) +
-					 offsetof(struct RGF_ICR, ICR));
+	u32 isr;
 	bool need_unmask = true;
 
+	wil6210_mask_irq_rx_edma(wil);
+
+	isr = wil_ioread32_and_clear(wil->csr +
+				     HOSTADDR(RGF_INT_GEN_RX_ICR) +
+				     offsetof(struct RGF_ICR, ICR));
+
 	trace_wil6210_irq_rx(isr);
 	wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
 
 	if (unlikely(!isr)) {
 		wil_err(wil, "spurious IRQ: RX\n");
+		wil6210_unmask_irq_rx_edma(wil);
 		return IRQ_NONE;
 	}
 
-	wil6210_mask_irq_rx_edma(wil);
-
 	if (likely(isr & BIT_RX_STATUS_IRQ)) {
 		wil_dbg_irq(wil, "RX status ring\n");
 		isr &= ~BIT_RX_STATUS_IRQ;
@@ -392,21 +398,24 @@
 static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
-	u32 isr = wil_ioread32_and_clear(wil->csr +
-					 HOSTADDR(RGF_INT_GEN_TX_ICR) +
-					 offsetof(struct RGF_ICR, ICR));
+	u32 isr;
 	bool need_unmask = true;
 
+	wil6210_mask_irq_tx_edma(wil);
+
+	isr = wil_ioread32_and_clear(wil->csr +
+				     HOSTADDR(RGF_INT_GEN_TX_ICR) +
+				     offsetof(struct RGF_ICR, ICR));
+
 	trace_wil6210_irq_tx(isr);
 	wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
 
 	if (unlikely(!isr)) {
 		wil_err(wil, "spurious IRQ: TX\n");
+		wil6210_unmask_irq_tx_edma(wil);
 		return IRQ_NONE;
 	}
 
-	wil6210_mask_irq_tx_edma(wil);
-
 	if (likely(isr & BIT_TX_STATUS_IRQ)) {
 		wil_dbg_irq(wil, "TX status ring\n");
 		isr &= ~BIT_TX_STATUS_IRQ;
@@ -435,21 +444,24 @@
 static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
-	u32 isr = wil_ioread32_and_clear(wil->csr +
-					 HOSTADDR(RGF_DMA_EP_TX_ICR) +
-					 offsetof(struct RGF_ICR, ICR));
+	u32 isr;
 	bool need_unmask = true;
 
+	wil6210_mask_irq_tx(wil);
+
+	isr = wil_ioread32_and_clear(wil->csr +
+				     HOSTADDR(RGF_DMA_EP_TX_ICR) +
+				     offsetof(struct RGF_ICR, ICR));
+
 	trace_wil6210_irq_tx(isr);
 	wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
 
 	if (unlikely(!isr)) {
 		wil_err_ratelimited(wil, "spurious IRQ: TX\n");
+		wil6210_unmask_irq_tx(wil);
 		return IRQ_NONE;
 	}
 
-	wil6210_mask_irq_tx(wil);
-
 	if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
 		wil_dbg_irq(wil, "TX done\n");
 		isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
@@ -521,20 +533,23 @@
 static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
-	u32 isr = wil_ioread32_and_clear(wil->csr +
-					 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-					 offsetof(struct RGF_ICR, ICR));
+	u32 isr;
+
+	wil6210_mask_irq_misc(wil, false);
+
+	isr = wil_ioread32_and_clear(wil->csr +
+				     HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+				     offsetof(struct RGF_ICR, ICR));
 
 	trace_wil6210_irq_misc(isr);
 	wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
 
 	if (!isr) {
 		wil_err(wil, "spurious IRQ: MISC\n");
+		wil6210_unmask_irq_misc(wil, false);
 		return IRQ_NONE;
 	}
 
-	wil6210_mask_irq_misc(wil, false);
-
 	if (isr & ISR_MISC_FW_ERROR) {
 		u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
 		u32 ucode_assert_code =
@@ -569,7 +584,7 @@
 			/* no need to handle HALP ICRs until next vote */
 			atomic_set(&wil->halp.handle_icr, 0);
 			wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
-			wil6210_mask_halp(wil);
+			wil6210_mask_irq_misc(wil, true);
 			complete(&wil->halp.comp);
 		}
 	}
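
All four wil6210 interrupt handlers above get the same reordering: mask the source first, then read-and-clear the ICR, so the source stays masked for the whole time the handler inspects the cause bits; the price is that the spurious (isr == 0) path must now unmask again before returning IRQ_NONE. The common shape, as an illustrative sketch with hypothetical demo_* helpers:

    static irqreturn_t demo_irq(int irq, void *cookie)
    {
            struct demo_dev *dev = cookie;
            u32 isr;

            demo_mask_irq(dev);                 /* mask before touching ICR */
            isr = demo_read_and_clear_icr(dev); /* then latch the cause bits */

            if (unlikely(!isr)) {
                    demo_unmask_irq(dev); /* not ours: restore and bail */
                    return IRQ_NONE;
            }

            /* ... handle isr bits; the deferred path unmasks when done ... */
            return IRQ_HANDLED;
    }
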
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
new file mode 100644
index 0000000..4143d5e
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/uaccess.h>
+
+#include "wil6210.h"
+#include <uapi/linux/wil6210_uapi.h>
+
+#define wil_hex_dump_ioctl(prefix_str, buf, len) \
+	print_hex_dump_debug("DBG[IOC ]" prefix_str, \
+			     DUMP_PREFIX_OFFSET, 16, 1, buf, len, true)
+#define wil_dbg_ioctl(wil, fmt, arg...) wil_dbg(wil, "DBG[IOC ]" fmt, ##arg)
+
+#define WIL_PRIV_DATA_MAX_LEN	8192
+#define CMD_SET_AP_WPS_P2P_IE	"SET_AP_WPS_P2P_IE"
+
+struct wil_android_priv_data {
+	char *buf;
+	int used_len;
+	int total_len;
+};
+
+static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, u32 addr,
+				  u32 size, u32 op)
+{
+	void __iomem *a;
+	u32 off;
+
+	switch (op & WIL_MMIO_ADDR_MASK) {
+	case WIL_MMIO_ADDR_LINKER:
+		a = wmi_buffer(wil, cpu_to_le32(addr));
+		break;
+	case WIL_MMIO_ADDR_AHB:
+		a = wmi_addr(wil, addr);
+		break;
+	case WIL_MMIO_ADDR_BAR:
+		a = wmi_addr(wil, addr + WIL6210_FW_HOST_OFF);
+		break;
+	default:
+		wil_err(wil, "Unsupported address mode, op = 0x%08x\n", op);
+		return NULL;
+	}
+
+	off = a - wil->csr;
+	if (size > wil->bar_size - off) {
+		wil_err(wil,
+			"Invalid requested block: off(0x%08x) size(0x%08x)\n",
+			off, size);
+		return NULL;
+	}
+
+	return a;
+}
+
+static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
+{
+	struct wil_memio io;
+	void __iomem *a;
+	bool need_copy = false;
+	int rc;
+
+	if (copy_from_user(&io, data, sizeof(io)))
+		return -EFAULT;
+
+	wil_dbg_ioctl(wil, "IO: addr = 0x%08x val = 0x%08x op = 0x%08x\n",
+		      io.addr, io.val, io.op);
+
+	a = wil_ioc_addr(wil, io.addr, sizeof(u32), io.op);
+	if (!a) {
+		wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
+			io.op);
+		return -EINVAL;
+	}
+
+	rc = wil_mem_access_lock(wil);
+	if (rc)
+		return rc;
+
+	/* operation */
+	switch (io.op & WIL_MMIO_OP_MASK) {
+	case WIL_MMIO_READ:
+		io.val = readl_relaxed(a);
+		need_copy = true;
+		break;
+#if defined(CONFIG_WIL6210_WRITE_IOCTL)
+	case WIL_MMIO_WRITE:
+		writel_relaxed(io.val, a);
+		wmb(); /* make sure write propagated to HW */
+		break;
+#endif
+	default:
+		wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
+		wil_mem_access_unlock(wil);
+		return -EINVAL;
+	}
+
+	wil_mem_access_unlock(wil);
+
+	if (need_copy) {
+		wil_dbg_ioctl(wil,
+			      "IO done: addr(0x%08x) val(0x%08x) op(0x%08x)\n",
+			      io.addr, io.val, io.op);
+		if (copy_to_user(data, &io, sizeof(io)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int wil_ioc_memio_block(struct wil6210_priv *wil, void __user *data)
+{
+	struct wil_memio_block io;
+	void *block;
+	void __iomem *a;
+	int rc = 0;
+
+	if (copy_from_user(&io, data, sizeof(io)))
+		return -EFAULT;
+
+	wil_dbg_ioctl(wil, "IO: addr = 0x%08x size = 0x%08x op = 0x%08x\n",
+		      io.addr, io.size, io.op);
+
+	/* size */
+	if (io.size > WIL6210_MAX_MEM_SIZE) {
+		wil_err(wil, "size is too large:  0x%08x\n", io.size);
+		return -EINVAL;
+	}
+	if (io.size % 4) {
+		wil_err(wil, "size is not multiple of 4:  0x%08x\n", io.size);
+		return -EINVAL;
+	}
+
+	a = wil_ioc_addr(wil, io.addr, io.size, io.op);
+	if (!a) {
+		wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
+			io.op);
+		return -EINVAL;
+	}
+
+	block = kmalloc(io.size, GFP_USER);
+	if (!block)
+		return -ENOMEM;
+
+	rc = wil_mem_access_lock(wil);
+	if (rc) {
+		kfree(block);
+		return rc;
+	}
+
+	/* operation */
+	switch (io.op & WIL_MMIO_OP_MASK) {
+	case WIL_MMIO_READ:
+		wil_memcpy_fromio_32(block, a, io.size);
+		wil_hex_dump_ioctl("Read  ", block, io.size);
+		if (copy_to_user((void __user *)(uintptr_t)io.block,
+				 block, io.size)) {
+			rc = -EFAULT;
+			goto out_unlock;
+		}
+		break;
+#if defined(CONFIG_WIL6210_WRITE_IOCTL)
+	case WIL_MMIO_WRITE:
+		if (copy_from_user(block, (void __user *)(uintptr_t)io.block,
+				   io.size)) {
+			rc = -EFAULT;
+			goto out_unlock;
+		}
+		wil_memcpy_toio_32(a, block, io.size);
+		wmb(); /* make sure write propagated to HW */
+		wil_hex_dump_ioctl("Write ", block, io.size);
+		break;
+#endif
+	default:
+		wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
+		rc = -EINVAL;
+		break;
+	}
+
+out_unlock:
+	wil_mem_access_unlock(wil);
+	kfree(block);
+	return rc;
+}
+
+static int wil_ioc_android(struct wil6210_priv *wil, void __user *data)
+{
+	int rc = 0;
+	char *command;
+	struct wil_android_priv_data priv_data;
+
+	wil_dbg_ioctl(wil, "ioc_android\n");
+
+	if (copy_from_user(&priv_data, data, sizeof(priv_data)))
+		return -EFAULT;
+
+	if (priv_data.total_len <= 0 ||
+	    priv_data.total_len >= WIL_PRIV_DATA_MAX_LEN) {
+		wil_err(wil, "invalid data len %d\n", priv_data.total_len);
+		return -EINVAL;
+	}
+
+	command = kmalloc(priv_data.total_len + 1, GFP_KERNEL);
+	if (!command)
+		return -ENOMEM;
+
+	if (copy_from_user(command, priv_data.buf, priv_data.total_len)) {
+		rc = -EFAULT;
+		goto out_free;
+	}
+
+	/* Make sure the command is NUL-terminated */
+	command[priv_data.total_len] = '\0';
+
+	wil_dbg_ioctl(wil, "ioc_android: command = %s\n", command);
+
+	/* P2P not supported, but WPS is (in AP mode).
+	 * Ignore those in order not to block WPS functionality
+	 * in non-P2P mode.
+	 */
+	if (strncasecmp(command, CMD_SET_AP_WPS_P2P_IE,
+			strlen(CMD_SET_AP_WPS_P2P_IE)) == 0)
+		rc = 0;
+	else
+		rc = -ENOIOCTLCMD;
+
+out_free:
+	kfree(command);
+	return rc;
+}
+
+int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
+{
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	switch (cmd) {
+	case WIL_IOCTL_MEMIO:
+		ret = wil_ioc_memio_dword(wil, data);
+		break;
+	case WIL_IOCTL_MEMIO_BLOCK:
+		ret = wil_ioc_memio_block(wil, data);
+		break;
+	case (SIOCDEVPRIVATE + 1):
+		ret = wil_ioc_android(wil, data);
+		break;
+	default:
+		wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
+		wil_pm_runtime_put(wil);
+		return -ENOIOCTLCMD;
+	}
+
+	wil_pm_runtime_put(wil);
+
+	wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
+	return ret;
+}
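
wil_ioctl() dispatches the new private net-device ioctls only after taking a runtime-PM reference, so the device is powered while the MEMIO handlers touch its memory. A hedged userspace sketch of the dword-read path; the real struct layout and request number come from <uapi/linux/wil6210_uapi.h>, so the mirror struct, the SIOCDEVPRIVATE-based request value, and the example address below are assumptions for illustration only:

    #include <linux/sockios.h>
    #include <net/if.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    struct wil_memio_demo { /* illustrative mirror, not the uapi struct */
            uint32_t op;    /* WIL_MMIO_READ plus address-mode bits */
            uint32_t addr;
            uint32_t val;
    };

    int main(void)
    {
            struct wil_memio_demo io = { .op = 0, .addr = 0x880000 /* example */ };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&io;
            /* WIL_IOCTL_MEMIO's real value is in the uapi header; a
             * SIOCDEVPRIVATE-range code is assumed here.
             */
            if (ioctl(fd, SIOCDEVPRIVATE + 2, &ifr) == 0)
                    printf("0x%08x = 0x%08x\n", io.addr, io.val);
            close(fd);
            return 0;
    }
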
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 6a2148d..b5ac2d9 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -173,6 +173,28 @@
 	}
 }
 
+/* Device memory access is prohibited during reset or suspend.
+ * wil_mem_access_lock protects device memory access in these cases.
+ */
+int wil_mem_access_lock(struct wil6210_priv *wil)
+{
+	if (!down_read_trylock(&wil->mem_lock))
+		return -EBUSY;
+
+	if (test_bit(wil_status_suspending, wil->status) ||
+	    test_bit(wil_status_suspended, wil->status)) {
+		up_read(&wil->mem_lock);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+void wil_mem_access_unlock(struct wil6210_priv *wil)
+{
+	up_read(&wil->mem_lock);
+}
+
 static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
 {
 	struct wil_ring *ring = &wil->ring_tx[id];
@@ -503,22 +525,16 @@
 	return no_fw_recovery && (wil->recovery_state == fw_recovery_pending);
 }
 
-static void wil_fw_error_worker(struct work_struct *work)
+void wil_fw_recovery(struct wil6210_priv *wil)
 {
-	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
-						fw_error_worker);
 	struct net_device *ndev = wil->main_ndev;
 	struct wireless_dev *wdev;
 
-	wil_dbg_misc(wil, "fw error worker\n");
+	wil_dbg_misc(wil, "fw recovery\n");
 
-	if (!ndev || !(ndev->flags & IFF_UP)) {
-		wil_info(wil, "No recovery - interface is down\n");
-		return;
-	}
 	wdev = ndev->ieee80211_ptr;
 
-	/* increment @recovery_count if less then WIL6210_FW_RECOVERY_TO
+	/* increment @recovery_count if less than WIL6210_FW_RECOVERY_TO
 	 * passed since last recovery attempt
 	 */
 	if (time_is_after_jiffies(wil->last_fw_recovery +
@@ -578,6 +594,22 @@
 	rtnl_unlock();
 }
 
+static void wil_fw_error_worker(struct work_struct *work)
+{
+	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+						fw_error_worker);
+	struct net_device *ndev = wil->main_ndev;
+
+	wil_dbg_misc(wil, "fw error worker\n");
+
+	if (!ndev || !(ndev->flags & IFF_UP)) {
+		wil_info(wil, "No recovery - interface is down\n");
+		return;
+	}
+
+	wil_fw_recovery(wil);
+}
+
 static int wil_find_free_ring(struct wil6210_priv *wil)
 {
 	int i;
@@ -690,11 +722,14 @@
 
 	INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
 	INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
+	INIT_WORK(&wil->pci_linkdown_recovery_worker,
+		  wil_pci_linkdown_recovery_worker);
 
 	INIT_LIST_HEAD(&wil->pending_wmi_ev);
 	spin_lock_init(&wil->wmi_ev_lock);
 	spin_lock_init(&wil->net_queue_lock);
 	init_waitqueue_head(&wil->wq);
+	init_rwsem(&wil->mem_lock);
 
 	wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
 	if (!wil->wmi_wq)
@@ -804,6 +839,7 @@
 
 	wil_set_recovery_state(wil, fw_recovery_idle);
 	cancel_work_sync(&wil->fw_error_worker);
+	cancel_work_sync(&wil->pci_linkdown_recovery_worker);
 	wmi_event_flush(wil);
 	destroy_workqueue(wil->wq_service);
 	destroy_workqueue(wil->wmi_wq);
@@ -1399,13 +1435,22 @@
 	u8 mac[8];
 	int mac_addr;
 
-	if (wil->hw_version >= HW_VER_TALYN_MB)
-		mac_addr = RGF_OTP_MAC_TALYN_MB;
-	else
-		mac_addr = RGF_OTP_MAC;
+	/* OEM MAC has precedence */
+	mac_addr = RGF_OTP_OEM_MAC;
+	wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr), sizeof(mac));
 
-	wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
-			     sizeof(mac));
+	if (is_valid_ether_addr(mac)) {
+		wil_info(wil, "using OEM MAC %pM\n", mac);
+	} else {
+		if (wil->hw_version >= HW_VER_TALYN_MB)
+			mac_addr = RGF_OTP_MAC_TALYN_MB;
+		else
+			mac_addr = RGF_OTP_MAC;
+
+		wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
+				     sizeof(mac));
+	}
+
 	if (!is_valid_ether_addr(mac)) {
 		u8 dummy_mac[ETH_ALEN] = {
 			0x00, 0xde, 0xad, 0x12, 0x34, 0x56,
@@ -1515,11 +1560,6 @@
 	if (wil->hw_version < HW_VER_TALYN_MB) {
 		wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
 		wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
-	} else {
-		wil_s(wil,
-		      RGF_CAF_ICR_TALYN_MB + offsetof(struct RGF_ICR, ICR), 0);
-		wil_w(wil, RGF_CAF_ICR_TALYN_MB +
-		      offsetof(struct RGF_ICR, IMV), ~0);
 	}
 	/* clear PAL_UNIT_ICR (potential D0->D3 leftover)
 	 * In Talyn-MB host cannot access this register due to
@@ -1615,15 +1655,6 @@
 	}
 
 	set_bit(wil_status_resetting, wil->status);
-	if (test_bit(wil_status_collecting_dumps, wil->status)) {
-		/* Device collects crash dump, cancel the reset.
-		 * following crash dump collection, reset would take place.
-		 */
-		wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
-		rc = -EBUSY;
-		goto out;
-	}
-
 	mutex_lock(&wil->vif_mutex);
 	wil_abort_scan_all_vifs(wil, false);
 	mutex_unlock(&wil->vif_mutex);
@@ -1807,7 +1838,9 @@
 
 	WARN_ON(!mutex_is_locked(&wil->mutex));
 
+	down_write(&wil->mem_lock);
 	rc = wil_reset(wil, true);
+	up_write(&wil->mem_lock);
 	if (rc)
 		return rc;
 
@@ -1879,6 +1912,7 @@
 
 int __wil_down(struct wil6210_priv *wil)
 {
+	int rc;
 	WARN_ON(!mutex_is_locked(&wil->mutex));
 
 	set_bit(wil_status_resetting, wil->status);
@@ -1899,7 +1933,11 @@
 	wil_abort_scan_all_vifs(wil, false);
 	mutex_unlock(&wil->vif_mutex);
 
-	return wil_reset(wil, false);
+	down_write(&wil->mem_lock);
+	rc = wil_reset(wil, false);
+	up_write(&wil->mem_lock);
+
+	return rc;
 }
 
 int wil_down(struct wil6210_priv *wil)
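
The new mem_lock rwsem is used asymmetrically: every path that touches device memory (debugfs reads, the MEMIO ioctls, crash-dump collection) takes it for read via wil_mem_access_lock(), which combines down_read_trylock() with a suspend-status check so callers fail fast with -EBUSY rather than sleeping, while reset and suspend take it for write and so drain any in-flight accessor. The two sides, condensed from the hunks above:

    static int reader_path(struct wil6210_priv *wil)
    {
            int rc = wil_mem_access_lock(wil);

            if (rc) /* -EBUSY: reset or suspend in flight */
                    return rc;
            /* ... wil_memcpy_fromio_32() and friends are safe here ... */
            wil_mem_access_unlock(wil);
            return 0;
    }

    static int writer_path(struct wil6210_priv *wil)
    {
            int rc;

            down_write(&wil->mem_lock); /* waits out in-flight readers */
            rc = wil_reset(wil, true);
            up_write(&wil->mem_lock);
            return rc;
    }
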
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 4a424c4..098caeb 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -85,12 +85,20 @@
 	return rc;
 }
 
+static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+	struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+	return wil_ioctl(wil, ifr->ifr_data, cmd);
+}
+
 static const struct net_device_ops wil_netdev_ops = {
 	.ndo_open		= wil_open,
 	.ndo_stop		= wil_stop,
 	.ndo_start_xmit		= wil_start_xmit,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= wil_do_ioctl,
 };
 
 static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index dbbad5a..4180449 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -283,6 +283,108 @@
 	return 0;
 }
 
+void wil_pci_linkdown_recovery_worker(struct work_struct *work)
+{
+	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+						pci_linkdown_recovery_worker);
+	int rc, i;
+	struct wil6210_vif *vif;
+	struct net_device *ndev = wil->main_ndev;
+
+	wil_dbg_misc(wil, "starting pci_linkdown recovery\n");
+
+	rtnl_lock();
+	mutex_lock(&wil->mutex);
+	down_write(&wil->mem_lock);
+	clear_bit(wil_status_fwready, wil->status);
+	set_bit(wil_status_pci_linkdown, wil->status);
+	set_bit(wil_status_resetting, wil->status);
+	up_write(&wil->mem_lock);
+
+	if (test_and_clear_bit(wil_status_napi_en, wil->status)) {
+		napi_disable(&wil->napi_rx);
+		napi_disable(&wil->napi_tx);
+	}
+
+	mutex_unlock(&wil->mutex);
+	rtnl_unlock();
+
+	mutex_lock(&wil->mutex);
+
+	mutex_lock(&wil->vif_mutex);
+	wil_ftm_stop_operations(wil);
+	wil_p2p_stop_radio_operations(wil);
+	wil_abort_scan_all_vifs(wil, false);
+	mutex_unlock(&wil->vif_mutex);
+
+	for (i = 0; i < wil->max_vifs; i++) {
+		vif = wil->vifs[i];
+		if (vif) {
+			cancel_work_sync(&vif->disconnect_worker);
+			wil6210_disconnect(vif, NULL,
+					   WLAN_REASON_DEAUTH_LEAVING);
+		}
+	}
+
+	wmi_event_flush(wil);
+	flush_workqueue(wil->wq_service);
+	flush_workqueue(wil->wmi_wq);
+
+	/* Recover PCIe */
+	if (wil->platform_ops.pci_linkdown_recovery) {
+		rc = wil->platform_ops.pci_linkdown_recovery(
+			wil->platform_handle);
+		if (rc) {
+			wil_err(wil,
+				"platform device failed to recover from pci linkdown (%d)\n",
+				rc);
+			mutex_unlock(&wil->mutex);
+			goto out;
+		}
+	} else {
+		wil_err(wil,
+			"platform device doesn't support pci_linkdown recovery\n");
+		mutex_unlock(&wil->mutex);
+		goto out;
+	}
+
+	if (!ndev || !(ndev->flags & IFF_UP)) {
+		wil_reset(wil, false);
+		mutex_unlock(&wil->mutex);
+	} else {
+		mutex_unlock(&wil->mutex);
+		wil->recovery_state = fw_recovery_pending;
+		wil_fw_recovery(wil);
+	}
+
+out:
+	return;
+}
+
+static int wil_platform_rop_notify(void *wil_handle,
+				   enum wil_platform_notif notif)
+{
+	struct wil6210_priv *wil = wil_handle;
+
+	if (!wil)
+		return -EINVAL;
+
+	switch (notif) {
+	case WIL_PLATFORM_NOTIF_PCI_LINKDOWN:
+		wil_info(wil, "received WIL_PLATFORM_NOTIF_PCI_LINKDOWN\n");
+		clear_bit(wil_status_fwready, wil->status);
+		set_bit(wil_status_resetting, wil->status);
+		set_bit(wil_status_pci_linkdown, wil->status);
+
+		schedule_work(&wil->pci_linkdown_recovery_worker);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 static void wil_platform_ops_uninit(struct wil6210_priv *wil)
 {
 	if (wil->platform_ops.uninit)
@@ -298,6 +400,7 @@
 	const struct wil_platform_rops rops = {
 		.ramdump = wil_platform_rop_ramdump,
 		.fw_recovery = wil_platform_rop_fw_recovery,
+		.notify = wil_platform_rop_notify,
 	};
 	u32 bar_size = pci_resource_len(pdev, 0);
 	int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
@@ -536,6 +639,11 @@
 	struct wil6210_priv *wil = pci_get_drvdata(pdev);
 	bool keep_radio_on, active_ifaces;
 
+	if (test_bit(wil_status_pci_linkdown, wil->status)) {
+		wil_dbg_pm(wil, "ignore resume during pci linkdown\n");
+		return 0;
+	}
+
 	wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
 	mutex_lock(&wil->vif_mutex);
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 3a41947..6a49aa8 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
 /*
  * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "wil6210.h"
@@ -101,6 +90,12 @@
 		goto out;
 	}
 
+	if (test_bit(wil_status_pci_linkdown, wil->status)) {
+		wil_dbg_pm(wil, "Delay suspend during pci linkdown\n");
+		rc = -EBUSY;
+		goto out;
+	}
+
 	mutex_lock(&wil->vif_mutex);
 	active_ifaces = wil_has_active_ifaces(wil, true, false);
 	mutex_unlock(&wil->vif_mutex);
@@ -195,14 +190,18 @@
 	wil_dbg_pm(wil, "suspend keep radio on\n");
 
 	/* Prevent handling of new tx and wmi commands */
-	set_bit(wil_status_suspending, wil->status);
-	if (test_bit(wil_status_collecting_dumps, wil->status)) {
-		/* Device collects crash dump, cancel the suspend */
-		wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
-		clear_bit(wil_status_suspending, wil->status);
+	rc = down_write_trylock(&wil->mem_lock);
+	if (!rc) {
+		wil_err(wil,
+			"device is busy. down_write_trylock failed, returned (0x%x)\n",
+			rc);
 		wil->suspend_stats.rejected_by_host++;
 		return -EBUSY;
 	}
+
+	set_bit(wil_status_suspending, wil->status);
+	up_write(&wil->mem_lock);
+
 	wil_pm_stop_all_net_queues(wil);
 
 	if (!wil_is_tx_idle(wil)) {
@@ -311,15 +310,18 @@
 
 	wil_dbg_pm(wil, "suspend radio off\n");
 
-	set_bit(wil_status_suspending, wil->status);
-	if (test_bit(wil_status_collecting_dumps, wil->status)) {
-		/* Device collects crash dump, cancel the suspend */
-		wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
-		clear_bit(wil_status_suspending, wil->status);
+	rc = down_write_trylock(&wil->mem_lock);
+	if (!rc) {
+		wil_err(wil,
+			"device is busy. down_write_trylock failed, returned (0x%x)\n",
+			rc);
 		wil->suspend_stats.rejected_by_host++;
 		return -EBUSY;
 	}
 
+	set_bit(wil_status_suspending, wil->status);
+	up_write(&wil->mem_lock);
+
 	/* if netif up, hardware is alive, shut it down */
 	mutex_lock(&wil->vif_mutex);
 	active_ifaces = wil_has_active_ifaces(wil, true, false);
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index ac53f5d..4be8a82 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -18,6 +18,7 @@
 #define WIL_EDMA_MAX_DATA_OFFSET (2)
 /* RX buffer size must be aligned to 4 bytes */
 #define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
+#define MAX_INVALID_BUFF_ID_RETRY (3)
 
 static void wil_tx_desc_unmap_edma(struct device *dev,
 				   union wil_tx_desc *desc,
@@ -302,7 +303,8 @@
 	struct list_head *free = &wil->rx_buff_mgmt.free;
 	int i;
 
-	wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
+	wil->rx_buff_mgmt.buff_arr = kcalloc(size + 1,
+					     sizeof(struct wil_rx_buff),
 					     GFP_KERNEL);
 	if (!wil->rx_buff_mgmt.buff_arr)
 		return -ENOMEM;
@@ -311,14 +313,16 @@
 	INIT_LIST_HEAD(active);
 	INIT_LIST_HEAD(free);
 
-	/* Linkify the list */
+	/* Linkify the list.
+	 * buffer id 0 should not be used (marks invalid id).
+	 */
 	buff_arr = wil->rx_buff_mgmt.buff_arr;
-	for (i = 0; i < size; i++) {
+	for (i = 1; i <= size; i++) {
 		list_add(&buff_arr[i].list, free);
 		buff_arr[i].id = i;
 	}
 
-	wil->rx_buff_mgmt.size = size;
+	wil->rx_buff_mgmt.size = size + 1;
 
 	return 0;
 }
@@ -882,26 +886,50 @@
 
 	/* Extract the buffer ID from the status message */
 	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
-	if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
-		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
-			buff_id, sring->swhead);
-		wil_sring_advance_swhead(sring);
-		goto again;
+	int invalid_buff_id_retry = 0; /* must persist across retries */
+
+	while (!buff_id) {
+		struct wil_rx_status_extended *s;
+
+		wil_dbg_txrx(wil,
+			     "buff_id is not updated yet by HW (swhead 0x%x)\n",
+			     sring->swhead);
+		if (++invalid_buff_id_retry > MAX_INVALID_BUFF_ID_RETRY)
+			break;
+
+		/* Read the status message again */
+		s = (struct wil_rx_status_extended *)
+			(sring->va + (sring->elem_size * sring->swhead));
+		*(struct wil_rx_status_extended *)msg = *s;
+		buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
 	}
 
-	wil_sring_advance_swhead(sring);
+	if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
+		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
+			buff_id, sring->swhead);
+		wil_rx_status_reset_buff_id(sring);
+		wil_sring_advance_swhead(sring);
+		sring->invalid_buff_id_cnt++;
+		goto again;
+	}
 
 	/* Extract the SKB from the rx_buff management array */
 	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
 	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
 	if (!skb) {
 		wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+		wil_rx_status_reset_buff_id(sring);
 		/* Move the buffer from the active list to the free list */
-		list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
-			  &wil->rx_buff_mgmt.free);
+		list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+			       &wil->rx_buff_mgmt.free);
+		wil_sring_advance_swhead(sring);
+		sring->invalid_buff_id_cnt++;
 		goto again;
 	}
 
+	wil_rx_status_reset_buff_id(sring);
+	wil_sring_advance_swhead(sring);
+
 	memcpy(&pa, skb->cb, sizeof(pa));
 	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
 	dmalen = le16_to_cpu(wil_rx_status_get_length(msg));
@@ -916,8 +944,8 @@
 			  sizeof(struct wil_rx_status_extended), false);
 
 	/* Move the buffer from the active list to the free list */
-	list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
-		  &wil->rx_buff_mgmt.free);
+	list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+		       &wil->rx_buff_mgmt.free);
 
 	eop = wil_rx_status_get_eop(msg);
 
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index 761c4b60..4eefa33 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -416,6 +416,12 @@
 			    30, 30);
 }
 
+static inline void wil_rx_status_reset_buff_id(struct wil_status_ring *s)
+{
+	((struct wil_rx_status_compressed *)
+		(s->va + (s->elem_size * s->swhead)))->buff_id = 0;
+}
+
 static inline __le16 wil_rx_status_get_buff_id(void *msg)
 {
 	return ((struct wil_rx_status_compressed *)msg)->buff_id;
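
Taken together, the txrx_edma changes reserve buffer id 0 as a "HW has not written this status descriptor yet" sentinel: buff_arr grows to size + 1 entries with the free list linked from index 1, the validity check's lower bound moves from 0 to 1, and wil_rx_status_reset_buff_id() zeroes the field once a descriptor is consumed so a stale id is never read twice. A runnable toy of the 1-based id scheme:

    #include <stdio.h>

    #define RING_SIZE 4

    struct rx_buff {
            int id;
    };

    static int valid_buff_id(int id)
    {
            return id >= 1 && id <= RING_SIZE; /* 0 means "not written yet" */
    }

    int main(void)
    {
            struct rx_buff arr[RING_SIZE + 1]; /* slot 0 is never used */
            int i;

            for (i = 1; i <= RING_SIZE; i++) /* ids handed to HW: 1..size */
                    arr[i].id = i;

            printf("%d %d %d\n", valid_buff_id(0), /* 0 -> retry or skip */
                   valid_buff_id(arr[1].id), valid_buff_id(arr[RING_SIZE].id));
            return 0;
    }
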
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index d224ea5..e736f22 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -357,6 +357,7 @@
 	#define REVISION_ID_SPARROW_D0	(0x3)
 
 #define RGF_OTP_MAC_TALYN_MB		(0x8a0304)
+#define RGF_OTP_OEM_MAC			(0x8a0334)
 #define RGF_OTP_MAC			(0x8a0620)
 
 /* Talyn-MB */
@@ -556,6 +557,7 @@
 	bool is_rx;
 	u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
 	struct wil_ring_rx_data rx_data;
+	u32 invalid_buff_id_cnt; /* relevant only for RX */
 };
 
 #define WIL_STA_TID_NUM (16)
@@ -650,7 +652,7 @@
 	wil_status_suspending, /* suspend in progress */
 	wil_status_suspended, /* suspend completed, device is suspended */
 	wil_status_resuming, /* resume in progress */
-	wil_status_collecting_dumps, /* crashdump collection in progress */
+	wil_status_pci_linkdown, /* pci linkdown occurred */
 	wil_status_last /* keep last */
 };
 
@@ -986,6 +988,8 @@
 	struct wil_txrx_ops txrx_ops;
 
 	struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+	/* for synchronizing device memory access while reset or suspend */
+	struct rw_semaphore mem_lock;
 	/* statistics */
 	atomic_t isr_count_rx, isr_count_tx;
 	/* debugfs */
@@ -1057,6 +1061,8 @@
 
 	u32 max_agg_wsize;
 	u32 max_ampdu_size;
+
+	struct work_struct pci_linkdown_recovery_worker;
 };
 
 #define wil_to_wiphy(i) (i->wiphy)
@@ -1184,6 +1190,8 @@
 			  size_t count);
 void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
 			size_t count);
+int wil_mem_access_lock(struct wil6210_priv *wil);
+void wil_mem_access_unlock(struct wil6210_priv *wil);
 
 struct wil6210_vif *
 wil_vif_alloc(struct wil6210_priv *wil, const char *name,
@@ -1346,6 +1354,9 @@
 
 void wil_init_txrx_ops(struct wil6210_priv *wil);
 
+void wil_fw_recovery(struct wil6210_priv *wil);
+void wil_pci_linkdown_recovery_worker(struct work_struct *work);
+
 /* TX API */
 int wil_ring_init_tx(struct wil6210_vif *vif, int cid);
 int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size);
@@ -1372,6 +1383,7 @@
 
 int wil_iftype_nl2wmi(enum nl80211_iftype type);
 
+int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
 int wil_request_firmware(struct wil6210_priv *wil, const char *name,
 			 bool load);
 int wil_request_board(struct wil6210_priv *wil, const char *name);
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index dc33a0b..1332eb8 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
 /*
  * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "wil6210.h"
@@ -57,7 +46,7 @@
 
 int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 {
-	int i;
+	int i, rc;
 	const struct fw_map *map;
 	void *data;
 	u32 host_min, dump_size, offset, len;
@@ -73,14 +62,9 @@
 		return -EINVAL;
 	}
 
-	set_bit(wil_status_collecting_dumps, wil->status);
-	if (test_bit(wil_status_suspending, wil->status) ||
-	    test_bit(wil_status_suspended, wil->status) ||
-	    test_bit(wil_status_resetting, wil->status)) {
-		wil_err(wil, "cannot collect fw dump during suspend/reset\n");
-		clear_bit(wil_status_collecting_dumps, wil->status);
-		return -EINVAL;
-	}
+	rc = wil_mem_access_lock(wil);
+	if (rc)
+		return rc;
 
 	/* copy to crash dump area */
 	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
@@ -100,8 +84,7 @@
 		wil_memcpy_fromio_32((void * __force)(dest + offset),
 				     (const void __iomem * __force)data, len);
 	}
-
-	clear_bit(wil_status_collecting_dumps, wil->status);
+	wil_mem_access_unlock(wil);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index bca0906..19e00fa 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: ISC */
 /*
  * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __WIL_PLATFORM_H__
@@ -27,6 +17,10 @@
 	WIL_PLATFORM_EVT_POST_SUSPEND = 4,
 };
 
+enum wil_platform_notif {
+	WIL_PLATFORM_NOTIF_PCI_LINKDOWN = 0,
+};
+
 enum wil_platform_features {
 	WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
 	WIL_PLATFORM_FEATURE_TRIPLE_MSI = 1,
@@ -52,6 +46,7 @@
 	int (*notify)(void *handle, enum wil_platform_event evt);
 	int (*get_capa)(void *handle);
 	void (*set_features)(void *handle, int features);
+	int (*pci_linkdown_recovery)(void *handle);
 };
 
 /**
@@ -63,10 +58,13 @@
  * @fw_recovery: start a firmware recovery process. Called as
  *      part of a crash recovery process which may include other
  *      related platform subsystems.
+ * @notify: get notifications from the platform driver, such as
+ *      PCI linkdown
  */
 struct wil_platform_rops {
 	int (*ramdump)(void *wil_handle, void *buf, uint32_t size);
 	int (*fw_recovery)(void *wil_handle);
+	int (*notify)(void *wil_handle, enum wil_platform_notif notif);
 };
 
 /**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 889d166..05c4541 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -3681,8 +3681,9 @@
 	rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total,
 		      WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
 	if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
-		wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status);
-		rc = -EINVAL;
+		wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n",
+			    evt.evt.status);
+		rc = -EAGAIN;
 	}
 
 	kfree(cmd);
@@ -3734,9 +3735,9 @@
 	rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
 		      WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
 	if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
-		wil_err(wil, "mgmt_tx_ext failed with status %d\n",
-			evt.evt.status);
-		rc = -EINVAL;
+		wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n",
+			    evt.evt.status);
+		rc = -EAGAIN;
 	}
 
 	kfree(cmd);
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
index 85f2ca9..ef3ffa5 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
@@ -616,7 +616,7 @@
 	u8 i;
 	s32 tmp;
 	s8 signx = 1;
-	u32 angle = 0;
+	s32 angle = 0;
 	struct b43_c32 ret = { .i = 39797, .q = 0, };
 
 	while (theta > (180 << 16))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 64a794b..6f3faaf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5188,10 +5188,17 @@
 	.del_pmk = brcmf_cfg80211_del_pmk,
 };
 
-struct cfg80211_ops *brcmf_cfg80211_get_ops(void)
+struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
 {
-	return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
+	struct cfg80211_ops *ops;
+
+	ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
 		       GFP_KERNEL);
+
+	if (ops && settings->roamoff)
+		ops->update_connect_params = NULL;
+
+	return ops;
 }
 
 struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index a4aec00..9a6287f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -404,7 +404,7 @@
 void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
 s32 brcmf_cfg80211_up(struct net_device *ndev);
 s32 brcmf_cfg80211_down(struct net_device *ndev);
-struct cfg80211_ops *brcmf_cfg80211_get_ops(void);
+struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings);
 enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
 
 struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index b1f702f..860a437 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1130,7 +1130,7 @@
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	ops = brcmf_cfg80211_get_ops();
+	ops = brcmf_cfg80211_get_ops(settings);
 	if (!ops)
 		return -ENOMEM;
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 9095b83..9927079 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -641,8 +641,9 @@
 	struct brcmf_fw_request *fwreq;
 	char chipname[12];
 	const char *mp_path;
+	size_t mp_path_len;
 	u32 i, j;
-	char end;
+	char end = '\0';
 	size_t reqsz;
 
 	for (i = 0; i < table_size; i++) {
@@ -667,7 +668,10 @@
 		   mapping_table[i].fw_base, chipname);
 
 	mp_path = brcmf_mp_global.firmware_path;
-	end = mp_path[strlen(mp_path) - 1];
+	mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN);
+	if (mp_path_len)
+		end = mp_path[mp_path_len - 1];
+
 	fwreq->n_items = n_fwnames;
 
 	for (j = 0; j < n_fwnames; j++) {
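
The brcmfmac firmware.c fix removes an out-of-bounds read: with an empty firmware_path module parameter, `end = mp_path[strlen(mp_path) - 1]` read one byte before the buffer. Bounding the scan with strnlen() and pre-initializing `end` to '\0' makes the later "does the path already end in a separator?" logic treat an empty path as having none. A runnable reduction (ALTPATH_LEN stands in for BRCMF_FW_ALTPATH_LEN):

    #include <stdio.h>
    #include <string.h>

    #define ALTPATH_LEN 64 /* stand-in for BRCMF_FW_ALTPATH_LEN */

    static char last_char(const char *path)
    {
            size_t len = strnlen(path, ALTPATH_LEN);

            return len ? path[len - 1] : '\0'; /* no path[-1] read on "" */
    }

    int main(void)
    {
            printf("'%c' '%c' %d\n", last_char("/lib/firmware/"),
                   last_char("fw"), last_char(""));
            return 0;
    }
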
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
index 4859eb2..3d6b813 100644
--- a/drivers/net/wireless/cnss2/Makefile
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -7,4 +7,5 @@
 cnss2-y += debug.o
 cnss2-y += pci.o
 cnss2-y += power.o
+cnss2-$(CONFIG_CNSS2_DEBUG) += genl.o
 cnss2-$(CONFIG_CNSS2_QMI) += qmi.o wlan_firmware_service_v01.o coexistence_service_v01.o
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
index f808ca1..99ce869 100644
--- a/drivers/net/wireless/cnss2/bus.c
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -124,6 +124,37 @@
 	}
 }
 
+int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_alloc_qdss_mem(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+void cnss_bus_free_qdss_mem(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		cnss_pci_free_qdss_mem(plat_priv->bus_priv);
+		return;
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return;
+	}
+}
+
 u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv)
 {
 	if (!plat_priv)
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
index ad5cb1b..710f92f 100644
--- a/drivers/net/wireless/cnss2/bus.h
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -24,6 +24,8 @@
 void cnss_bus_deinit(struct cnss_plat_data *plat_priv);
 int cnss_bus_load_m3(struct cnss_plat_data *plat_priv);
 int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv);
+int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv);
+void cnss_bus_free_qdss_mem(struct cnss_plat_data *plat_priv);
 u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv);
 int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv);
 void cnss_bus_fw_boot_timeout_hdlr(struct timer_list *t);
diff --git a/drivers/net/wireless/cnss2/debug.h b/drivers/net/wireless/cnss2/debug.h
index 33a4a7e..51c74c5 100644
--- a/drivers/net/wireless/cnss2/debug.h
+++ b/drivers/net/wireless/cnss2/debug.h
@@ -54,6 +54,9 @@
 	} while (0)
 #endif
 
+#define cnss_fatal_err(_fmt, ...)					\
+	cnss_pr_err("fatal: " _fmt, ##__VA_ARGS__)
+
 int cnss_debug_init(void);
 void cnss_debug_deinit(void);
 int cnss_debugfs_create(struct cnss_plat_data *plat_priv);
diff --git a/drivers/net/wireless/cnss2/genl.c b/drivers/net/wireless/cnss2/genl.c
new file mode 100644
index 0000000..ecc6eb5
--- /dev/null
+++ b/drivers/net/wireless/cnss2/genl.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#define pr_fmt(fmt) "cnss_genl: " fmt
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "main.h"
+#include "debug.h"
+
+#define CNSS_GENL_FAMILY_NAME "cnss-genl"
+#define CNSS_GENL_MCAST_GROUP_NAME "cnss-genl-grp"
+#define CNSS_GENL_VERSION 1
+#define CNSS_GENL_DATA_LEN_MAX (15 * 1024)
+#define CNSS_GENL_STR_LEN_MAX 16
+
+enum {
+	CNSS_GENL_ATTR_MSG_UNSPEC,
+	CNSS_GENL_ATTR_MSG_TYPE,
+	CNSS_GENL_ATTR_MSG_FILE_NAME,
+	CNSS_GENL_ATTR_MSG_TOTAL_SIZE,
+	CNSS_GENL_ATTR_MSG_SEG_ID,
+	CNSS_GENL_ATTR_MSG_END,
+	CNSS_GENL_ATTR_MSG_DATA_LEN,
+	CNSS_GENL_ATTR_MSG_DATA,
+	__CNSS_GENL_ATTR_MAX,
+};
+
+#define CNSS_GENL_ATTR_MAX (__CNSS_GENL_ATTR_MAX - 1)
+
+enum {
+	CNSS_GENL_CMD_UNSPEC,
+	CNSS_GENL_CMD_MSG,
+	__CNSS_GENL_CMD_MAX,
+};
+
+#define CNSS_GENL_CMD_MAX (__CNSS_GENL_CMD_MAX - 1)
+
+static struct nla_policy cnss_genl_msg_policy[CNSS_GENL_ATTR_MAX + 1] = {
+	[CNSS_GENL_ATTR_MSG_TYPE] = { .type = NLA_U8 },
+	[CNSS_GENL_ATTR_MSG_FILE_NAME] = { .type = NLA_NUL_STRING,
+					   .len = CNSS_GENL_STR_LEN_MAX },
+	[CNSS_GENL_ATTR_MSG_TOTAL_SIZE] = { .type = NLA_U32 },
+	[CNSS_GENL_ATTR_MSG_SEG_ID] = { .type = NLA_U32 },
+	[CNSS_GENL_ATTR_MSG_END] = { .type = NLA_U8 },
+	[CNSS_GENL_ATTR_MSG_DATA_LEN] = { .type = NLA_U32 },
+	[CNSS_GENL_ATTR_MSG_DATA] = { .type = NLA_BINARY,
+				      .len = CNSS_GENL_DATA_LEN_MAX },
+};
+
+static int cnss_genl_process_msg(struct sk_buff *skb, struct genl_info *info)
+{
+	return 0;
+}
+
+static struct genl_ops cnss_genl_ops[] = {
+	{
+		.cmd = CNSS_GENL_CMD_MSG,
+		.policy = cnss_genl_msg_policy,
+		.doit = cnss_genl_process_msg,
+	},
+};
+
+static struct genl_multicast_group cnss_genl_mcast_grp[] = {
+	{
+		.name = CNSS_GENL_MCAST_GROUP_NAME,
+	},
+};
+
+static struct genl_family cnss_genl_family = {
+	.id = 0,
+	.hdrsize = 0,
+	.name = CNSS_GENL_FAMILY_NAME,
+	.version = CNSS_GENL_VERSION,
+	.maxattr = CNSS_GENL_ATTR_MAX,
+	.module = THIS_MODULE,
+	.ops = cnss_genl_ops,
+	.n_ops = ARRAY_SIZE(cnss_genl_ops),
+	.mcgrps = cnss_genl_mcast_grp,
+	.n_mcgrps = ARRAY_SIZE(cnss_genl_mcast_grp),
+};
+
+static int cnss_genl_send_data(u8 type, char *file_name, u32 total_size,
+			       u32 seg_id, u8 end, u32 data_len, u8 *msg_buff)
+{
+	struct sk_buff *skb = NULL;
+	void *msg_header = NULL;
+	int ret = 0;
+	char filename[CNSS_GENL_STR_LEN_MAX + 1];
+
+	cnss_pr_dbg("type: %u, file_name %s, total_size: %x, seg_id %u, end %u, data_len %u\n",
+		    type, file_name, total_size, seg_id, end, data_len);
+
+	if (!file_name)
+		strlcpy(filename, "default", sizeof(filename));
+	else
+		strlcpy(filename, file_name, sizeof(filename));
+
+	skb = genlmsg_new(NLMSG_HDRLEN +
+			  nla_total_size(sizeof(type)) +
+			  nla_total_size(strlen(filename) + 1) +
+			  nla_total_size(sizeof(total_size)) +
+			  nla_total_size(sizeof(seg_id)) +
+			  nla_total_size(sizeof(end)) +
+			  nla_total_size(sizeof(data_len)) +
+			  nla_total_size(data_len), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	msg_header = genlmsg_put(skb, 0, 0,
+				 &cnss_genl_family, 0,
+				 CNSS_GENL_CMD_MSG);
+	if (!msg_header) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = nla_put_u8(skb, CNSS_GENL_ATTR_MSG_TYPE, type);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put_string(skb, CNSS_GENL_ATTR_MSG_FILE_NAME, filename);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_TOTAL_SIZE, total_size);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_SEG_ID, seg_id);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put_u8(skb, CNSS_GENL_ATTR_MSG_END, end);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_DATA_LEN, data_len);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put(skb, CNSS_GENL_ATTR_MSG_DATA, data_len, msg_buff);
+	if (ret < 0)
+		goto fail;
+
+	genlmsg_end(skb, msg_header);
+	/* genlmsg_multicast() consumes the skb even on failure, so the
+	 * buffer must not be freed again on this path.
+	 */
+	ret = genlmsg_multicast(&cnss_genl_family, skb, 0, 0, GFP_KERNEL);
+	if (ret < 0)
+		cnss_pr_err("genl msg multicast fail: %d\n", ret);
+
+	return ret;
+fail:
+	cnss_pr_err("genl msg send fail: %d\n", ret);
+	nlmsg_free(skb);
+	return ret;
+}
+
+int cnss_genl_send_msg(void *buff, u8 type, char *file_name, u32 total_size)
+{
+	int ret = 0;
+	u8 *msg_buff = buff;
+	u32 remaining = total_size;
+	u32 seg_id = 0;
+	u32 data_len = 0;
+	u8 end = 0;
+
+	cnss_pr_dbg("type: %u, total_size: %x\n", type, total_size);
+
+	while (remaining) {
+		if (remaining > CNSS_GENL_DATA_LEN_MAX) {
+			data_len = CNSS_GENL_DATA_LEN_MAX;
+		} else {
+			data_len = remaining;
+			end = 1;
+		}
+		ret = cnss_genl_send_data(type, file_name, total_size,
+					  seg_id, end, data_len, msg_buff);
+		if (ret < 0) {
+			cnss_pr_err("fail to send genl data, ret %d\n", ret);
+			return ret;
+		}
+
+		remaining -= data_len;
+		msg_buff += data_len;
+		seg_id++;
+	}
+
+	return ret;
+}
+
+int cnss_genl_init(void)
+{
+	int ret = 0;
+
+	ret = genl_register_family(&cnss_genl_family);
+	if (ret != 0)
+		cnss_pr_err("genl_register_family fail: %d\n", ret);
+
+	return ret;
+}
+
+void cnss_genl_exit(void)
+{
+	genl_unregister_family(&cnss_genl_family);
+}
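
Nothing in this patch consumes the new multicast group from userspace. As a hedged sketch only, a listener built against libnl-3/libnl-genl-3 could look like the following; the attribute numbers and names are copied from the definitions above and must stay in sync with them, and error handling is elided:

    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>

    #define CNSS_GENL_ATTR_MAX 7          /* __CNSS_GENL_ATTR_MAX - 1 */
    #define CNSS_GENL_ATTR_MSG_SEG_ID 4
    #define CNSS_GENL_ATTR_MSG_DATA 7

    static int msg_cb(struct nl_msg *msg, void *arg)
    {
        struct nlattr *attrs[CNSS_GENL_ATTR_MAX + 1];

        if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, CNSS_GENL_ATTR_MAX, NULL))
            return NL_SKIP;

        if (attrs[CNSS_GENL_ATTR_MSG_DATA]) {
            /* nla_data()/nla_len() give one segment; append segments to
             * the output file in CNSS_GENL_ATTR_MSG_SEG_ID order.
             */
        }
        return NL_OK;
    }

    int main(void)
    {
        struct nl_sock *sock = nl_socket_alloc();
        int grp;

        genl_connect(sock);
        grp = genl_ctrl_resolve_grp(sock, "cnss-genl", "cnss-genl-grp");
        nl_socket_add_membership(sock, grp);
        nl_socket_disable_seq_check(sock); /* multicast has no seq numbers */
        nl_socket_modify_cb(sock, NL_CB_VALID, NL_CB_CUSTOM, msg_cb, NULL);

        for (;;)
            nl_recvmsgs_default(sock);
    }
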
diff --git a/drivers/net/wireless/cnss2/genl.h b/drivers/net/wireless/cnss2/genl.h
new file mode 100644
index 0000000..33ca30a
--- /dev/null
+++ b/drivers/net/wireless/cnss2/genl.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef __CNSS_GENL_H__
+#define __CNSS_GENL_H__
+
+enum cnss_genl_msg_type {
+	CNSS_GENL_MSG_TYPE_UNSPEC,
+	CNSS_GENL_MSG_TYPE_QDSS,
+};
+
+#ifdef CONFIG_CNSS2_DEBUG
+int cnss_genl_init(void);
+void cnss_genl_exit(void);
+int cnss_genl_send_msg(void *buff, u8 type,
+		       char *file_name, u32 total_size);
+#else
+static inline int cnss_genl_init(void)
+{
+	return 0;
+}
+
+static inline void cnss_genl_exit(void)
+{
+}
+
+static inline int cnss_genl_send_msg(void *buff, u8 type,
+				     char *file_name, u32 total_size)
+{
+	return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index a204e08..898d59a 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -16,6 +16,7 @@
 #include "main.h"
 #include "bus.h"
 #include "debug.h"
+#include "genl.h"
 
 #define CNSS_DUMP_FORMAT_VER		0x11
 #define CNSS_DUMP_FORMAT_VER_V2		0x22
@@ -356,7 +357,10 @@
 	if (ret)
 		goto out;
 
-	ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv);
+	cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_REGDB);
+
+	ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv,
+					   plat_priv->ctrl_params.bdf_type);
 	if (ret)
 		goto out;
 
@@ -373,6 +377,38 @@
 	return ret;
 }
 
+static int cnss_request_antenna_sharing(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	if (!plat_priv->antenna) {
+		ret = cnss_wlfw_antenna_switch_send_sync(plat_priv);
+		if (ret)
+			goto out;
+	}
+
+	if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state)) {
+		ret = coex_antenna_switch_to_wlan_send_sync_msg(plat_priv);
+		if (ret)
+			goto out;
+	}
+
+	ret = cnss_wlfw_antenna_grant_send_sync(plat_priv);
+	if (ret)
+		goto out;
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static void cnss_release_antenna_sharing(struct cnss_plat_data *plat_priv)
+{
+	if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state))
+		coex_antenna_switch_to_mdm_send_sync_msg(plat_priv);
+}
+
 static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -393,6 +429,7 @@
 		ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
 						    CNSS_WALTEST);
 	} else if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state)) {
+		cnss_request_antenna_sharing(plat_priv);
 		ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
 						    CNSS_CALIBRATION);
 	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
@@ -448,6 +485,12 @@
 		return "POWER_UP";
 	case CNSS_DRIVER_EVENT_POWER_DOWN:
 		return "POWER_DOWN";
+	case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
+		return "QDSS_TRACE_REQ_MEM";
+	case CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
+		return "QDSS_TRACE_SAVE";
+	case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
+		return "QDSS_TRACE_FREE";
 	case CNSS_DRIVER_EVENT_MAX:
 		return "EVENT_MAX";
 	}
@@ -599,7 +642,7 @@
 {
 	int ret = 0;
 
-	ret = cnss_get_vreg(plat_priv);
+	ret = cnss_get_vreg_type(plat_priv, CNSS_VREG_PRIM);
 	if (ret) {
 		cnss_pr_err("Failed to get vreg, err = %d\n", ret);
 		goto out;
@@ -993,6 +1036,11 @@
 		return -EOPNOTSUPP;
 	}
 
+	if (cnss_pci_is_device_down(dev)) {
+		cnss_pr_info("Device is already in bad state, ignore force assert\n");
+		return 0;
+	}
+
 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
 		cnss_pr_info("Recovery is already in progress, ignore forced FW assert\n");
 		return 0;
@@ -1021,14 +1069,19 @@
 		return -EOPNOTSUPP;
 	}
 
+	if (cnss_pci_is_device_down(dev)) {
+		cnss_pr_info("Device is already in bad state, ignore force collect rddm\n");
+		return 0;
+	}
+
 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
 		cnss_pr_info("Recovery is already in progress, ignore forced collect rddm\n");
 		return 0;
 	}
 
-	cnss_driver_event_post(plat_priv,
-			       CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
-			       0, NULL);
+	ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
+	if (ret)
+		return ret;
 
 	reinit_completion(&plat_priv->rddm_complete);
 	ret = wait_for_completion_timeout
@@ -1071,6 +1124,7 @@
 
 	plat_priv->cal_done = true;
 	cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
+	cnss_release_antenna_sharing(plat_priv);
 	cnss_bus_dev_shutdown(plat_priv);
 	complete(&plat_priv->cal_complete);
 	clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
@@ -1090,6 +1144,109 @@
 	return 0;
 }
 
+static int cnss_qdss_trace_req_mem_hdlr(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	ret = cnss_bus_alloc_qdss_mem(plat_priv);
+	if (ret < 0)
+		return ret;
+
+	return cnss_wlfw_qdss_trace_mem_info_send_sync(plat_priv);
+}
+
+static void *cnss_qdss_trace_pa_to_va(struct cnss_plat_data *plat_priv,
+				      u64 pa, u32 size, int *seg_id)
+{
+	int i = 0;
+	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+	u64 offset = 0;
+	void *va = NULL;
+	u64 local_pa;
+	u32 local_size;
+
+	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+		local_pa = (u64)qdss_mem[i].pa;
+		local_size = (u32)qdss_mem[i].size;
+		if (pa == local_pa && size <= local_size) {
+			va = qdss_mem[i].va;
+			break;
+		}
+		if (pa > local_pa &&
+		    pa < local_pa + local_size &&
+		    pa + size <= local_pa + local_size) {
+			offset = pa - local_pa;
+			va = qdss_mem[i].va + offset;
+			break;
+		}
+	}
+
+	*seg_id = i;
+	return va;
+}
+
+static int cnss_qdss_trace_save_hdlr(struct cnss_plat_data *plat_priv,
+				     void *data)
+{
+	struct cnss_qmi_event_qdss_trace_save_data *event_data = data;
+	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+	int ret = 0;
+	int i;
+	void *va = NULL;
+	u64 pa;
+	u32 size;
+	int seg_id = 0;
+
+	if (!plat_priv->qdss_mem_seg_len) {
+		cnss_pr_err("Memory for QDSS trace is not available\n");
+		return -ENOMEM;
+	}
+
+	if (event_data->mem_seg_len == 0) {
+		for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+			ret = cnss_genl_send_msg(qdss_mem[i].va,
+						 CNSS_GENL_MSG_TYPE_QDSS,
+						 event_data->file_name,
+						 qdss_mem[i].size);
+			if (ret < 0) {
+				cnss_pr_err("Fail to save QDSS data: %d\n",
+					    ret);
+				break;
+			}
+		}
+	} else {
+		for (i = 0; i < event_data->mem_seg_len; i++) {
+			pa = event_data->mem_seg[i].addr;
+			size = event_data->mem_seg[i].size;
+			va = cnss_qdss_trace_pa_to_va(plat_priv, pa,
+						      size, &seg_id);
+			if (!va) {
+				cnss_pr_err("Fail to find matching va for pa %pa\n",
+					    &pa);
+				ret = -EINVAL;
+				break;
+			}
+			ret = cnss_genl_send_msg(va, CNSS_GENL_MSG_TYPE_QDSS,
+						 event_data->file_name, size);
+			if (ret < 0) {
+				cnss_pr_err("Fail to save QDSS data: %d\n",
+					    ret);
+				break;
+			}
+		}
+	}
+
+	kfree(data);
+	return ret;
+}
+
+static int cnss_qdss_trace_free_hdlr(struct cnss_plat_data *plat_priv)
+{
+	cnss_bus_free_qdss_mem(plat_priv);
+
+	return 0;
+}
+
 static void cnss_driver_event_work(struct work_struct *work)
 {
 	struct cnss_plat_data *plat_priv =
@@ -1163,6 +1320,16 @@
 		case CNSS_DRIVER_EVENT_POWER_DOWN:
 			ret = cnss_power_down_hdlr(plat_priv);
 			break;
+		case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
+			ret = cnss_qdss_trace_req_mem_hdlr(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
+			ret = cnss_qdss_trace_save_hdlr(plat_priv,
+							event->data);
+			break;
+		case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
+			ret = cnss_qdss_trace_free_hdlr(plat_priv);
+			break;
 		default:
 			cnss_pr_err("Invalid driver event type: %d",
 				    event->type);
@@ -1651,6 +1818,7 @@
 	plat_priv->bus_type = cnss_get_bus_type(plat_priv->device_id);
 	cnss_set_plat_priv(plat_dev, plat_priv);
 	platform_set_drvdata(plat_dev, plat_priv);
+	INIT_LIST_HEAD(&plat_priv->vreg_list);
 
 	cnss_init_control_params(plat_priv);
 
@@ -1696,6 +1864,12 @@
 	if (ret)
 		goto destroy_debugfs;
 
+	cnss_register_coex_service(plat_priv);
+
+	ret = cnss_genl_init();
+	if (ret < 0)
+		cnss_pr_err("CNSS genl init failed %d\n", ret);
+
 	cnss_pr_info("Platform driver probed successfully.\n");
 
 	return 0;
@@ -1731,6 +1905,8 @@
 {
 	struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
 
+	cnss_genl_exit();
+	cnss_unregister_coex_service(plat_priv);
 	cnss_misc_deinit(plat_priv);
 	cnss_debugfs_destroy(plat_priv);
 	cnss_qmi_deinit(plat_priv);
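
The cnss_qdss_trace_pa_to_va() helper added above accepts a firmware-supplied range either when it starts exactly at a segment base or when it lies strictly inside the segment. A condensed restatement of that containment test, extracted for clarity:

    /* A request [pa, pa + size) matches a segment [base, base + len) when
     * it is fully contained in it; mirrors the two branches in
     * cnss_qdss_trace_pa_to_va() above.
     */
    static int range_contained(u64 pa, u32 size, u64 base, u32 len)
    {
        if (pa == base && size <= len)
            return 1;                      /* starts at the segment base */
        return pa > base &&
               pa < base + len &&
               pa + size <= base + len;    /* strictly inside the segment */
    }
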
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index a53a3cc..2756d55 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -29,8 +29,7 @@
 	CNSS_BUS_PCI,
 };
 
-struct cnss_vreg_info {
-	struct regulator *reg;
+struct cnss_vreg_cfg {
 	const char *name;
 	u32 min_uv;
 	u32 max_uv;
@@ -38,6 +37,17 @@
 	u32 delay_us;
 };
 
+struct cnss_vreg_info {
+	struct list_head list;
+	struct regulator *reg;
+	struct cnss_vreg_cfg cfg;
+	u32 enabled;
+};
+
+enum cnss_vreg_type {
+	CNSS_VREG_PRIM,
+};
+
 struct cnss_pinctrl_info {
 	struct pinctrl *pinctrl;
 	struct pinctrl_state *bootstrap_active;
@@ -151,6 +161,9 @@
 	CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
 	CNSS_DRIVER_EVENT_POWER_UP,
 	CNSS_DRIVER_EVENT_POWER_DOWN,
+	CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
+	CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE,
+	CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
 	CNSS_DRIVER_EVENT_MAX,
 };
 
@@ -207,6 +220,7 @@
 enum cnss_bdf_type {
 	CNSS_BDF_BIN,
 	CNSS_BDF_ELF,
+	CNSS_BDF_REGDB = 4,
 	CNSS_BDF_DUMMY = 255,
 };
 
@@ -237,7 +251,7 @@
 	struct platform_device *plat_dev;
 	void *bus_priv;
 	enum cnss_dev_bus_type bus_type;
-	struct cnss_vreg_info *vreg_info;
+	struct list_head vreg_list;
 	struct cnss_pinctrl_info pinctrl_info;
 	struct cnss_subsys_info subsys_info;
 	struct cnss_ramdump_info ramdump_info;
@@ -263,6 +277,9 @@
 	u32 fw_mem_seg_len;
 	struct cnss_fw_mem fw_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
 	struct cnss_fw_mem m3_mem;
+	u32 qdss_mem_seg_len;
+	struct cnss_fw_mem qdss_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
+	u32 *qdss_reg;
 	struct cnss_pin_connect_result pin_result;
 	struct dentry *root_dentry;
 	atomic_t pm_count;
@@ -289,7 +306,14 @@
 int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
 			   enum cnss_driver_event_type type,
 			   u32 flags, void *data);
-int cnss_get_vreg(struct cnss_plat_data *plat_priv);
+int cnss_get_vreg_type(struct cnss_plat_data *plat_priv,
+		       enum cnss_vreg_type type);
+void cnss_put_vreg_type(struct cnss_plat_data *plat_priv,
+			enum cnss_vreg_type type);
+int cnss_vreg_on_type(struct cnss_plat_data *plat_priv,
+		      enum cnss_vreg_type type);
+int cnss_vreg_off_type(struct cnss_plat_data *plat_priv,
+		       enum cnss_vreg_type type);
 int cnss_get_pinctrl(struct cnss_plat_data *plat_priv);
 int cnss_power_on_device(struct cnss_plat_data *plat_priv);
 void cnss_power_off_device(struct cnss_plat_data *plat_priv);
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 3688a70..b09a32e 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -88,6 +88,13 @@
 
 #define QCA6390_CE_REG_INTERVAL			0x2000
 
+#define QDSS_APB_DEC_CSR_BASE			0x1C01000
+
+#define QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET	0x6C
+#define QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET	0x70
+#define QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET	0x74
+#define QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET	0x78
+
 #define MAX_UNWINDOWED_ADDRESS			0x80000
 #define WINDOW_ENABLE_BIT			0x40000000
 #define WINDOW_SHIFT				19
@@ -128,6 +135,14 @@
 	{ NULL },
 };
 
+static struct cnss_pci_reg qdss_csr[] = {
+	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
+	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
+	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
+	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
+	{ NULL },
+};
+
 static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
 {
 	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
@@ -335,8 +350,8 @@
 
 	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
 	if (device_id != pci_priv->device_id)  {
-		cnss_pr_err("PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
-			    device_id, pci_priv->device_id);
+		cnss_fatal_err("PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
+			       device_id, pci_priv->device_id);
 		return -EIO;
 	}
 
@@ -605,7 +620,7 @@
 
 	ret = cnss_pci_start_mhi(pci_priv);
 	if (ret) {
-		cnss_pr_err("Failed to start MHI, err = %d\n", ret);
+		cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
 		    !pci_priv->pci_link_down_ind && timeout)
 			mod_timer(&plat_priv->fw_boot_timer,
@@ -669,6 +684,8 @@
 
 	cnss_power_off_device(plat_priv);
 
+	pci_priv->remap_window = 0;
+
 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
@@ -683,11 +700,6 @@
 	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
 		    plat_priv->driver_state);
 
-	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
-		cnss_pr_dbg("Ignore crash shutdown\n");
-		return;
-	}
-
 	cnss_pci_collect_dump_info(pci_priv, true);
 }
 
@@ -939,99 +951,6 @@
 	return 0;
 }
 
-static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
-{
-	int ret = 0;
-	struct device *dev;
-	struct dma_iommu_mapping *mapping;
-	int atomic_ctx = 1, s1_bypass = 1, fast = 1, cb_stall_disable = 1,
-		no_cfre = 1;
-
-	cnss_pr_dbg("Initializing SMMU\n");
-
-	dev = &pci_priv->pci_dev->dev;
-
-	mapping = __depr_arm_iommu_create_mapping(dev->bus,
-						  pci_priv->smmu_iova_start,
-						  pci_priv->smmu_iova_len);
-	if (IS_ERR(mapping)) {
-		ret = PTR_ERR(mapping);
-		cnss_pr_err("Failed to create SMMU mapping, err = %d\n", ret);
-		goto out;
-	}
-
-	if (pci_priv->smmu_s1_enable) {
-		cnss_pr_dbg("Enabling SMMU S1 stage\n");
-
-		ret = iommu_domain_set_attr(mapping->domain,
-					    DOMAIN_ATTR_ATOMIC,
-					    &atomic_ctx);
-		if (ret) {
-			pr_err("Failed to set SMMU atomic_ctx attribute, err = %d\n",
-			       ret);
-			goto release_mapping;
-		}
-
-		ret = iommu_domain_set_attr(mapping->domain,
-					    DOMAIN_ATTR_FAST,
-					    &fast);
-		if (ret) {
-			pr_err("Failed to set SMMU fast attribute, err = %d\n",
-			       ret);
-			goto release_mapping;
-		}
-
-		ret = iommu_domain_set_attr(mapping->domain,
-					    DOMAIN_ATTR_CB_STALL_DISABLE,
-					    &cb_stall_disable);
-		if (ret) {
-			pr_err("Failed to set SMMU cb_stall_disable attribute, err = %d\n",
-			       ret);
-			goto release_mapping;
-		}
-
-		ret = iommu_domain_set_attr(mapping->domain,
-					    DOMAIN_ATTR_NO_CFRE,
-					    &no_cfre);
-		if (ret) {
-			pr_err("Failed to set SMMU no_cfre attribute, err = %d\n",
-			       ret);
-			goto release_mapping;
-		}
-	} else {
-		ret = iommu_domain_set_attr(mapping->domain,
-					    DOMAIN_ATTR_S1_BYPASS,
-					    &s1_bypass);
-		if (ret) {
-			pr_err("Failed to set SMMU s1_bypass attribute, err = %d\n",
-			       ret);
-			goto release_mapping;
-		}
-	}
-
-	ret = __depr_arm_iommu_attach_device(dev, mapping);
-	if (ret) {
-		pr_err("Failed to attach SMMU device, err = %d\n", ret);
-		goto release_mapping;
-	}
-
-	pci_priv->smmu_mapping = mapping;
-
-	return ret;
-release_mapping:
-	__depr_arm_iommu_release_mapping(mapping);
-out:
-	return ret;
-}
-
-static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
-{
-	__depr_arm_iommu_detach_device(&pci_priv->pci_dev->dev);
-	__depr_arm_iommu_release_mapping(pci_priv->smmu_mapping);
-
-	pci_priv->smmu_mapping = NULL;
-}
-
 static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
 {
 	unsigned long flags;
@@ -1066,7 +985,7 @@
 		pci_priv->pci_link_down_ind = true;
 		spin_unlock_irqrestore(&pci_link_down_lock, flags);
 
-		cnss_pr_err("PCI link down, schedule recovery!\n");
+		cnss_fatal_err("PCI link down, schedule recovery!\n");
 		if (pci_dev->device == QCA6174_DEVICE_ID)
 			disable_irq(pci_dev->irq);
 		cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
@@ -1215,6 +1134,9 @@
 	if (driver_ops && driver_ops->suspend_noirq)
 		ret = driver_ops->suspend_noirq(pci_dev);
 
+	if (pci_priv->disable_pc && !pci_dev->state_saved)
+		pci_save_state(pci_dev);
+
 out:
 	return ret;
 }
@@ -1588,6 +1510,63 @@
 	return 0;
 }
 
+int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+	int i, j;
+
+	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+		if (!qdss_mem[i].va && qdss_mem[i].size) {
+			qdss_mem[i].va =
+				dma_alloc_coherent(&pci_priv->pci_dev->dev,
+						   qdss_mem[i].size,
+						   &qdss_mem[i].pa,
+						   GFP_KERNEL);
+			if (!qdss_mem[i].va) {
+				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chunk-ID: %d\n",
+					    qdss_mem[i].size,
+					    qdss_mem[i].type, i);
+				break;
+			}
+		}
+	}
+
+	/* Best-effort allocation for QDSS trace */
+	if (i < plat_priv->qdss_mem_seg_len) {
+		for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
+			qdss_mem[j].type = 0;
+			qdss_mem[j].size = 0;
+		}
+		plat_priv->qdss_mem_seg_len = i;
+	}
+
+	return 0;
+}
+
+void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+	int i;
+
+	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+		if (qdss_mem[i].va && qdss_mem[i].size) {
+			cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
+				    &qdss_mem[i].pa, qdss_mem[i].size,
+				    qdss_mem[i].type);
+			dma_free_coherent(&pci_priv->pci_dev->dev,
+					  qdss_mem[i].size, qdss_mem[i].va,
+					  qdss_mem[i].pa);
+			qdss_mem[i].va = NULL;
+			qdss_mem[i].pa = 0;
+			qdss_mem[i].size = 0;
+			qdss_mem[i].type = 0;
+		}
+	}
+	plat_priv->qdss_mem_seg_len = 0;
+}
+
 static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -1677,17 +1656,12 @@
 	if (!plat_priv)
 		return -ENODEV;
 
-	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev)) {
-		cnss_pr_info("Device is already in bad state, ignore force assert\n");
-		return 0;
-	}
-
 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
 	if (ret) {
-		cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
+		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
 				       CNSS_REASON_DEFAULT);
-		return 0;
+		return ret;
 	}
 
 	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
@@ -1703,7 +1677,7 @@
 	if (!pci_priv)
 		return;
 
-	cnss_pr_err("Timeout waiting for FW ready indication\n");
+	cnss_fatal_err("Timeout waiting for FW ready indication\n");
 
 	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
 			       CNSS_REASON_TIMEOUT);
@@ -1716,10 +1690,21 @@
 	if (!pci_priv)
 		return NULL;
 
-	return pci_priv->smmu_mapping;
+	return &pci_priv->smmu_mapping;
 }
 EXPORT_SYMBOL(cnss_smmu_get_mapping);
 
+struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
+{
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+
+	if (!pci_priv)
+		return NULL;
+
+	return pci_priv->iommu_domain;
+}
+EXPORT_SYMBOL(cnss_smmu_get_domain);
+
 int cnss_smmu_map(struct device *dev,
 		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
 {
@@ -1749,7 +1734,7 @@
 		return -ENOMEM;
 	}
 
-	ret = iommu_map(pci_priv->smmu_mapping->domain, iova,
+	ret = iommu_map(pci_priv->iommu_domain, iova,
 			rounddown(paddr, PAGE_SIZE), len,
 			IOMMU_READ | IOMMU_WRITE);
 	if (ret) {
@@ -2067,6 +2052,32 @@
 	}
 };
 
+static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
+	gfp_t gfp = GFP_KERNEL;
+	u32 reg_offset;
+
+	if (in_interrupt() || irqs_disabled())
+		gfp = GFP_ATOMIC;
+
+	if (!plat_priv->qdss_reg)
+		plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
+						   sizeof(*plat_priv->qdss_reg)
+						   * array_size, gfp);
+	if (!plat_priv->qdss_reg)
+		return;
+
+	for (i = 0; qdss_csr[i].name; i++) {
+		reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
+		plat_priv->qdss_reg[i] = cnss_pci_reg_read(pci_priv,
+							   reg_offset);
+		cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
+			    plat_priv->qdss_reg[i]);
+	}
+}
+
 static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
 				 enum cnss_ce_index ce)
 {
@@ -2136,9 +2145,12 @@
 		return;
 	}
 
+	cnss_pci_dump_qdss_reg(pci_priv);
+
 	ret = mhi_download_rddm_img(pci_priv->mhi_ctrl, in_panic);
 	if (ret) {
-		cnss_pr_err("Failed to download RDDM image, err = %d\n", ret);
+		cnss_fatal_err("Failed to download RDDM image, err = %d\n",
+			       ret);
 		cnss_pci_dump_registers(pci_priv);
 		return;
 	}
@@ -2234,7 +2246,7 @@
 	if (!pci_priv)
 		return;
 
-	cnss_pr_err("Timeout waiting for RDDM notification\n");
+	cnss_fatal_err("Timeout waiting for RDDM notification\n");
 
 	cnss_schedule_recovery(&pci_priv->pci_dev->dev, CNSS_REASON_TIMEOUT);
 }
@@ -2622,6 +2634,9 @@
 	struct cnss_pci_data *pci_priv;
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
 	struct resource *res;
+	struct device_node *of_node;
+	const char *iommu_dma_type;
+	u32 addr_win[2];
 
 	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x\n",
 		    id->vendor, pci_dev->device);
@@ -2652,16 +2667,34 @@
 	if (ret)
 		goto unregister_subsys;
 
-	res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM,
-					   "smmu_iova_base");
-	if (res) {
-		if (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
-					  "qcom,smmu-s1-enable"))
-			pci_priv->smmu_s1_enable = true;
+	of_node = of_parse_phandle(pci_priv->pci_dev->dev.of_node,
+				   "qcom,iommu-group", 0);
+	if (of_node) {
+		pci_priv->iommu_domain =
+			iommu_get_domain_for_dev(&pci_dev->dev);
+		pci_priv->smmu_mapping.domain = pci_priv->iommu_domain;
+		ret = of_property_read_string(of_node, "qcom,iommu-dma",
+					      &iommu_dma_type);
+		if (!ret) {
+			if (!strcmp("fastmap", iommu_dma_type)) {
+				cnss_pr_dbg("Enabling SMMU S1 stage\n");
+				pci_priv->smmu_s1_enable = true;
+			}
+		}
 
-		pci_priv->smmu_iova_start = res->start;
-		pci_priv->smmu_iova_len = resource_size(res);
-		cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: %zu\n",
+		ret = of_property_read_u32_array(of_node,
+						 "qcom,iommu-dma-addr-pool",
+						 addr_win,
+						 ARRAY_SIZE(addr_win));
+		if (ret) {
+			cnss_pr_err("Invalid smmu size window, ret %d\n", ret);
+			of_node_put(of_node);
+			goto unregister_ramdump;
+		}
+
+		pci_priv->smmu_iova_start = addr_win[0];
+		pci_priv->smmu_iova_len = addr_win[1];
+		cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: 0x%zx\n",
 			    &pci_priv->smmu_iova_start,
 			    pci_priv->smmu_iova_len);
 
@@ -2671,22 +2704,17 @@
 		if (res) {
 			pci_priv->smmu_iova_ipa_start = res->start;
 			pci_priv->smmu_iova_ipa_len = resource_size(res);
-			cnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: %zu\n",
+			cnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: 0x%zx\n",
 				    &pci_priv->smmu_iova_ipa_start,
 				    pci_priv->smmu_iova_ipa_len);
 		}
-
-		ret = cnss_pci_init_smmu(pci_priv);
-		if (ret) {
-			cnss_pr_err("Failed to init SMMU, err = %d\n", ret);
-			goto unregister_ramdump;
-		}
+		of_node_put(of_node);
 	}
 
 	ret = cnss_reg_pci_event(pci_priv);
 	if (ret) {
 		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
-		goto deinit_smmu;
+		goto unregister_ramdump;
 	}
 
 	ret = cnss_pci_enable_bus(pci_priv);
@@ -2740,10 +2768,9 @@
 	cnss_pci_disable_bus(pci_priv);
 dereg_pci_event:
 	cnss_dereg_pci_event(pci_priv);
-deinit_smmu:
-	if (pci_priv->smmu_mapping)
-		cnss_pci_deinit_smmu(pci_priv);
 unregister_ramdump:
+	pci_priv->iommu_domain = NULL;
+	pci_priv->smmu_mapping.domain = NULL;
 	cnss_unregister_ramdump(plat_priv);
 unregister_subsys:
 	cnss_unregister_subsys(plat_priv);
@@ -2761,6 +2788,7 @@
 
 	cnss_pci_free_m3_mem(pci_priv);
 	cnss_pci_free_fw_mem(pci_priv);
+	cnss_pci_free_qdss_mem(pci_priv);
 
 	switch (pci_dev->device) {
 	case QCA6290_DEVICE_ID:
@@ -2777,8 +2805,8 @@
 
 	cnss_pci_disable_bus(pci_priv);
 	cnss_dereg_pci_event(pci_priv);
-	if (pci_priv->smmu_mapping)
-		cnss_pci_deinit_smmu(pci_priv);
+	pci_priv->iommu_domain = NULL;
+	pci_priv->smmu_mapping.domain = NULL;
 	cnss_unregister_ramdump(plat_priv);
 	cnss_unregister_subsys(plat_priv);
 	plat_priv->bus_priv = NULL;
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 87e0c1b..ed28e86 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -56,7 +56,8 @@
 	struct msm_pcie_register_event msm_pci_event;
 	atomic_t auto_suspended;
 	u8 monitor_wake_intr;
-	struct dma_iommu_mapping *smmu_mapping;
+	struct dma_iommu_mapping smmu_mapping;
+	struct iommu_domain *iommu_domain;
 	u8 smmu_s1_enable;
 	dma_addr_t smmu_iova_start;
 	size_t smmu_iova_len;
@@ -122,6 +123,8 @@
 int cnss_pci_init(struct cnss_plat_data *plat_priv);
 void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
+int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv);
+void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv);
 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv);
 int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
 			   enum cnss_mhi_state state);
diff --git a/drivers/net/wireless/cnss2/power.c b/drivers/net/wireless/cnss2/power.c
index e903155..f57dc7e 100644
--- a/drivers/net/wireless/cnss2/power.c
+++ b/drivers/net/wireless/cnss2/power.c
@@ -9,25 +9,25 @@
 #include "main.h"
 #include "debug.h"
 
-static struct cnss_vreg_info cnss_vreg_info[] = {
-	{NULL, "vdd-wlan-core", 1300000, 1300000, 0, 0},
-	{NULL, "vdd-wlan-io", 1800000, 1800000, 0, 0},
-	{NULL, "vdd-wlan-xtal-aon", 0, 0, 0, 0},
-	{NULL, "vdd-wlan-xtal", 1800000, 1800000, 0, 2},
-	{NULL, "vdd-wlan", 0, 0, 0, 0},
-	{NULL, "vdd-wlan-ctrl1", 0, 0, 0, 0},
-	{NULL, "vdd-wlan-ctrl2", 0, 0, 0, 0},
-	{NULL, "vdd-wlan-sp2t", 2700000, 2700000, 0, 0},
-	{NULL, "wlan-ant-switch", 1800000, 1800000, 0, 0},
-	{NULL, "wlan-soc-swreg", 1200000, 1200000, 0, 0},
-	{NULL, "vdd-wlan-aon", 950000, 950000, 0, 0},
-	{NULL, "vdd-wlan-dig", 950000, 952000, 0, 0},
-	{NULL, "vdd-wlan-rfa1", 1900000, 1900000, 0, 0},
-	{NULL, "vdd-wlan-rfa2", 1350000, 1350000, 0, 0},
-	{NULL, "vdd-wlan-en", 0, 0, 0, 10},
+static struct cnss_vreg_cfg cnss_vreg_list[] = {
+	{"vdd-wlan-core", 1300000, 1300000, 0, 0},
+	{"vdd-wlan-io", 1800000, 1800000, 0, 0},
+	{"vdd-wlan-xtal-aon", 0, 0, 0, 0},
+	{"vdd-wlan-xtal", 1800000, 1800000, 0, 2},
+	{"vdd-wlan", 0, 0, 0, 0},
+	{"vdd-wlan-ctrl1", 0, 0, 0, 0},
+	{"vdd-wlan-ctrl2", 0, 0, 0, 0},
+	{"vdd-wlan-sp2t", 2700000, 2700000, 0, 0},
+	{"wlan-ant-switch", 1800000, 1800000, 0, 0},
+	{"wlan-soc-swreg", 1200000, 1200000, 0, 0},
+	{"vdd-wlan-aon", 950000, 950000, 0, 0},
+	{"vdd-wlan-dig", 950000, 952000, 0, 0},
+	{"vdd-wlan-rfa1", 1900000, 1900000, 0, 0},
+	{"vdd-wlan-rfa2", 1350000, 1350000, 0, 0},
+	{"vdd-wlan-en", 0, 0, 0, 10},
 };
 
-#define CNSS_VREG_INFO_SIZE		ARRAY_SIZE(cnss_vreg_info)
+#define CNSS_VREG_INFO_SIZE		ARRAY_SIZE(cnss_vreg_list)
 #define MAX_PROP_SIZE			32
 
 #define BOOTSTRAP_GPIO			"qcom,enable-bootstrap-gpio"
@@ -39,189 +39,327 @@
 #define BOOTSTRAP_DELAY			1000
 #define WLAN_ENABLE_DELAY		1000
 
-int cnss_get_vreg(struct cnss_plat_data *plat_priv)
+static int cnss_get_vreg_single(struct cnss_plat_data *plat_priv,
+				struct cnss_vreg_info *vreg)
 {
 	int ret = 0;
-	int i;
-	struct cnss_vreg_info *vreg_info;
 	struct device *dev;
 	struct regulator *reg;
 	const __be32 *prop;
-	char prop_name[MAX_PROP_SIZE];
+	char prop_name[MAX_PROP_SIZE] = {0};
 	int len;
 
 	dev = &plat_priv->plat_dev->dev;
-
-	plat_priv->vreg_info = devm_kzalloc(dev, sizeof(cnss_vreg_info),
-					    GFP_KERNEL);
-	if (!plat_priv->vreg_info) {
-		ret = -ENOMEM;
-		goto out;
+	reg = devm_regulator_get_optional(dev, vreg->cfg.name);
+	if (IS_ERR(reg)) {
+		ret = PTR_ERR(reg);
+		if (ret == -ENODEV)
+			return ret;
+		else if (ret == -EPROBE_DEFER)
+			cnss_pr_info("EPROBE_DEFER for regulator: %s\n",
+				     vreg->cfg.name);
+		else
+			cnss_pr_err("Failed to get regulator %s, err = %d\n",
+				    vreg->cfg.name, ret);
+		return ret;
 	}
 
-	memcpy(plat_priv->vreg_info, cnss_vreg_info, sizeof(cnss_vreg_info));
+	vreg->reg = reg;
 
-	for (i = 0; i < CNSS_VREG_INFO_SIZE; i++) {
-		vreg_info = &plat_priv->vreg_info[i];
-		reg = devm_regulator_get_optional(dev, vreg_info->name);
-		if (IS_ERR(reg)) {
-			ret = PTR_ERR(reg);
-			if (ret == -ENODEV)
-				continue;
-			else if (ret == -EPROBE_DEFER)
-				cnss_pr_info("EPROBE_DEFER for regulator: %s\n",
-					     vreg_info->name);
-			else
-				cnss_pr_err("Failed to get regulator %s, err = %d\n",
-					    vreg_info->name, ret);
-			goto out;
-		}
+	snprintf(prop_name, MAX_PROP_SIZE, "qcom,%s-info",
+		 vreg->cfg.name);
 
-		vreg_info->reg = reg;
-
-		snprintf(prop_name, MAX_PROP_SIZE, "qcom,%s-info",
-			 vreg_info->name);
-
-		prop = of_get_property(dev->of_node, prop_name, &len);
-		cnss_pr_dbg("Got regulator info, name: %s, len: %d\n",
-			    prop_name, len);
-
-		if (!prop || len != (4 * sizeof(__be32))) {
-			cnss_pr_dbg("Property %s %s, use default\n", prop_name,
-				    prop ? "invalid format" : "doesn't exist");
-		} else {
-			vreg_info->min_uv = be32_to_cpup(&prop[0]);
-			vreg_info->max_uv = be32_to_cpup(&prop[1]);
-			vreg_info->load_ua = be32_to_cpup(&prop[2]);
-			vreg_info->delay_us = be32_to_cpup(&prop[3]);
-		}
-
-		cnss_pr_dbg("Got regulator: %s, min_uv: %u, max_uv: %u, load_ua: %u, delay_us: %u\n",
-			    vreg_info->name, vreg_info->min_uv,
-			    vreg_info->max_uv, vreg_info->load_ua,
-			    vreg_info->delay_us);
+	prop = of_get_property(dev->of_node, prop_name, &len);
+	if (!prop || len != (4 * sizeof(__be32))) {
+		cnss_pr_dbg("Property %s %s, use default\n", prop_name,
+			    prop ? "invalid format" : "doesn't exist");
+	} else {
+		vreg->cfg.min_uv = be32_to_cpup(&prop[0]);
+		vreg->cfg.max_uv = be32_to_cpup(&prop[1]);
+		vreg->cfg.load_ua = be32_to_cpup(&prop[2]);
+		vreg->cfg.delay_us = be32_to_cpup(&prop[3]);
 	}
 
+	cnss_pr_dbg("Got regulator: %s, min_uv: %u, max_uv: %u, load_ua: %u, delay_us: %u\n",
+		    vreg->cfg.name, vreg->cfg.min_uv,
+		    vreg->cfg.max_uv, vreg->cfg.load_ua,
+		    vreg->cfg.delay_us);
+
 	return 0;
+}
+
+static void cnss_put_vreg_single(struct cnss_plat_data *plat_priv,
+				 struct cnss_vreg_info *vreg)
+{
+	struct device *dev = &plat_priv->plat_dev->dev;
+
+	cnss_pr_dbg("Put regulator: %s\n", vreg->cfg.name);
+	devm_regulator_put(vreg->reg);
+	devm_kfree(dev, vreg);
+}
+
+static int cnss_vreg_on_single(struct cnss_vreg_info *vreg)
+{
+	int ret = 0;
+
+	if (vreg->enabled) {
+		cnss_pr_dbg("Regulator %s is already enabled\n",
+			    vreg->cfg.name);
+		return 0;
+	}
+
+	cnss_pr_dbg("Regulator %s is being enabled\n", vreg->cfg.name);
+
+	if (vreg->cfg.min_uv != 0 && vreg->cfg.max_uv != 0) {
+		ret = regulator_set_voltage(vreg->reg,
+					    vreg->cfg.min_uv,
+					    vreg->cfg.max_uv);
+
+		if (ret) {
+			cnss_pr_err("Failed to set voltage for regulator %s, min_uv: %u, max_uv: %u, err = %d\n",
+				    vreg->cfg.name, vreg->cfg.min_uv,
+				    vreg->cfg.max_uv, ret);
+			goto out;
+		}
+	}
+
+	if (vreg->cfg.load_ua) {
+		ret = regulator_set_load(vreg->reg,
+					 vreg->cfg.load_ua);
+
+		if (ret < 0) {
+			cnss_pr_err("Failed to set load for regulator %s, load: %u, err = %d\n",
+				    vreg->cfg.name, vreg->cfg.load_ua,
+				    ret);
+			goto out;
+		}
+	}
+
+	if (vreg->cfg.delay_us)
+		udelay(vreg->cfg.delay_us);
+
+	ret = regulator_enable(vreg->reg);
+	if (ret) {
+		cnss_pr_err("Failed to enable regulator %s, err = %d\n",
+			    vreg->cfg.name, ret);
+		goto out;
+	}
+	vreg->enabled = true;
+
 out:
 	return ret;
 }
 
-static int cnss_vreg_on(struct cnss_plat_data *plat_priv)
+static int cnss_vreg_off_single(struct cnss_vreg_info *vreg)
 {
 	int ret = 0;
-	struct cnss_vreg_info *vreg_info;
+
+	if (!vreg->enabled) {
+		cnss_pr_dbg("Regulator %s is already disabled\n",
+			    vreg->cfg.name);
+		return 0;
+	}
+
+	cnss_pr_dbg("Regulator %s is being disabled\n",
+		    vreg->cfg.name);
+
+	ret = regulator_disable(vreg->reg);
+	if (ret)
+		cnss_pr_err("Failed to disable regulator %s, err = %d\n",
+			    vreg->cfg.name, ret);
+
+	if (vreg->cfg.load_ua) {
+		ret = regulator_set_load(vreg->reg, 0);
+		if (ret < 0)
+			cnss_pr_err("Failed to set load for regulator %s, err = %d\n",
+				    vreg->cfg.name, ret);
+	}
+
+	if (vreg->cfg.min_uv != 0 && vreg->cfg.max_uv != 0) {
+		ret = regulator_set_voltage(vreg->reg, 0,
+					    vreg->cfg.max_uv);
+		if (ret)
+			cnss_pr_err("Failed to set voltage for regulator %s, err = %d\n",
+				    vreg->cfg.name, ret);
+	}
+	vreg->enabled = false;
+
+	return ret;
+}
+
+static struct cnss_vreg_cfg *get_vreg_list(u32 *vreg_list_size,
+					   enum cnss_vreg_type type)
+{
+	switch (type) {
+	case CNSS_VREG_PRIM:
+		*vreg_list_size = CNSS_VREG_INFO_SIZE;
+		return cnss_vreg_list;
+	default:
+		cnss_pr_err("Unsupported vreg type 0x%x\n", type);
+		*vreg_list_size = 0;
+		return NULL;
+	}
+}
+
+static int cnss_get_vreg(struct cnss_plat_data *plat_priv,
+			 struct list_head *vreg_list,
+			 struct cnss_vreg_cfg *vreg_cfg,
+			 u32 vreg_list_size)
+{
+	int ret = 0;
 	int i;
+	struct cnss_vreg_info *vreg;
+	struct device *dev = &plat_priv->plat_dev->dev;
 
-	if (!plat_priv) {
-		cnss_pr_err("plat_priv is NULL!\n");
-		return -ENODEV;
+	if (!list_empty(vreg_list)) {
+		cnss_pr_dbg("Vregs have already been updated\n");
+		return 0;
 	}
 
-	for (i = 0; i < CNSS_VREG_INFO_SIZE; i++) {
-		vreg_info = &plat_priv->vreg_info[i];
+	for (i = 0; i < vreg_list_size; i++) {
+		vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+		if (!vreg)
+			return -ENOMEM;
 
-		if (!vreg_info->reg)
-			continue;
-
-		cnss_pr_dbg("Regulator %s is being enabled\n", vreg_info->name);
-
-		if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) {
-			ret = regulator_set_voltage(vreg_info->reg,
-						    vreg_info->min_uv,
-						    vreg_info->max_uv);
-
-			if (ret) {
-				cnss_pr_err("Failed to set voltage for regulator %s, min_uv: %u, max_uv: %u, err = %d\n",
-					    vreg_info->name, vreg_info->min_uv,
-					    vreg_info->max_uv, ret);
-				break;
-			}
-		}
-
-		if (vreg_info->load_ua) {
-			ret = regulator_set_load(vreg_info->reg,
-						 vreg_info->load_ua);
-
-			if (ret < 0) {
-				cnss_pr_err("Failed to set load for regulator %s, load: %u, err = %d\n",
-					    vreg_info->name, vreg_info->load_ua,
-					    ret);
-				break;
-			}
-		}
-
-		if (vreg_info->delay_us)
-			udelay(vreg_info->delay_us);
-
-		ret = regulator_enable(vreg_info->reg);
-		if (ret) {
-			cnss_pr_err("Failed to enable regulator %s, err = %d\n",
-				    vreg_info->name, ret);
-			break;
-		}
-	}
-
-	if (ret) {
-		for (; i >= 0; i--) {
-			vreg_info = &plat_priv->vreg_info[i];
-
-			if (!vreg_info->reg)
+		memcpy(&vreg->cfg, &vreg_cfg[i], sizeof(vreg->cfg));
+		ret = cnss_get_vreg_single(plat_priv, vreg);
+		if (ret != 0) {
+			if (ret == -ENODEV) {
+				devm_kfree(dev, vreg);
 				continue;
-
-			regulator_disable(vreg_info->reg);
-			if (vreg_info->load_ua)
-				regulator_set_load(vreg_info->reg, 0);
-			if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0)
-				regulator_set_voltage(vreg_info->reg, 0,
-						      vreg_info->max_uv);
+			} else {
+				devm_kfree(dev, vreg);
+				return ret;
+			}
 		}
-
-		return ret;
+		list_add_tail(&vreg->list, vreg_list);
 	}
 
 	return 0;
 }
 
-static int cnss_vreg_off(struct cnss_plat_data *plat_priv)
+static void cnss_put_vreg(struct cnss_plat_data *plat_priv,
+			  struct list_head *vreg_list)
 {
-	int ret = 0;
-	struct cnss_vreg_info *vreg_info;
-	int i;
+	struct cnss_vreg_info *vreg;
 
-	if (!plat_priv) {
-		cnss_pr_err("plat_priv is NULL!\n");
-		return -ENODEV;
+	while (!list_empty(vreg_list)) {
+		vreg = list_first_entry(vreg_list,
+					struct cnss_vreg_info, list);
+		list_del(&vreg->list);
+		if (IS_ERR_OR_NULL(vreg->reg))
+			continue;
+		cnss_put_vreg_single(plat_priv, vreg);
+	}
+}
+
+static int cnss_vreg_on(struct cnss_plat_data *plat_priv,
+			struct list_head *vreg_list)
+{
+	struct cnss_vreg_info *vreg;
+	int ret = 0;
+
+	list_for_each_entry(vreg, vreg_list, list) {
+		if (IS_ERR_OR_NULL(vreg->reg))
+			continue;
+		ret = cnss_vreg_on_single(vreg);
+		if (ret)
+			break;
 	}
 
-	for (i = CNSS_VREG_INFO_SIZE - 1; i >= 0; i--) {
-		vreg_info = &plat_priv->vreg_info[i];
+	if (!ret)
+		return 0;
 
-		if (!vreg_info->reg)
+	list_for_each_entry_continue_reverse(vreg, vreg_list, list) {
+		if (IS_ERR_OR_NULL(vreg->reg) || !vreg->enabled)
 			continue;
 
-		cnss_pr_dbg("Regulator %s is being disabled\n",
-			    vreg_info->name);
+		cnss_vreg_off_single(vreg);
+	}
 
-		ret = regulator_disable(vreg_info->reg);
-		if (ret)
-			cnss_pr_err("Failed to disable regulator %s, err = %d\n",
-				    vreg_info->name, ret);
+	return ret;
+}
 
-		if (vreg_info->load_ua) {
-			ret = regulator_set_load(vreg_info->reg, 0);
-			if (ret < 0)
-				cnss_pr_err("Failed to set load for regulator %s, err = %d\n",
-					    vreg_info->name, ret);
-		}
+static int cnss_vreg_off(struct cnss_plat_data *plat_priv,
+			 struct list_head *vreg_list)
+{
+	struct cnss_vreg_info *vreg;
 
-		if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) {
-			ret = regulator_set_voltage(vreg_info->reg, 0,
-						    vreg_info->max_uv);
-			if (ret)
-				cnss_pr_err("Failed to set voltage for regulator %s, err = %d\n",
-					    vreg_info->name, ret);
-		}
+	list_for_each_entry_reverse(vreg, vreg_list, list) {
+		if (IS_ERR_OR_NULL(vreg->reg))
+			continue;
+
+		cnss_vreg_off_single(vreg);
+	}
+
+	return 0;
+}
+
+int cnss_get_vreg_type(struct cnss_plat_data *plat_priv,
+		       enum cnss_vreg_type type)
+{
+	struct cnss_vreg_cfg *vreg_cfg;
+	u32 vreg_list_size = 0;
+	int ret = 0;
+
+	vreg_cfg = get_vreg_list(&vreg_list_size, type);
+	if (!vreg_cfg)
+		return -EINVAL;
+
+	switch (type) {
+	case CNSS_VREG_PRIM:
+		ret = cnss_get_vreg(plat_priv, &plat_priv->vreg_list,
+				    vreg_cfg, vreg_list_size);
+		break;
+	default:
+		cnss_pr_err("Unsupported vreg type 0x%x\n", type);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+void cnss_put_vreg_type(struct cnss_plat_data *plat_priv,
+			enum cnss_vreg_type type)
+{
+	switch (type) {
+	case CNSS_VREG_PRIM:
+		cnss_put_vreg(plat_priv, &plat_priv->vreg_list);
+		break;
+	default:
+		return;
+	}
+}
+
+int cnss_vreg_on_type(struct cnss_plat_data *plat_priv,
+		      enum cnss_vreg_type type)
+{
+	int ret = 0;
+
+	switch (type) {
+	case CNSS_VREG_PRIM:
+		ret = cnss_vreg_on(plat_priv, &plat_priv->vreg_list);
+		break;
+	default:
+		cnss_pr_err("Unsupported vreg type 0x%x\n", type);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+int cnss_vreg_off_type(struct cnss_plat_data *plat_priv,
+		       enum cnss_vreg_type type)
+{
+	int ret = 0;
+
+	switch (type) {
+	case CNSS_VREG_PRIM:
+		ret = cnss_vreg_off(plat_priv, &plat_priv->vreg_list);
+		break;
+	default:
+		cnss_pr_err("Unsupported vreg type 0x%x\n", type);
+		return -EINVAL;
 	}
 
 	return ret;
@@ -346,7 +485,7 @@
 		return 0;
 	}
 
-	ret = cnss_vreg_on(plat_priv);
+	ret = cnss_vreg_on_type(plat_priv, CNSS_VREG_PRIM);
 	if (ret) {
 		cnss_pr_err("Failed to turn on vreg, err = %d\n", ret);
 		goto out;
@@ -361,7 +500,7 @@
 
 	return 0;
 vreg_off:
-	cnss_vreg_off(plat_priv);
+	cnss_vreg_off_type(plat_priv, CNSS_VREG_PRIM);
 out:
 	return ret;
 }
@@ -374,7 +513,7 @@
 	}
 
 	cnss_select_pinctrl_state(plat_priv, false);
-	cnss_vreg_off(plat_priv);
+	cnss_vreg_off_type(plat_priv, CNSS_VREG_PRIM);
 	plat_priv->powered_on = false;
 }
 
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 21957e0..e21f182 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -13,10 +13,12 @@
 #define WLFW_SERVICE_INS_ID_V01		1
 #define WLFW_CLIENT_ID			0x4b4e454c
 #define MAX_BDF_FILE_NAME		13
+#define BDF_FILE_NAME_PREFIX		"bdwlan"
 #define ELF_BDF_FILE_NAME		"bdwlan.elf"
 #define ELF_BDF_FILE_NAME_PREFIX	"bdwlan.e"
 #define BIN_BDF_FILE_NAME		"bdwlan.bin"
 #define BIN_BDF_FILE_NAME_PREFIX	"bdwlan.b"
+#define REGDB_FILE_NAME			"regdb.bin"
 #define DUMMY_BDF_FILE_NAME		"bdwlan.dmy"
 
 #define QMI_WLFW_TIMEOUT_MS		(plat_priv->ctrl_params.qmi_timeout)
@@ -83,6 +85,12 @@
 	req->pin_connect_result_enable = 1;
 	req->cal_done_enable_valid = 1;
 	req->cal_done_enable = 1;
+	req->qdss_trace_req_mem_enable_valid = 1;
+	req->qdss_trace_req_mem_enable = 1;
+	req->qdss_trace_save_enable_valid = 1;
+	req->qdss_trace_save_enable = 1;
+	req->qdss_trace_free_enable_valid = 1;
+	req->qdss_trace_free_enable = 1;
 
 	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
 			   wlfw_ind_register_resp_msg_v01_ei, resp);
@@ -400,7 +408,8 @@
 	return ret;
 }
 
-int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv)
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
+				 u32 bdf_type)
 {
 	struct wlfw_bdf_download_req_msg_v01 *req;
 	struct wlfw_bdf_download_resp_msg_v01 *resp;
@@ -411,8 +420,8 @@
 	unsigned int remaining;
 	int ret = 0;
 
-	cnss_pr_dbg("Sending BDF download message, state: 0x%lx\n",
-		    plat_priv->driver_state);
+	cnss_pr_dbg("Sending BDF download message, state: 0x%lx, type: %d\n",
+		    plat_priv->driver_state, bdf_type);
 
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
@@ -424,7 +433,7 @@
 		return -ENOMEM;
 	}
 
-	switch (plat_priv->ctrl_params.bdf_type) {
+	switch (bdf_type) {
 	case CNSS_BDF_ELF:
 		if (plat_priv->board_info.board_id == 0xFF)
 			snprintf(filename, sizeof(filename), ELF_BDF_FILE_NAME);
@@ -434,8 +443,9 @@
 				 plat_priv->board_info.board_id);
 		else
 			snprintf(filename, sizeof(filename),
-				 ELF_BDF_FILE_NAME_PREFIX "%04x",
-				 plat_priv->board_info.board_id);
+				 BDF_FILE_NAME_PREFIX "%02x.e%02x",
+				 plat_priv->board_info.board_id >> 8 & 0xFF,
+				 plat_priv->board_info.board_id & 0xFF);
 		break;
 	case CNSS_BDF_BIN:
 		if (plat_priv->board_info.board_id == 0xFF)
@@ -446,8 +456,12 @@
 				 plat_priv->board_info.board_id);
 		else
 			snprintf(filename, sizeof(filename),
-				 BIN_BDF_FILE_NAME_PREFIX "%04x",
-				 plat_priv->board_info.board_id);
+				 BDF_FILE_NAME_PREFIX "%02x.b%02x",
+				 plat_priv->board_info.board_id >> 8 & 0xFF,
+				 plat_priv->board_info.board_id & 0xFF);
+		break;
+	case CNSS_BDF_REGDB:
+		snprintf(filename, sizeof(filename), REGDB_FILE_NAME);
 		break;
 	case CNSS_BDF_DUMMY:
 		cnss_pr_dbg("CNSS_BDF_DUMMY is set, sending dummy BDF\n");
@@ -542,7 +556,8 @@
 	if (plat_priv->ctrl_params.bdf_type != CNSS_BDF_DUMMY)
 		release_firmware(fw_entry);
 err_req_fw:
-	CNSS_ASSERT(0);
+	if (bdf_type != CNSS_BDF_REGDB)
+		CNSS_ASSERT(0);
 	kfree(req);
 	kfree(resp);
 	return ret;
@@ -1195,6 +1210,82 @@
 	return ret;
 }
 
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
+{
+	struct wlfw_qdss_trace_mem_info_req_msg_v01 *req;
+	struct wlfw_qdss_trace_mem_info_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+	int ret = 0;
+	int i;
+
+	cnss_pr_dbg("Sending QDSS trace mem info, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->mem_seg_len = plat_priv->qdss_mem_seg_len;
+	for (i = 0; i < req->mem_seg_len; i++) {
+		cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+			    qdss_mem[i].va, &qdss_mem[i].pa,
+			    qdss_mem[i].size, qdss_mem[i].type);
+
+		req->mem_seg[i].addr = qdss_mem[i].pa;
+		req->mem_seg[i].size = qdss_mem[i].size;
+		req->mem_seg[i].type = qdss_mem[i].type;
+	}
+
+	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+			   wlfw_qdss_trace_mem_info_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		cnss_pr_err("Fail to initialize txn for QDSS trace mem request: err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+			       QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01,
+			       WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_qdss_trace_mem_info_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		cnss_pr_err("Fail to send QDSS trace mem info request: err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+	if (ret < 0) {
+		cnss_pr_err("Fail to wait for response of QDSS trace mem info request, err %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("QDSS trace mem info request failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+	kfree(req);
+	kfree(resp);
+	return 0;
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
 unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
 {
 	cnss_pr_dbg("QMI timeout is %u ms\n", QMI_WLFW_TIMEOUT_MS);
@@ -1330,6 +1421,118 @@
 			       0, NULL);
 }
 
+static void cnss_wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi_wlfw,
+						struct sockaddr_qrtr *sq,
+						struct qmi_txn *txn,
+						const void *data)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+	const struct wlfw_qdss_trace_req_mem_ind_msg_v01 *ind_msg = data;
+	int i;
+
+	cnss_pr_dbg("Received QMI WLFW QDSS trace request mem indication\n");
+
+	if (!txn) {
+		cnss_pr_err("Spurious indication\n");
+		return;
+	}
+
+	if (plat_priv->qdss_mem_seg_len) {
+		cnss_pr_err("Ignore double allocation for QDSS trace, current len %u\n",
+			    plat_priv->qdss_mem_seg_len);
+		return;
+	}
+
+	plat_priv->qdss_mem_seg_len = ind_msg->mem_seg_len;
+	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+		cnss_pr_dbg("QDSS requests for memory, size: 0x%zx, type: %u\n",
+			    ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
+		plat_priv->qdss_mem[i].type = ind_msg->mem_seg[i].type;
+		plat_priv->qdss_mem[i].size = ind_msg->mem_seg[i].size;
+	}
+
+	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
+			       0, NULL);
+}
+
+static void cnss_wlfw_qdss_trace_save_ind_cb(struct qmi_handle *qmi_wlfw,
+					     struct sockaddr_qrtr *sq,
+					     struct qmi_txn *txn,
+					     const void *data)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+	const struct wlfw_qdss_trace_save_ind_msg_v01 *ind_msg = data;
+	struct cnss_qmi_event_qdss_trace_save_data *event_data;
+	int i = 0;
+
+	cnss_pr_dbg("Received QMI WLFW QDSS trace save indication\n");
+
+	if (!txn) {
+		cnss_pr_err("Spurious indication\n");
+		return;
+	}
+
+	cnss_pr_dbg("QDSS_trace_save info: source %u, total_size %u, file_name_valid %u, file_name %s\n",
+		    ind_msg->source, ind_msg->total_size,
+		    ind_msg->file_name_valid, ind_msg->file_name);
+
+	if (ind_msg->source == 1)
+		return;
+
+	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+	if (!event_data)
+		return;
+
+	if (ind_msg->mem_seg_valid) {
+		if (ind_msg->mem_seg_len > QDSS_TRACE_SEG_LEN_MAX) {
+			cnss_pr_err("Invalid seg len %u\n",
+				    ind_msg->mem_seg_len);
+			goto free_event_data;
+		}
+		cnss_pr_dbg("QDSS_trace_save seg len %u\n",
+			    ind_msg->mem_seg_len);
+		event_data->mem_seg_len = ind_msg->mem_seg_len;
+		for (i = 0; i < ind_msg->mem_seg_len; i++) {
+			event_data->mem_seg[i].addr = ind_msg->mem_seg[i].addr;
+			event_data->mem_seg[i].size = ind_msg->mem_seg[i].size;
+			cnss_pr_dbg("seg-%d: addr 0x%llx size 0x%x\n",
+				    i, ind_msg->mem_seg[i].addr,
+				    ind_msg->mem_seg[i].size);
+		}
+	}
+
+	event_data->total_size = ind_msg->total_size;
+
+	if (ind_msg->file_name_valid)
+		strlcpy(event_data->file_name, ind_msg->file_name,
+			QDSS_TRACE_FILE_NAME_MAX + 1);
+	else
+		strlcpy(event_data->file_name, "qdss_trace",
+			QDSS_TRACE_FILE_NAME_MAX + 1);
+
+	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE,
+			       0, event_data);
+
+	return;
+
+free_event_data:
+	kfree(event_data);
+}
+
+static void cnss_wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi_wlfw,
+					     struct sockaddr_qrtr *sq,
+					     struct qmi_txn *txn,
+					     const void *data)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+
+	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
+			       0, NULL);
+}
+
 static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
 	{
 		.type = QMI_INDICATION,
@@ -1374,6 +1577,30 @@
 		.decoded_size = sizeof(struct wlfw_cal_done_ind_msg_v01),
 		.fn = cnss_wlfw_cal_done_ind_cb
 	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01,
+		.ei = wlfw_qdss_trace_req_mem_ind_msg_v01_ei,
+		.decoded_size =
+		sizeof(struct wlfw_qdss_trace_req_mem_ind_msg_v01),
+		.fn = cnss_wlfw_qdss_trace_req_mem_ind_cb
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_QDSS_TRACE_SAVE_IND_V01,
+		.ei = wlfw_qdss_trace_save_ind_msg_v01_ei,
+		.decoded_size =
+		sizeof(struct wlfw_qdss_trace_save_ind_msg_v01),
+		.fn = cnss_wlfw_qdss_trace_save_ind_cb
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_QDSS_TRACE_FREE_IND_V01,
+		.ei = wlfw_qdss_trace_free_ind_msg_v01_ei,
+		.decoded_size =
+		sizeof(struct wlfw_qdss_trace_free_ind_msg_v01),
+		.fn = cnss_wlfw_qdss_trace_free_ind_cb
+	},
 	{}
 };
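
Every synchronous exchange added to qmi.c repeats the same qmi_interface lifecycle. A distilled skeleton of that pattern, using the real API from <linux/soc/qcom/qmi.h>; the ei tables, message ID and lengths are the caller's:

    #include <linux/soc/qcom/qmi.h>

    static int qmi_sync_request_skeleton(struct qmi_handle *qmi,
                                         int msg_id, size_t max_len,
                                         struct qmi_elem_info *req_ei, void *req,
                                         struct qmi_elem_info *resp_ei, void *resp,
                                         struct qmi_response_type_v01 *result,
                                         unsigned long timeout_jf)
    {
        struct qmi_txn txn;
        int ret;

        ret = qmi_txn_init(qmi, &txn, resp_ei, resp); /* bind response buffer */
        if (ret < 0)
            return ret;

        ret = qmi_send_request(qmi, NULL, &txn, msg_id, max_len,
                               req_ei, req);          /* encode and send */
        if (ret < 0) {
            qmi_txn_cancel(&txn);  /* a failed send must cancel the txn */
            return ret;
        }

        ret = qmi_txn_wait(&txn, timeout_jf);         /* block for response */
        if (ret < 0)
            return ret;

        /* transport success still requires checking the QMI-level result */
        if (result->result != QMI_RESULT_SUCCESS_V01)
            return -result->result;

        return 0;
    }
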
 
diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h
index ab58953..784aadc 100644
--- a/drivers/net/wireless/cnss2/qmi.h
+++ b/drivers/net/wireless/cnss2/qmi.h
@@ -8,15 +8,30 @@
 
 struct cnss_plat_data;
 
-#ifdef CONFIG_CNSS2_QMI
-#include "wlan_firmware_service_v01.h"
-#include "coexistence_service_v01.h"
-
 struct cnss_qmi_event_server_arrive_data {
 	unsigned int node;
 	unsigned int port;
 };
 
+#define QDSS_TRACE_SEG_LEN_MAX 32
+#define QDSS_TRACE_FILE_NAME_MAX 16
+
+struct cnss_mem_seg {
+	u64 addr;
+	u32 size;
+};
+
+struct cnss_qmi_event_qdss_trace_save_data {
+	u32 total_size;
+	u32 mem_seg_len;
+	struct cnss_mem_seg mem_seg[QDSS_TRACE_SEG_LEN_MAX];
+	char file_name[QDSS_TRACE_FILE_NAME_MAX + 1];
+};
+
+#ifdef CONFIG_CNSS2_QMI
+#include "wlan_firmware_service_v01.h"
+#include "coexistence_service_v01.h"
+
 int cnss_qmi_init(struct cnss_plat_data *plat_priv);
 void cnss_qmi_deinit(struct cnss_plat_data *plat_priv);
 unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv);
@@ -24,7 +39,8 @@
 int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv);
-int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
+				 u32 bdf_type);
 int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
 				  enum cnss_driver_mode mode);
@@ -45,7 +61,7 @@
 void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv);
 int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv);
 int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv);
-
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv);
 #else
 #define QMI_WLFW_TIMEOUT_MS		10000
 
@@ -86,7 +102,8 @@
 	return 0;
 }
 
-static inline int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv)
+static inline int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
+					       u32 bdf_type)
 {
 	return 0;
 }
@@ -163,6 +180,12 @@
 
 static inline
 int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
 {
 	return 0;
 }
+
+static inline
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
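
This hunk moves the event structures above the #ifdef CONFIG_CNSS2_QMI block so both the real prototypes and the inline stubs can reference them, and the stubs keep returning 0 so callers never need conditionals of their own. A generic sketch of that header convention (CONFIG_FEATURE and the names are placeholders, not cnss2 symbols):

/* header sketch: shared types above the #ifdef, two sets of functions below */
struct trace_info {
	unsigned int len;
};

#ifdef CONFIG_FEATURE
int feature_send(struct trace_info *info);	/* real implementation in .c */
#else
/* Compiled-out build: succeed quietly so callers stay #ifdef-free. */
static inline int feature_send(struct trace_info *info)
{
	(void)info;
	return 0;
}
#endif
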
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
index d06cb95..d65447e 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -1221,6 +1221,24 @@
 					   num_macs),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   voltage_mv_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   voltage_mv),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.array_type       = NO_ARRAY,
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
index a5346c8..cc41f83 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -380,9 +380,11 @@
 	char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
 	u8 num_macs_valid;
 	u8 num_macs;
+	u8 voltage_mv_valid;
+	u32 voltage_mv;
 };
 
-#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 214
 extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
 
 struct wlfw_bdf_download_req_msg_v01 {
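
The size bump is mechanical: an optional QMI TLV is encoded as a 1-byte type plus a 2-byte length header followed by its payload, so one optional u32 adds 3 + 4 = 7 bytes to the worst-case message, taking WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN from 207 to 214. The QMI_OPT_FLAG entry (voltage_mv_valid) is not encoded on the wire; it only records whether the 0x16 TLV was present, which is why both table entries share the same tlv_type.
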
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 55594c9..47dbd2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -442,7 +442,7 @@
  * Support for Nss x BW (or RU) matrix:
  *	(0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
  * Each entry contains 2 QAM thresholds for 8us and 16us:
- *	0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
+ *	0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
 * i.e. QAM_th1 < QAM_th2 such that if TX uses QAM_tx:
  *	QAM_tx < QAM_th1            --> PPE=0us
  *	QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 4d49a1a..16c6c7f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -868,6 +868,15 @@
 	int ret, i, j;
 	u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 
+	/*
+	 * This command is not supported on earlier firmware versions.
+	 * Unfortunately, we don't have a TLV API flag to rely on, so
+	 * rely on the major version which is in the first byte of
+	 * ucode_ver.
+	 */
+	if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+		return 0;
+
 	ret = iwl_mvm_sar_get_wgds_table(mvm);
 	if (ret < 0) {
 		IWL_DEBUG_RADIO(mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index afed549..0f357e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1997,7 +1997,13 @@
 	if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
 		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
 
-	/* If PPE Thresholds exist, parse them into a FW-familiar format */
+	/*
+	 * Initialize the PPE thresholds to "None" (7), as described in Table
+	 * 9-262ac of 802.11ax/D3.0.
+	 */
+	memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
+
+	/* If PPE Thresholds exist, parse them into a FW-familiar format. */
 	if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
 	    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
 		u8 nss = (sta->he_cap.ppe_thres[0] &
@@ -2938,7 +2944,8 @@
 			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
 		}
 
-		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
+		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+				     false);
 		ret = iwl_mvm_update_sta(mvm, vif, sta);
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTHORIZED) {
@@ -2954,7 +2961,8 @@
 		/* enable beacon filtering */
 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
 
-		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
+		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+				     true);
 
 		ret = 0;
 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index b3987a0..6b65ad6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1685,7 +1685,7 @@
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 /* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
 int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
 void rs_update_last_rssi(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 8169d14..d1c1a80 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -98,8 +98,12 @@
 {
 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
 	u8 supp = 0;
 
+	if (he_cap && he_cap->has_he)
+		return 0;
+
 	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
 		supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
 	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index f2830b5..6b9c670 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1280,7 +1280,7 @@
 		       (unsigned long)(lq_sta->last_tx +
 				       (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
 		IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
-		iwl_mvm_rs_rate_init(mvm, sta, info->band);
+		iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
 		return;
 	}
 	lq_sta->last_tx = jiffies;
@@ -2870,9 +2870,8 @@
 static void rs_initialize_lq(struct iwl_mvm *mvm,
 			     struct ieee80211_sta *sta,
 			     struct iwl_lq_sta *lq_sta,
-			     enum nl80211_band band)
+			     enum nl80211_band band, bool update)
 {
-	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_scale_tbl_info *tbl;
 	struct rs_rate *rate;
 	u8 active_tbl = 0;
@@ -2901,8 +2900,7 @@
 	rs_set_expected_tpt_table(lq_sta, tbl);
 	rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
 	/* TODO restore station should remember the lq cmd */
-	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq,
-			    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED);
+	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
 }
 
 static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
@@ -3155,7 +3153,7 @@
  * Called after adding a new station to initialize rate scaling
  */
 static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			     enum nl80211_band band)
+			     enum nl80211_band band, bool update)
 {
 	int i, j;
 	struct ieee80211_hw *hw = mvm->hw;
@@ -3235,7 +3233,7 @@
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	iwl_mvm_reset_frame_stats(mvm);
 #endif
-	rs_initialize_lq(mvm, sta, lq_sta, band);
+	rs_initialize_lq(mvm, sta, lq_sta, band, update);
 }
 
 static void rs_drv_rate_update(void *mvm_r,
@@ -3255,7 +3253,7 @@
 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
 		ieee80211_stop_tx_ba_session(sta, tid);
 
-	iwl_mvm_rs_rate_init(mvm, sta, sband->band);
+	iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
@@ -4112,12 +4110,12 @@
 };
 
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  enum nl80211_band band)
+			  enum nl80211_band band, bool update)
 {
 	if (iwl_mvm_has_tlc_offload(mvm))
 		rs_fw_rate_init(mvm, sta, band);
 	else
-		rs_drv_rate_init(mvm, sta, band);
+		rs_drv_rate_init(mvm, sta, band, update);
 }
 
 int iwl_mvm_rate_control_register(void)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index d2cf484..8e7f993 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -420,7 +420,7 @@
 
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  enum nl80211_band band);
+			  enum nl80211_band band, bool init);
 
 /* Notify RS about Tx status */
 void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index b002a7a..6a53494 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -900,20 +900,19 @@
 
 /**
  * iwl_mvm_send_lq_cmd() - Send link quality command
- * @init: This command is sent as part of station initialization right
- *        after station has been added.
+ * @sync: This command can be sent synchronously.
  *
  * The link quality command is sent as the last step of station creation.
 * This is the special case in which sync is set, and a callback is used to
 * clear the state indicating that station creation is in progress.
  */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
 {
 	struct iwl_host_cmd cmd = {
 		.id = LQ_CMD,
 		.len = { sizeof(struct iwl_lq_cmd), },
-		.flags = init ? 0 : CMD_ASYNC,
+		.flags = sync ? 0 : CMD_ASYNC,
 		.data = { lq, },
 	};
 
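
After the rename the boolean answers "may this call block?" rather than "is this station init?", and call sites pass it accordingly (rs_initialize_lq() forwards !update, so a post-authorization refresh goes out asynchronously). The flag selection reduces to a one-liner; a trivial sketch with a stand-in flag bit:

#include <stdbool.h>
#include <stdio.h>

#define CMD_ASYNC 0x1	/* stand-in for the host-command flag bit */

static unsigned int lq_cmd_flags(bool sync)
{
	return sync ? 0 : CMD_ASYNC;	/* 0 == blocking submission */
}

int main(void)
{
	printf("sync: %#x, async: %#x\n",
	       lq_cmd_flags(true), lq_cmd_flags(false));
	return 0;
}
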
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index b150da4..5d65500 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -518,6 +518,56 @@
 	{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
+	{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index e2addd8..5d75c97 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,11 +696,10 @@
 				"Send delba to tid=%d, %pM\n",
 				tid, rx_reor_tbl_ptr->ta);
 			mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
-			return;
+			goto exit;
 		}
 	}
+exit:
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 8e63d14..5380fba 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,8 +103,6 @@
  * There could be holes in the buffer, which are skipped by the function.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -113,21 +111,25 @@
 {
 	int pkt_to_send, i;
 	void *rx_tmp_ptr;
+	unsigned long flags;
 
 	pkt_to_send = (start_win > tbl->start_win) ?
 		      min((start_win - tbl->start_win), tbl->win_size) :
 		      tbl->win_size;
 
 	for (i = 0; i < pkt_to_send; ++i) {
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		rx_tmp_ptr = NULL;
 		if (tbl->rx_reorder_ptr[i]) {
 			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 			tbl->rx_reorder_ptr[i] = NULL;
 		}
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		if (rx_tmp_ptr)
 			mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -138,6 +140,7 @@
 	}
 
 	tbl->start_win = start_win;
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -147,8 +150,6 @@
  * The start window is adjusted automatically when a hole is located.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -156,15 +157,22 @@
 {
 	int i, j, xchg;
 	void *rx_tmp_ptr;
+	unsigned long flags;
 
 	for (i = 0; i < tbl->win_size; ++i) {
-		if (!tbl->rx_reorder_ptr[i])
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		if (!tbl->rx_reorder_ptr[i]) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			break;
+		}
 		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 		tbl->rx_reorder_ptr[i] = NULL;
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -177,6 +185,7 @@
 		}
 	}
 	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -184,8 +193,6 @@
  *
  * The function stops the associated timer and dispatches all the
  * pending packets in the Rx reorder table before deletion.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -211,7 +218,11 @@
 
 	del_timer_sync(&tbl->timer_context.timer);
 	tbl->timer_context.timer_is_set = false;
+
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	list_del(&tbl->list);
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
 	kfree(tbl->rx_reorder_ptr);
 	kfree(tbl);
 
@@ -224,17 +235,22 @@
 /*
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
 	struct mwifiex_rx_reorder_tbl *tbl;
+	unsigned long flags;
 
-	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
-		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
+		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			return tbl;
+		}
+	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return NULL;
 }
@@ -251,9 +267,14 @@
 		return;
 
 	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
-		if (!memcmp(tbl->ta, ta, ETH_ALEN))
+	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			mwifiex_del_rx_reorder_entry(priv, tbl);
+			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		}
+	}
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return;
@@ -262,18 +283,24 @@
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static int
 mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
 {
 	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
+	struct mwifiex_private *priv = ctx->priv;
+	unsigned long flags;
 	int i;
 
-	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
-		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
+		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			return i;
+		}
+	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return -1;
 }
@@ -291,22 +318,17 @@
 	struct reorder_tmr_cnxt *ctx =
 		from_timer(ctx, t, timer);
 	int start_win, seq_num;
-	unsigned long flags;
 
 	ctx->timer_is_set = false;
-	spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
 	seq_num = mwifiex_11n_find_last_seq_num(ctx);
 
-	if (seq_num < 0) {
-		spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
+	if (seq_num < 0)
 		return;
-	}
 
 	mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
 	start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
 	mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
 						 start_win);
-	spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -333,14 +355,11 @@
 	 * If we get a TID, ta pair which is already present, dispatch all
 	 * the packets and move the window size until the ssn
 	 */
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (tbl) {
 		mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	/* if !tbl then create one */
 	new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
 	if (!new_node)
@@ -551,20 +570,16 @@
 	int prev_start_win, start_win, end_win, win_size;
 	u16 pkt_index;
 	bool init_window_shift = false;
-	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (!tbl) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		if (pkt_type != PKT_TYPE_BAR)
 			mwifiex_11n_dispatch_pkt(priv, payload);
 		return ret;
 	}
 
 	if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_11n_dispatch_pkt(priv, payload);
 		return ret;
 	}
@@ -651,8 +666,6 @@
 	if (!tbl->timer_context.timer_is_set ||
 	    prev_start_win != tbl->start_win)
 		mwifiex_11n_rxreorder_timer_restart(tbl);
-
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	return ret;
 }
 
@@ -681,18 +694,14 @@
 		    peer_mac, tid, initiator);
 
 	if (cleanup_rx_reorder_tbl) {
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 								 peer_mac);
 		if (!tbl) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
 			mwifiex_dbg(priv->adapter, EVENT,
 				    "event: TID, TA not found in table\n");
 			return;
 		}
 		mwifiex_del_rx_reorder_entry(priv, tbl);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	} else {
 		ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
 		if (!ptx_tbl) {
@@ -726,7 +735,6 @@
 	int tid, win_size;
 	struct mwifiex_rx_reorder_tbl *tbl;
 	uint16_t block_ack_param_set;
-	unsigned long flags;
 
 	block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
 
@@ -740,20 +748,17 @@
 		mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
 			    add_ba_rsp->peer_mac_addr, tid);
 
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 						     add_ba_rsp->peer_mac_addr);
 		if (tbl)
 			mwifiex_del_rx_reorder_entry(priv, tbl);
 
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return 0;
 	}
 
 	win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
 		    >> BLOCKACKPARAM_WINSIZE_POS;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 					     add_ba_rsp->peer_mac_addr);
 	if (tbl) {
@@ -764,7 +769,6 @@
 		else
 			tbl->amsdu = false;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	mwifiex_dbg(priv->adapter, CMD,
 		    "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -804,8 +808,11 @@
 
 	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
-				 &priv->rx_reorder_tbl_ptr, list)
+				 &priv->rx_reorder_tbl_ptr, list) {
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	}
 	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
@@ -929,7 +936,6 @@
 	int tlv_buf_left = len;
 	int ret;
 	u8 *tmp;
-	unsigned long flags;
 
 	mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
 			 event_buf, len);
@@ -949,18 +955,14 @@
 			    tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
 			    tlv_bitmap_len);
 
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		rx_reor_tbl_ptr =
 			mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
 						       tlv_rxba->mac);
 		if (!rx_reor_tbl_ptr) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
 			mwifiex_dbg(priv->adapter, ERROR,
 				    "Can not find rx_reorder_tbl!");
 			return;
 		}
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 		for (i = 0; i < tlv_bitmap_len; i++) {
 			for (j = 0 ; j < 8; j++) {
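
The theme of this mwifiex rework: mwifiex_11n_get_rx_reorder_tbl() and the dispatch helpers now take rx_reorder_tbl_lock themselves, so callers must not hold it across those calls; where a walker both holds the lock and must call such a helper, it detaches the element, drops the lock around the call, and re-takes it. A userspace sketch of that drop-relock shape (a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { struct entry *next; int id; };

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

/* Stand-in for a dispatch helper: must run without tbl_lock held. */
static void dispatch(struct entry *e)
{
	printf("dispatch %d\n", e->id);
}

static void flush_all(void)
{
	pthread_mutex_lock(&tbl_lock);
	while (head) {
		struct entry *e = head;

		head = e->next;			/* detach under the lock */
		pthread_mutex_unlock(&tbl_lock);
		dispatch(e);			/* call out, lock dropped */
		free(e);
		pthread_mutex_lock(&tbl_lock);	/* re-take, re-read head */
	}
	pthread_mutex_unlock(&tbl_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));

		e->id = i;
		e->next = head;
		head = e;
	}
	flush_all();
	return 0;
}
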
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index a83c5af..5ce85d5 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -421,15 +421,12 @@
 		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 	}
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	if (!priv->ap_11n_enabled ||
 	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
 	    (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
 		ret = mwifiex_handle_uap_rx_forward(priv, skb);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return ret;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	/* Reorder and send to kernel */
 	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 7cdb3e7..0a3e046 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -681,6 +681,7 @@
 	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
 	hw->max_rates = 1;
 	hw->max_report_rates = 7;
 	hw->max_rate_tries = 1;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
index 374cc65..16e6b69 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
@@ -799,7 +799,7 @@
 
 	/* enable detection*/
 	mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
-	mt76_wr(dev, 0x212c, 0x0c350001);
+	mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001);
 }
 
 void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
@@ -842,7 +842,11 @@
 		mt76_wr(dev, MT_BBP(DFS, 0), 0);
 		/* clear detector status */
 		mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
-		mt76_wr(dev, 0x212c, 0);
+		if (mt76_chip(&dev->mt76) == 0x7610 ||
+		    mt76_chip(&dev->mt76) == 0x7630)
+			mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081);
+		else
+			mt76_wr(dev, MT_BBP(IBI, 11), 0);
 
 		mt76x2_irq_disable(dev, MT_INT_GPTIMER);
 		mt76_rmw_field(dev, MT_INT_TIMER_EN,
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index af48d43..20447fd 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -385,7 +385,12 @@
 
 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
 		struct ieee80211_txq *txq = sta->txq[i];
-		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+		struct mt76_txq *mtxq;
+
+		if (!txq)
+			continue;
+
+		mtxq = (struct mt76_txq *)txq->drv_priv;
 
 		spin_lock_bh(&mtxq->hwq->lock);
 		mtxq->send_bar = mtxq->aggr && send_bar;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index f4122c8..ef9b502 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -2289,6 +2289,7 @@
 
 	if (rtl_c2h_fast_cmd(hw, skb)) {
 		rtl_c2h_content_parsing(hw, skb);
+		kfree_skb(skb);
 		return;
 	}
 
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 67213f1..0a9eac9 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -78,6 +78,10 @@
 	if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
 		return -EINVAL;
 
+	/* will be unlocked in cw1200_scan_work() */
+	down(&priv->scan.lock);
+	mutex_lock(&priv->conf_mutex);
+
 	frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
 		req->ie_len);
 	if (!frame.skb)
@@ -86,19 +90,15 @@
 	if (req->ie_len)
 		skb_put_data(frame.skb, req->ie, req->ie_len);
 
-	/* will be unlocked in cw1200_scan_work() */
-	down(&priv->scan.lock);
-	mutex_lock(&priv->conf_mutex);
-
 	ret = wsm_set_template_frame(priv, &frame);
 	if (!ret) {
 		/* Host want to be the probe responder. */
 		ret = wsm_set_probe_responder(priv, true);
 	}
 	if (ret) {
+		dev_kfree_skb(frame.skb);
 		mutex_unlock(&priv->conf_mutex);
 		up(&priv->scan.lock);
-		dev_kfree_skb(frame.skb);
 		return ret;
 	}
 
@@ -120,10 +120,9 @@
 		++priv->scan.n_ssids;
 	}
 
-	mutex_unlock(&priv->conf_mutex);
-
 	if (frame.skb)
 		dev_kfree_skb(frame.skb);
+	mutex_unlock(&priv->conf_mutex);
 	queue_work(priv->workqueue, &priv->scan.work);
 	return 0;
 }
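
The reordering makes the cw1200 scan path lock-then-allocate: scan.lock and conf_mutex are taken before the probe-request skb exists, so every failure exits through one strict reverse unwind (free the frame, drop conf_mutex, release scan.lock), and the skb is no longer freed after the locks are dropped. A compilable sketch of that shape, with stubbed helpers standing in for the cw1200 primitives:

#include <stdio.h>
#include <stdlib.h>

/* Stubbed primitives; the shape of the unwind is the point. */
static void take_scan_lock(void)     { puts("down(scan.lock)"); }
static void release_scan_lock(void)  { puts("up(scan.lock)"); }
static void take_conf_mutex(void)    { puts("lock(conf_mutex)"); }
static void release_conf_mutex(void) { puts("unlock(conf_mutex)"); }
static int  program_hw(void *frame)  { (void)frame; return 0; }

static int do_scan(void)
{
	void *frame;

	take_scan_lock();	/* taken before any allocation ... */
	take_conf_mutex();

	frame = malloc(64);
	if (!frame) {
		release_conf_mutex();
		release_scan_lock();
		return -1;
	}

	if (program_hw(frame)) {
		free(frame);		/* ... so errors unwind in    */
		release_conf_mutex();	/* strict reverse order of    */
		release_scan_lock();	/* acquisition and allocation */
		return -1;
	}

	free(frame);		/* template consumed by the hardware */
	release_conf_mutex();
	/* scan.lock stays held; the queued scan work releases it later */
	return 0;
}

int main(void)
{
	return do_scan() ? 1 : 0;
}
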
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 750bea3..627df16 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -164,6 +164,12 @@
 	}
 
 	sdio_claim_host(func);
+	/*
+	 * To guarantee that the SDIO card is power cycled, as required for
+	 * FW programming to succeed, do a brute-force HW reset.
+	 */
+	mmc_hw_reset(card->host);
+
 	sdio_enable_func(func);
 	sdio_release_host(func);
 
@@ -174,20 +180,13 @@
 {
 	struct sdio_func *func = dev_to_sdio_func(glue->dev);
 	struct mmc_card *card = func->card;
-	int error;
 
 	sdio_claim_host(func);
 	sdio_disable_func(func);
 	sdio_release_host(func);
 
 	/* Let runtime PM know the card is powered off */
-	error = pm_runtime_put(&card->dev);
-	if (error < 0 && error != -EBUSY) {
-		dev_err(&card->dev, "%s failed: %i\n", __func__, error);
-
-		return error;
-	}
-
+	pm_runtime_put(&card->dev);
 	return 0;
 }
 
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 64b2186..3a93e4d 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -530,8 +530,10 @@
 	SET_NETDEV_DEV(dev, &priv->lowerdev->dev);
 	dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL);
 
-	if (!dev->ieee80211_ptr)
+	if (!dev->ieee80211_ptr) {
+		err = -ENOMEM;
 		goto remove_handler;
+	}
 
 	dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
 	dev->ieee80211_ptr->wiphy = common_wiphy;
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 0ccb021..10d580c 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -454,6 +454,8 @@
 	if (xenvif_hash_cache_size == 0)
 		return;
 
+	BUG_ON(vif->hash.cache.count);
+
 	spin_lock_init(&vif->hash.cache.lock);
 	INIT_LIST_HEAD(&vif->hash.cache.list);
 }
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f6ae23f..82add0a 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -153,6 +153,13 @@
 {
 	struct xenvif *vif = netdev_priv(dev);
 	unsigned int size = vif->hash.size;
+	unsigned int num_queues;
+
+	/* If queues are not set up internally, always return 0
+	 * as the packet is going to be dropped anyway */
+	num_queues = READ_ONCE(vif->num_queues);
+	if (num_queues < 1)
+		return 0;
 
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
 		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3621e05..d5081ff 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1072,11 +1072,6 @@
 		skb_frag_size_set(&frags[i], len);
 	}
 
-	/* Copied all the bits from the frag list -- free it. */
-	skb_frag_list_init(skb);
-	xenvif_skb_zerocopy_prepare(queue, nskb);
-	kfree_skb(nskb);
-
 	/* Release all the original (foreign) frags. */
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		skb_frag_unref(skb, f);
@@ -1145,6 +1140,8 @@
 		xenvif_fill_frags(queue, skb);
 
 		if (unlikely(skb_has_frag_list(skb))) {
+			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+			xenvif_skb_zerocopy_prepare(queue, nskb);
 			if (xenvif_handle_frag_list(queue, skb)) {
 				if (net_ratelimit())
 					netdev_err(queue->vif->dev,
@@ -1153,6 +1150,9 @@
 				kfree_skb(skb);
 				continue;
 			}
+			/* Copied all the bits from the frag list -- free it. */
+			skb_frag_list_init(skb);
+			kfree_skb(nskb);
 		}
 
 		skb->dev      = queue->vif->dev;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f17f602..5b97cc9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -905,7 +905,7 @@
 		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-			BUG_ON(pull_to <= skb_headlen(skb));
+			BUG_ON(pull_to < skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
 		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 354bece..2db1bd1 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -1295,7 +1295,7 @@
 	gpio_free(platform_data->clkreq_gpio);
 err_ese_gpio:
 	/* optional gpio, not sure was configured in probe */
-	if (nqx_dev->ese_gpio > 0)
+	if (gpio_is_valid(platform_data->ese_gpio))
 		gpio_free(platform_data->ese_gpio);
 err_firm_gpio:
 	gpio_free(platform_data->firm_gpio);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 2082ae0..1d432c5 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -309,8 +309,11 @@
 	blk_cleanup_queue(q);
 }
 
-static void pmem_freeze_queue(void *q)
+static void pmem_freeze_queue(struct percpu_ref *ref)
 {
+	struct request_queue *q;
+
+	q = container_of(ref, typeof(*q), q_usage_counter);
 	blk_freeze_queue_start(q);
 }
 
@@ -402,6 +405,7 @@
 
 	pmem->pfn_flags = PFN_DEV;
 	pmem->pgmap.ref = &q->q_usage_counter;
+	pmem->pgmap.kill = pmem_freeze_queue;
 	if (is_nd_pfn(dev)) {
 		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
 			return -ENOMEM;
@@ -427,13 +431,6 @@
 		memcpy(&bb_res, &nsio->res, sizeof(bb_res));
 	}
 
-	/*
-	 * At release time the queue must be frozen before
-	 * devm_memremap_pages is unwound
-	 */
-	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
-		return -ENOMEM;
-
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
 	pmem->virt_addr = addr;
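
With pgmap.kill in place, the freeze callback receives the percpu_ref itself, and the queue is recovered by walking back from the embedded q_usage_counter rather than capturing q as opaque devm action data. The pointer arithmetic is the usual container_of(); a standalone sketch using offsetof():

#include <stddef.h>
#include <stdio.h>

struct ref { int count; };

struct queue {
	int id;
	struct ref usage_counter;	/* embedded, like q_usage_counter */
};

/* The callback gets the member; recover the enclosing structure. */
static void freeze(struct ref *r)
{
	struct queue *q = (struct queue *)
		((char *)r - offsetof(struct queue, usage_counter));

	printf("freezing queue %d\n", q->id);
}

int main(void)
{
	struct queue q = { .id = 7, .usage_counter = { 1 } };

	freeze(&q.usage_counter);
	return 0;
}
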
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index e5bddae..2cdb303 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1182,6 +1182,7 @@
 	 * effects say only one namespace is affected.
 	 */
 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+		mutex_lock(&ctrl->scan_lock);
 		nvme_start_freeze(ctrl);
 		nvme_wait_freeze(ctrl);
 	}
@@ -1210,8 +1211,10 @@
 	 */
 	if (effects & NVME_CMD_EFFECTS_LBCC)
 		nvme_update_formats(ctrl);
-	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
+	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 		nvme_unfreeze(ctrl);
+		mutex_unlock(&ctrl->scan_lock);
+	}
 	if (effects & NVME_CMD_EFFECTS_CCC)
 		nvme_init_identify(ctrl);
 	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
@@ -2095,7 +2098,7 @@
 
 	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
 	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
-			"nqn.2014.08.org.nvmexpress:%4x%4x",
+			"nqn.2014.08.org.nvmexpress:%04x%04x",
 			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
 	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
 	off += sizeof(id->sn);
@@ -3292,6 +3295,7 @@
 	if (nvme_identify_ctrl(ctrl, &id))
 		return;
 
+	mutex_lock(&ctrl->scan_lock);
 	nn = le32_to_cpu(id->nn);
 	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
 	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -3300,6 +3304,7 @@
 	}
 	nvme_scan_ns_sequential(ctrl, nn);
 out_free_id:
+	mutex_unlock(&ctrl->scan_lock);
 	kfree(id);
 	down_write(&ctrl->namespaces_rwsem);
 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
@@ -3535,6 +3540,7 @@
 
 	ctrl->state = NVME_CTRL_NEW;
 	spin_lock_init(&ctrl->lock);
+	mutex_init(&ctrl->scan_lock);
 	INIT_LIST_HEAD(&ctrl->namespaces);
 	init_rwsem(&ctrl->namespaces_rwsem);
 	ctrl->dev = dev;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index c27af27..da8f5ad 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -531,8 +531,7 @@
 	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
 	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
 		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
-	if (!(ctrl->anacap & (1 << 6)))
-		ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
+	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
 
 	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
 		dev_err(ctrl->device,
@@ -556,6 +555,7 @@
 	return 0;
 out_free_ana_log_buf:
 	kfree(ctrl->ana_log_buf);
+	ctrl->ana_log_buf = NULL;
 out:
 	return error;
 }
@@ -563,5 +563,6 @@
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
 	kfree(ctrl->ana_log_buf);
+	ctrl->ana_log_buf = NULL;
 }
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 60220de..e82cdae 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -148,6 +148,7 @@
 	enum nvme_ctrl_state state;
 	bool identified;
 	spinlock_t lock;
+	struct mutex scan_lock;
 	const struct nvme_ctrl_ops *ops;
 	struct request_queue *admin_q;
 	struct request_queue *connect_q;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d668682..7b9ef8e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -908,9 +908,11 @@
 
 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
 {
-	if (++nvmeq->cq_head == nvmeq->q_depth) {
+	if (nvmeq->cq_head == nvmeq->q_depth - 1) {
 		nvmeq->cq_head = 0;
 		nvmeq->cq_phase = !nvmeq->cq_phase;
+	} else {
+		nvmeq->cq_head++;
 	}
 }
 
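
The rewritten head update never stores q_depth into cq_head, even transiently: it tests for the final slot first and either wraps (flipping the phase bit) or increments. A standalone sketch of the wrap logic:

#include <stdbool.h>
#include <stdio.h>

struct cq {
	unsigned int head, depth;
	bool phase;
};

/* Advance without ever storing an out-of-range head, even briefly. */
static void cq_update_head(struct cq *q)
{
	if (q->head == q->depth - 1) {
		q->head = 0;
		q->phase = !q->phase;	/* wrapped: expected phase flips */
	} else {
		q->head++;
	}
}

int main(void)
{
	struct cq q = { .head = 0, .depth = 3, .phase = true };

	for (int i = 0; i < 7; i++) {
		cq_update_head(&q);
		printf("head=%u phase=%d\n", q.head, q.phase);
	}
	return 0;
}
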
@@ -1727,8 +1729,9 @@
 		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
 		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
 
-		dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
-				le64_to_cpu(desc->addr));
+		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
+			       le64_to_cpu(desc->addr),
+			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 	}
 
 	kfree(dev->host_mem_desc_bufs);
@@ -1794,8 +1797,9 @@
 	while (--i >= 0) {
 		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
 
-		dma_free_coherent(dev->dev, size, bufs[i],
-				le64_to_cpu(descs[i].addr));
+		dma_free_attrs(dev->dev, size, bufs[i],
+			       le64_to_cpu(descs[i].addr),
+			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 	}
 
 	kfree(bufs);
@@ -2256,6 +2260,27 @@
 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
 		nvme_dev_disable(dev, false);
 
+	mutex_lock(&dev->shutdown_lock);
+	result = nvme_pci_enable(dev);
+	if (result)
+		goto out_unlock;
+
+	result = nvme_pci_configure_admin_queue(dev);
+	if (result)
+		goto out_unlock;
+
+	result = nvme_alloc_admin_tags(dev);
+	if (result)
+		goto out_unlock;
+
+	/*
+	 * Limit the max command size to prevent iod->sg allocations going
+	 * over a single page.
+	 */
+	dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+	dev->ctrl.max_segments = NVME_MAX_SEGS;
+	mutex_unlock(&dev->shutdown_lock);
+
 	/*
 	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
 	 * initializing procedure here.
@@ -2266,25 +2291,6 @@
 		goto out;
 	}
 
-	result = nvme_pci_enable(dev);
-	if (result)
-		goto out;
-
-	result = nvme_pci_configure_admin_queue(dev);
-	if (result)
-		goto out;
-
-	result = nvme_alloc_admin_tags(dev);
-	if (result)
-		goto out;
-
-	/*
-	 * Limit the max command size to prevent iod->sg allocations going
-	 * over a single page.
-	 */
-	dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
-	dev->ctrl.max_segments = NVME_MAX_SEGS;
-
 	result = nvme_init_identify(&dev->ctrl);
 	if (result)
 		goto out;
@@ -2348,6 +2354,8 @@
 	nvme_start_ctrl(&dev->ctrl);
 	return;
 
+ out_unlock:
+	mutex_unlock(&dev->shutdown_lock);
  out:
 	nvme_remove_dead_ctrl(dev, result);
 }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index b6a28de..0939a4e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1672,18 +1672,28 @@
 nvme_rdma_timeout(struct request *rq, bool reserved)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_rdma_queue *queue = req->queue;
+	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-	dev_warn(req->queue->ctrl->ctrl.device,
-		 "I/O %d QID %d timeout, reset controller\n",
-		 rq->tag, nvme_rdma_queue_idx(req->queue));
+	dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
+		 rq->tag, nvme_rdma_queue_idx(queue));
 
-	/* queue error recovery */
-	nvme_rdma_error_recovery(req->queue->ctrl);
+	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+		/*
+		 * Tear down immediately if the controller times out while
+		 * starting or if we have already started error recovery.
+		 * All outstanding requests are completed on shutdown, so we
+		 * return BLK_EH_DONE.
+		 */
+		flush_work(&ctrl->err_work);
+		nvme_rdma_teardown_io_queues(ctrl, false);
+		nvme_rdma_teardown_admin_queue(ctrl, false);
+		return BLK_EH_DONE;
+	}
 
-	/* fail with DNR on cmd timeout */
-	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+	nvme_rdma_error_recovery(ctrl);
 
-	return BLK_EH_DONE;
+	return BLK_EH_RESET_TIMER;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
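
The timeout handler is now state-driven: if the controller is not live, tear the queues down and complete the request (BLK_EH_DONE); otherwise kick error recovery and re-arm the timer (BLK_EH_RESET_TIMER) so recovery, not the timeout path, owns completion. The decision skeleton, with stand-in enums:

#include <stdio.h>

enum eh_ret { EH_DONE, EH_RESET_TIMER };
enum ctrl_state { CTRL_LIVE, CTRL_CONNECTING, CTRL_RESETTING };

/* Decision logic only; teardown and recovery are stand-ins. */
static enum eh_ret io_timeout(enum ctrl_state state)
{
	if (state != CTRL_LIVE) {
		puts("teardown queues, complete request");
		return EH_DONE;		/* request finished right here */
	}
	puts("start error recovery, re-arm timer");
	return EH_RESET_TIMER;		/* recovery will complete it */
}

int main(void)
{
	io_timeout(CTRL_LIVE);
	io_timeout(CTRL_CONNECTING);
	return 0;
}
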
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index e57f390..08f997a 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -139,6 +139,10 @@
 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_rsp *r);
 
 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
 
@@ -182,9 +186,17 @@
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
 	if (unlikely(!rsp)) {
-		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		int ret;
+
+		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 		if (unlikely(!rsp))
 			return NULL;
+		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+		if (unlikely(ret)) {
+			kfree(rsp);
+			return NULL;
+		}
+
 		rsp->allocated = true;
 	}
 
@@ -196,7 +208,8 @@
 {
 	unsigned long flags;
 
-	if (rsp->allocated) {
+	if (unlikely(rsp->allocated)) {
+		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
 		kfree(rsp);
 		return;
 	}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 70f5fd0..3f21ea6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -115,9 +115,6 @@
 }
 #endif
 
-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
-
 /*
  * Assumptions behind phandle_cache implementation:
  *   - phandle property values are in a contiguous range of 1..n
@@ -126,6 +123,66 @@
  *   - the phandle lookup overhead reduction provided by the cache
  *     will likely be less
  */
+
+static struct device_node **phandle_cache;
+static u32 phandle_cache_mask;
+
+/*
+ * Caller must hold devtree_lock.
+ */
+static void __of_free_phandle_cache(void)
+{
+	u32 cache_entries = phandle_cache_mask + 1;
+	u32 k;
+
+	if (!phandle_cache)
+		return;
+
+	for (k = 0; k < cache_entries; k++)
+		of_node_put(phandle_cache[k]);
+
+	kfree(phandle_cache);
+	phandle_cache = NULL;
+}
+
+int of_free_phandle_cache(void)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+
+	__of_free_phandle_cache();
+
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+	return 0;
+}
+#if !defined(CONFIG_MODULES)
+late_initcall_sync(of_free_phandle_cache);
+#endif
+
+/*
+ * Caller must hold devtree_lock.
+ */
+void __of_free_phandle_cache_entry(phandle handle)
+{
+	phandle masked_handle;
+	struct device_node *np;
+
+	if (!handle)
+		return;
+
+	masked_handle = handle & phandle_cache_mask;
+
+	if (phandle_cache) {
+		np = phandle_cache[masked_handle];
+		if (np && handle == np->phandle) {
+			of_node_put(np);
+			phandle_cache[masked_handle] = NULL;
+		}
+	}
+}
+
 void of_populate_phandle_cache(void)
 {
 	unsigned long flags;
@@ -135,8 +192,7 @@
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	kfree(phandle_cache);
-	phandle_cache = NULL;
+	__of_free_phandle_cache();
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
@@ -154,30 +210,15 @@
 		goto out;
 
 	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
+		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
+			of_node_get(np);
 			phandle_cache[np->phandle & phandle_cache_mask] = np;
+		}
 
 out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 
-int of_free_phandle_cache(void)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	kfree(phandle_cache);
-	phandle_cache = NULL;
-
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-	return 0;
-}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
-
 void __init of_core_init(void)
 {
 	struct device_node *np;
@@ -1150,13 +1191,23 @@
 		if (phandle_cache[masked_handle] &&
 		    handle == phandle_cache[masked_handle]->phandle)
 			np = phandle_cache[masked_handle];
+		if (np && of_node_check_flag(np, OF_DETACHED)) {
+			WARN_ON(1); /* did not uncache np on node removal */
+			of_node_put(np);
+			phandle_cache[masked_handle] = NULL;
+			np = NULL;
+		}
 	}
 
 	if (!np) {
 		for_each_of_allnodes(np)
-			if (np->phandle == handle) {
-				if (phandle_cache)
+			if (np->phandle == handle &&
+			    !of_node_check_flag(np, OF_DETACHED)) {
+				if (phandle_cache) {
+					/* will put when removed from cache */
+					of_node_get(np);
 					phandle_cache[masked_handle] = np;
+				}
 				break;
 			}
 	}
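
The thread running through this of/base.c change is ownership: the phandle cache now holds a reference on every node it stores, taken with of_node_get() on insert and dropped with of_node_put() on eviction, repopulation, or teardown, so a cached pointer can never dangle over freed memory. A userspace sketch of a refcounted single-slot-per-hash cache:

#include <stdio.h>
#include <stdlib.h>

struct node { int refs; unsigned int phandle; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n)
{
	if (--n->refs == 0)
		free(n);
}

#define CACHE_MASK 0x3
static struct node *cache[CACHE_MASK + 1];

/* The cache owns a reference for as long as an entry is stored. */
static void cache_insert(struct node *n)
{
	unsigned int slot = n->phandle & CACHE_MASK;

	if (cache[slot])
		node_put(cache[slot]);	/* drop the displaced entry's ref */
	node_get(n);
	cache[slot] = n;
}

static void cache_evict(unsigned int phandle)
{
	unsigned int slot = phandle & CACHE_MASK;
	struct node *n = cache[slot];

	if (n && n->phandle == phandle) {	/* mirrors the masked check */
		cache[slot] = NULL;
		node_put(n);
	}
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	n->refs = 1;		/* creator's reference */
	n->phandle = 5;
	cache_insert(n);	/* refs == 2 */
	node_put(n);		/* creator done; cache keeps the node alive */
	cache_evict(5);		/* last reference dropped, node freed */
	return 0;
}
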
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 40b9051..2587428 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -221,7 +221,8 @@
 		return -ENODEV;
 
 	/* Name & Type */
-	csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name,
+	/* %p eats all alphanum characters, so %c must be used here */
+	csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T',
 			 dev->of_node->type);
 	tsize = csize;
 	len -= csize;
@@ -300,7 +301,7 @@
 	if ((!dev) || (!dev->of_node))
 		return;
 
-	add_uevent_var(env, "OF_NAME=%s", dev->of_node->name);
+	add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
 	add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
 	if (dev->of_node->type && strcmp("<NULL>", dev->of_node->type) != 0)
 		add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type);
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index f4f8ed9..45c0b1f 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -268,13 +268,13 @@
 	}
 
 	of_node_set_flag(np, OF_DETACHED);
+
+	/* race with of_find_node_by_phandle() prevented by devtree_lock */
+	__of_free_phandle_cache_entry(np->phandle);
 }
 
 /**
  * of_detach_node() - "Unplug" a node from the device tree.
- *
- * The caller must hold a reference to the node.  The memory associated with
- * the node is not freed until its refcount goes to zero.
  */
 int of_detach_node(struct device_node *np)
 {
@@ -330,6 +330,25 @@
 	if (!of_node_check_flag(node, OF_DYNAMIC))
 		return;
 
+	if (of_node_check_flag(node, OF_OVERLAY)) {
+
+		if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) {
+			/* premature refcount of zero, do not free memory */
+			pr_err("ERROR: memory leak before freeing overlay changeset, %pOF\n",
+			       node);
+			return;
+		}
+
+		/*
+		 * If node->properties non-empty then properties were added
+		 * to this node either by different overlay that has not
+		 * yet been removed, or by a non-overlay mechanism.
+		 */
+		if (node->properties)
+			pr_err("ERROR: %s(), unexpected properties in %pOF\n",
+			       __func__, node);
+	}
+
 	property_list_free(node->properties);
 	property_list_free(node->deadprops);
 
@@ -434,6 +453,16 @@
 
 static void __of_changeset_entry_destroy(struct of_changeset_entry *ce)
 {
+	if (ce->action == OF_RECONFIG_ATTACH_NODE &&
+	    of_node_check_flag(ce->np, OF_OVERLAY)) {
+		if (kref_read(&ce->np->kobj.kref) > 1) {
+			pr_err("ERROR: memory leak, expected refcount 1 instead of %d, of_node_get()/of_node_put() unbalanced - destroy cset entry: attach overlay node %pOF\n",
+			       kref_read(&ce->np->kobj.kref), ce->np);
+		} else {
+			of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET);
+		}
+	}
+
 	of_node_put(ce->np);
 	list_del(&ce->node);
 	kfree(ce);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 02ad93a..e12f274 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -274,6 +274,135 @@
 }
 EXPORT_SYMBOL_GPL(of_irq_parse_raw);
 
+int of_irq_domain_map(const struct irq_fwspec *in, struct irq_fwspec *out)
+{
+	char *stem_name;
+	char *cells_name, *map_name = NULL, *mask_name = NULL;
+	char *pass_name = NULL;
+	struct device_node *cur, *new = NULL;
+	const __be32 *map, *mask, *pass;
+	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
+	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
+	__be32 initial_match_array[MAX_PHANDLE_ARGS];
+	const __be32 *match_array = initial_match_array;
+	int i, ret, map_len, match;
+	u32 in_size, out_size;
+
+	stem_name = "";
+	cells_name = "#interrupt-cells";
+
+	ret = -ENOMEM;
+	map_name = kasprintf(GFP_KERNEL, "irqdomain%s-map", stem_name);
+	if (!map_name)
+		goto free;
+
+	mask_name = kasprintf(GFP_KERNEL, "irqdomain%s-map-mask", stem_name);
+	if (!mask_name)
+		goto free;
+
+	pass_name = kasprintf(GFP_KERNEL, "irqdomain%s-map-pass-thru", stem_name);
+	if (!pass_name)
+		goto free;
+
+	/* Get the #interrupt-cells property */
+	cur = to_of_node(in->fwnode);
+	ret = of_property_read_u32(cur, cells_name, &in_size);
+	if (ret < 0)
+		goto put;
+
+	/* Precalculate the match array - this simplifies match loop */
+	for (i = 0; i < in_size; i++)
+		initial_match_array[i] = cpu_to_be32(in->param[i]);
+
+	ret = -EINVAL;
+	/* Get the irqdomain-map property */
+	map = of_get_property(cur, map_name, &map_len);
+	if (!map) {
+		ret = 0;
+		goto free;
+	}
+	map_len /= sizeof(u32);
+
+	/* Get the irqdomain-map-mask property (optional) */
+	mask = of_get_property(cur, mask_name, NULL);
+	if (!mask)
+		mask = dummy_mask;
+	/* Iterate through irqdomain-map property */
+	match = 0;
+	while (map_len > (in_size + 1) && !match) {
+		/* Compare specifiers */
+		match = 1;
+		for (i = 0; i < in_size; i++, map_len--)
+			match &= !((match_array[i] ^ *map++) & mask[i]);
+
+		of_node_put(new);
+		new = of_find_node_by_phandle(be32_to_cpup(map));
+		map++;
+		map_len--;
+
+		/* Check if not found */
+		if (!new)
+			goto put;
+
+		if (!of_device_is_available(new))
+			match = 0;
+
+		ret = of_property_read_u32(new, cells_name, &out_size);
+		if (ret)
+			goto put;
+
+		/* Check for malformed properties */
+		if (WARN_ON(out_size > MAX_PHANDLE_ARGS))
+			goto put;
+		if (map_len < out_size)
+			goto put;
+
+		/* Move forward by new node's #interrupt-cells amount */
+		map += out_size;
+		map_len -= out_size;
+	}
+
+	if (!match) {
+		ret = -EINVAL;
+		goto put;
+	}
+
+	/* Get the irqdomain-map-pass-thru property (optional) */
+	pass = of_get_property(cur, pass_name, NULL);
+	if (!pass)
+		pass = dummy_pass;
+
+	/*
+	 * Successfully parsed an irqdomain-map translation; copy new
+	 * specifier into the out structure, keeping the
+	 * bits specified in irqdomain-map-pass-thru.
+	 */
+	match_array = map - out_size;
+	for (i = 0; i < out_size; i++) {
+		__be32 val = *(map - out_size + i);
+
+		out->param[i] = in->param[i];
+		if (i < in_size) {
+			val &= ~pass[i];
+			val |= cpu_to_be32(out->param[i]) & pass[i];
+		}
+
+		out->param[i] = be32_to_cpu(val);
+	}
+	out->param_count = in_size = out_size;
+	out->fwnode = of_node_to_fwnode(new);
+put:
+	of_node_put(cur);
+	of_node_put(new);
+free:
+	kfree(mask_name);
+	kfree(map_name);
+	kfree(pass_name);
+
+	return ret;
+}
+EXPORT_SYMBOL(of_irq_domain_map);
+
 /**
  * of_irq_parse_one - Resolve an interrupt for a device
  * @device: the device whose interrupt is to be resolved
@@ -350,6 +479,8 @@
 int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 {
 	int irq = of_irq_get(dev, index);
+	u32 trigger_type;
+	struct of_phandle_args oirq;
 
 	if (irq < 0)
 		return irq;
@@ -367,8 +498,17 @@
 		of_property_read_string_index(dev, "interrupt-names", index,
 					      &name);
 
+		trigger_type = irqd_get_trigger_type(irq_get_irq_data(irq));
+
+		of_irq_parse_one(dev, index, &oirq);
+
+		if (!trigger_type &&
+			of_device_is_compatible(oirq.np, "arm,gic-v3"))
+			pr_err("IRQ TYPE should not be NONE for %s\n",
+							dev->full_name);
+
 		r->start = r->end = irq;
-		r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq));
+		r->flags = IORESOURCE_IRQ | trigger_type;
 		r->name = name ? name : of_node_full_name(dev);
 	}
 
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index 7a0a189..c72eef9 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -133,6 +133,9 @@
 	}
 	if (!name)
 		return -ENOMEM;
+
+	of_node_get(np);
+
 	rc = kobject_add(&np->kobj, parent, "%s", name);
 	kfree(name);
 	if (rc)
@@ -159,6 +162,5 @@
 		kobject_del(&np->kobj);
 	}
 
-	/* finally remove the kobj_init ref */
 	of_node_put(np);
 }
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index e92391d..5ad1342 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -97,8 +97,8 @@
 		return rc;
 	}
 
-	dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
-		child->name, addr);
+	dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
+		child, addr);
 	return 0;
 }
 
@@ -127,8 +127,8 @@
 		return rc;
 	}
 
-	dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
-		child->name, addr);
+	dev_dbg(&mdio->dev, "registered mdio device %pOFn at address %i\n",
+		child, addr);
 	return 0;
 }
 
@@ -263,8 +263,8 @@
 				continue;
 
 			/* be noisy to encourage people to set reg property */
-			dev_info(&mdio->dev, "scan phy %s at address %i\n",
-				 child->name, addr);
+			dev_info(&mdio->dev, "scan phy %pOFn at address %i\n",
+				 child, addr);
 
 			if (of_mdiobus_child_is_phy(child)) {
 				rc = of_mdiobus_register_phy(mdio, child, addr);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 2411ed3..f5b4522 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -168,8 +168,8 @@
 		np = of_get_next_parent(np);
 	}
 	if (np && r)
-		pr_warn("Invalid \"numa-node-id\" property in node %s\n",
-			np->name);
+		pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n",
+			np);
 	of_node_put(np);
 
 	/*
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 216175d..f5da842 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -76,6 +76,10 @@
 int of_resolve_phandles(struct device_node *tree);
 #endif
 
+#if defined(CONFIG_OF_DYNAMIC)
+void __of_free_phandle_cache_entry(phandle handle);
+#endif
+
 #if defined(CONFIG_OF_OVERLAY)
 void of_overlay_mutex_lock(void);
 void of_overlay_mutex_unlock(void);
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index eda57ef..9808aae 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -24,6 +24,26 @@
 #include "of_private.h"
 
 /**
+ * struct target - info about current target node while recursing through overlay
+ * @np:			node where current level of overlay will be applied
+ * @in_livetree:	@np is a node in the live devicetree
+ *
+ * Used in the algorithm to create the portion of a changeset that describes
+ * an overlay fragment, which is a devicetree subtree.  Initially @np is a node
+ * in the live devicetree where the overlay subtree is targeted to be grafted
+ * into.  When recursing to the next level of the overlay subtree, the target
+ * also recurses to the next level of the live devicetree, as long as the
+ * overlay subtree node exists in the live devicetree.  When a node in the overlay
+ * subtree does not exist at the same level in the live devicetree, target->np
+ * points to a newly allocated node, and all subsequent targets in the subtree
+ * will be newly allocated nodes.
+ */
+struct target {
+	struct device_node *np;
+	bool in_livetree;
+};
+
+/**
  * struct fragment - info about fragment nodes in overlay expanded device tree
  * @target:	target of the overlay operation
  * @overlay:	pointer to the __overlay__ node
@@ -72,8 +92,7 @@
 }
 
 static int build_changeset_next_level(struct overlay_changeset *ovcs,
-		struct device_node *target_node,
-		const struct device_node *overlay_node);
+		struct target *target, const struct device_node *overlay_node);
 
 /*
  * of_resolve_phandles() finds the largest phandle in the live tree.
@@ -257,14 +276,17 @@
 /**
  * add_changeset_property() - add @overlay_prop to overlay changeset
  * @ovcs:		overlay changeset
- * @target_node:	where to place @overlay_prop in live tree
+ * @target:		where @overlay_prop will be placed
  * @overlay_prop:	property to add or update, from overlay tree
  * @is_symbols_prop:	1 if @overlay_prop is from node "/__symbols__"
  *
- * If @overlay_prop does not already exist in @target_node, add changeset entry
- * to add @overlay_prop in @target_node, else add changeset entry to update
+ * If @overlay_prop does not already exist in live devicetree, add changeset
+ * entry to add @overlay_prop in @target, else add changeset entry to update
  * value of @overlay_prop.
  *
+ * @target may be either in the live devicetree or in a new subtree that
+ * is contained in the changeset.
+ *
  * Some special properties are not updated (no error returned).
  *
  * Update of property in symbols node is not allowed.
@@ -273,20 +295,22 @@
  * invalid @overlay.
  */
 static int add_changeset_property(struct overlay_changeset *ovcs,
-		struct device_node *target_node,
-		struct property *overlay_prop,
+		struct target *target, struct property *overlay_prop,
 		bool is_symbols_prop)
 {
 	struct property *new_prop = NULL, *prop;
 	int ret = 0;
 
-	prop = of_find_property(target_node, overlay_prop->name, NULL);
-
 	if (!of_prop_cmp(overlay_prop->name, "name") ||
 	    !of_prop_cmp(overlay_prop->name, "phandle") ||
 	    !of_prop_cmp(overlay_prop->name, "linux,phandle"))
 		return 0;
 
+	if (target->in_livetree)
+		prop = of_find_property(target->np, overlay_prop->name, NULL);
+	else
+		prop = NULL;
+
 	if (is_symbols_prop) {
 		if (prop)
 			return -EINVAL;
@@ -299,10 +323,10 @@
 		return -ENOMEM;
 
 	if (!prop)
-		ret = of_changeset_add_property(&ovcs->cset, target_node,
+		ret = of_changeset_add_property(&ovcs->cset, target->np,
 						new_prop);
 	else
-		ret = of_changeset_update_property(&ovcs->cset, target_node,
+		ret = of_changeset_update_property(&ovcs->cset, target->np,
 						   new_prop);
 
 	if (ret) {
@@ -315,14 +339,14 @@
 
 /**
  * add_changeset_node() - add @node (and children) to overlay changeset
- * @ovcs:		overlay changeset
- * @target_node:	where to place @node in live tree
- * @node:		node from within overlay device tree fragment
+ * @ovcs:	overlay changeset
+ * @target:	where @node will be placed in live tree or changeset
+ * @node:	node from within overlay device tree fragment
  *
- * If @node does not already exist in @target_node, add changeset entry
- * to add @node in @target_node.
+ * If @node does not already exist in @target, add changeset entry
+ * to add @node in @target.
  *
- * If @node already exists in @target_node, and the existing node has
+ * If @node already exists in @target, and the existing node has
  * a phandle, the overlay node is not allowed to have a phandle.
  *
  * If @node has child nodes, add the children recursively via
@@ -355,36 +379,46 @@
  * invalid @overlay.
  */
 static int add_changeset_node(struct overlay_changeset *ovcs,
-		struct device_node *target_node, struct device_node *node)
+		struct target *target, struct device_node *node)
 {
 	const char *node_kbasename;
 	struct device_node *tchild;
+	struct target target_child;
 	int ret = 0;
 
 	node_kbasename = kbasename(node->full_name);
 
-	for_each_child_of_node(target_node, tchild)
+	for_each_child_of_node(target->np, tchild)
 		if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name)))
 			break;
 
 	if (!tchild) {
-		tchild = __of_node_dup(node, node_kbasename);
+		tchild = __of_node_dup(NULL, node_kbasename);
 		if (!tchild)
 			return -ENOMEM;
 
-		tchild->parent = target_node;
+		tchild->parent = target->np;
+		of_node_set_flag(tchild, OF_OVERLAY);
 
 		ret = of_changeset_attach_node(&ovcs->cset, tchild);
 		if (ret)
 			return ret;
 
-		return build_changeset_next_level(ovcs, tchild, node);
+		target_child.np = tchild;
+		target_child.in_livetree = false;
+
+		ret = build_changeset_next_level(ovcs, &target_child, node);
+		of_node_put(tchild);
+		return ret;
 	}
 
-	if (node->phandle && tchild->phandle)
+	if (node->phandle && tchild->phandle) {
 		ret = -EINVAL;
-	else
-		ret = build_changeset_next_level(ovcs, tchild, node);
+	} else {
+		target_child.np = tchild;
+		target_child.in_livetree = target->in_livetree;
+		ret = build_changeset_next_level(ovcs, &target_child, node);
+	}
 	of_node_put(tchild);
 
 	return ret;
@@ -393,7 +427,7 @@
 /**
  * build_changeset_next_level() - add level of overlay changeset
  * @ovcs:		overlay changeset
- * @target_node:	where to place @overlay_node in live tree
+ * @target:		where to place @overlay_node in live tree
  * @overlay_node:	node from within an overlay device tree fragment
  *
  * Add the properties (if any) and nodes (if any) from @overlay_node to the
@@ -406,27 +440,26 @@
  * invalid @overlay_node.
  */
 static int build_changeset_next_level(struct overlay_changeset *ovcs,
-		struct device_node *target_node,
-		const struct device_node *overlay_node)
+		struct target *target, const struct device_node *overlay_node)
 {
 	struct device_node *child;
 	struct property *prop;
 	int ret;
 
 	for_each_property_of_node(overlay_node, prop) {
-		ret = add_changeset_property(ovcs, target_node, prop, 0);
+		ret = add_changeset_property(ovcs, target, prop, 0);
 		if (ret) {
 			pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
-				 target_node, prop->name, ret);
+				 target->np, prop->name, ret);
 			return ret;
 		}
 	}
 
 	for_each_child_of_node(overlay_node, child) {
-		ret = add_changeset_node(ovcs, target_node, child);
+		ret = add_changeset_node(ovcs, target, child);
 		if (ret) {
-			pr_debug("Failed to apply node @%pOF/%s, err=%d\n",
-				 target_node, child->name, ret);
+			pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
+				 target->np, child, ret);
 			of_node_put(child);
 			return ret;
 		}
@@ -439,17 +472,17 @@
  * Add the properties from __overlay__ node to the @ovcs->cset changeset.
  */
 static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
-		struct device_node *target_node,
+		struct target *target,
 		const struct device_node *overlay_symbols_node)
 {
 	struct property *prop;
 	int ret;
 
 	for_each_property_of_node(overlay_symbols_node, prop) {
-		ret = add_changeset_property(ovcs, target_node, prop, 1);
+		ret = add_changeset_property(ovcs, target, prop, 1);
 		if (ret) {
 			pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
-				 target_node, prop->name, ret);
+				 target->np, prop->name, ret);
 			return ret;
 		}
 	}
@@ -472,6 +505,7 @@
 static int build_changeset(struct overlay_changeset *ovcs)
 {
 	struct fragment *fragment;
+	struct target target;
 	int fragments_count, i, ret;
 
 	/*
@@ -486,7 +520,9 @@
 	for (i = 0; i < fragments_count; i++) {
 		fragment = &ovcs->fragments[i];
 
-		ret = build_changeset_next_level(ovcs, fragment->target,
+		target.np = fragment->target;
+		target.in_livetree = true;
+		ret = build_changeset_next_level(ovcs, &target,
 						 fragment->overlay);
 		if (ret) {
 			pr_debug("apply failed '%pOF'\n", fragment->target);
@@ -496,7 +532,10 @@
 
 	if (ovcs->symbols_fragment) {
 		fragment = &ovcs->fragments[ovcs->count - 1];
-		ret = build_changeset_symbols_node(ovcs, fragment->target,
+
+		target.np = fragment->target;
+		target.in_livetree = true;
+		ret = build_changeset_symbols_node(ovcs, &target,
 						   fragment->overlay);
 		if (ret) {
 			pr_debug("apply failed '%pOF'\n", fragment->target);
@@ -514,7 +553,7 @@
  * 1) "target" property containing the phandle of the target
  * 2) "target-path" property containing the path of the target
  */
-static struct device_node *find_target_node(struct device_node *info_node)
+static struct device_node *find_target(struct device_node *info_node)
 {
 	struct device_node *node;
 	const char *path;
@@ -620,7 +659,7 @@
 
 		fragment = &fragments[cnt];
 		fragment->overlay = overlay_node;
-		fragment->target = find_target_node(node);
+		fragment->target = find_target(node);
 		if (!fragment->target) {
 			of_node_put(fragment->overlay);
 			ret = -EINVAL;
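A minimal userspace sketch of the target descent described in the struct target comment above (illustrative types and names, not the kernel implementation): the walk follows the overlay tree and the live tree in lockstep, and once a node is missing from the live tree, in_livetree stays false for every deeper target.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct node {
	const char *name;
	struct node *child;    /* first child */
	struct node *sibling;  /* next sibling */
};

struct target {
	struct node *np;
	bool in_livetree;
};

static struct node *find_child(struct node *parent, const char *name)
{
	struct node *c;

	for (c = parent ? parent->child : NULL; c; c = c->sibling)
		if (!strcmp(c->name, name))
			return c;
	return NULL;
}

static void descend(struct target *target, struct node *overlay)
{
	struct node *child;

	for (child = overlay->child; child; child = child->sibling) {
		struct target tc;

		tc.np = target->in_livetree ?
			find_child(target->np, child->name) : NULL;
		/* once we leave the live tree we never re-enter it */
		tc.in_livetree = target->in_livetree && tc.np;
		descend(&tc, child);
	}
}

int main(void)
{
	struct node live = { "root", NULL, NULL };
	struct node ov_child = { "new-node", NULL, NULL };
	struct node overlay = { "root", &ov_child, NULL };
	struct target t = { &live, true };

	descend(&t, &overlay);   /* "new-node" leaves the live tree */
	return 0;
}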
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 7bd0af3..7c4abf5 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -92,8 +92,8 @@
 		 */
 		reg = of_get_property(node, "reg", NULL);
 		if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
-			dev_set_name(dev, dev_name(dev) ? "%llx.%s:%s" : "%llx.%s",
-				     (unsigned long long)addr, node->name,
+			dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
+				     (unsigned long long)addr, node,
 				     dev_name(dev));
 			return;
 		}
@@ -143,8 +143,8 @@
 			WARN_ON(rc);
 		}
 		if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
-			pr_debug("not all legacy IRQ resources mapped for %s\n",
-				 np->name);
+			pr_debug("not all legacy IRQ resources mapped for %pOFn\n",
+				 np);
 	}
 
 	dev->dev.of_node = of_node_get(np);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index f46828e..43720c2d 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -806,6 +806,7 @@
 
 	if (!of_device_is_available(remote)) {
 		pr_debug("not available for remote node\n");
+		of_node_put(remote);
 		return NULL;
 	}
 
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 41b4971..7f42314 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -212,8 +212,8 @@
 
 	for_each_child_of_node(np, child) {
 		if (child->parent != np) {
-			pr_err("Child node %s links to wrong parent %s\n",
-				 child->name, np->name);
+			pr_err("Child node %pOFn links to wrong parent %pOFn\n",
+				 child, np);
 			rc = -EINVAL;
 			goto put_child;
 		}
@@ -1046,16 +1046,16 @@
 	for_each_child_of_node(np, child) {
 		for_each_child_of_node(child, grandchild)
 			unittest(of_find_device_by_node(grandchild),
-				 "Could not create device for node '%s'\n",
-				 grandchild->name);
+				 "Could not create device for node '%pOFn'\n",
+				 grandchild);
 	}
 
 	of_platform_depopulate(&test_bus->dev);
 	for_each_child_of_node(np, child) {
 		for_each_child_of_node(child, grandchild)
 			unittest(!of_find_device_by_node(grandchild),
-				 "device didn't get destroyed '%s'\n",
-				 grandchild->name);
+				 "device didn't get destroyed '%pOFn'\n",
+				 grandchild);
 	}
 
 	platform_device_unregister(test_bus);
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 31ff03d..f3433bf 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -191,12 +191,12 @@
 	if (IS_ERR(opp_table))
 		return 0;
 
-	count = opp_table->regulator_count;
-
 	/* Regulator may not be required for the device */
-	if (!count)
+	if (!opp_table->regulators)
 		goto put_opp_table;
 
+	count = opp_table->regulator_count;
+
 	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
 	if (!uV)
 		goto put_opp_table;
@@ -976,6 +976,9 @@
 	struct regulator *reg;
 	int i;
 
+	if (!opp_table->regulators)
+		return true;
+
 	for (i = 0; i < opp_table->regulator_count; i++) {
 		reg = opp_table->regulators[i];
 
@@ -1263,7 +1266,7 @@
 	struct dev_pm_set_opp_data *data;
 	int len, count = opp_table->regulator_count;
 
-	if (WARN_ON(!count))
+	if (WARN_ON(!opp_table->regulators))
 		return -EINVAL;
 
 	/* space for set_opp_data */
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 975050a..3826b44 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -66,6 +66,7 @@
 #define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
 
 /* PCIe Root Complex registers (memory-mapped) */
+#define PCIE_RC_IMX6_MSI_CAP			0x50
 #define PCIE_RC_LCR				0x7c
 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
@@ -682,6 +683,7 @@
 	struct resource *dbi_base;
 	struct device_node *node = dev->of_node;
 	int ret;
+	u16 val;
 
 	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
 	if (!imx6_pcie)
@@ -816,6 +818,14 @@
 	if (ret < 0)
 		return ret;
 
+	if (pci_msi_enabled()) {
+		val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
+					PCI_MSI_FLAGS);
+		val |= PCI_MSI_FLAGS_ENABLE;
+		dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
+				   val);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 29a0575..0fa9e8f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -99,9 +99,6 @@
 					       (i * MAX_MSI_IRQS_PER_CTRL) +
 					       pos);
 			generic_handle_irq(irq);
-			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
-						(i * MSI_REG_CTRL_BLOCK_SIZE),
-					    4, 1 << pos);
 			pos++;
 		}
 	}
@@ -168,8 +165,8 @@
 		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
 		pp->irq_status[ctrl] &= ~(1 << bit);
-		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
-				    pp->irq_status[ctrl]);
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+				    ~pp->irq_status[ctrl]);
 	}
 
 	raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -191,8 +188,8 @@
 		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
 		pp->irq_status[ctrl] |= 1 << bit;
-		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
-				    pp->irq_status[ctrl]);
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+				    ~pp->irq_status[ctrl]);
 	}
 
 	raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -200,13 +197,22 @@
 
 static void dw_pci_bottom_ack(struct irq_data *d)
 {
-	struct msi_desc *msi = irq_data_get_msi_desc(d);
-	struct pcie_port *pp;
+	struct pcie_port *pp  = irq_data_get_irq_chip_data(d);
+	unsigned int res, bit, ctrl;
+	unsigned long flags;
 
-	pp = msi_desc_to_pci_sysdata(msi);
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
 
 	if (pp->ops->msi_irq_ack)
 		pp->ops->msi_irq_ack(d->hwirq, pp);
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
 }
 
 static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -658,10 +664,15 @@
 	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
 
 	/* Initialize IRQ Status array */
-	for (ctrl = 0; ctrl < num_ctrls; ctrl++)
-		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
 					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
-				    4, &pp->irq_status[ctrl]);
+				    4, ~0);
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+				    4, ~0);
+		pp->irq_status[ctrl] = 0;
+	}
 
 	/* Setup RC BARs */
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
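The hunks above stop toggling PCIE_MSI_INTR0_ENABLE per vector and instead keep every vector enabled while gating delivery through PCIE_MSI_INTR0_MASK, whose value is the complement of the software irq_status bitmap. A userspace sketch of that scheme (write_mask stands in for dw_pcie_wr_own_conf; names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_MSI_IRQS_PER_CTRL 32

static uint32_t irq_status[8];   /* one word per controller */
static uint32_t mask_reg[8];     /* models PCIE_MSI_INTR0_MASK */

static void write_mask(unsigned int ctrl, uint32_t val)
{
	mask_reg[ctrl] = val;    /* register write stand-in */
}

static void msi_mask(unsigned int hwirq)
{
	unsigned int ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
	unsigned int bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

	irq_status[ctrl] &= ~(1u << bit);
	write_mask(ctrl, ~irq_status[ctrl]);
}

static void msi_unmask(unsigned int hwirq)
{
	unsigned int ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
	unsigned int bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

	irq_status[ctrl] |= 1u << bit;
	write_mask(ctrl, ~irq_status[ctrl]);
}

int main(void)
{
	msi_unmask(5);
	msi_mask(5);
	/* all bits masked again: the word reads back as all ones */
	printf("mask word 0: 0x%08x\n", (unsigned int)mask_reg[0]);
	return 0;
}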
diff --git a/drivers/pci/controller/pci-msm-msi.c b/drivers/pci/controller/pci-msm-msi.c
index 782140b..4aabcfd 100644
--- a/drivers/pci/controller/pci-msm-msi.c
+++ b/drivers/pci/controller/pci-msm-msi.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
 
 #include <linux/interrupt.h>
 #include <linux/iommu.h>
@@ -55,10 +55,42 @@
 	chained_irq_exit(chip, desc);
 }
 
+static void msm_msi_mask_irq(struct irq_data *data)
+{
+	struct irq_data *parent_data;
+
+	if (!data->parent_data)
+		return;
+
+	parent_data = irq_get_irq_data(data->parent_data->hwirq);
+	if (!parent_data || !parent_data->chip)
+		return;
+
+	pci_msi_mask_irq(data);
+	parent_data->chip->irq_mask(parent_data);
+}
+
+static void msm_msi_unmask_irq(struct irq_data *data)
+{
+	struct irq_data *parent_data;
+
+	if (!data->parent_data)
+		return;
+
+	parent_data = irq_get_irq_data(data->parent_data->hwirq);
+	if (!parent_data || !parent_data->chip)
+		return;
+
+	parent_data->chip->irq_unmask(parent_data);
+	pci_msi_unmask_irq(data);
+}
+
 static struct irq_chip msm_msi_irq_chip = {
 	.name = "msm_pci_msi",
-	.irq_mask = pci_msi_mask_irq,
-	.irq_unmask = pci_msi_unmask_irq,
+	.irq_enable = msm_msi_unmask_irq,
+	.irq_disable = msm_msi_mask_irq,
+	.irq_mask = msm_msi_mask_irq,
+	.irq_unmask = msm_msi_unmask_irq,
 };
 
 static int msm_msi_domain_prepare(struct irq_domain *domain, struct device *dev,
diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c
index d25a75e..c1d3850 100644
--- a/drivers/pci/controller/pci-msm.c
+++ b/drivers/pci/controller/pci-msm.c
@@ -551,6 +551,7 @@
 	uint32_t wr_halt_size;
 	uint32_t slv_addr_space_size;
 	uint32_t phy_status_offset;
+	uint32_t phy_status_bit;
 	uint32_t phy_power_down_offset;
 	uint32_t cpl_timeout;
 	uint32_t current_bdf;
@@ -1197,6 +1198,8 @@
 		dev->slv_addr_space_size);
 	PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n",
 		dev->phy_status_offset);
+	PCIE_DBG_FS(dev, "phy_status_bit: %u\n",
+		dev->phy_status_bit);
 	PCIE_DBG_FS(dev, "phy_power_down_offset: 0x%x\n",
 		dev->phy_power_down_offset);
 	PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
@@ -3217,7 +3220,8 @@
 	if (dev->rumi)
 		return true;
 
-	if (readl_relaxed(dev->phy + dev->phy_status_offset) & BIT(6))
+	if (readl_relaxed(dev->phy + dev->phy_status_offset) &
+		BIT(dev->phy_status_bit))
 		return false;
 	else
 		return true;
@@ -5717,6 +5721,11 @@
 	PCIE_DBG(pcie_dev, "RC%d: phy-status-offset: 0x%x.\n", pcie_dev->rc_idx,
 		pcie_dev->phy_status_offset);
 
+	of_property_read_u32(pdev->dev.of_node, "qcom,phy-status-bit",
+				&pcie_dev->phy_status_bit);
+	PCIE_DBG(pcie_dev, "RC%d: phy-status-bit: %u.\n", pcie_dev->rc_idx,
+		pcie_dev->phy_status_bit);
+
 	of_property_read_u32(of_node, "qcom,phy-power-down-offset",
 				&pcie_dev->phy_power_down_offset);
 	PCIE_DBG(pcie_dev, "RC%d: phy-power-down-offset: 0x%x.\n",
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index bef17c3..33f3f47 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1251,30 +1251,29 @@
 		return 0;
 	}
 
-	if (!pm || !pm->runtime_suspend)
-		return -ENOSYS;
-
 	pci_dev->state_saved = false;
-	error = pm->runtime_suspend(dev);
-	if (error) {
+	if (pm && pm->runtime_suspend) {
+		error = pm->runtime_suspend(dev);
 		/*
 		 * -EBUSY and -EAGAIN is used to request the runtime PM core
 		 * to schedule a new suspend, so log the event only with debug
 		 * log level.
 		 */
-		if (error == -EBUSY || error == -EAGAIN)
+		if (error == -EBUSY || error == -EAGAIN) {
 			dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
 				pm->runtime_suspend, error);
-		else
+			return error;
+		} else if (error) {
 			dev_err(dev, "can't suspend (%pf returned %d)\n",
 				pm->runtime_suspend, error);
-
-		return error;
+			return error;
+		}
 	}
 
 	pci_fixup_device(pci_fixup_suspend, pci_dev);
 
-	if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+	if (pm && pm->runtime_suspend
+	    && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
 	    && pci_dev->current_state != PCI_UNKNOWN) {
 		WARN_ONCE(pci_dev->current_state != prev,
 			"PCI PM: State of device not saved by %pF\n",
@@ -1292,7 +1291,7 @@
 
 static int pci_pm_runtime_resume(struct device *dev)
 {
-	int rc;
+	int rc = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
@@ -1306,14 +1305,12 @@
 	if (!pci_dev->driver)
 		return 0;
 
-	if (!pm || !pm->runtime_resume)
-		return -ENOSYS;
-
 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
 	pci_enable_wake(pci_dev, PCI_D0, false);
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
-	rc = pm->runtime_resume(dev);
+	if (pm && pm->runtime_resume)
+		rc = pm->runtime_resume(dev);
 
 	pci_dev->runtime_d3cold = false;
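The rework above makes the runtime_suspend callback optional: a driver without one no longer gets -ENOSYS, and the bus-level fixups still run. A compact sketch of the resulting control flow (userspace model; pm_runtime_suspend_path is a hypothetical stand-in for pci_pm_runtime_suspend):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct dev_pm_ops {
	int (*runtime_suspend)(void *dev);
};

static int pm_runtime_suspend_path(const struct dev_pm_ops *pm, void *dev)
{
	if (pm && pm->runtime_suspend) {
		int error = pm->runtime_suspend(dev);

		if (error == -EBUSY || error == -EAGAIN)
			return error;   /* retry requested: debug log only */
		if (error)
			return error;   /* hard failure: error log */
	}
	/* with no callback we fall through: fixups still run */
	printf("pci_fixup_device(pci_fixup_suspend, ...)\n");
	return 0;
}

int main(void)
{
	return pm_runtime_suspend_path(NULL, NULL); /* no driver callback */
}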
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index afc4680..7eb1549 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6113,7 +6113,8 @@
 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
-				disable_acs_redir_param = str + 18;
+				disable_acs_redir_param =
+					kstrdup(str + 18, GFP_KERNEL);
 			} else {
 				printk(KERN_ERR "PCI: Unknown option `%s'\n",
 						str);
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 54a8b30..37d0c15 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -800,6 +800,7 @@
 {
 	int ret;
 	int nr_idxs;
+	unsigned int event_flags;
 	struct switchtec_ioctl_event_ctl ctl;
 
 	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
@@ -821,7 +822,9 @@
 		else
 			return -EINVAL;
 
+		event_flags = ctl.flags;
 		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
+			ctl.flags = event_flags;
 			ret = event_ctl(stdev, &ctl);
 			if (ret < 0)
 				return ret;
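The switchtec fix above exists because event_ctl() rewrites ctl.flags, so looping over indexes with the same struct silently dropped the caller's flags after the first pass. A userspace illustration of the pattern (event_ctl here is a stand-in, not the driver function):

#include <stdio.h>

struct event_ctl_args { int index; unsigned int flags; };

static int event_ctl(struct event_ctl_args *ctl)
{
	ctl->flags = 0;          /* callee clobbers the input flags */
	return 0;
}

int main(void)
{
	struct event_ctl_args ctl = { .flags = 0x3 };
	unsigned int event_flags = ctl.flags;  /* saved once, as in the fix */

	for (ctl.index = 0; ctl.index < 4; ctl.index++) {
		ctl.flags = event_flags;       /* restore before each call */
		event_ctl(&ctl);
	}
	printf("flags restored each pass: 0x%x\n", event_flags);
	return 0;
}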
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 54ec278..e1a77b2 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -927,6 +927,11 @@
 
 	idx = atomic_inc_return(&pmu_idx);
 	name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
+	if (!name) {
+		dev_err(dev, "failed to allocate name for pmu %d\n", idx);
+		return -ENOMEM;
+	}
+
 	return perf_pmu_register(&spe_pmu->pmu, name, -1);
 }
 
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 1b10ea0..69372e2 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -30,8 +30,8 @@
 #define DDRC_FLUX_RCMD          0x38c
 #define DDRC_PRE_CMD            0x3c0
 #define DDRC_ACT_CMD            0x3c4
-#define DDRC_BNK_CHG            0x3c8
 #define DDRC_RNK_CHG            0x3cc
+#define DDRC_RW_CHG             0x3d0
 #define DDRC_EVENT_CTRL         0x6C0
 #define DDRC_INT_MASK		0x6c8
 #define DDRC_INT_STATUS		0x6cc
@@ -51,7 +51,7 @@
 
 static const u32 ddrc_reg_off[] = {
 	DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
-	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
+	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
 };
 
 /*
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index d4dcd39..881078f 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -126,6 +126,7 @@
 	bool dedicated_clocks;
 	bool enable_pmu_unk1;
 	bool phy0_dual_route;
+	int missing_phys;
 };
 
 struct sun4i_usb_phy_data {
@@ -646,6 +647,9 @@
 	if (args->args[0] >= data->cfg->num_phys)
 		return ERR_PTR(-ENODEV);
 
+	if (data->cfg->missing_phys & BIT(args->args[0]))
+		return ERR_PTR(-ENODEV);
+
 	return data->phys[args->args[0]].phy;
 }
 
@@ -741,6 +745,9 @@
 		struct sun4i_usb_phy *phy = data->phys + i;
 		char name[16];
 
+		if (data->cfg->missing_phys & BIT(i))
+			continue;
+
 		snprintf(name, sizeof(name), "usb%d_vbus", i);
 		phy->vbus = devm_regulator_get_optional(dev, name);
 		if (IS_ERR(phy->vbus)) {
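The sun4i-usb change above adds a per-SoC missing_phys bitmap so PHY indexes that do not exist on a given variant are skipped both at xlate time and during init. A small sketch of the check (illustrative values):

#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int missing_phys = BIT(1) | BIT(2);

static int phy_exists(unsigned int idx, unsigned int num_phys)
{
	if (idx >= num_phys)
		return 0;
	return !(missing_phys & BIT(idx));
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("phy%u: %s\n", i,
		       phy_exists(i, 4) ? "present" : "skip");
	return 0;
}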
diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c
index 6fd6e07..09a77e5 100644
--- a/drivers/phy/qualcomm/phy-ath79-usb.c
+++ b/drivers/phy/qualcomm/phy-ath79-usb.c
@@ -31,7 +31,7 @@
 
 	err = reset_control_deassert(priv->reset);
 	if (err && priv->no_suspend_override)
-		reset_control_assert(priv->no_suspend_override);
+		reset_control_deassert(priv->no_suspend_override);
 
 	return err;
 }
@@ -69,7 +69,7 @@
 	if (!priv)
 		return -ENOMEM;
 
-	priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy");
+	priv->reset = devm_reset_control_get(&pdev->dev, "phy");
 	if (IS_ERR(priv->reset))
 		return PTR_ERR(priv->reset);
 
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index 62744320..029ee04 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -221,10 +221,12 @@
 	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
 				   &phy_common->ref_clk_parent, false);
 
-	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
-				   &phy_common->ref_clk);
-	if (err)
-		goto out;
+	/*
+	 * Some platforms may not have ON/OFF control for the reference
+	 * clock, so this clock may be optional.
+	 */
+	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
+				   &phy_common->ref_clk, false);
 
 	/*
 	 * "ref_aux_clk" is optional and only supported by certain
@@ -414,11 +416,17 @@
 		}
 	}
 
-	ret = clk_prepare_enable(phy->ref_clk);
-	if (ret) {
-		dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
-				__func__, ret);
-		goto out_disable_parent;
+	/*
+	 * "ref_clk" is an optional clock, hence make sure that the clk
+	 * reference is available before trying to enable it.
+	 */
+	if (phy->ref_clk) {
+		ret = clk_prepare_enable(phy->ref_clk);
+		if (ret) {
+			dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
+					__func__, ret);
+			goto out_disable_parent;
+		}
 	}
 
 	/*
@@ -482,7 +490,14 @@
 		 */
 		if (phy->ref_aux_clk)
 			clk_disable_unprepare(phy->ref_aux_clk);
-		clk_disable_unprepare(phy->ref_clk);
+
+		/*
+		 * "ref_clk" is an optional clock, hence make sure that the
+		 * clk reference is available before trying to disable it.
+		 */
+		if (phy->ref_clk)
+			clk_disable_unprepare(phy->ref_clk);
+
 		/*
 		 * "ref_clk_parent" is optional clock hence make sure that clk
 		 * reference is available before trying to disable the clock.
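The phy-qcom-ufs hunks above demote "ref_clk" to an optional clock: a failed lookup leaves the handle NULL instead of failing the probe, and the enable/disable paths test the handle first. A userspace model of that convention (get_ref_clk is hypothetical, not a kernel API):

#include <stddef.h>
#include <stdio.h>

struct clk { const char *name; };

static struct clk *get_ref_clk(int present)
{
	static struct clk ref = { "ref_clk" };

	return present ? &ref : NULL;  /* no error for a missing clock */
}

static void enable_path(struct clk *ref_clk)
{
	if (ref_clk)                   /* optional: skip when absent */
		printf("enable %s\n", ref_clk->name);
}

int main(void)
{
	enable_path(get_ref_clk(0));   /* silently skipped */
	enable_path(get_ref_clk(1));
	return 0;
}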
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index fa53091..08925d2 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -90,7 +90,7 @@
 	struct gpio_chip gpio_chip;
 	struct pinctrl_gpio_range gpio_range;
 
-	spinlock_t irq_lock[BCM2835_NUM_BANKS];
+	raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
 };
 
 /* pins are just named GPIO0..GPIO53 */
@@ -461,10 +461,10 @@
 	unsigned bank = GPIO_REG_OFFSET(gpio);
 	unsigned long flags;
 
-	spin_lock_irqsave(&pc->irq_lock[bank], flags);
+	raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
 	set_bit(offset, &pc->enabled_irq_map[bank]);
 	bcm2835_gpio_irq_config(pc, gpio, true);
-	spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+	raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
 }
 
 static void bcm2835_gpio_irq_disable(struct irq_data *data)
@@ -476,12 +476,12 @@
 	unsigned bank = GPIO_REG_OFFSET(gpio);
 	unsigned long flags;
 
-	spin_lock_irqsave(&pc->irq_lock[bank], flags);
+	raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
 	bcm2835_gpio_irq_config(pc, gpio, false);
 	/* Clear events that were latched prior to clearing event sources */
 	bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
 	clear_bit(offset, &pc->enabled_irq_map[bank]);
-	spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+	raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
 }
 
 static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc,
@@ -584,7 +584,7 @@
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&pc->irq_lock[bank], flags);
+	raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
 
 	if (test_bit(offset, &pc->enabled_irq_map[bank]))
 		ret = __bcm2835_gpio_irq_set_type_enabled(pc, gpio, type);
@@ -596,7 +596,7 @@
 	else
 		irq_set_handler_locked(data, handle_level_irq);
 
-	spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+	raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
 
 	return ret;
 }
@@ -1047,7 +1047,7 @@
 		for_each_set_bit(offset, &events, 32)
 			bcm2835_gpio_wr(pc, GPEDS0 + i * 4, BIT(offset));
 
-		spin_lock_init(&pc->irq_lock[i]);
+		raw_spin_lock_init(&pc->irq_lock[i]);
 	}
 
 	err = gpiochip_add_data(&pc->gpio_chip, pc);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 6d31ad7..b7e272d 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1524,7 +1524,7 @@
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
-			DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1532,7 +1532,7 @@
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
-			DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1540,7 +1540,7 @@
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
-			DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1548,7 +1548,7 @@
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
-			DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{}
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 4f3ab18..c8eff70 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -191,7 +191,8 @@
 		case PIN_CONFIG_BIAS_DISABLE:
 			dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
 
-			meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+			meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg,
+					       &bit);
 			ret = regmap_update_bits(pc->reg_pullen, reg,
 						 BIT(bit), 0);
 			if (ret)
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 8646617..e482672 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -807,7 +807,9 @@
 	"BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
 	"BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
 	"BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
+};
 
+static const char * const gpio_aobus_groups[] = {
 	"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
 	"GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
 	"GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
@@ -1030,6 +1032,7 @@
 };
 
 static struct meson_pmx_func meson8_aobus_functions[] = {
+	FUNCTION(gpio_aobus),
 	FUNCTION(uart_ao),
 	FUNCTION(remote),
 	FUNCTION(i2c_slave_ao),
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 647ad15..91cffc0 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -646,16 +646,18 @@
 	"BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
 	"BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
 
-	"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
-	"GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
-	"GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
-	"GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N",
-
 	"DIF_0_P", "DIF_0_N", "DIF_1_P", "DIF_1_N",
 	"DIF_2_P", "DIF_2_N", "DIF_3_P", "DIF_3_N",
 	"DIF_4_P", "DIF_4_N"
 };
 
+static const char * const gpio_aobus_groups[] = {
+	"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
+	"GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
+	"GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
+	"GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N"
+};
+
 static const char * const sd_a_groups[] = {
 	"sd_d0_a", "sd_d1_a", "sd_d2_a", "sd_d3_a", "sd_clk_a",
 	"sd_cmd_a"
@@ -871,6 +873,7 @@
 };
 
 static struct meson_pmx_func meson8b_aobus_functions[] = {
+	FUNCTION(gpio_aobus),
 	FUNCTION(uart_ao),
 	FUNCTION(uart_ao_b),
 	FUNCTION(i2c_slave_ao),
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index a7f3706..3d05bc1 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -34,14 +34,12 @@
 	MAX77620_PIN_PP_DRV,
 };
 
-enum max77620_pinconf_param {
-	MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
-	MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
-	MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
-	MAX77620_SUSPEND_FPS_SOURCE,
-	MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
-	MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
-};
+#define MAX77620_ACTIVE_FPS_SOURCE		(PIN_CONFIG_END + 1)
+#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS	(PIN_CONFIG_END + 2)
+#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS	(PIN_CONFIG_END + 3)
+#define MAX77620_SUSPEND_FPS_SOURCE		(PIN_CONFIG_END + 4)
+#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS	(PIN_CONFIG_END + 5)
+#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS	(PIN_CONFIG_END + 6)
 
 struct max77620_pin_function {
 	const char *name;
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index cf73a40..cecbce2 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -832,8 +832,13 @@
 		break;
 
 	case MCP_TYPE_S18:
+		one_regmap_config =
+			devm_kmemdup(dev, &mcp23x17_regmap,
+				sizeof(struct regmap_config), GFP_KERNEL);
+		if (!one_regmap_config)
+			return -ENOMEM;
 		mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
-					       &mcp23x17_regmap);
+					       one_regmap_config);
 		mcp->reg_shift = 1;
 		mcp->chip.ngpio = 16;
 		mcp->chip.label = "mcp23s18";
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index cbf58a1..4d87d75 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -1166,7 +1166,6 @@
 	}
 
 	/* Register GPIO controller */
-	pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
 	pctl->gpio.base = -1;
 	pctl->gpio.ngpio = pctl->data->npins;
 	pctl->gpio.get_direction = sx150x_gpio_get_direction;
@@ -1180,6 +1179,10 @@
 	pctl->gpio.of_node = dev->of_node;
 #endif
 	pctl->gpio.can_sleep = true;
+	pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
+	if (!pctl->gpio.label)
+		return -ENOMEM;
+
 	/*
 	 * Setting multiple pins is not safe when all pins are not
 	 * handled by the same regmap register. The oscio pin (present
@@ -1200,13 +1203,15 @@
 
 	/* Add Interrupt support if an irq is specified */
 	if (client->irq > 0) {
-		pctl->irq_chip.name = devm_kstrdup(dev, client->name,
-						   GFP_KERNEL);
 		pctl->irq_chip.irq_mask = sx150x_irq_mask;
 		pctl->irq_chip.irq_unmask = sx150x_irq_unmask;
 		pctl->irq_chip.irq_set_type = sx150x_irq_set_type;
 		pctl->irq_chip.irq_bus_lock = sx150x_irq_bus_lock;
 		pctl->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
+		pctl->irq_chip.name = devm_kstrdup(dev, client->name,
+						   GFP_KERNEL);
+		if (!pctl->irq_chip.name)
+			return -ENOMEM;
 
 		pctl->irq.masked = ~0;
 		pctl->irq.sense = 0;
diff --git a/drivers/pinctrl/qcom/pinctrl-kona.c b/drivers/pinctrl/qcom/pinctrl-kona.c
index 4fa8bb4..c61623e 100644
--- a/drivers/pinctrl/qcom/pinctrl-kona.c
+++ b/drivers/pinctrl/qcom/pinctrl-kona.c
@@ -113,6 +113,18 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
+
+#define QUP_I3C_0_MODE_OFFSET	0x9BB000
+#define QUP_I3C_1_MODE_OFFSET	0x9BC000
+#define QUP_I3C_8_MODE_OFFSET	0x5BA000
+#define QUP_I3C_14_MODE_OFFSET	0x5BB000
+
+#define QUP_I3C(qup_mode, qup_offset)					\
+	{						\
+		.mode = qup_mode,			\
+		.offset = qup_offset,			\
+	}
+
 static const struct pinctrl_pin_desc kona_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
@@ -1698,6 +1710,13 @@
 	[183] = UFS_RESET(ufs_reset, 0x5b8000),
 };
 
+static struct pinctrl_qup kona_qup_regs[] = {
+	[0] = QUP_I3C(0, QUP_I3C_0_MODE_OFFSET),
+	[1] = QUP_I3C(1, QUP_I3C_1_MODE_OFFSET),
+	[2] = QUP_I3C(8, QUP_I3C_8_MODE_OFFSET),
+	[3] = QUP_I3C(14, QUP_I3C_14_MODE_OFFSET),
+};
+
 static const struct msm_pinctrl_soc_data kona_pinctrl = {
 	.pins = kona_pins,
 	.npins = ARRAY_SIZE(kona_pins),
@@ -1706,6 +1725,8 @@
 	.groups = kona_groups,
 	.ngroups = ARRAY_SIZE(kona_groups),
 	.ngpios = 180,
+	.qup_regs = kona_qup_regs,
+	.nqup_regs = ARRAY_SIZE(kona_qup_regs),
 };
 
 static int kona_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 5a5cd95..6cb2feb 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013, Sony Mobile Communications AB.
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
 #include <linux/reboot.h>
 #include <linux/pm.h>
 #include <linux/log2.h>
+#include <linux/bitmap.h>
 
 #include "../core.h"
 #include "../pinconf.h"
@@ -39,6 +40,7 @@
 
 #define MAX_NR_GPIO 300
 #define PS_HOLD_OFFSET 0x820
+#define QUP_MASK       GENMASK(5, 0)
 
 /**
  * struct msm_pinctrl - state for a pinctrl-msm device
@@ -1058,6 +1060,42 @@
 	.resume = msm_pinctrl_resume,
 };
 
+int msm_qup_write(u32 mode, u32 val)
+{
+	int i;
+	struct pinctrl_qup *regs = msm_pinctrl_data->soc->qup_regs;
+	int num_regs = msm_pinctrl_data->soc->nqup_regs;
+
+	/* Iterate over modes */
+	for (i = 0; i < num_regs; i++) {
+		if (regs[i].mode == mode) {
+			writel_relaxed(val & QUP_MASK,
+				 msm_pinctrl_data->regs + regs[i].offset);
+			return 0;
+		}
+	}
+
+	return -ENOENT;
+}
+
+int msm_qup_read(unsigned int mode)
+{
+	int i, val;
+	struct pinctrl_qup *regs = msm_pinctrl_data->soc->qup_regs;
+	int num_regs = msm_pinctrl_data->soc->nqup_regs;
+
+	/* Iterate over modes */
+	for (i = 0; i < num_regs; i++) {
+		if (regs[i].mode == mode) {
+			val = readl_relaxed(msm_pinctrl_data->regs +
+							 regs[i].offset);
+			return val & QUP_MASK;
+		}
+	}
+
+	return -ENOENT;
+}
+
 int msm_pinctrl_probe(struct platform_device *pdev,
 		      const struct msm_pinctrl_soc_data *soc_data)
 {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index a1a5aab..58abe01 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -13,6 +13,8 @@
 #ifndef __PINCTRL_MSM_H__
 #define __PINCTRL_MSM_H__
 
+#include <linux/pinctrl/qcom-pinctrl.h>
+
 struct pinctrl_pin_desc;
 
 /**
@@ -99,6 +101,16 @@
 	unsigned intr_detection_width:5;
 };
 
+/**
+ * struct pinctrl_qup - Qup mode configuration
+ * @mode:	Qup i3c mode
+ * @offset:	Offset of the register
+ */
+struct pinctrl_qup {
+	u32 mode;
+	u32 offset;
+};
+
 /**
  * struct msm_pinctrl_soc_data - Qualcomm pin controller driver configuration
  * @pins:	    An array describing all pins the pin controller affects.
@@ -119,6 +131,8 @@
 	unsigned ngroups;
 	unsigned ngpios;
 	bool pull_no_keeper;
+	struct pinctrl_qup *qup_regs;
+	unsigned int nqup_regs;
 };
 
 int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
index aa8b581..ef4268c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
@@ -588,7 +588,7 @@
 static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
 	.pins = h6_pins,
 	.npins = ARRAY_SIZE(h6_pins),
-	.irq_banks = 3,
+	.irq_banks = 4,
 	.irq_bank_map = h6_irq_bank_map,
 	.irq_read_needs_mux = true,
 };
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index b6fd483..e5d5b1a 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -575,6 +575,7 @@
 
 int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
 {
+	u8 event_type;
 	u32 host_event;
 	int ret;
 
@@ -594,11 +595,22 @@
 		return ret;
 
 	if (wake_event) {
+		event_type = ec_dev->event_data.event_type;
 		host_event = cros_ec_get_host_event(ec_dev);
 
-		/* Consider non-host_event as wake event */
-		*wake_event = !host_event ||
-			      !!(host_event & ec_dev->host_event_wake_mask);
+		/*
+		 * Sensor events need to be parsed by the sensor sub-device.
+		 * Defer them, and don't report the wakeup here.
+		 */
+		if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
+			*wake_event = false;
+		/* Masked host-events should not count as wake events. */
+		else if (host_event &&
+			 !(host_event & ec_dev->host_event_wake_mask))
+			*wake_event = false;
+		/* Consider all other events as wake events. */
+		else
+			*wake_event = true;
 	}
 
 	return ret;
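The rewritten wake classification above has three tiers: sensor-FIFO events are deferred to the sensor sub-device, masked host events never count as wakeups, and everything else does. A truth-table sketch (placeholder constant standing in for the real EC_MKBP_EVENT_SENSOR_FIFO enum value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EC_MKBP_EVENT_SENSOR_FIFO 2    /* placeholder value */

static bool is_wake_event(uint8_t event_type, uint32_t host_event,
			  uint32_t wake_mask)
{
	if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
		return false;          /* parsed later by the sensor core */
	if (host_event && !(host_event & wake_mask))
		return false;          /* explicitly masked host event */
	return true;                   /* everything else wakes */
}

int main(void)
{
	printf("%d %d %d\n",
	       is_wake_event(EC_MKBP_EVENT_SENSOR_FIFO, 0, 0),
	       is_wake_event(0, 0x4, 0x1),
	       is_wake_event(0, 0x4, 0x4));
	return 0;
}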
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 56bd2c4..a5105aa 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -86,6 +86,15 @@
 	  Kernel and user-space processes can call the IPA driver
 	  to configure IPA core.
 
+config IPA_DEBUG
+	bool "IPA DEBUG for non-perf build"
+	depends on IPA3
+	help
+	  This option enables additional debug information for non-perf
+	  builds. If you use a non-perf build and want more debug info,
+	  enable this flag.
+	  Enabling this flag on perf builds is not recommended.
+
 config IPA_WDI_UNIFIED_API
 	bool "IPA WDI unified API support"
 	depends on IPA3
@@ -134,6 +143,15 @@
 	  the MHI device without AP involvement, with the exception of
 	  power management.
 
+config IPA3_MHI_PRIME_MANAGER
+	tristate "IPA3_MHI Prime Manager driver"
+	depends on IPA3
+	help
+	  This driver sets up MHI Prime channels between the host and the
+	  modem and enables MHI Prime communication.
+	  Once the configuration is done, the modem communicates directly
+	  with the host, without AP involvement, for tethering data offload.
+
 config IPA_UT
 	tristate "IPA Unit-Test Framework and Test Suites"
 	depends on IPA3 && DEBUG_FS
@@ -182,4 +200,42 @@
 	  module is used to configure and read the configuration from the
 	  Serial Engines.
 
+config IPA3_REGDUMP
+	bool "Dump or collect IPA/GSI register values on Linux crash"
+	depends on IPA3
+	help
+	  This option is to be used when the saving of IPA register state is
+	  desired upon a fatal system exception. When an exception occurs,
+	  an IPA register collection algorithm will be run in the context of
+	  the exception handler.  A predefined set of registers will be read
+	  and their values will be placed into a static hierarchical data
+	  structure that can be examined post-crash.
+
+choice
+	prompt "Platform whose registers are to be dumped/collected"
+	depends on IPA3_REGDUMP
+	help
+	  The choices within represent the possible platforms this build is
+	  intended for. The choices are mutually exclusive.  Selecting one
+	  determines the include path used, so that the relevant register
+	  definitions will be found.  Each platform has unique
+	  register definitions.
+
+config IPA3_REGDUMP_IPA_4_5
+	bool "The 4.5 IPA"
+	depends on IPA3_REGDUMP
+	depends on ARCH_KONA
+	help
+	  Set this to enable the 4.5 IPA's registers to be dumped/collected.
+
+endchoice
+
+config IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS
+	int "The number of extra endp registers for remaining pipes"
+	depends on IPA3_REGDUMP
+	default 0
+	help
+	  If the platform has extra endpoint registers for remaining
+	  pipes, specify how many here.
+
 endmenu
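As the IPA3_REGDUMP help text describes, the collection pass runs in the exception handler and fills a static structure, so it must not allocate. A minimal sketch of that shape (hypothetical names; the real tables live under ipa_v3/dump/):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg_snapshot { uint32_t offset; uint32_t value; };

/* predefined offsets read at crash time, no allocation needed */
static const uint32_t reg_offsets[] = { 0x0, 0x38, 0x60 };
static struct reg_snapshot snapshot[sizeof(reg_offsets) /
				    sizeof(reg_offsets[0])];

static uint32_t read_reg(const volatile uint32_t *base, uint32_t off)
{
	return base[off / 4];   /* stands in for readl() */
}

static void collect_regs(const volatile uint32_t *base)
{
	size_t i;

	for (i = 0; i < sizeof(reg_offsets) / sizeof(reg_offsets[0]); i++) {
		snapshot[i].offset = reg_offsets[i];
		snapshot[i].value = read_reg(base, reg_offsets[i]);
	}
}

int main(void)
{
	static const volatile uint32_t fake_regs[32]; /* zeroed register file */

	collect_regs(fake_regs);
	printf("0x%02x = 0x%x\n", (unsigned int)snapshot[1].offset,
	       (unsigned int)snapshot[1].value);
	return 0;
}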
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 28a3c2a..985bcf2 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -205,7 +205,7 @@
 			return;
 		}
 
-		GSIDBG("GSI wait on chan_hld=%lu irqtyp=%lu state=%u intr=%u\n",
+		GSIDBG("GSI wait on chan_hld=%lu irqtyp=%u state=%u intr=%u\n",
 			chan_hdl,
 			type,
 			ctx->state,
@@ -2313,7 +2313,13 @@
 		return -GSI_STATUS_NODEV;
 	}
 	memset(ctx, 0, sizeof(*ctx));
-	user_data_size = props->ring_len / props->re_size;
+
+	/* IPA-offloaded WDI channels do not require a user_data pointer */
+	if (props->prot != GSI_CHAN_PROT_WDI2 &&
+		props->prot != GSI_CHAN_PROT_WDI3)
+		user_data_size = props->ring_len / props->re_size;
+	else
+		user_data_size = props->re_size;
 	/*
 	 * GCI channels might have OOO event completions up to GSI_VEID_MAX.
 	 * user_data needs to be large enough to accommodate those.
@@ -2496,6 +2502,7 @@
 	mutex_unlock(&ctx->mlock);
 	return GSI_STATUS_SUCCESS;
 }
+EXPORT_SYMBOL(gsi_write_channel_scratch3_reg);
 
 static void __gsi_read_channel_scratch(unsigned long chan_hdl,
 		union __packed gsi_channel_scratch * val)
@@ -3611,6 +3618,7 @@
 	struct gsi_chan_ctx *ctx;
 	enum gsi_chan_mode curr;
 	unsigned long flags;
+	enum gsi_chan_mode chan_mode;
 
 	if (!gsi_ctx) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
@@ -3682,13 +3690,20 @@
 					GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(
 							gsi_ctx->per.ee));
 				spin_unlock_irqrestore(&gsi_ctx->slock, flags);
-				spin_lock_irqsave(&ctx->ring.slock, flags);
-				atomic_set(
-					&ctx->poll_mode, GSI_CHAN_MODE_POLL);
+				spin_lock_irqsave(&ctx->evtr->ring.slock,
+									flags);
+				chan_mode = atomic_xchg(&ctx->poll_mode,
+						GSI_CHAN_MODE_POLL);
 				spin_unlock_irqrestore(
-					&ctx->ring.slock, flags);
+					&ctx->evtr->ring.slock, flags);
 				ctx->stats.poll_pending_irq++;
-				return -GSI_STATUS_PENDING_IRQ;
+				GSIDBG("In IEOB WA pnd cnt = %d prvmode = %d\n",
+						ctx->stats.poll_pending_irq,
+						chan_mode);
+				if (chan_mode == GSI_CHAN_MODE_POLL)
+					return GSI_STATUS_SUCCESS;
+				else
+					return -GSI_STATUS_PENDING_IRQ;
 			}
 		}
 		ctx->stats.poll_to_callback++;
@@ -4158,7 +4173,7 @@
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
 		return;
 	}
-	GSIDBG("reg dump ch id %d\n", chan_hdl);
+	GSIDBG("reg dump ch id %ld\n", chan_hdl);
 	val = gsi_readl(gsi_ctx->base +
 		GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
 			gsi_ctx->per.ee));
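The gsi.c hunk above replaces atomic_set() with atomic_xchg() so the IEOB workaround can see the previous channel mode: if the channel was already in POLL mode, the call now returns success rather than reporting a pending IRQ twice. A userspace sketch of that handoff using C11 atomics (illustrative return codes):

#include <stdatomic.h>
#include <stdio.h>

enum chan_mode { CHAN_MODE_CALLBACK, CHAN_MODE_POLL };

static _Atomic int poll_mode = CHAN_MODE_CALLBACK;

static int switch_to_poll(void)
{
	/* set POLL and learn the previous mode in one atomic step */
	int prev = atomic_exchange(&poll_mode, CHAN_MODE_POLL);

	if (prev == CHAN_MODE_POLL)
		return 0;    /* already polling: success, nothing pending */
	return -1;           /* stands in for -GSI_STATUS_PENDING_IRQ */
}

int main(void)
{
	printf("%d %d\n", switch_to_poll(), switch_to_poll());
	return 0;
}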
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index b22c40e..4ff09cc 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -29,7 +29,7 @@
 #define GSI_EVT_RING_MAX  24
 #define GSI_NO_EVT_ERINDEX 31
 
-#define gsi_readl(c)	({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define gsi_readl(c)	(readl(c))
 #define gsi_writel(v, c)	({ __iowmb(); writel_relaxed((v), (c)); })
 
 #define GSI_IPC_LOGGING(buf, fmt, args...) \
diff --git a/drivers/platform/msm/gsi/gsi_emulation.h b/drivers/platform/msm/gsi/gsi_emulation.h
index 837b584..eead5ef 100644
--- a/drivers/platform/msm/gsi/gsi_emulation.h
+++ b/drivers/platform/msm/gsi/gsi_emulation.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #if !defined(_GSI_EMULATION_H_)
@@ -12,7 +12,7 @@
 # include "gsi_reg.h"
 # include "gsi_emulation_stubs.h"
 
-# define gsi_emu_readl(c)     ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+# define gsi_emu_readl(c)     (readl(c))
 # define gsi_emu_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); })
 
 # define CNTRLR_BASE 0
diff --git a/drivers/platform/msm/gsi/gsi_emulation_stubs.h b/drivers/platform/msm/gsi/gsi_emulation_stubs.h
index 0b74c0f..4b0dead 100644
--- a/drivers/platform/msm/gsi/gsi_emulation_stubs.h
+++ b/drivers/platform/msm/gsi/gsi_emulation_stubs.h
@@ -1,13 +1,12 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #if !defined(_GSI_EMULATION_STUBS_H_)
 # define _GSI_EMULATION_STUBS_H_
 
 # include <asm/barrier.h>
-# define __iormb()       rmb() /* used in gsi.h */
 # define __iowmb()       wmb() /* used in gsi.h */
 
 #endif /* #if !defined(_GSI_EMULATION_STUBS_H_) */
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 140e3b6..0f86ca0 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -201,6 +201,11 @@
 	__stringify(IPA_CLIENT_WIGIG4_CONS),
 	__stringify(RESERVERD_PROD_94),
 	__stringify(IPA_CLIENT_APPS_WAN_COAL_CONS),
+	__stringify(IPA_CLIENT_MHI_PRIME_RMNET_PROD),
+	__stringify(IPA_CLIENT_MHI_PRIME_RMNET_CONS),
+	__stringify(IPA_CLIENT_MHI_PRIME_TETH_PROD),
+	__stringify(IPA_CLIENT_MHI_PRIME_TETH_CONS),
+	__stringify(IPA_CLIENT_MHI_PRIME_DPL_PROD),
 };
 
 /**
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 4246f1a..11c3717 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/mutex.h>
@@ -1910,6 +1910,15 @@
 		goto connect_dl_fail;
 	}
 
+	/* MHIP pipe enablement */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_enable(params->teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to connect MHIP channel\n");
+			goto connect_dl_fail;
+		}
+	}
+
 	/* Connect tethering protocol */
 	result = ipa3_usb_connect_teth_prot(params->teth_prot);
 	if (result) {
@@ -2403,6 +2412,14 @@
 		if (orig_state != IPA_USB_SUSPENDED) {
 			spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
 				flags);
+			/* Stop UL MHIP channel */
+			if (ipa3_is_mhip_offload_enabled()) {
+				result = ipa_mpm_mhip_ul_data_stop(teth_prot);
+				if (result) {
+					IPA_USB_ERR("fail UL MHIPData stop\n");
+					goto bad_params;
+				}
+			}
 			/* Stop UL channel */
 			result = ipa3_xdci_disconnect(ul_clnt_hdl,
 				true,
@@ -2422,6 +2439,14 @@
 			teth_prot);
 	if (result)
 		goto bad_params;
+	/* Stop UL/DL MHIP channels */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to disconnect MHIP channel\n");
+			goto bad_params;
+		}
+	}
 
 	/* Disconnect tethering protocol */
 	result = ipa3_usb_disconnect_teth_prot(teth_prot);
@@ -2732,7 +2757,14 @@
 			&ipa3_usb_notify_remote_wakeup_work);
 	}
 	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
+	/* Stop MHIP channel */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to disconnect MHIP channel\n");
+			goto release_prod_fail;
+		}
+	}
 	IPA_USB_DBG_LOW("exit\n");
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index d1d462b..3fbd673 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -13,3 +13,11 @@
 obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
 
 obj-$(CONFIG_IPA3_MHI_PROXY) += ipa_mhi_proxy.o
+
+obj-$(CONFIG_IPA3_MHI_PRIME_MANAGER) += ipa_mpm.o
+
+ipat-$(CONFIG_IPA3_REGDUMP) += dump/ipa_reg_dump.o
+
+ccflags-$(CONFIG_IPA3_REGDUMP) += -Idrivers/platform/msm/ipa/ipa_v3/dump
+
+ccflags-$(CONFIG_IPA3_REGDUMP_IPA_4_5) += -Idrivers/platform/msm/ipa/ipa_v3/dump/ipa4.5
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio.h
new file mode 100644
index 0000000..1699699
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio.h
@@ -0,0 +1,2392 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_GSI_HWIO_H_)
+#define _GSI_HWIO_H_
+/*
+ *
+ * HWIO register definitions to follow:
+ *
+ */
+#define GSI_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00004000)
+#define GSI_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00004000)
+#define GSI_REG_BASE_OFFS 0x00004000
+#define HWIO_GSI_CFG_ADDR (GSI_REG_BASE + 0x00000000)
+#define HWIO_GSI_CFG_PHYS (GSI_REG_BASE_PHYS + 0x00000000)
+#define HWIO_GSI_CFG_OFFS (GSI_REG_BASE_OFFS + 0x00000000)
+#define HWIO_GSI_CFG_RMSK 0xf3f
+#define HWIO_GSI_CFG_ATTR 0x3
+#define HWIO_GSI_CFG_IN in_dword_masked(HWIO_GSI_CFG_ADDR, \
+					HWIO_GSI_CFG_RMSK)
+#define HWIO_GSI_CFG_INM(m) in_dword_masked(HWIO_GSI_CFG_ADDR, m)
+#define HWIO_GSI_CFG_OUT(v) out_dword(HWIO_GSI_CFG_ADDR, v)
+#define HWIO_GSI_CFG_OUTM(m, v) out_dword_masked_ns(HWIO_GSI_CFG_ADDR, \
+						    m, \
+						    v, \
+						    HWIO_GSI_CFG_IN)
+#define HWIO_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00
+#define HWIO_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8
+#define HWIO_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define HWIO_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define HWIO_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define HWIO_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
+#define HWIO_GSI_CFG_UC_IS_MCS_BMSK 0x8
+#define HWIO_GSI_CFG_UC_IS_MCS_SHFT 0x3
+#define HWIO_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
+#define HWIO_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2
+#define HWIO_GSI_CFG_MCS_ENABLE_BMSK 0x2
+#define HWIO_GSI_CFG_MCS_ENABLE_SHFT 0x1
+#define HWIO_GSI_CFG_GSI_ENABLE_BMSK 0x1
+#define HWIO_GSI_CFG_GSI_ENABLE_SHFT 0x0
+#define HWIO_GSI_MANAGER_MCS_CODE_VER_ADDR (GSI_REG_BASE + 0x00000008)
+#define HWIO_GSI_MANAGER_MCS_CODE_VER_PHYS (GSI_REG_BASE_PHYS + 0x00000008)
+#define HWIO_GSI_MANAGER_MCS_CODE_VER_OFFS (GSI_REG_BASE_OFFS + 0x00000008)
+#define HWIO_GSI_ZEROS_ADDR (GSI_REG_BASE + 0x00000010)
+#define HWIO_GSI_ZEROS_PHYS (GSI_REG_BASE_PHYS + 0x00000010)
+#define HWIO_GSI_ZEROS_OFFS (GSI_REG_BASE_OFFS + 0x00000010)
+#define HWIO_GSI_PERIPH_BASE_ADDR_LSB_ADDR (GSI_REG_BASE + 0x00000018)
+#define HWIO_GSI_PERIPH_BASE_ADDR_LSB_PHYS (GSI_REG_BASE_PHYS + 0x00000018)
+#define HWIO_GSI_PERIPH_BASE_ADDR_LSB_OFFS (GSI_REG_BASE_OFFS + 0x00000018)
+#define HWIO_GSI_PERIPH_BASE_ADDR_MSB_ADDR (GSI_REG_BASE + 0x0000001c)
+#define HWIO_GSI_PERIPH_BASE_ADDR_MSB_PHYS (GSI_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_GSI_PERIPH_BASE_ADDR_MSB_OFFS (GSI_REG_BASE_OFFS + 0x0000001c)
+#define HWIO_GSI_PERIPH_PENDING_ADDR (GSI_REG_BASE + 0x00000020)
+#define HWIO_GSI_PERIPH_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x00000020)
+#define HWIO_GSI_PERIPH_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x00000020)
+#define HWIO_GSI_MOQA_CFG_ADDR (GSI_REG_BASE + 0x00000030)
+#define HWIO_GSI_MOQA_CFG_PHYS (GSI_REG_BASE_PHYS + 0x00000030)
+#define HWIO_GSI_MOQA_CFG_OFFS (GSI_REG_BASE_OFFS + 0x00000030)
+#define HWIO_GSI_REE_CFG_ADDR (GSI_REG_BASE + 0x00000038)
+#define HWIO_GSI_REE_CFG_PHYS (GSI_REG_BASE_PHYS + 0x00000038)
+#define HWIO_GSI_REE_CFG_OFFS (GSI_REG_BASE_OFFS + 0x00000038)
+#define HWIO_GSI_REE_CFG_RMSK 0xff03
+#define HWIO_GSI_REE_CFG_ATTR 0x3
+#define HWIO_GSI_REE_CFG_IN in_dword_masked(HWIO_GSI_REE_CFG_ADDR, \
+					    HWIO_GSI_REE_CFG_RMSK)
+#define HWIO_GSI_REE_CFG_INM(m) in_dword_masked(HWIO_GSI_REE_CFG_ADDR, m)
+#define HWIO_GSI_REE_CFG_OUT(v) out_dword(HWIO_GSI_REE_CFG_ADDR, v)
+#define HWIO_GSI_REE_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_GSI_REE_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_GSI_REE_CFG_IN)
+#define HWIO_GSI_REE_CFG_MAX_BURST_SIZE_BMSK 0xff00
+#define HWIO_GSI_REE_CFG_MAX_BURST_SIZE_SHFT 0x8
+#define HWIO_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_BMSK 0x2
+#define HWIO_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_SHFT 0x1
+#define HWIO_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_BMSK 0x1
+#define HWIO_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_SHFT 0x0
+#define HWIO_GSI_CGC_CTRL_ADDR (GSI_REG_BASE + 0x00000060)
+#define HWIO_GSI_CGC_CTRL_PHYS (GSI_REG_BASE_PHYS + 0x00000060)
+#define HWIO_GSI_CGC_CTRL_OFFS (GSI_REG_BASE_OFFS + 0x00000060)
+#define HWIO_GSI_MSI_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000080)
+#define HWIO_GSI_MSI_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000080)
+#define HWIO_GSI_MSI_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000080)
+#define HWIO_GSI_EVENT_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000084)
+#define HWIO_GSI_EVENT_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000084)
+#define HWIO_GSI_EVENT_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000084)
+#define HWIO_GSI_DATA_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000088)
+#define HWIO_GSI_DATA_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000088)
+#define HWIO_GSI_DATA_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000088)
+#define HWIO_GSI_TRE_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000090)
+#define HWIO_GSI_TRE_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000090)
+#define HWIO_GSI_TRE_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000a0)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000a0)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000a0)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000a4)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000a4)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000a4)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000a8)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000a8)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000a8)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000ac)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000ac)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000ac)
+#define HWIO_IC_GEN_INT_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000b0)
+#define HWIO_IC_GEN_INT_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000b0)
+#define HWIO_IC_GEN_INT_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000b0)
+#define HWIO_IC_GEN_INT_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000b4)
+#define HWIO_IC_GEN_INT_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000b4)
+#define HWIO_IC_GEN_INT_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000b4)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000b8)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000b8)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000b8)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000bc)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000bc)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000bc)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000c0)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000c0)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000c0)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000c4)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000c4)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000c4)
+#define HWIO_IC_TLV_STOP_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000c8)
+#define HWIO_IC_TLV_STOP_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000c8)
+#define HWIO_IC_TLV_STOP_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000c8)
+#define HWIO_IC_TLV_STOP_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000cc)
+#define HWIO_IC_TLV_STOP_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000cc)
+#define HWIO_IC_TLV_STOP_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000cc)
+#define HWIO_IC_TLV_RESET_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000d0)
+#define HWIO_IC_TLV_RESET_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000d0)
+#define HWIO_IC_TLV_RESET_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000d0)
+#define HWIO_IC_TLV_RESET_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000d4)
+#define HWIO_IC_TLV_RESET_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000d4)
+#define HWIO_IC_TLV_RESET_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000d4)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000d8)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					      0x000000d8)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					      0x000000d8)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000dc)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					      0x000000dc)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					      0x000000dc)
+#define HWIO_IC_READ_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000e0)
+#define HWIO_IC_READ_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000e0)
+#define HWIO_IC_READ_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000e0)
+#define HWIO_IC_READ_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000e4)
+#define HWIO_IC_READ_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000e4)
+#define HWIO_IC_READ_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000e4)
+#define HWIO_IC_WRITE_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000e8)
+#define HWIO_IC_WRITE_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000e8)
+#define HWIO_IC_WRITE_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000e8)
+#define HWIO_IC_WRITE_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000ec)
+#define HWIO_IC_WRITE_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000ec)
+#define HWIO_IC_WRITE_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000ec)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_LSB_ADDR (GSI_REG_BASE + \
+						  0x000000f0)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+						  0x000000f0)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+						  0x000000f0)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_MSB_ADDR (GSI_REG_BASE + \
+						  0x000000f4)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+						  0x000000f4)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+						  0x000000f4)
+#define HWIO_IC_INT_WEIGHT_REE_ADDR (GSI_REG_BASE + 0x00000100)
+#define HWIO_IC_INT_WEIGHT_REE_PHYS (GSI_REG_BASE_PHYS + 0x00000100)
+#define HWIO_IC_INT_WEIGHT_REE_OFFS (GSI_REG_BASE_OFFS + 0x00000100)
+#define HWIO_IC_INT_WEIGHT_EVT_ENG_ADDR (GSI_REG_BASE + 0x00000104)
+#define HWIO_IC_INT_WEIGHT_EVT_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000104)
+#define HWIO_IC_INT_WEIGHT_EVT_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000104)
+#define HWIO_IC_INT_WEIGHT_INT_ENG_ADDR (GSI_REG_BASE + 0x00000108)
+#define HWIO_IC_INT_WEIGHT_INT_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000108)
+#define HWIO_IC_INT_WEIGHT_INT_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000108)
+#define HWIO_IC_INT_WEIGHT_CSR_ADDR (GSI_REG_BASE + 0x0000010c)
+#define HWIO_IC_INT_WEIGHT_CSR_PHYS (GSI_REG_BASE_PHYS + 0x0000010c)
+#define HWIO_IC_INT_WEIGHT_CSR_OFFS (GSI_REG_BASE_OFFS + 0x0000010c)
+#define HWIO_IC_INT_WEIGHT_TLV_ENG_ADDR (GSI_REG_BASE + 0x00000110)
+#define HWIO_IC_INT_WEIGHT_TLV_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000110)
+#define HWIO_IC_INT_WEIGHT_TLV_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000110)
+#define HWIO_IC_INT_WEIGHT_TIMER_ENG_ADDR (GSI_REG_BASE + 0x00000114)
+#define HWIO_IC_INT_WEIGHT_TIMER_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000114)
+#define HWIO_IC_INT_WEIGHT_TIMER_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000114)
+#define HWIO_IC_INT_WEIGHT_DB_ENG_ADDR (GSI_REG_BASE + 0x00000118)
+#define HWIO_IC_INT_WEIGHT_DB_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IC_INT_WEIGHT_DB_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IC_INT_WEIGHT_RD_WR_ENG_ADDR (GSI_REG_BASE + 0x0000011c)
+#define HWIO_IC_INT_WEIGHT_RD_WR_ENG_PHYS (GSI_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IC_INT_WEIGHT_RD_WR_ENG_OFFS (GSI_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IC_INT_WEIGHT_UCONTROLLER_ENG_ADDR (GSI_REG_BASE + 0x00000120)
+#define HWIO_IC_INT_WEIGHT_UCONTROLLER_ENG_PHYS (GSI_REG_BASE_PHYS + \
+						 0x00000120)
+#define HWIO_IC_INT_WEIGHT_UCONTROLLER_ENG_OFFS (GSI_REG_BASE_OFFS + \
+						 0x00000120)
+#define HWIO_IC_INT_WEIGHT_SDMA_ADDR (GSI_REG_BASE + 0x00000124)
+#define HWIO_IC_INT_WEIGHT_SDMA_PHYS (GSI_REG_BASE_PHYS + 0x00000124)
+#define HWIO_IC_INT_WEIGHT_SDMA_OFFS (GSI_REG_BASE_OFFS + 0x00000124)
+#define HWIO_GSI_SDMA_CFG_ADDR (GSI_REG_BASE + 0x0000003c)
+#define HWIO_GSI_SDMA_CFG_PHYS (GSI_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_GSI_SDMA_CFG_OFFS (GSI_REG_BASE_OFFS + 0x0000003c)
+#define HWIO_GSI_SDMA_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000094)
+#define HWIO_GSI_SDMA_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000094)
+#define HWIO_GSI_SDMA_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000094)
+#define HWIO_GSI_SDMA_SG_IOVEC_LSB_n_ADDR(n) (GSI_REG_BASE + 0x00000140 + \
+					      0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_LSB_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00000140 + 0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_LSB_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00000140 + 0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_MSB_n_ADDR(n) (GSI_REG_BASE + 0x00000144 + \
+					      0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_MSB_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00000144 + 0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_MSB_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00000144 + 0x8 * (n))
+#define HWIO_GSI_MANAGER_EE_QOS_n_ADDR(n) (GSI_REG_BASE + 0x00000300 + \
+					   0x4 * (n))
+#define HWIO_GSI_MANAGER_EE_QOS_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00000300 + 0x4 * (n))
+#define HWIO_GSI_MANAGER_EE_QOS_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00000300 + 0x4 * (n))
+#define HWIO_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						    0x00000200)
+#define HWIO_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS +	\
+						    0x00000200)
+#define HWIO_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS +	\
+						    0x00000200)
+#define HWIO_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						    0x00000204)
+#define HWIO_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS +	\
+						    0x00000204)
+#define HWIO_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS +	\
+						    0x00000204)
+#define HWIO_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						      0x00000208)
+#define HWIO_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						      0x00000208)
+#define HWIO_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						      0x00000208)
+#define HWIO_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						      0x0000020c)
+#define HWIO_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						      0x0000020c)
+#define HWIO_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						      0x0000020c)
+#define HWIO_GSI_SHRAM_PTR_EE_SCRATCH_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						      0x00000240)
+#define HWIO_GSI_SHRAM_PTR_EE_SCRATCH_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						      0x00000240)
+#define HWIO_GSI_SHRAM_PTR_EE_SCRATCH_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						      0x00000240)
+#define HWIO_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						      0x00000244)
+#define HWIO_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						      0x00000244)
+#define HWIO_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						      0x00000244)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						       0x00000248)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						       0x00000248)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						       0x00000248)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_ADDR (GSI_REG_BASE + \
+							0x0000024c)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS \
+							+ 0x0000024c)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS \
+							+ 0x0000024c)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_ADDR (GSI_REG_BASE + \
+							0x00000250)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS \
+							+ 0x00000250)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS \
+							+ 0x00000250)
+#define HWIO_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_ADDR (GSI_REG_BASE \
+							     + 0x00000254)
+#define HWIO_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		GSI_REG_BASE_PHYS + 0x00000254)
+#define HWIO_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		GSI_REG_BASE_OFFS + 0x00000254)
+#define HWIO_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_ADDR (GSI_REG_BASE \
+							     + 0x00000258)
+#define HWIO_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		GSI_REG_BASE_PHYS + 0x00000258)
+#define HWIO_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		GSI_REG_BASE_OFFS + 0x00000258)
+#define HWIO_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_ADDR (GSI_REG_BASE + \
+							  0x0000025c)
+#define HWIO_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_PHYS ( \
+		GSI_REG_BASE_PHYS + 0x0000025c)
+#define HWIO_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_OFFS ( \
+		GSI_REG_BASE_OFFS + 0x0000025c)
+#define HWIO_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						       0x00000260)
+#define HWIO_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						       0x00000260)
+#define HWIO_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						       0x00000260)
+#define HWIO_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						       0x00000264)
+#define HWIO_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						       0x00000264)
+#define HWIO_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						       0x00000264)
+#define HWIO_GSI_IRAM_PTR_CH_CMD_ADDR (GSI_REG_BASE + 0x00000400)
+#define HWIO_GSI_IRAM_PTR_CH_CMD_PHYS (GSI_REG_BASE_PHYS + 0x00000400)
+#define HWIO_GSI_IRAM_PTR_CH_CMD_OFFS (GSI_REG_BASE_OFFS + 0x00000400)
+#define HWIO_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR (GSI_REG_BASE + 0x00000404)
+#define HWIO_GSI_IRAM_PTR_EE_GENERIC_CMD_PHYS (GSI_REG_BASE_PHYS + \
+					       0x00000404)
+#define HWIO_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS (GSI_REG_BASE_OFFS + \
+					       0x00000404)
+#define HWIO_GSI_IRAM_PTR_TLV_CH_NOT_FULL_ADDR (GSI_REG_BASE + 0x00000408)
+#define HWIO_GSI_IRAM_PTR_TLV_CH_NOT_FULL_PHYS (GSI_REG_BASE_PHYS + \
+						0x00000408)
+#define HWIO_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS (GSI_REG_BASE_OFFS + \
+						0x00000408)
+#define HWIO_GSI_IRAM_PTR_CH_DB_ADDR (GSI_REG_BASE + 0x00000418)
+#define HWIO_GSI_IRAM_PTR_CH_DB_PHYS (GSI_REG_BASE_PHYS + 0x00000418)
+#define HWIO_GSI_IRAM_PTR_CH_DB_OFFS (GSI_REG_BASE_OFFS + 0x00000418)
+#define HWIO_GSI_IRAM_PTR_EV_DB_ADDR (GSI_REG_BASE + 0x0000041c)
+#define HWIO_GSI_IRAM_PTR_EV_DB_PHYS (GSI_REG_BASE_PHYS + 0x0000041c)
+#define HWIO_GSI_IRAM_PTR_EV_DB_OFFS (GSI_REG_BASE_OFFS + 0x0000041c)
+#define HWIO_GSI_IRAM_PTR_NEW_RE_ADDR (GSI_REG_BASE + 0x00000420)
+#define HWIO_GSI_IRAM_PTR_NEW_RE_PHYS (GSI_REG_BASE_PHYS + 0x00000420)
+#define HWIO_GSI_IRAM_PTR_NEW_RE_OFFS (GSI_REG_BASE_OFFS + 0x00000420)
+#define HWIO_GSI_IRAM_PTR_CH_DIS_COMP_ADDR (GSI_REG_BASE + 0x00000424)
+#define HWIO_GSI_IRAM_PTR_CH_DIS_COMP_PHYS (GSI_REG_BASE_PHYS + 0x00000424)
+#define HWIO_GSI_IRAM_PTR_CH_DIS_COMP_OFFS (GSI_REG_BASE_OFFS + 0x00000424)
+#define HWIO_GSI_IRAM_PTR_CH_EMPTY_ADDR (GSI_REG_BASE + 0x00000428)
+#define HWIO_GSI_IRAM_PTR_CH_EMPTY_PHYS (GSI_REG_BASE_PHYS + 0x00000428)
+#define HWIO_GSI_IRAM_PTR_CH_EMPTY_OFFS (GSI_REG_BASE_OFFS + 0x00000428)
+#define HWIO_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR (GSI_REG_BASE + 0x0000042c)
+#define HWIO_GSI_IRAM_PTR_EVENT_GEN_COMP_PHYS (GSI_REG_BASE_PHYS + \
+					       0x0000042c)
+#define HWIO_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS (GSI_REG_BASE_OFFS + \
+					       0x0000042c)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_ADDR (GSI_REG_BASE + \
+						   0x00000430)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_PHYS (GSI_REG_BASE_PHYS + \
+						   0x00000430)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS (GSI_REG_BASE_OFFS + \
+						   0x00000430)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_ADDR (GSI_REG_BASE + \
+						   0x00000434)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_PHYS (GSI_REG_BASE_PHYS + \
+						   0x00000434)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS (GSI_REG_BASE_OFFS + \
+						   0x00000434)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_ADDR (GSI_REG_BASE + \
+						   0x00000438)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_PHYS (GSI_REG_BASE_PHYS + \
+						   0x00000438)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS (GSI_REG_BASE_OFFS + \
+						   0x00000438)
+#define HWIO_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR (GSI_REG_BASE + 0x0000043c)
+#define HWIO_GSI_IRAM_PTR_TIMER_EXPIRED_PHYS (GSI_REG_BASE_PHYS + \
+					      0x0000043c)
+#define HWIO_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS (GSI_REG_BASE_OFFS + \
+					      0x0000043c)
+#define HWIO_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR (GSI_REG_BASE + 0x00000440)
+#define HWIO_GSI_IRAM_PTR_WRITE_ENG_COMP_PHYS (GSI_REG_BASE_PHYS + \
+					       0x00000440)
+#define HWIO_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS (GSI_REG_BASE_OFFS + \
+					       0x00000440)
+#define HWIO_GSI_IRAM_PTR_READ_ENG_COMP_ADDR (GSI_REG_BASE + 0x00000444)
+#define HWIO_GSI_IRAM_PTR_READ_ENG_COMP_PHYS (GSI_REG_BASE_PHYS + \
+					      0x00000444)
+#define HWIO_GSI_IRAM_PTR_READ_ENG_COMP_OFFS (GSI_REG_BASE_OFFS + \
+					      0x00000444)
+#define HWIO_GSI_IRAM_PTR_UC_GP_INT_ADDR (GSI_REG_BASE + 0x00000448)
+#define HWIO_GSI_IRAM_PTR_UC_GP_INT_PHYS (GSI_REG_BASE_PHYS + 0x00000448)
+#define HWIO_GSI_IRAM_PTR_UC_GP_INT_OFFS (GSI_REG_BASE_OFFS + 0x00000448)
+#define HWIO_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR (GSI_REG_BASE + 0x0000044c)
+#define HWIO_GSI_IRAM_PTR_INT_MOD_STOPPED_PHYS (GSI_REG_BASE_PHYS + \
+						0x0000044c)
+#define HWIO_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS (GSI_REG_BASE_OFFS + \
+						0x0000044c)
+#define HWIO_GSI_IRAM_PTR_SDMA_INT_n_ADDR(n) (GSI_REG_BASE + 0x00000450 + \
+					      0x4 * (n))
+#define HWIO_GSI_IRAM_PTR_SDMA_INT_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00000450 + 0x4 * (n))
+#define HWIO_GSI_IRAM_PTR_SDMA_INT_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00000450 + 0x4 * (n))
+#define HWIO_GSI_INST_RAM_n_ADDR(n) (GSI_REG_BASE + 0x0001b000 + 0x4 * (n))
+#define HWIO_GSI_INST_RAM_n_PHYS(n) (GSI_REG_BASE_PHYS + 0x0001b000 + \
+				     0x4 * (n))
+#define HWIO_GSI_INST_RAM_n_OFFS(n) (GSI_REG_BASE_OFFS + 0x0001b000 + \
+				     0x4 * (n))
+#define HWIO_GSI_SHRAM_n_ADDR(n) (GSI_REG_BASE + 0x00002000 + 0x4 * (n))
+#define HWIO_GSI_SHRAM_n_PHYS(n) (GSI_REG_BASE_PHYS + 0x00002000 + 0x4 * \
+				  (n))
+#define HWIO_GSI_SHRAM_n_OFFS(n) (GSI_REG_BASE_OFFS + 0x00002000 + 0x4 * \
+				  (n))
+#define HWIO_GSI_SHRAM_n_RMSK 0xffffffff
+#define HWIO_GSI_SHRAM_n_MAXn 1343
+#define HWIO_GSI_SHRAM_n_ATTR 0x3
+#define HWIO_GSI_SHRAM_n_INI(n) in_dword_masked(HWIO_GSI_SHRAM_n_ADDR( \
+							n), \
+						HWIO_GSI_SHRAM_n_RMSK)
+#define HWIO_GSI_SHRAM_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_GSI_SHRAM_n_ADDR(n), \
+		mask)
+#define HWIO_GSI_SHRAM_n_OUTI(n, val) out_dword(HWIO_GSI_SHRAM_n_ADDR( \
+							n), val)
+#define HWIO_GSI_SHRAM_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_GSI_SHRAM_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_GSI_SHRAM_n_INI(n))
+#define HWIO_GSI_SHRAM_n_SHRAM_BMSK 0xffffffff
+#define HWIO_GSI_SHRAM_n_SHRAM_SHFT 0x0
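+/*
+ * Editorial sketch: indexed registers take the index as a macro
+ * argument and publish an inclusive _MAXn bound, so the shared RAM
+ * defined above can be dumped with a bounded loop:
+ *
+ *   u32 i, word;
+ *
+ *   for (i = 0; i <= HWIO_GSI_SHRAM_n_MAXn; i++)
+ *           word = HWIO_GSI_SHRAM_n_INI(i);
+ */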
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k) (GSI_REG_BASE + \
+						    0x00003800 + 0x80 *	\
+						    (n) + 0x4 * (k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHYS(n, k) (GSI_REG_BASE_PHYS +	\
+						    0x00003800 + 0x80 *	\
+						    (n) + 0x4 * (k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(n, k) (GSI_REG_BASE_OFFS +	\
+						    0x00003800 + 0x80 *	\
+						    (n) + 0x4 * (k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK 0x3f
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXn 2
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXk 22
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ATTR 0x3
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k) in_dword_masked( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k), \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK)
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k), \
+		mask)
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTI2(n, k, val) out_dword( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k), \
+		val)
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTMI2(n, k, mask, \
+					       val) out_dword_masked_ns( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n,	\
+						     k), \
+		mask, \
+		val, \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0
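+/*
+ * Editorial sketch: two-dimensional register arrays use the _INI2 and
+ * _OUTI2 accessors with both indices, and their fields decode with the
+ * usual _BMSK/_SHFT pair (PHY_CH sits at shift 0, so the mask alone
+ * suffices). Resolving the physical channel backing EE n, virtual
+ * channel k:
+ *
+ *   u32 map = HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k);
+ *
+ *   if (map & HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK)
+ *           phy_ch = map & HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK;
+ */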
+#define HWIO_GSI_TEST_BUS_SEL_ADDR (GSI_REG_BASE + 0x00001000)
+#define HWIO_GSI_TEST_BUS_SEL_PHYS (GSI_REG_BASE_PHYS + 0x00001000)
+#define HWIO_GSI_TEST_BUS_SEL_OFFS (GSI_REG_BASE_OFFS + 0x00001000)
+#define HWIO_GSI_TEST_BUS_SEL_RMSK 0xf00ff
+#define HWIO_GSI_TEST_BUS_SEL_ATTR 0x3
+#define HWIO_GSI_TEST_BUS_SEL_IN in_dword_masked( \
+		HWIO_GSI_TEST_BUS_SEL_ADDR, \
+		HWIO_GSI_TEST_BUS_SEL_RMSK)
+#define HWIO_GSI_TEST_BUS_SEL_INM(m) in_dword_masked( \
+		HWIO_GSI_TEST_BUS_SEL_ADDR, \
+		m)
+#define HWIO_GSI_TEST_BUS_SEL_OUT(v) out_dword(HWIO_GSI_TEST_BUS_SEL_ADDR, \
+					       v)
+#define HWIO_GSI_TEST_BUS_SEL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_GSI_TEST_BUS_SEL_ADDR, \
+		m, \
+		v, \
+		HWIO_GSI_TEST_BUS_SEL_IN)
+#define HWIO_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_BMSK 0xf0000
+#define HWIO_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_SHFT 0x10
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK 0xff
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT 0x0
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_ZEROS_FVAL 0x0
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_0_FVAL 0x1
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_1_FVAL 0x2
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_2_FVAL 0x3
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_3_FVAL 0x4
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_4_FVAL 0x5
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_DB_ENG_FVAL 0x9
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_0_FVAL 0xb
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_1_FVAL 0xc
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_2_FVAL 0xd
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_3_FVAL 0xe
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_4_FVAL 0xf
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_5_FVAL 0x10
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_6_FVAL 0x11
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_7_FVAL 0x12
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_0_FVAL 0x13
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_1_FVAL 0x14
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_2_FVAL 0x15
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_3_FVAL 0x16
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_4_FVAL 0x17
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_5_FVAL 0x18
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_0_FVAL 0x1b
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_1_FVAL 0x1c
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_2_FVAL 0x1d
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_0_FVAL 0x1f
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_1_FVAL 0x20
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_2_FVAL 0x21
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_3_FVAL 0x22
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_4_FVAL 0x23
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_0_FVAL 0x27
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_1_FVAL 0x28
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_2_FVAL 0x29
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_3_FVAL 0x2a
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_0_FVAL 0x2b
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_1_FVAL 0x2c
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_2_FVAL 0x2d
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_3_FVAL 0x2e
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_0_FVAL 0x33
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_1_FVAL 0x34
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_2_FVAL 0x35
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_3_FVAL 0x36
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_FVAL 0x3a
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_0_FVAL 0x3c
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_1_FVAL 0x3d
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_1_FVAL 0x3e
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_2_FVAL 0x3f
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_5_FVAL 0x40
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_5_FVAL 0x41
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_3_FVAL 0x42
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TLV_0_FVAL 0x43
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_8_FVAL 0x44
+#define HWIO_GSI_TEST_BUS_REG_ADDR (GSI_REG_BASE + 0x00001008)
+#define HWIO_GSI_TEST_BUS_REG_PHYS (GSI_REG_BASE_PHYS + 0x00001008)
+#define HWIO_GSI_TEST_BUS_REG_OFFS (GSI_REG_BASE_OFFS + 0x00001008)
+#define HWIO_GSI_TEST_BUS_REG_RMSK 0xffffffff
+#define HWIO_GSI_TEST_BUS_REG_ATTR 0x1
+#define HWIO_GSI_TEST_BUS_REG_IN in_dword_masked( \
+		HWIO_GSI_TEST_BUS_REG_ADDR, \
+		HWIO_GSI_TEST_BUS_REG_RMSK)
+#define HWIO_GSI_TEST_BUS_REG_INM(m) in_dword_masked( \
+		HWIO_GSI_TEST_BUS_REG_ADDR, \
+		m)
+#define HWIO_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_BMSK 0xffffffff
+#define HWIO_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_SHFT 0x0
+#define HWIO_GSI_DEBUG_BUSY_REG_ADDR (GSI_REG_BASE + 0x00001010)
+#define HWIO_GSI_DEBUG_BUSY_REG_PHYS (GSI_REG_BASE_PHYS + 0x00001010)
+#define HWIO_GSI_DEBUG_BUSY_REG_OFFS (GSI_REG_BASE_OFFS + 0x00001010)
+#define HWIO_GSI_DEBUG_EVENT_PENDING_ADDR (GSI_REG_BASE + 0x00001014)
+#define HWIO_GSI_DEBUG_EVENT_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x00001014)
+#define HWIO_GSI_DEBUG_EVENT_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x00001014)
+#define HWIO_GSI_DEBUG_TIMER_PENDING_ADDR (GSI_REG_BASE + 0x00001018)
+#define HWIO_GSI_DEBUG_TIMER_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x00001018)
+#define HWIO_GSI_DEBUG_TIMER_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x00001018)
+#define HWIO_GSI_DEBUG_RD_WR_PENDING_ADDR (GSI_REG_BASE + 0x0000101c)
+#define HWIO_GSI_DEBUG_RD_WR_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x0000101c)
+#define HWIO_GSI_DEBUG_RD_WR_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x0000101c)
+#define HWIO_GSI_DEBUG_COUNTER_CFGn_ADDR(n) (GSI_REG_BASE + 0x00001200 + \
+					     0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTER_CFGn_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00001200 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTER_CFGn_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00001200 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTERn_ADDR(n) (GSI_REG_BASE + 0x00001240 + 0x4 * \
+					 (n))
+#define HWIO_GSI_DEBUG_COUNTERn_PHYS(n) (GSI_REG_BASE_PHYS + 0x00001240 + \
+					 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTERn_OFFS(n) (GSI_REG_BASE_OFFS + 0x00001240 + \
+					 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTERn_RMSK 0xffff
+#define HWIO_GSI_DEBUG_COUNTERn_MAXn 7
+#define HWIO_GSI_DEBUG_COUNTERn_ATTR 0x1
+#define HWIO_GSI_DEBUG_COUNTERn_INI(n) in_dword_masked(	\
+		HWIO_GSI_DEBUG_COUNTERn_ADDR(n), \
+		HWIO_GSI_DEBUG_COUNTERn_RMSK)
+#define HWIO_GSI_DEBUG_COUNTERn_INMI(n, mask) in_dword_masked( \
+		HWIO_GSI_DEBUG_COUNTERn_ADDR(n), \
+		mask)
+#define HWIO_GSI_DEBUG_COUNTERn_COUNTER_VALUE_BMSK 0xffff
+#define HWIO_GSI_DEBUG_COUNTERn_COUNTER_VALUE_SHFT 0x0
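+/*
+ * Editorial note: in this header, read-only registers such as the
+ * debug counters carry _ATTR 0x1 and define only the _INI/_INMI
+ * readers, while read/write registers carry _ATTR 0x3 and add the
+ * _OUTI/_OUTMI writers. Sampling one of the eight 16-bit counters:
+ *
+ *   u32 cnt = HWIO_GSI_DEBUG_COUNTERn_INI(0) &
+ *             HWIO_GSI_DEBUG_COUNTERn_COUNTER_VALUE_BMSK;
+ */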
+#define HWIO_GSI_DEBUG_PC_FROM_SW_ADDR (GSI_REG_BASE + 0x00001040)
+#define HWIO_GSI_DEBUG_PC_FROM_SW_PHYS (GSI_REG_BASE_PHYS + 0x00001040)
+#define HWIO_GSI_DEBUG_PC_FROM_SW_OFFS (GSI_REG_BASE_OFFS + 0x00001040)
+#define HWIO_GSI_DEBUG_SW_STALL_ADDR (GSI_REG_BASE + 0x00001044)
+#define HWIO_GSI_DEBUG_SW_STALL_PHYS (GSI_REG_BASE_PHYS + 0x00001044)
+#define HWIO_GSI_DEBUG_SW_STALL_OFFS (GSI_REG_BASE_OFFS + 0x00001044)
+#define HWIO_GSI_DEBUG_PC_FOR_DEBUG_ADDR (GSI_REG_BASE + 0x00001048)
+#define HWIO_GSI_DEBUG_PC_FOR_DEBUG_PHYS (GSI_REG_BASE_PHYS + 0x00001048)
+#define HWIO_GSI_DEBUG_PC_FOR_DEBUG_OFFS (GSI_REG_BASE_OFFS + 0x00001048)
+#define HWIO_GSI_DEBUG_QSB_LOG_SEL_ADDR (GSI_REG_BASE + 0x00001050)
+#define HWIO_GSI_DEBUG_QSB_LOG_SEL_PHYS (GSI_REG_BASE_PHYS + 0x00001050)
+#define HWIO_GSI_DEBUG_QSB_LOG_SEL_OFFS (GSI_REG_BASE_OFFS + 0x00001050)
+#define HWIO_GSI_DEBUG_QSB_LOG_CLR_ADDR (GSI_REG_BASE + 0x00001058)
+#define HWIO_GSI_DEBUG_QSB_LOG_CLR_PHYS (GSI_REG_BASE_PHYS + 0x00001058)
+#define HWIO_GSI_DEBUG_QSB_LOG_CLR_OFFS (GSI_REG_BASE_OFFS + 0x00001058)
+#define HWIO_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR (GSI_REG_BASE + 0x00001060)
+#define HWIO_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_PHYS (GSI_REG_BASE_PHYS + \
+						 0x00001060)
+#define HWIO_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_OFFS (GSI_REG_BASE_OFFS + \
+						 0x00001060)
+#define HWIO_GSI_DEBUG_QSB_LOG_0_ADDR (GSI_REG_BASE + 0x00001064)
+#define HWIO_GSI_DEBUG_QSB_LOG_0_PHYS (GSI_REG_BASE_PHYS + 0x00001064)
+#define HWIO_GSI_DEBUG_QSB_LOG_0_OFFS (GSI_REG_BASE_OFFS + 0x00001064)
+#define HWIO_GSI_DEBUG_QSB_LOG_1_ADDR (GSI_REG_BASE + 0x00001068)
+#define HWIO_GSI_DEBUG_QSB_LOG_1_PHYS (GSI_REG_BASE_PHYS + 0x00001068)
+#define HWIO_GSI_DEBUG_QSB_LOG_1_OFFS (GSI_REG_BASE_OFFS + 0x00001068)
+#define HWIO_GSI_DEBUG_QSB_LOG_2_ADDR (GSI_REG_BASE + 0x0000106c)
+#define HWIO_GSI_DEBUG_QSB_LOG_2_PHYS (GSI_REG_BASE_PHYS + 0x0000106c)
+#define HWIO_GSI_DEBUG_QSB_LOG_2_OFFS (GSI_REG_BASE_OFFS + 0x0000106c)
+#define HWIO_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR(n) (GSI_REG_BASE + \
+						      0x00001070 + 0x4 * \
+						      (n))
+#define HWIO_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_PHYS(n) (GSI_REG_BASE_PHYS + \
+						      0x00001070 + 0x4 * \
+						      (n))
+#define HWIO_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_OFFS(n) (GSI_REG_BASE_OFFS + \
+						      0x00001070 + 0x4 * \
+						      (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_WRITE_ADDR(n) (GSI_REG_BASE + 0x00001080 + \
+					      0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_WRITE_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00001080 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_WRITE_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00001080 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_ADDR(n) (GSI_REG_BASE + 0x00001100 + \
+					     0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00001100 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00001100 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_RMSK 0xffffffff
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_MAXn 31
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_ATTR 0x1
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_INI(n) in_dword_masked( \
+		HWIO_GSI_DEBUG_SW_RF_n_READ_ADDR(n), \
+		HWIO_GSI_DEBUG_SW_RF_n_READ_RMSK)
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_INMI(n, mask) in_dword_masked( \
+		HWIO_GSI_DEBUG_SW_RF_n_READ_ADDR(n), \
+		mask)
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_RF_REG_BMSK 0xffffffff
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_RF_REG_SHFT 0x0
+#define HWIO_GSI_DEBUG_EE_n_CH_k_VP_TABLE_ADDR(n, k) (GSI_REG_BASE + \
+						      0x00001400 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						      0x00001400 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						      0x00001400 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k) (GSI_REG_BASE + \
+						      0x00001600 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						      0x00001600 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						      0x00001600 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK 0x3f
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXn 3
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXk 19
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ATTR 0x1
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INI2(n, k) in_dword_masked( \
+		HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k), \
+		HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK)
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INMI2(n, k, \
+						mask) in_dword_masked( \
+		HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, \
+						       k), \
+		mask)
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_BMSK 0x1f
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_SHFT 0x0
+#define HWIO_GSI_DEBUG_SDMA_TRANS_DB_n_ADDR(n) (GSI_REG_BASE + \
+						0x00001800 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SDMA_TRANS_DB_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+						0x00001800 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SDMA_TRANS_DB_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+						0x00001800 + 0x4 * (n))
+#define HWIO_GSI_UC_SRC_IRQ_ADDR (GSI_REG_BASE + 0x00000500)
+#define HWIO_GSI_UC_SRC_IRQ_PHYS (GSI_REG_BASE_PHYS + 0x00000500)
+#define HWIO_GSI_UC_SRC_IRQ_OFFS (GSI_REG_BASE_OFFS + 0x00000500)
+#define HWIO_GSI_UC_SRC_IRQ_MSK_ADDR (GSI_REG_BASE + 0x00000504)
+#define HWIO_GSI_UC_SRC_IRQ_MSK_PHYS (GSI_REG_BASE_PHYS + 0x00000504)
+#define HWIO_GSI_UC_SRC_IRQ_MSK_OFFS (GSI_REG_BASE_OFFS + 0x00000504)
+#define HWIO_GSI_UC_SRC_IRQ_CLR_ADDR (GSI_REG_BASE + 0x00000508)
+#define HWIO_GSI_UC_SRC_IRQ_CLR_PHYS (GSI_REG_BASE_PHYS + 0x00000508)
+#define HWIO_GSI_UC_SRC_IRQ_CLR_OFFS (GSI_REG_BASE_OFFS + 0x00000508)
+#define HWIO_GSI_ACC_ARGS_n_ADDR(n) (GSI_REG_BASE + 0x0000050c + 0x4 * (n))
+#define HWIO_GSI_ACC_ARGS_n_PHYS(n) (GSI_REG_BASE_PHYS + 0x0000050c + \
+				     0x4 * (n))
+#define HWIO_GSI_ACC_ARGS_n_OFFS(n) (GSI_REG_BASE_OFFS + 0x0000050c + \
+				     0x4 * (n))
+#define HWIO_GSI_ACC_ROUTINE_ADDR (GSI_REG_BASE + 0x00000524)
+#define HWIO_GSI_ACC_ROUTINE_PHYS (GSI_REG_BASE_PHYS + 0x00000524)
+#define HWIO_GSI_ACC_ROUTINE_OFFS (GSI_REG_BASE_OFFS + 0x00000524)
+#define HWIO_GSI_ACC_GO_ADDR (GSI_REG_BASE + 0x00000528)
+#define HWIO_GSI_ACC_GO_PHYS (GSI_REG_BASE_PHYS + 0x00000528)
+#define HWIO_GSI_ACC_GO_OFFS (GSI_REG_BASE_OFFS + 0x00000528)
+#define HWIO_GSI_ACC_2_UC_MCS_STTS_ADDR (GSI_REG_BASE + 0x0000052c)
+#define HWIO_GSI_ACC_2_UC_MCS_STTS_PHYS (GSI_REG_BASE_PHYS + 0x0000052c)
+#define HWIO_GSI_ACC_2_UC_MCS_STTS_OFFS (GSI_REG_BASE_OFFS + 0x0000052c)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_LSB_ADDR (GSI_REG_BASE + 0x00000530)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_LSB_PHYS (GSI_REG_BASE_PHYS + \
+						0x00000530)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_LSB_OFFS (GSI_REG_BASE_OFFS + \
+						0x00000530)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_MSB_ADDR (GSI_REG_BASE + 0x00000534)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_MSB_PHYS (GSI_REG_BASE_PHYS + \
+						0x00000534)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_MSB_OFFS (GSI_REG_BASE_OFFS + \
+						0x00000534)
+#define HWIO_GSI_IC_2_UC_MCS_VLD_ADDR (GSI_REG_BASE + 0x00000538)
+#define HWIO_GSI_IC_2_UC_MCS_VLD_PHYS (GSI_REG_BASE_PHYS + 0x00000538)
+#define HWIO_GSI_IC_2_UC_MCS_VLD_OFFS (GSI_REG_BASE_OFFS + 0x00000538)
+#define HWIO_GSI_IC_2_UC_MCS_PC_ADDR (GSI_REG_BASE + 0x0000053c)
+#define HWIO_GSI_IC_2_UC_MCS_PC_PHYS (GSI_REG_BASE_PHYS + 0x0000053c)
+#define HWIO_GSI_IC_2_UC_MCS_PC_OFFS (GSI_REG_BASE_OFFS + 0x0000053c)
+#define HWIO_GSI_IC_2_UC_MCS_ARGS_n_ADDR(n) (GSI_REG_BASE + 0x00000540 + \
+					     0x4 * (n))
+#define HWIO_GSI_IC_2_UC_MCS_ARGS_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00000540 + 0x4 * (n))
+#define HWIO_GSI_IC_2_UC_MCS_ARGS_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00000540 + 0x4 * (n))
+#define HWIO_GSI_UC_TLV_IN_VLD_ADDR (GSI_REG_BASE + 0x00000558)
+#define HWIO_GSI_UC_TLV_IN_VLD_PHYS (GSI_REG_BASE_PHYS + 0x00000558)
+#define HWIO_GSI_UC_TLV_IN_VLD_OFFS (GSI_REG_BASE_OFFS + 0x00000558)
+#define HWIO_GSI_UC_TLV_IN_ROUTINE_ADDR (GSI_REG_BASE + 0x0000055c)
+#define HWIO_GSI_UC_TLV_IN_ROUTINE_PHYS (GSI_REG_BASE_PHYS + 0x0000055c)
+#define HWIO_GSI_UC_TLV_IN_ROUTINE_OFFS (GSI_REG_BASE_OFFS + 0x0000055c)
+#define HWIO_GSI_UC_TLV_IN_ARGS_n_ADDR(n) (GSI_REG_BASE + 0x00000560 + \
+					   0x4 * (n))
+#define HWIO_GSI_UC_TLV_IN_ARGS_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00000560 + 0x4 * (n))
+#define HWIO_GSI_UC_TLV_IN_ARGS_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00000560 + 0x4 * (n))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k) (GSI_REG_BASE + 0x0000f000 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f000 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f000 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_RMSK 0xfff7ffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL 0x2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOPPED_FVAL 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOP_IN_PROC_FVAL 0x4
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ERROR_FVAL 0xf
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK 0x2000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT 0xd
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_INBOUND_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_OUTBOUND_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MHI_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XHCI_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_GPI_FVAL 0x2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XDCI_FVAL 0x3
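+/*
+ * Editorial sketch: enumerated fields additionally get _FVAL
+ * constants, so the channel state read back from CNTXT_0 can be
+ * compared symbolically (handle_running_channel() below is a
+ * hypothetical caller):
+ *
+ *   u32 ctx = HWIO_EE_n_GSI_CH_k_CNTXT_0_INI2(ee, ch);
+ *   u32 state = (ctx & HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ *               HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+ *
+ *   if (state == HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL)
+ *           handle_running_channel();
+ */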
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k) (GSI_REG_BASE + 0x0000f004 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f004 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f004 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k) (GSI_REG_BASE + 0x0000f008 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f008 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f008 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k) (GSI_REG_BASE + 0x0000f00c + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f00c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f00c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k) (GSI_REG_BASE + 0x0000f010 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f010 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f010 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k) (GSI_REG_BASE + 0x0000f014 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f014 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f014 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_5_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k) (GSI_REG_BASE + 0x0000f018 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f018 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f018 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_6_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k) (GSI_REG_BASE + 0x0000f01c + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f01c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f01c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_7_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k) (GSI_REG_BASE +	\
+							 0x0000f054 + \
+							 0x4000 * (n) +	\
+							 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_PHYS(n, \
+						  k) (GSI_REG_BASE_PHYS + \
+						      0x0000f054 + \
+						      0x4000 * (n) + \
+						      0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(n, \
+						  k) (GSI_REG_BASE_OFFS + \
+						      0x0000f054 + \
+						      0x4000 * (n) + \
+						      0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INMI2(n, k, \
+						   mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, \
+							  k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTMI2(n, k, mask,	\
+						    val) \
+	out_dword_masked_ns(HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR( \
+				    n, \
+				    k), mask, val, \
+			    HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, k) (GSI_REG_BASE + \
+							  0x0000f058 + \
+							  0x4000 * (n) + \
+							  0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_PHYS(n, \
+						   k) (GSI_REG_BASE_PHYS + \
+						       0x0000f058 + \
+						       0x4000 * (n) + \
+						       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(n, \
+						   k) (GSI_REG_BASE_OFFS + \
+						       0x0000f058 + \
+						       0x4000 * (n) + \
+						       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, \
+								       k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k) (GSI_REG_BASE + 0x0000f05c + \
+					   0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_QOS_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					   0x0000f05c + 0x4000 * (n) + \
+					   0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_QOS_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					   0x0000f05c + 0x4000 * (n) + \
+					   0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_QOS_RMSK 0xff3f0f
+#define HWIO_EE_n_GSI_CH_k_QOS_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_QOS_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_QOS_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_QOS_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_QOS_RMSK)
+#define HWIO_EE_n_GSI_CH_k_QOS_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_QOS_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_QOS_OUTMI2(n, k, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_QOS_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK 0xff0000
+#define HWIO_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_USE_PREFETCH_BUFS_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_ESCAPE_BUF_ONLY_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SMART_PRE_FETCH_FVAL 0x2
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_FREE_PRE_FETCH_FVAL 0x3
+#define HWIO_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define HWIO_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_ONE_PREFETCH_SEG_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_TWO_PREFETCH_SEG_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define HWIO_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
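+/*
+ * Editorial sketch: the masked writers update a single field without
+ * disturbing the rest of the register, e.g. selecting the
+ * escape-buffer-only prefetch mode for one channel:
+ *
+ *   HWIO_EE_n_GSI_CH_k_QOS_OUTMI2(ee, ch,
+ *           HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK,
+ *           HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_ESCAPE_BUF_ONLY_FVAL <<
+ *           HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT);
+ */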
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f060 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f060 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f060 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f064 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f064 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f064 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f068 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f068 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f068 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f06c + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f06c + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f06c + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_ADDR(n, k) (GSI_REG_BASE + \
+							0x0000f070 + \
+							0x4000 * (n) + \
+							0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_PHYS(n, \
+						 k) (GSI_REG_BASE_PHYS + \
+						     0x0000f070 + 0x4000 * \
+						     (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_OFFS(n, \
+						 k) (GSI_REG_BASE_OFFS + \
+						     0x0000f070 + 0x4000 * \
+						     (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k) (GSI_REG_BASE + 0x00010000 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010000 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010000 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_RMSK 0xfff1ffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_MSI_FVAL 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_IRQ_FVAL 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_MHI_EV_FVAL 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XHCI_EV_FVAL 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_GPI_EV_FVAL 0x2
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XDCI_FVAL 0x3
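+/*
+ * Illustrative only: a hedged sketch of decoding a field with the
+ * INMI2/BMSK/SHFT pattern, here reading the event channel state for
+ * EE `n`, event channel `k`:
+ *
+ *	u32 chstate = HWIO_EE_n_EV_CH_k_CNTXT_0_INMI2(n, k,
+ *			HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ *		      HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
+ *	if (chstate == HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL)
+ *		...;	/- channel context is allocated -/
+ */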
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k) (GSI_REG_BASE + 0x00010004 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010004 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010004 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k) (GSI_REG_BASE + 0x00010008 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010008 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010008 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k) (GSI_REG_BASE + 0x0001000c + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x0001000c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x0001000c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k) (GSI_REG_BASE + 0x00010010 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010010 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010010 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k) (GSI_REG_BASE + 0x00010014 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010014 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010014 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_5_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k) (GSI_REG_BASE + 0x00010018 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010018 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010018 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_6_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k) (GSI_REG_BASE + 0x0001001c + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x0001001c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x0001001c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_7_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k) (GSI_REG_BASE + 0x00010020 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010020 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010020 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k) (GSI_REG_BASE + 0x00010024 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010024 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010024 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k) (GSI_REG_BASE + 0x00010028 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x00010028 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x00010028 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k) (GSI_REG_BASE + 0x0001002c + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0001002c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0001002c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k) (GSI_REG_BASE + 0x00010030 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x00010030 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x00010030 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k) (GSI_REG_BASE + 0x00010034 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x00010034 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x00010034 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k) (GSI_REG_BASE + \
+						0x00010048 + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						0x00010048 + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						0x00010048 + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_MAXk 19
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k) in_dword_masked(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k),	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_RMSK)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k),	\
+		mask)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_OUTI2(n, k, val) out_dword(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k),	\
+		val)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_OUTMI2(n, k, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, \
+						 k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k) (GSI_REG_BASE + \
+						0x0001004c + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						0x0001004c + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						0x0001004c + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_MAXk 19
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k) in_dword_masked(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k),	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_RMSK)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k),	\
+		mask)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_OUTI2(n, k, val) out_dword(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k),	\
+		val)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_OUTMI2(n, k, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, \
+						 k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_0_ADDR(n, k) (GSI_REG_BASE + \
+						  0x00011000 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						  0x00011000 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						  0x00011000 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_1_ADDR(n, k) (GSI_REG_BASE + \
+						  0x00011004 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						  0x00011004 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						  0x00011004 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_0_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x00011100 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x00011100 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x00011100 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_1_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x00011104 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x00011104 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x00011104 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_STATUS_ADDR(n) (GSI_REG_BASE + 0x00012000 + 0x4000 * \
+				      (n))
+#define HWIO_EE_n_GSI_STATUS_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012000 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_GSI_STATUS_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012000 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_GSI_STATUS_RMSK 0x1
+#define HWIO_EE_n_GSI_STATUS_MAXn 2
+#define HWIO_EE_n_GSI_STATUS_ATTR 0x1
+#define HWIO_EE_n_GSI_STATUS_INI(n) in_dword_masked( \
+		HWIO_EE_n_GSI_STATUS_ADDR(n), \
+		HWIO_EE_n_GSI_STATUS_RMSK)
+#define HWIO_EE_n_GSI_STATUS_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_STATUS_ADDR(n), \
+		mask)
+#define HWIO_EE_n_GSI_STATUS_ENABLED_BMSK 0x1
+#define HWIO_EE_n_GSI_STATUS_ENABLED_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x00012008 + 0x4000 * \
+				      (n))
+#define HWIO_EE_n_GSI_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012008 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_GSI_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012008 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_EV_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x00012010 + 0x4000 * \
+				     (n))
+#define HWIO_EE_n_EV_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012010 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_EV_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012010 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_GSI_EE_GENERIC_CMD_ADDR(n) (GSI_REG_BASE + 0x00012018 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_GSI_EE_GENERIC_CMD_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012018 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012018 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_0_ADDR(n) (GSI_REG_BASE + 0x00012038 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_0_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012038 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_0_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012038 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_1_ADDR(n) (GSI_REG_BASE + 0x0001203c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_1_PHYS(n) (GSI_REG_BASE_PHYS + 0x0001203c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_1_OFFS(n) (GSI_REG_BASE_OFFS + 0x0001203c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_2_ADDR(n) (GSI_REG_BASE + 0x00012040 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_2_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012040 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_2_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012040 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_SW_VERSION_ADDR(n) (GSI_REG_BASE + 0x00012044 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_SW_VERSION_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012044 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_SW_VERSION_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012044 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_MCS_CODE_VER_ADDR(n) (GSI_REG_BASE + 0x00012048 +	\
+					    0x4000 * (n))
+#define HWIO_EE_n_GSI_MCS_CODE_VER_PHYS(n) (GSI_REG_BASE_PHYS +	\
+					    0x00012048 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_MCS_CODE_VER_OFFS(n) (GSI_REG_BASE_OFFS +	\
+					    0x00012048 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_3_ADDR(n) (GSI_REG_BASE + 0x0001204c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_3_PHYS(n) (GSI_REG_BASE_PHYS + 0x0001204c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_3_OFFS(n) (GSI_REG_BASE_OFFS + 0x0001204c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_ADDR(n) (GSI_REG_BASE + 0x00012080 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012080 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012080 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_RMSK 0x7f
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INMI(n, mask) in_dword_masked(	\
+		HWIO_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n) (GSI_REG_BASE + 0x00012088 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012088 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012088 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK 0x7f
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK 0x20
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK 0x10
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n) (GSI_REG_BASE + \
+						0x00012090 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+						0x00012090 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+						0x00012090 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n),	\
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n),	\
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n) (GSI_REG_BASE + 0x00012094 + \
+					       0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x00012094 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x00012094 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						    0x00012098 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x00012098 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x00012098 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK 0x7fffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTMI(n, mask, \
+						 val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
+	0x7fffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						   0x0001209c + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x0001209c + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x0001209c + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						    0x000120a0 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x000120a0 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x000120a0 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						   0x000120a4 + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x000120a4 + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x000120a4 + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n) (GSI_REG_BASE + 0x000120b0 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x000120b0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x000120b0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						  0x000120b8 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+						  0x000120b8 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+						  0x000120b8 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INMI(n, mask) in_dword_masked(	\
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTMI(n, mask,	\
+					       val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						  0x000120c0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+						  0x000120c0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+						  0x000120c0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ATTR 0x2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
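+/*
+ * Illustrative only: a hedged sketch of a poll-and-acknowledge
+ * sequence for the IEOB interrupt sources of EE `n`; write-1-to-clear
+ * semantics are assumed here from the _CLR register naming:
+ *
+ *	u32 pending = HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_INI(n);
+ *	HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OUTI(n, pending);
+ */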
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n) (GSI_REG_BASE + 0x00012100 + \
+					       0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x00012100 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x00012100 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK 0xf
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_MAXn 2
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ATTR 0x1
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK)
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_EN_ADDR(n) (GSI_REG_BASE + 0x00012108 + \
+					     0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_EN_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00012108 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00012108 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_CLR_ADDR(n) (GSI_REG_BASE + 0x00012110 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012110 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012110 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n) (GSI_REG_BASE + 0x00012118 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012118 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012118 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_RMSK 0xf
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_MAXn 2
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ATTR 0x1
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n), \
+		HWIO_EE_n_CNTXT_GSI_IRQ_STTS_RMSK)
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0
+#define HWIO_EE_n_CNTXT_GSI_IRQ_EN_ADDR(n) (GSI_REG_BASE + 0x00012120 +	\
+					    0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_EN_PHYS(n) (GSI_REG_BASE_PHYS +	\
+					    0x00012120 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) (GSI_REG_BASE_OFFS +	\
+					    0x00012120 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_CLR_ADDR(n) (GSI_REG_BASE + 0x00012128 + \
+					     0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00012128 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00012128 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_ADDR(n) (GSI_REG_BASE + 0x00012180 + \
+					0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012180 + \
+					0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012180 + \
+					0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_RMSK 0x1
+#define HWIO_EE_n_CNTXT_INTSET_MAXn 2
+#define HWIO_EE_n_CNTXT_INTSET_ATTR 0x3
+#define HWIO_EE_n_CNTXT_INTSET_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		HWIO_EE_n_CNTXT_INTSET_RMSK)
+#define HWIO_EE_n_CNTXT_INTSET_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		mask)
+#define HWIO_EE_n_CNTXT_INTSET_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		val)
+#define HWIO_EE_n_CNTXT_INTSET_OUTMI(n, mask, val) out_dword_masked_ns(	\
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_INTSET_INI(n))
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_MSI_FVAL 0x0
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_IRQ_FVAL 0x1
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n) (GSI_REG_BASE + 0x00012188 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012188 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012188 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_MAXn 2
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_ATTR 0x3
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_RMSK)
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_INI(n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n) (GSI_REG_BASE + 0x0001218c + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x0001218c + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x0001218c + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_MAXn 2
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_ATTR 0x3
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_RMSK)
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_INI(n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_EE_n_CNTXT_INT_VEC_ADDR(n) (GSI_REG_BASE + 0x00012190 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INT_VEC_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012190 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INT_VEC_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012190 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_ADDR(n) (GSI_REG_BASE + 0x00012200 + 0x4000 * \
+				     (n))
+#define HWIO_EE_n_ERROR_LOG_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012200 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012200 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_RMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_MAXn 2
+#define HWIO_EE_n_ERROR_LOG_ATTR 0x3
+#define HWIO_EE_n_ERROR_LOG_INI(n) in_dword_masked( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		HWIO_EE_n_ERROR_LOG_RMSK)
+#define HWIO_EE_n_ERROR_LOG_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		mask)
+#define HWIO_EE_n_ERROR_LOG_OUTI(n, val) out_dword( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		val)
+#define HWIO_EE_n_ERROR_LOG_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_EE_n_ERROR_LOG_INI(n))
+#define HWIO_EE_n_ERROR_LOG_ERROR_LOG_BMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_ERROR_LOG_SHFT 0x0
+#define HWIO_EE_n_ERROR_LOG_CLR_ADDR(n) (GSI_REG_BASE + 0x00012210 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_CLR_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012210 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_CLR_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012210 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_CLR_MAXn 2
+#define HWIO_EE_n_ERROR_LOG_CLR_ATTR 0x2
+#define HWIO_EE_n_ERROR_LOG_CLR_OUTI(n, val) out_dword(	\
+		HWIO_EE_n_ERROR_LOG_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_BMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n) (GSI_REG_BASE + 0x00012400 + \
+					   0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00012400 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00012400 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_0_MAXn 2
+#define HWIO_EE_n_CNTXT_SCRATCH_0_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SCRATCH_0_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		HWIO_EE_n_CNTXT_SCRATCH_0_RMSK)
+#define HWIO_EE_n_CNTXT_SCRATCH_0_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SCRATCH_0_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SCRATCH_0_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SCRATCH_0_INI(n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n) (GSI_REG_BASE + 0x00012404 + \
+					   0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00012404 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00012404 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_1_MAXn 2
+#define HWIO_EE_n_CNTXT_SCRATCH_1_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SCRATCH_1_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		HWIO_EE_n_CNTXT_SCRATCH_1_RMSK)
+#define HWIO_EE_n_CNTXT_SCRATCH_1_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SCRATCH_1_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SCRATCH_1_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SCRATCH_1_INI(n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_GSI_MCS_CFG_ADDR (GSI_REG_BASE + 0x0000b000)
+#define HWIO_GSI_MCS_CFG_PHYS (GSI_REG_BASE_PHYS + 0x0000b000)
+#define HWIO_GSI_MCS_CFG_OFFS (GSI_REG_BASE_OFFS + 0x0000b000)
+#define HWIO_GSI_TZ_FW_AUTH_LOCK_ADDR (GSI_REG_BASE + 0x0000b008)
+#define HWIO_GSI_TZ_FW_AUTH_LOCK_PHYS (GSI_REG_BASE_PHYS + 0x0000b008)
+#define HWIO_GSI_TZ_FW_AUTH_LOCK_OFFS (GSI_REG_BASE_OFFS + 0x0000b008)
+#define HWIO_GSI_MSA_FW_AUTH_LOCK_ADDR (GSI_REG_BASE + 0x0000b010)
+#define HWIO_GSI_MSA_FW_AUTH_LOCK_PHYS (GSI_REG_BASE_PHYS + 0x0000b010)
+#define HWIO_GSI_MSA_FW_AUTH_LOCK_OFFS (GSI_REG_BASE_OFFS + 0x0000b010)
+#define HWIO_GSI_SP_FW_AUTH_LOCK_ADDR (GSI_REG_BASE + 0x0000b018)
+#define HWIO_GSI_SP_FW_AUTH_LOCK_PHYS (GSI_REG_BASE_PHYS + 0x0000b018)
+#define HWIO_GSI_SP_FW_AUTH_LOCK_OFFS (GSI_REG_BASE_OFFS + 0x0000b018)
+#define HWIO_INTER_EE_n_ORIGINATOR_EE_ADDR(n) (GSI_REG_BASE + 0x0000c000 + \
+					       0x1000 * (n))
+#define HWIO_INTER_EE_n_ORIGINATOR_EE_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x0000c000 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_ORIGINATOR_EE_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x0000c000 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_GSI_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x0000c008 +	\
+					    0x1000 * (n))
+#define HWIO_INTER_EE_n_GSI_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS +	\
+					    0x0000c008 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_GSI_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS +	\
+					    0x0000c008 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_EV_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x0000c010 + \
+					   0x1000 * (n))
+#define HWIO_INTER_EE_n_EV_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x0000c010 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_EV_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x0000c010 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_ADDR(n) (GSI_REG_BASE + \
+						0x0000c018 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+						0x0000c018 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+						0x0000c018 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_ADDR(n) (GSI_REG_BASE + 0x0000c01c + \
+					       0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x0000c01c + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x0000c01c + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						    0x0000c020 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x0000c020 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x0000c020 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						   0x0000c024 + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x0000c024 + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x0000c024 + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						    0x0000c028 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x0000c028 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x0000c028 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						   0x0000c02c + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x0000c02c + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x0000c02c + 0x1000 * \
+						   (n))
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio_def.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio_def.h
new file mode 100644
index 0000000..efd0a2b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio_def.h
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_GSI_HWIO_DEF_H_)
+#define _GSI_HWIO_DEF_H_
+struct gsi_hwio_def_gsi_cfg_s {
+	u32	gsi_enable : 1;
+	u32	mcs_enable : 1;
+	u32	double_mcs_clk_freq : 1;
+	u32	uc_is_mcs : 1;
+	u32	gsi_pwr_clps : 1;
+	u32	bp_mtrix_disable : 1;
+	u32	reserved0 : 2;
+	u32	sleep_clk_div : 4;
+	u32	reserved1 : 20;
+};
+union gsi_hwio_def_gsi_cfg_u {
+	struct gsi_hwio_def_gsi_cfg_s	def;
+	u32				value;
+};
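+/*
+ * Illustrative only: each register below pairs a bit-field struct with
+ * a union so a raw 32-bit value can be decoded by field name.  A
+ * hedged sketch, assuming `raw` was read from the GSI_CFG register:
+ *
+ *	union gsi_hwio_def_gsi_cfg_u cfg;
+ *
+ *	cfg.value = raw;
+ *	if (cfg.def.gsi_enable && cfg.def.mcs_enable)
+ *		...;	/- both GSI and its MCS engine are enabled -/
+ */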
+struct gsi_hwio_def_gsi_ree_cfg_s {
+	u32	move_to_esc_clr_mode_trsh : 1;
+	u32	channel_empty_int_enable : 1;
+	u32	reserved0 : 6;
+	u32	max_burst_size : 8;
+	u32	reserved1 : 16;
+};
+union gsi_hwio_def_gsi_ree_cfg_u {
+	struct gsi_hwio_def_gsi_ree_cfg_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_manager_ee_qos_n_s {
+	u32	ee_prio : 2;
+	u32	reserved0 : 6;
+	u32	max_ch_alloc : 5;
+	u32	reserved1 : 3;
+	u32	max_ev_alloc : 5;
+	u32	reserved2 : 11;
+};
+union gsi_hwio_def_gsi_manager_ee_qos_n_u {
+	struct gsi_hwio_def_gsi_manager_ee_qos_n_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_gsi_shram_n_s {
+	u32 shram : 32;
+};
+union gsi_hwio_def_gsi_shram_n_u {
+	struct gsi_hwio_def_gsi_shram_n_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s {
+	u32	phy_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_u {
+	struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_gsi_test_bus_sel_s {
+	u32	gsi_testbus_sel : 8;
+	u32	reserved0 : 8;
+	u32	gsi_hw_events_sel : 4;
+	u32	reserved1 : 12;
+};
+union gsi_hwio_def_gsi_test_bus_sel_u {
+	struct gsi_hwio_def_gsi_test_bus_sel_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_test_bus_reg_s {
+	u32 gsi_testbus_reg : 32;
+};
+union gsi_hwio_def_gsi_test_bus_reg_u {
+	struct gsi_hwio_def_gsi_test_bus_reg_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_debug_countern_s {
+	u32	counter_value : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_gsi_debug_countern_u {
+	struct gsi_hwio_def_gsi_debug_countern_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_gsi_debug_sw_rf_n_read_s {
+	u32 rf_reg : 32;
+};
+union gsi_hwio_def_gsi_debug_sw_rf_n_read_u {
+	struct gsi_hwio_def_gsi_debug_sw_rf_n_read_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s {
+	u32	phy_ev_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_u {
+	struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s {
+	u32	chtype_protocol : 3;
+	u32	chtype_dir : 1;
+	u32	ee : 4;
+	u32	chid : 5;
+	u32	chtype_protocol_msb : 1;
+	u32	erindex : 5;
+	u32	reserved0 : 1;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s {
+	u32	read_ptr : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s {
+	u32	re_intr_db : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s {
+	u32	wrr_weight : 4;
+	u32	reserved0 : 4;
+	u32	max_prefetch : 1;
+	u32	use_db_eng : 1;
+	u32	prefetch_mode : 4;
+	u32	reserved1 : 2;
+	u32	empty_lvl_thrshold : 8;
+	u32	reserved2 : 8;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_qos_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s {
+	u32	chtype : 4;
+	u32	ee : 4;
+	u32	evchid : 8;
+	u32	intype : 1;
+	u32	reserved0 : 3;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s {
+	u32	int_modt : 16;
+	u32	int_modc : 8;
+	u32	int_mod_cnt : 8;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s {
+	u32 intvec : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s {
+	u32 msi_addr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s {
+	u32 msi_addr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s {
+	u32 rp_update_addr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s {
+	u32 rp_update_addr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_scratch_0_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_scratch_1_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_status_s {
+	u32	enabled : 1;
+	u32	reserved0 : 31;
+};
+union gsi_hwio_def_ee_n_gsi_status_u {
+	struct gsi_hwio_def_ee_n_gsi_status_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_cntxt_type_irq_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union gsi_hwio_def_ee_n_cntxt_type_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union gsi_hwio_def_ee_n_cntxt_type_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s {
+	u32	gsi_ch_bit_map_msk : 23;
+	u32	reserved0 : 9;
+};
+union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s {
+	u32	error_int : 1;
+	u32	gp_int1 : 1;
+	u32	gp_int2 : 1;
+	u32	gp_int3 : 1;
+	u32	reserved0 : 28;
+};
+union gsi_hwio_def_ee_n_cntxt_glob_irq_stts_u {
+	struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s {
+	u32	gsi_break_point : 1;
+	u32	gsi_bus_error : 1;
+	u32	gsi_cmd_fifo_ovrflow : 1;
+	u32	gsi_mcs_stack_ovrflow : 1;
+	u32	reserved0 : 28;
+};
+union gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_u {
+	struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_intset_s {
+	u32	intype : 1;
+	u32	reserved0 : 31;
+};
+union gsi_hwio_def_ee_n_cntxt_intset_u {
+	struct gsi_hwio_def_ee_n_cntxt_intset_s def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s {
+	u32 msi_addr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_msi_base_lsb_u {
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s {
+	u32 msi_addr_msb : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_msi_base_msb_u {
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_error_log_s {
+	u32 error_log : 32;
+};
+union gsi_hwio_def_ee_n_error_log_u {
+	struct gsi_hwio_def_ee_n_error_log_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_error_log_clr_s {
+	u32 error_log_clr : 32;
+};
+union gsi_hwio_def_ee_n_error_log_clr_u {
+	struct gsi_hwio_def_ee_n_error_log_clr_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_scratch_0_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_scratch_0_u {
+	struct gsi_hwio_def_ee_n_cntxt_scratch_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_scratch_1_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_scratch_1_u {
+	struct gsi_hwio_def_ee_n_cntxt_scratch_1_s	def;
+	u32						value;
+};
+#endif
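Each *_s struct in this header mirrors one register's bit layout, and the matching *_u union overlays it on a raw 32-bit value, so dump code can capture a register once and then pick fields symbolically. A sketch of the pattern, using the GSI_CFG definitions above (the dump helper itself is illustrative):

static void gsi_dump_cfg(u32 raw)
{
	union gsi_hwio_def_gsi_cfg_u cfg;

	cfg.value = raw;	/* overlay the raw register read */
	if (cfg.def.gsi_enable)
		pr_info("GSI on: mcs=%u sleep_clk_div=%u\n",
			cfg.def.mcs_enable, cfg.def.sleep_clk_div);
}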
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_access_control.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_access_control.h
new file mode 100644
index 0000000..3fdb2ed
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_access_control.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_ACCESS_CONTROL_H_)
+#define _IPA_ACCESS_CONTROL_H_
+
+#include "ipa_reg_dump.h"
+
+/*
+ * The following is target specific.
+ */
+static struct reg_mem_access_map_t mem_access_map[] = {
+	/*------------------------------------------------------------*/
+	/*      Range               Use when              Use when    */
+	/*  Begin    End           SD_ENABLED           SD_DISABLED   */
+	/*------------------------------------------------------------*/
+	{ 0x04000, 0x05000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x1F000, 0x27000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x05000, 0x0f000, { &io_matrix[AA_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x0f000, 0x10000, { &io_matrix[NN_COMBO], &io_matrix[NN_COMBO] } },
+	{ 0x13000, 0x17000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x17000, 0x1b000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x1b000, 0x1f000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x10000, 0x11000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x11000, 0x12000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x12000, 0x13000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x43000, 0x44000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x44000, 0x45000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x45000, 0x47000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x40000, 0x42000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x42000, 0x43000, { &io_matrix[AA_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x50000, 0x60000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x60000, 0x80000, { &io_matrix[AN_COMBO], &io_matrix[NN_COMBO] } },
+	{ 0x80000, 0x81000, { &io_matrix[NN_COMBO], &io_matrix[NN_COMBO] } },
+	{ 0x81000, 0x83000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0xa0000, 0xc0000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0xc0000, 0xc2000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0xc2000, 0xd0000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+};
+
+#endif /* #if !defined(_IPA_ACCESS_CONTROL_H_) */
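Each row of mem_access_map binds a register-offset range to two io_matrix entries, one consulted when secure debug is enabled and one when it is disabled (per the column comments). A lookup over the table might look like the sketch below; the member names begin, end, and combo are assumptions, since reg_mem_access_map_t is declared in ipa_reg_dump.h:

static struct reg_mem_access_map_t *ipa_find_mem_access(u32 ofst)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(mem_access_map); i++) {
		struct reg_mem_access_map_t *e = &mem_access_map[i];

		/* hypothetical members: begin/end bound the range */
		if (ofst >= e->begin && ofst < e->end)
			return e;
	}
	return NULL;	/* offset not covered by the map */
}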
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio.h
new file mode 100644
index 0000000..0adf6ad
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_GCC_HWIO_H_)
+#define _IPA_GCC_HWIO_H_
+/*
+ *
+ * HWIO register definitions to follow:
+ *
+ */
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio_def.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio_def.h
new file mode 100644
index 0000000..c841bac
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio_def.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_GCC_HWIO_DEF_H_)
+#define _IPA_GCC_HWIO_DEF_H_
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h
new file mode 100644
index 0000000..7392102
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_HW_COMMON_EX_H_)
+#define _IPA_HW_COMMON_EX_H_
+
+/* VLVL defs are available for 854 */
+#define FEATURE_VLVL_DEFS                            true
+
+#define FEATURE_IPA_HW_VERSION_4_5                   true
+
+/* Important Platform Specific Values : IRQ_NUM, IRQ_CNT, BCR */
+#define IPA_HW_BAM_IRQ_NUM                           639
+
+/* Q6 IRQ number for IPA. */
+#define IPA_HW_IRQ_NUM                               640
+
+/* Total number of different interrupts that can be enabled */
+#define IPA_HW_IRQ_CNT_TOTAL                         23
+
+/* IPAv4 spare reg value */
+#define IPA_HW_SPARE_1_REG_VAL                       0xC0000005
+
+/* Whether to allow setting step mode on IPA when we crash */
+#define IPA_CFG_HW_IS_STEP_MODE_ALLOWED              (false)
+
+/* GSI MHI related definitions */
+#define IPA_HW_GSI_MHI_CONSUMER_CHANNEL_NUM          0x0
+#define IPA_HW_GSI_MHI_PRODUCER_CHANNEL_NUM          0x1
+
+#define IPA_HW_GSI_MHI_CONSUMER_EP_NUM               0x1
+#define IPA_HW_GSI_MHI_PRODUCER_EP_NUM               0x11
+
+/* IPA ZIP WA related Macros */
+#define IPA_HW_DCMP_SRC_PIPE                         0x8
+#define IPA_HW_DCMP_DEST_PIPE                        0x4
+#define IPA_HW_ACK_MNGR_MASK                         0x1D
+#define IPA_HW_DCMP_SRC_GRP                          0x5
+
+/* IPA Clock resource name */
+#define IPA_CLK_RESOURCE_NAME                        "/clk/pcnoc"
+
+/* IPA Clock Bus Client name */
+#define IPA_CLK_BUS_CLIENT_NAME                      "IPA_PCNOC_BUS_CLIENT"
+
+/* HPS Sequences */
+#define IPA_HW_PKT_PROCESS_HPS_DMA                      0x0
+#define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_CIPHE         0x1
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_NO_DECIPH_UCP    0x2
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_UCP       0x3
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_NO_DECIPH      0x4
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_DECIPH         0x5
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_NO_DECIPH_NO_UCP 0x6
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_NO_UCP    0x7
+#define IPA_HW_PKT_PROCESS_HPS_DMA_PARSER               0x8
+#define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_PARSER        0x9
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_UCP_TWICE_NO_DECIPH  0xA
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_UCP_TWICE_DECIPH     0xB
+#define IPA_HW_PKT_PROCESS_HPS_3_PKT_PRS_UCP_TWICE_NO_DECIPH  0xC
+#define IPA_HW_PKT_PROCESS_HPS_3_PKT_PRS_UCP_TWICE_DECIPH     0xD
+
+/* DPS Sequences */
+#define IPA_HW_PKT_PROCESS_DPS_DMA                      0x0
+#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECIPH          0x1
+#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECOMP          0x2
+#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_CIPH            0x3
+
+/* Src RSRC GRP config */
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_0           0x0B040803
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_1           0x0C0C0909
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_2           0x0E0E0909
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_3           0x3F003F00
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_4           0x10101616
+
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_0           0x01010101
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_1           0x02020202
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_2           0x04040404
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_3           0x3F003F00
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_4           0x02020606
+
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_0           0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_1           0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_2           0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_3           0x00003F00
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_4           0x00000000
+
+/* Dest RSRC GRP config */
+#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_0           0x05051010
+#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_1           0x3F013F02
+
+#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_0           0x02020202
+#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_1           0x02010201
+
+#define IPA_HW_DST_RSRC_GRP_45_RSRC_TYPE_0           0x00000000
+#define IPA_HW_DST_RSRC_GRP_45_RSRC_TYPE_1           0x00000200
+
+#define IPA_HW_RX_HPS_CLIENTS_MIN_DEPTH_0            0x03030303
+#define IPA_HW_RX_HPS_CLIENTS_MAX_DEPTH_0            0x03030303
+
+#define IPA_HW_RSRP_GRP_0                            0x0
+#define IPA_HW_RSRP_GRP_1                            0x1
+#define IPA_HW_RSRP_GRP_2                            0x2
+#define IPA_HW_RSRP_GRP_3                            0x3
+
+#define IPA_HW_PCIE_SRC_RSRP_GRP                     IPA_HW_RSRP_GRP_0
+#define IPA_HW_PCIE_DEST_RSRP_GRP                    IPA_HW_RSRP_GRP_0
+
+#define IPA_HW_DDR_SRC_RSRP_GRP                      IPA_HW_RSRP_GRP_1
+#define IPA_HW_DDR_DEST_RSRP_GRP                     IPA_HW_RSRP_GRP_1
+
+#define IPA_HW_DMA_SRC_RSRP_GRP                      IPA_HW_RSRP_GRP_2
+#define IPA_HW_DMA_DEST_RSRP_GRP                     IPA_HW_RSRP_GRP_2
+
+#define IPA_HW_SRC_RSRP_TYPE_MAX HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_MAXn
+#define IPA_HW_DST_RSRP_TYPE_MAX HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_MAXn
+
+#define GSI_HW_QSB_LOG_MISC_MAX 0x4
+
+/* Is IPA decompression feature enabled */
+#define IPA_HW_IS_DECOMPRESSION_ENABLED              (1)
+
+/* Whether to allow setting step mode on IPA when we crash */
+#define IPA_HW_IS_STEP_MODE_ALLOWED                  (true)
+
+/* Max number of virtual pipes for UL QBAP provided by HW */
+#define IPA_HW_MAX_VP_NUM                             (32)
+
+/*
+ * HW specific clock vote freq values in KHz
+ * (BIMC/SNOC/PCNOC/IPA/Q6 CPU)
+ */
+enum ipa_hw_clk_freq_e {
+	/* BIMC */
+	IPA_HW_CLK_FREQ_BIMC_PEAK       = 518400,
+	IPA_HW_CLK_FREQ_BIMC_NOM_PLUS   = 404200,
+	IPA_HW_CLK_FREQ_BIMC_NOM        = 404200,
+	IPA_HW_CLK_FREQ_BIMC_SVS        = 100000,
+
+	/* PCNOC */
+	IPA_HW_CLK_FREQ_PCNOC_PEAK      = 133330,
+	IPA_HW_CLK_FREQ_PCNOC_NOM_PLUS  = 100000,
+	IPA_HW_CLK_FREQ_PCNOC_NOM       = 100000,
+	IPA_HW_CLK_FREQ_PCNOC_SVS       = 50000,
+
+	/* SNOC */
+	IPA_HW_CLK_FREQ_SNOC_PEAK       = 200000,
+	IPA_HW_CLK_FREQ_SNOC_NOM_PLUS   = 150000,
+	IPA_HW_CLK_FREQ_SNOC_NOM        = 150000,
+	IPA_HW_CLK_FREQ_SNOC_SVS        = 85000,
+	IPA_HW_CLK_FREQ_SNOC_SVS_2      = 50000,
+
+	/* IPA */
+	IPA_HW_CLK_FREQ_IPA_PEAK        = 600000,
+	IPA_HW_CLK_FREQ_IPA_NOM_PLUS    = 500000,
+	IPA_HW_CLK_FREQ_IPA_NOM         = 500000,
+	IPA_HW_CLK_FREQ_IPA_SVS         = 250000,
+	IPA_HW_CLK_FREQ_IPA_SVS_2       = 150000,
+
+	/* Q6 CPU */
+	IPA_HW_CLK_FREQ_Q6_PEAK         = 729600,
+	IPA_HW_CLK_FREQ_Q6_NOM_PLUS     = 729600,
+	IPA_HW_CLK_FREQ_Q6_NOM          = 729600,
+	IPA_HW_CLK_FREQ_Q6_SVS          = 729600,
+};
+
+enum ipa_hw_qtimer_gran_e {
+	IPA_HW_QTIMER_GRAN_0 = 0, /* granularity 0 is 10us */
+	IPA_HW_QTIMER_GRAN_1 = 1, /* granularity 1 is 100us */
+	IPA_HW_QTIMER_GRAN_MAX,
+};
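Per the comments above, granularity 0 is a 10 us tick and granularity 1 a 100 us tick, so a conversion helper reduces to a one-liner (sketch only):

static inline u32 ipa_qtimer_gran_to_us(enum ipa_hw_qtimer_gran_e gran)
{
	return gran == IPA_HW_QTIMER_GRAN_0 ? 10 : 100;
}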
+
+/* Pipe ID of all the IPA pipes */
+enum ipa_hw_pipe_id_e {
+	IPA_HW_PIPE_ID_0,
+	IPA_HW_PIPE_ID_1,
+	IPA_HW_PIPE_ID_2,
+	IPA_HW_PIPE_ID_3,
+	IPA_HW_PIPE_ID_4,
+	IPA_HW_PIPE_ID_5,
+	IPA_HW_PIPE_ID_6,
+	IPA_HW_PIPE_ID_7,
+	IPA_HW_PIPE_ID_8,
+	IPA_HW_PIPE_ID_9,
+	IPA_HW_PIPE_ID_10,
+	IPA_HW_PIPE_ID_11,
+	IPA_HW_PIPE_ID_12,
+	IPA_HW_PIPE_ID_13,
+	IPA_HW_PIPE_ID_14,
+	IPA_HW_PIPE_ID_15,
+	IPA_HW_PIPE_ID_16,
+	IPA_HW_PIPE_ID_17,
+	IPA_HW_PIPE_ID_18,
+	IPA_HW_PIPE_ID_19,
+	IPA_HW_PIPE_ID_20,
+	IPA_HW_PIPE_ID_21,
+	IPA_HW_PIPE_ID_22,
+	IPA_HW_PIPE_ID_23,
+	IPA_HW_PIPE_ID_24,
+	IPA_HW_PIPE_ID_25,
+	IPA_HW_PIPE_ID_26,
+	IPA_HW_PIPE_ID_27,
+	IPA_HW_PIPE_ID_28,
+	IPA_HW_PIPE_ID_29,
+	IPA_HW_PIPE_ID_30,
+	IPA_HW_PIPE_ID_MAX
+};
+
+/* Pipe IDs of System BAM Endpoints between Q6 & IPA */
+enum ipa_hw_q6_pipe_id_e {
+	/* Pipes used by IPA Q6 driver */
+	IPA_HW_Q6_DL_CONSUMER_PIPE_ID           = IPA_HW_PIPE_ID_5,
+	IPA_HW_Q6_CTL_CONSUMER_PIPE_ID          = IPA_HW_PIPE_ID_6,
+	IPA_HW_Q6_DL_NLO_CONSUMER_PIPE_ID       = IPA_HW_PIPE_ID_8,
+
+	IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE_ID   = IPA_HW_PIPE_ID_20,
+	IPA_HW_Q6_UL_PRODUCER_PIPE_ID           = IPA_HW_PIPE_ID_21,
+	IPA_HW_Q6_DL_PRODUCER_PIPE_ID           = IPA_HW_PIPE_ID_17,
+	IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE_ID  = IPA_HW_PIPE_ID_18,
+	IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE_ID  = IPA_HW_PIPE_ID_19,
+
+	IPA_HW_Q6_UL_ACK_PRODUCER_PIPE_ID  =
+	  IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE_ID,
+	IPA_HW_Q6_UL_DATA_PRODUCER_PIPE_ID =
+	  IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE_ID,
+
+	IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE_ID    = IPA_HW_PIPE_ID_4,
+	IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE_ID    = IPA_HW_PIPE_ID_29,
+
+	/* Test Simulator Pipes */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_ID     = IPA_HW_PIPE_ID_0,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_ID     = IPA_HW_PIPE_ID_1,
+
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_ID     = IPA_HW_PIPE_ID_3,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_ID     = IPA_HW_PIPE_ID_10,
+
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_ID     = IPA_HW_PIPE_ID_7,
+
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_DIAG_CONSUMER_PIPE_ID         = IPA_HW_PIPE_ID_9,
+
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_ID     = IPA_HW_PIPE_ID_23,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_ID     = IPA_HW_PIPE_ID_24,
+
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_ID     = IPA_HW_PIPE_ID_25,
+
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_ID     = IPA_HW_PIPE_ID_26,
+
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_ID     = IPA_HW_PIPE_ID_27,
+	IPA_HW_Q6_PIPE_ID_MAX                   = IPA_HW_PIPE_ID_MAX,
+};
+
+enum ipa_hw_q6_pipe_ch_id_e {
+	/* Channels used by IPA Q6 driver */
+	IPA_HW_Q6_DL_CONSUMER_PIPE_CH_ID                = 0,
+	IPA_HW_Q6_CTL_CONSUMER_PIPE_CH_ID               = 1,
+	IPA_HW_Q6_DL_NLO_CONSUMER_PIPE_CH_ID            = 2,
+	IPA_HW_Q6_UL_ACC_PATH_ACK_PRODUCER_PIPE_CH_ID   = 6,
+	IPA_HW_Q6_UL_PRODUCER_PIPE_CH_ID                = 7,
+	IPA_HW_Q6_DL_PRODUCER_PIPE_CH_ID                = 3,
+	IPA_HW_Q6_UL_ACC_PATH_DATA_PRODUCER_PIPE_CH_ID  = 5,
+	IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE_CH_ID       = 4,
+
+	IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE_CH_ID         = 8,
+	IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE_CH_ID         = 9,
+	/* CH_ID 8 and 9 are Q6 SPARE CONSUMERs */
+
+	/* Test Simulator Channels */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_CH_ID     = 10,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_CH_ID     = 11,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_CH_ID     = 12,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_CH_ID     = 13,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_CH_ID     = 14,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_CH_ID     = 15,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_CH_ID     = 16,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_CH_ID     = 17,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_CH_ID     = 18,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_CH_ID     = 19,
+};
+
+/* System BAM Endpoints between Q6 & IPA */
+enum ipa_hw_q6_pipe_e {
+	/* DL Pipe IPA->Q6 */
+	IPA_HW_Q6_DL_PRODUCER_PIPE = 0,
+	/* UL Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_PRODUCER_PIPE = 1,
+	/* DL Pipe Q6->IPA */
+	IPA_HW_Q6_DL_CONSUMER_PIPE = 2,
+	/* CTL Pipe Q6->IPA */
+	IPA_HW_Q6_CTL_CONSUMER_PIPE = 3,
+	/*  Q6 -> IPA,  DL NLO  */
+	IPA_HW_Q6_DL_NLO_CONSUMER_PIPE = 4,
+	/* DMA ASYNC CONSUMER */
+	IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE = 5,
+	/* DMA ASYNC PRODUCER */
+	IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE = 6,
+	/* UL Acc Path Data Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE = 7,
+	/* UL Acc Path ACK Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE = 8,
+	/* UL Acc Path QBAP status Pipe IPA->Q6 */
+	IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE = 9,
+	/* Diag status pipe IPA->Q6 */
+	/* Used only when FEATURE_IPA_TEST_PER_SIM is ON */
+	/* SIM Pipe IPA->Sim */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0 = 10,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1 = 11,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2 = 12,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0 = 13,
+	/* SIM B2B PROD Pipe  */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1 = 14,
+	/* SIM Pipe IPA->Sim */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2 = 15,
+	/* End FEATURE_IPA_TEST_PER_SIM */
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1 = 16,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1 = 17,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2 = 18,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2 = 19,
+
+	IPA_HW_Q6_PIPE_TOTAL
+};
+
+/* System BAM Endpoints between Q6 & IPA */
+enum ipa_hw_q6_gsi_ev_e { /* In Sdx24 0..11 */
+	/* DL Pipe IPA->Q6 */
+	IPA_HW_Q6_DL_PRODUCER_PIPE_GSI_EV = 0,
+	/* UL Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_PRODUCER_PIPE_GSI_EV = 1,
+	/* DL Pipe Q6->IPA */
+	//IPA_HW_Q6_DL_CONSUMER_PIPE_GSI_EV = 2,
+	/* CTL Pipe Q6->IPA */
+	//IPA_HW_Q6_CTL_CONSUMER_PIPE_GSI_EV = 3,
+	/*  Q6 -> IPA,  LTE DL Optimized path */
+	//IPA_HW_Q6_LTE_DL_CONSUMER_PIPE_GSI_EV = 4,
+	/* LWA DL(Wifi to Q6) */
+	//IPA_HW_Q6_LWA_DL_PRODUCER_PIPE_GSI_EV = 5,
+	/* Diag status pipe IPA->Q6 */
+	//IPA_HW_Q6_DIAG_STATUS_PRODUCER_PIPE_GSI_EV = 6,
+	/* Used only when FEATURE_IPA_TEST_PER_SIM is ON */
+	/* SIM Pipe IPA->Sim */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_GSI_EV = 2,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_GSI_EV = 3,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_GSI_EV = 4,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_1_GSI_EV = 5,
+	IPA_HW_Q6_SIM_2_GSI_EV = 6,
+	IPA_HW_Q6_SIM_3_GSI_EV = 7,
+	IPA_HW_Q6_SIM_4_GSI_EV = 8,
+
+	IPA_HW_Q6_PIPE_GSI_EV_TOTAL
+};
+
+/*
+ * All the IRQs supported by the IPA HW. Use this enum to set the
+ * IRQ_EN register and to read the IRQ_STTS register
+ */
+enum ipa_hw_irq_e {
+	IPA_HW_IRQ_GSI_HWP                     = (1 << 25),
+	IPA_HW_IRQ_GSI_IPA_IF_TLV_RCVD         = (1 << 24),
+	IPA_HW_IRQ_GSI_EE_IRQ                  = (1 << 23),
+	IPA_HW_IRQ_DCMP_ERR                    = (1 << 22),
+	IPA_HW_IRQ_HWP_ERR                     = (1 << 21),
+	IPA_HW_IRQ_RED_MARKER_ABOVE            = (1 << 20),
+	IPA_HW_IRQ_YELLOW_MARKER_ABOVE         = (1 << 19),
+	IPA_HW_IRQ_RED_MARKER_BELOW            = (1 << 18),
+	IPA_HW_IRQ_YELLOW_MARKER_BELOW         = (1 << 17),
+	IPA_HW_IRQ_BAM_IDLE_IRQ                = (1 << 16),
+	IPA_HW_IRQ_TX_HOLB_DROP                = (1 << 15),
+	IPA_HW_IRQ_TX_SUSPEND                  = (1 << 14),
+	IPA_HW_IRQ_PROC_ERR                    = (1 << 13),
+	IPA_HW_IRQ_STEP_MODE                   = (1 << 12),
+	IPA_HW_IRQ_TX_ERR                      = (1 << 11),
+	IPA_HW_IRQ_DEAGGR_ERR                  = (1 << 10),
+	IPA_HW_IRQ_RX_ERR                      = (1 << 9),
+	IPA_HW_IRQ_PROC_TO_HW_ACK_Q_NOT_EMPTY  = (1 << 8),
+	IPA_HW_IRQ_HWP_RX_CMD_Q_NOT_FULL       = (1 << 7),
+	IPA_HW_IRQ_HWP_IN_Q_NOT_EMPTY          = (1 << 6),
+	IPA_HW_IRQ_HWP_IRQ_3                   = (1 << 5),
+	IPA_HW_IRQ_HWP_IRQ_2                   = (1 << 4),
+	IPA_HW_IRQ_HWP_IRQ_1                   = (1 << 3),
+	IPA_HW_IRQ_HWP_IRQ_0                   = (1 << 2),
+	IPA_HW_IRQ_EOT_COAL                    = (1 << 1),
+	IPA_HW_IRQ_BAD_SNOC_ACCESS             = (1 << 0),
+	IPA_HW_IRQ_NONE                        = 0,
+	IPA_HW_IRQ_ALL                         = 0xFFFFFFFF
+};
+
+/*
+ * All the IRQ sources supported by the IPA HW. Use this enum to set
+ * the IRQ_SRCS register
+ */
+enum ipa_hw_irq_srcs_e {
+	IPA_HW_IRQ_SRCS_PIPE_0  = (1 << IPA_HW_PIPE_ID_0),
+	IPA_HW_IRQ_SRCS_PIPE_1  = (1 << IPA_HW_PIPE_ID_1),
+	IPA_HW_IRQ_SRCS_PIPE_2  = (1 << IPA_HW_PIPE_ID_2),
+	IPA_HW_IRQ_SRCS_PIPE_3  = (1 << IPA_HW_PIPE_ID_3),
+	IPA_HW_IRQ_SRCS_PIPE_4  = (1 << IPA_HW_PIPE_ID_4),
+	IPA_HW_IRQ_SRCS_PIPE_5  = (1 << IPA_HW_PIPE_ID_5),
+	IPA_HW_IRQ_SRCS_PIPE_6  = (1 << IPA_HW_PIPE_ID_6),
+	IPA_HW_IRQ_SRCS_PIPE_7  = (1 << IPA_HW_PIPE_ID_7),
+	IPA_HW_IRQ_SRCS_PIPE_8  = (1 << IPA_HW_PIPE_ID_8),
+	IPA_HW_IRQ_SRCS_PIPE_9  = (1 << IPA_HW_PIPE_ID_9),
+	IPA_HW_IRQ_SRCS_PIPE_10 = (1 << IPA_HW_PIPE_ID_10),
+	IPA_HW_IRQ_SRCS_PIPE_11 = (1 << IPA_HW_PIPE_ID_11),
+	IPA_HW_IRQ_SRCS_PIPE_12 = (1 << IPA_HW_PIPE_ID_12),
+	IPA_HW_IRQ_SRCS_PIPE_13 = (1 << IPA_HW_PIPE_ID_13),
+	IPA_HW_IRQ_SRCS_PIPE_14 = (1 << IPA_HW_PIPE_ID_14),
+	IPA_HW_IRQ_SRCS_PIPE_15 = (1 << IPA_HW_PIPE_ID_15),
+	IPA_HW_IRQ_SRCS_PIPE_16 = (1 << IPA_HW_PIPE_ID_16),
+	IPA_HW_IRQ_SRCS_PIPE_17 = (1 << IPA_HW_PIPE_ID_17),
+	IPA_HW_IRQ_SRCS_PIPE_18 = (1 << IPA_HW_PIPE_ID_18),
+	IPA_HW_IRQ_SRCS_PIPE_19 = (1 << IPA_HW_PIPE_ID_19),
+	IPA_HW_IRQ_SRCS_PIPE_20 = (1 << IPA_HW_PIPE_ID_20),
+	IPA_HW_IRQ_SRCS_PIPE_21 = (1 << IPA_HW_PIPE_ID_21),
+	IPA_HW_IRQ_SRCS_PIPE_22 = (1 << IPA_HW_PIPE_ID_22),
+	IPA_HW_IRQ_SRCS_NONE    = 0,
+	IPA_HW_IRQ_SRCS_ALL     = 0xFFFFFFFF,
+};
+
+/*
+ * Total number of channel contexts that need to be saved for APPS
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          19
+
+/*
+ * Total number of channel contexts that need to be saved for UC
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC          2
+
+/*
+ * Total number of event ring contexts that need to be saved for APPS
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7         19
+
+/*
+ * Total number of event ring contexts that need to be saved for UC
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC         1
+
+/*
+ * Total number of endpoints for which ipa_reg_save.pipes[endp_number]
+ * are not saved by default (only if ipa_cfg.gen.full_reg_trace =
+ * true). There are no extra endpoints in Stingray
+ */
+#define IPA_HW_REG_SAVE_NUM_ENDP_EXTRA               0
+
+/*
+ * Total number of endpoints for which ipa_reg_save.pipes[endp_number]
+ * are always saved
+ */
+#define IPA_HW_REG_SAVE_NUM_ACTIVE_PIPES             IPA_HW_PIPE_ID_MAX
+
+/*
+ * SHRAM Bytes per ch
+ */
+#define IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM         12
+
+/*
+ * Total number of rx split cmdqs; see:
+ * ipa_rx_splt_cmdq_n_cmd[IPA_RX_SPLT_CMDQ_MAX]
+ */
+#define IPA_RX_SPLT_CMDQ_MAX 4
+
+/*
+ * Macro to define a particular register cfg entry for all
+ * pipe-indexed registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(reg_name, var_name)	\
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.pipes[0].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.pipes[1].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.pipes[2].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.pipes[3].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+		(u32 *)&ipa_reg_save.ipa.pipes[4].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 5), \
+		(u32 *)&ipa_reg_save.ipa.pipes[5].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 6), \
+		(u32 *)&ipa_reg_save.ipa.pipes[6].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 7), \
+		(u32 *)&ipa_reg_save.ipa.pipes[7].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 8), \
+		(u32 *)&ipa_reg_save.ipa.pipes[8].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 9), \
+		(u32 *)&ipa_reg_save.ipa.pipes[9].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 10), \
+		(u32 *)&ipa_reg_save.ipa.pipes[10].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 11), \
+		(u32 *)&ipa_reg_save.ipa.pipes[11].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 12), \
+		(u32 *)&ipa_reg_save.ipa.pipes[12].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 13), \
+		(u32 *)&ipa_reg_save.ipa.pipes[13].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 14), \
+		(u32 *)&ipa_reg_save.ipa.pipes[14].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 15), \
+		(u32 *)&ipa_reg_save.ipa.pipes[15].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 16), \
+		(u32 *)&ipa_reg_save.ipa.pipes[16].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 17), \
+		(u32 *)&ipa_reg_save.ipa.pipes[17].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 18), \
+		(u32 *)&ipa_reg_save.ipa.pipes[18].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 19), \
+		(u32 *)&ipa_reg_save.ipa.pipes[19].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 20), \
+		(u32 *)&ipa_reg_save.ipa.pipes[20].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 21), \
+		(u32 *)&ipa_reg_save.ipa.pipes[21].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 22), \
+		(u32 *)&ipa_reg_save.ipa.pipes[22].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 23), \
+		(u32 *)&ipa_reg_save.ipa.pipes[23].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 24), \
+		(u32 *)&ipa_reg_save.ipa.pipes[24].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 25), \
+		(u32 *)&ipa_reg_save.ipa.pipes[25].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 26), \
+		(u32 *)&ipa_reg_save.ipa.pipes[26].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 27), \
+		(u32 *)&ipa_reg_save.ipa.pipes[27].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 28), \
+		(u32 *)&ipa_reg_save.ipa.pipes[28].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 29), \
+		(u32 *)&ipa_reg_save.ipa.pipes[29].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 30), \
+		(u32 *)&ipa_reg_save.ipa.pipes[30].endp.var_name }
+
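The macro expands to one { register offset, save-area pointer } initializer per pipe 0..30, so a register-save table needs a single line per pipe-indexed register. Hypothetical usage, with an assumed entry type and an illustrative register/field pair:

/* Entry type and register/field names are illustrative only */
static const struct reg_save_entry_s {
	u32 ofst;	/* register offset to read */
	u32 *save_ptr;	/* where the dumped value lands */
} ipa_pipe_regs_to_save[] = {
	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CTRL_n, init_ctrl),
};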
+/*
+ * Macro to define a particular register cfg entry for the remaining
+ * pipe-indexed registers. In the Stingray case there are no extra
+ * endpoints, so it is intentionally empty
+ */
+#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(REG_NAME, VAR_NAME)  \
+	{ 0, 0 }
+
+/*
+ * Macro to set the active flag for all active pipe-indexed registers.
+ * In the Stingray case there are no extra endpoints, so it is
+ * intentionally empty
+ */
+#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA_ACTIVE()  \
+	do { \
+	} while (0)
+
+#endif /* #if !defined(_IPA_HW_COMMON_EX_H_) */
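Since every ipa_hw_irq_e value is a single bit, an IRQ_EN programming value is just a bitwise OR of the interrupts of interest; the selection below is illustrative:

static inline u32 ipa_example_irq_en_mask(void)
{
	return IPA_HW_IRQ_TX_SUSPEND |
	       IPA_HW_IRQ_TX_HOLB_DROP |
	       IPA_HW_IRQ_BAD_SNOC_ACCESS;
}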
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h
new file mode 100644
index 0000000..56b0713
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h
@@ -0,0 +1,10813 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_HWIO_H_)
+#define _IPA_HWIO_H_
+/*
+ *
+ * HWIO register definitions to follow:
+ *
+ */
+#define IPA_GSI_TOP_GSI_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00004000)
+#define IPA_GSI_TOP_GSI_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + \
+				       0x00004000)
+#define IPA_GSI_TOP_GSI_REG_BASE_OFFS 0x00004000
+#define HWIO_IPA_GSI_TOP_GSI_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+				       0x00000000)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+				       0x00000000)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+				       0x00000000)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_RMSK 0xf3f
+#define HWIO_IPA_GSI_TOP_GSI_CFG_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_CFG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_CFG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_CFG_IN)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00
+#define HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_GSI_CFG_UC_IS_MCS_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_GSI_CFG_UC_IS_MCS_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_GSI_CFG_MCS_ENABLE_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_GSI_CFG_MCS_ENABLE_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_ENABLE_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_ENABLE_SHFT 0x0
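The GSI_CFG accessors follow the standard HWIO pattern: _INM(m) does a masked read and each field's _BMSK/_SHFT pair isolates its bits, so a field read is mask-then-shift. A sketch built only from the macros above (in_dword_masked is supplied by the surrounding HWIO support code):

static inline u32 gsi_cfg_read_sleep_clk_div(void)
{
	u32 v = HWIO_IPA_GSI_TOP_GSI_CFG_INM(
			HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_BMSK);

	return v >> HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_SHFT;
}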
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_MCS_CODE_VER_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000008)
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_MCS_CODE_VER_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000008)
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_MCS_CODE_VER_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000008)
+#define HWIO_IPA_GSI_TOP_GSI_ZEROS_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					 0x00000010)
+#define HWIO_IPA_GSI_TOP_GSI_ZEROS_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					 0x00000010)
+#define HWIO_IPA_GSI_TOP_GSI_ZEROS_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					 0x00000010)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000018)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000018)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000018)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000001c)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000001c)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_PENDING_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000020)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_PENDING_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000020)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_PENDING_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000020)
+#define HWIO_IPA_GSI_TOP_GSI_MOQA_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					    0x00000030)
+#define HWIO_IPA_GSI_TOP_GSI_MOQA_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS \
+					    + 0x00000030)
+#define HWIO_IPA_GSI_TOP_GSI_MOQA_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS \
+					    + 0x00000030)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					   0x00000038)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					   0x00000038)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					   0x00000038)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_RMSK 0xff03
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_IN)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MAX_BURST_SIZE_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MAX_BURST_SIZE_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_CGC_CTRL_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					    0x00000060)
+#define HWIO_IPA_GSI_TOP_GSI_CGC_CTRL_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS \
+					    + 0x00000060)
+#define HWIO_IPA_GSI_TOP_GSI_CGC_CTRL_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS \
+					    + 0x00000060)
+#define HWIO_IPA_GSI_TOP_GSI_MSI_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000080)
+#define HWIO_IPA_GSI_TOP_GSI_MSI_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000080)
+#define HWIO_IPA_GSI_TOP_GSI_MSI_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000080)
+#define HWIO_IPA_GSI_TOP_GSI_EVENT_CACHEATTR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000084)
+#define HWIO_IPA_GSI_TOP_GSI_EVENT_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000084)
+#define HWIO_IPA_GSI_TOP_GSI_EVENT_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000084)
+#define HWIO_IPA_GSI_TOP_GSI_DATA_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000088)
+#define HWIO_IPA_GSI_TOP_GSI_DATA_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000088)
+#define HWIO_IPA_GSI_TOP_GSI_DATA_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000088)
+#define HWIO_IPA_GSI_TOP_GSI_TRE_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000090)
+#define HWIO_IPA_GSI_TOP_GSI_TRE_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000090)
+#define HWIO_IPA_GSI_TOP_GSI_TRE_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000a0)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000a0)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000a0)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000a4)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000a4)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000a4)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_LSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000a8)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_LSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000a8)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_LSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000a8)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_MSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000ac)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_MSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000ac)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_MSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000ac)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000b0)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000b0)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000b0)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000b4)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000b4)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000b4)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000b8)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000b8)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000b8)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000bc)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000bc)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000bc)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000c0)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000c0)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000c0)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000c4)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000c4)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000c4)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_LSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000c8)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_LSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000c8)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_LSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000c8)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_MSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000cc)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_MSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000cc)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_MSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000cc)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000d0)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000d0)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000d0)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000d4)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000d4)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000d4)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000d8)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000d8)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000d8)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000dc)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000dc)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000dc)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000e0)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000e0)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000e0)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000e4)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000e4)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000e4)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000e8)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000e8)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000e8)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000ec)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000ec)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000ec)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000f0)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000f0)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000f0)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000f4)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000f4)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000f4)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_REE_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000100)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_REE_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000100)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_REE_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000100)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_EVT_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000104)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_EVT_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000104)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_EVT_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000104)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_INT_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000108)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_INT_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000108)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_INT_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000108)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_CSR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x0000010c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_CSR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000010c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_CSR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000010c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TLV_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000110)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TLV_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000110)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TLV_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000110)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TIMER_ENG_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000114)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TIMER_ENG_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000114)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TIMER_ENG_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000114)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_DB_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000118)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_DB_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_DB_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_RD_WR_ENG_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000011c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_RD_WR_ENG_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_RD_WR_ENG_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_UCONTROLLER_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000120)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_UCONTROLLER_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000120)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_UCONTROLLER_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000120)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_SDMA_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000124)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_SDMA_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000124)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_SDMA_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000124)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					    0x0000003c)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS \
+					    + 0x0000003c)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS \
+					    + 0x0000003c)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000094)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000094)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000094)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_LSB_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000140 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_LSB_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000140 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_LSB_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000140 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_MSB_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000144 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_MSB_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000144 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_MSB_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000144 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_EE_QOS_n_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000300 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_EE_QOS_n_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000300 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_EE_QOS_n_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000300 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000200)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000200)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000200)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OUTM(m, \
+							       v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
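+/*
+ * Editor's illustrative sketch (assumption, not part of the generated
+ * header): the *_IN/*_INM/*_OUT/*_OUTM accessors above implement a
+ * masked read-modify-write through in_dword_masked() and
+ * out_dword_masked_ns().  A hypothetical helper that programs the
+ * SHRAM_PTR field would look like this (u32 assumes <linux/types.h>):
+ */
+#if 0	/* usage sketch only -- not compiled */
+static inline void gsi_set_ch_cntxt_base(u32 ptr)
+{
+	/* Write only the SHRAM_PTR field, preserving the other bits. */
+	HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OUTM(
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK,
+		ptr <<
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT);
+}
+#endif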
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000204)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000204)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000204)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OUTM(m, \
+							       v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000208)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000208)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000208)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OUTM(m, \
+								 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000020c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000020c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000020c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OUTM(m, \
+								 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000240)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000240)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000240)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OUTM(m, \
+								v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000244)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000244)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000244)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OUTM(m, \
+								 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000248)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000248)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000248)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000024c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000024c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000024c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000250)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000250)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000250)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000254)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000254)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000254)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000258)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000258)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000258)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000025c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000025c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000025c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000260)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000260)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000260)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000264)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000264)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000264)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000400)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000400)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000400)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_OUTM(m, \
+						  v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000404)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000404)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000404)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_OUTM(m, \
+							  v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TLV_CH_NOT_FULL_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000408)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TLV_CH_NOT_FULL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000408)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000408)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000418)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000418)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000418)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x0000041c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000041c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000041c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000420)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000420)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000420)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_OUTM(m, \
+						  v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000424)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000424)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000424)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_OUTM(m, \
+						       v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000428)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000428)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000428)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_OUTM(m, \
+						    v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000042c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000042c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000042c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_OUTM(m, \
+							  v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000430)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000430)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000430)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000434)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000434)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000434)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000438)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000438)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000438)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000043c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000043c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000043c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_OUTM(m, \
+							 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000440)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000440)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000440)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_OUTM(m, \
+							  v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000444)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000444)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000444)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_OUTM(m, \
+							 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000448)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000448)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000448)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_INM(m) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_OUTM(m,	\
+						     v)	\
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+			    m, \
+			    v, \
+			    HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000044c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000044c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000044c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUTM(m, \
+							   v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_SDMA_INT_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000450 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_SDMA_INT_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000450 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_SDMA_INT_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000450 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x0001b000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001b000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001b000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_MAXn 8191
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INMI(n, mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTI(n, val) \
+	out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTMI(n, mask, \
+					      val) out_dword_masked_ns(	\
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_2_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_1_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_1_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0
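+/*
+ * Editor's illustrative sketch (assumption): the _INI(n) form of the
+ * indexed accessors reads element n, and HWIO_..._MAXn gives the
+ * highest legal index.  A hypothetical bounds-checked read of one
+ * instruction-RAM word:
+ */
+#if 0	/* usage sketch only -- not compiled */
+static inline u32 gsi_inst_ram_read(u32 n)
+{
+	/* Reject indices beyond the documented array size. */
+	if (n > HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_MAXn)
+		return 0;
+	return HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n);
+}
+#endif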
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE + \
+					      0x00002000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00002000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00002000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_MAXn 1343
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_INI(n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_SHRAM_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_SHRAM_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, \
+							 k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00003800 + 0x80 * (n) + 0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHYS(n, \
+							 k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00003800 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(n, \
+							 k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00003800 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK 0x3f
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXn 2
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXk 22
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, \
+							 k) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR( \
+				n, \
+				k), \
+			HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INMI2(n, k,	\
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTI2(n, k,	\
+							  val) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, \
+								 k), \
+		val)
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTMI2(n, k, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k),	\
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0
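+/*
+ * Editor's illustrative sketch (assumption): the doubly-indexed
+ * _INI2(n, k) accessor reads the VP-table entry for virtual channel k
+ * of execution environment n; VALID and PHY_CH are decoded with the
+ * BMSK constants above.  Hypothetical lookup (-EINVAL/-ENODEV assume
+ * <linux/errno.h>):
+ */
+#if 0	/* usage sketch only -- not compiled */
+static inline int gsi_map_phy_ch(u32 n, u32 k)
+{
+	u32 val;
+
+	if (n > HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXn ||
+	    k > HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXk)
+		return -EINVAL;
+	val = HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k);
+	/* Only valid entries carry a physical channel number. */
+	if (!(val & HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK))
+		return -ENODEV;
+	return val & HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK;
+}
+#endif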
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+						0x00001000)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001000)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001000)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_RMSK 0xf00ff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_IN)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_BMSK 0xf0000
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK 0xff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_ZEROS_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_0_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_1_FVAL 0x2
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_2_FVAL 0x3
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_3_FVAL 0x4
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_4_FVAL 0x5
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_DB_ENG_FVAL 0x9
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_0_FVAL 0xb
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_1_FVAL 0xc
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_2_FVAL 0xd
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_3_FVAL 0xe
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_4_FVAL 0xf
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_5_FVAL 0x10
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_6_FVAL 0x11
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_7_FVAL 0x12
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_0_FVAL 0x13
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_1_FVAL 0x14
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_2_FVAL 0x15
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_3_FVAL 0x16
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_4_FVAL 0x17
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_5_FVAL 0x18
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_0_FVAL 0x1b
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_1_FVAL 0x1c
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_2_FVAL 0x1d
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_0_FVAL 0x1f
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_1_FVAL 0x20
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_2_FVAL 0x21
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_3_FVAL 0x22
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_4_FVAL 0x23
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_0_FVAL 0x27
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_1_FVAL 0x28
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_2_FVAL 0x29
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_3_FVAL 0x2a
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_0_FVAL 0x2b
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_1_FVAL 0x2c
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_2_FVAL 0x2d
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_3_FVAL 0x2e
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_0_FVAL \
+	0x33
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_1_FVAL \
+	0x34
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_2_FVAL \
+	0x35
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_3_FVAL \
+	0x36
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_FVAL 0x3a
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_0_FVAL 0x3c
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_1_FVAL 0x3d
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_1_FVAL 0x3e
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_2_FVAL 0x3f
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_5_FVAL 0x40
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_5_FVAL 0x41
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_3_FVAL 0x42
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TLV_0_FVAL 0x43
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_8_FVAL 0x44
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+						0x00001008)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001008)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001008)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00001010)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001010)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001010)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RMSK 0x1fff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_SDMA_BUSY_BMSK 0x1000
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_SDMA_BUSY_SHFT 0xc
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IC_BUSY_BMSK 0x800
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IC_BUSY_SHFT 0xb
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_UC_BUSY_BMSK 0x400
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_UC_BUSY_SHFT 0xa
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DBG_CNT_BUSY_BMSK 0x200
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DBG_CNT_BUSY_SHFT 0x9
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DB_ENG_BUSY_BMSK 0x100
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DB_ENG_BUSY_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_BMSK 0x10
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_TIMER_BUSY_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_TIMER_BUSY_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_MCS_BUSY_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_MCS_BUSY_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_BUSY_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_BUSY_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_CSR_BUSY_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_CSR_BUSY_SHFT 0x0
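+/*
+ * Editor's illustrative sketch (assumption): each *_BUSY bit above can
+ * be polled through the _INM accessor, which returns the register
+ * value masked to the requested field.  Hypothetical wait for the MCS
+ * engine to go idle (cpu_relax() assumes <linux/processor.h>):
+ */
+#if 0	/* usage sketch only -- not compiled */
+static inline void gsi_wait_mcs_idle(void)
+{
+	while (HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_INM(
+		       HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_MCS_BUSY_BMSK))
+		cpu_relax();
+}
+#endif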
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001014)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001014)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001014)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_CHID_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_CHID_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001018)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001018)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001018)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_CHID_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_CHID_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000101c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000101c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000101c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_CHID_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_CHID_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTER_CFGn_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001200 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTER_CFGn_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001200 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTER_CFGn_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001200 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001240 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001240 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001240 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_MAXn 7
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_COUNTER_VALUE_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_COUNTER_VALUE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001040)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001040)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001040)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_OUT(v) out_dword(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_OUTM(m, \
+						   v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IN)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00001044)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001044)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001044)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_RMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_IN)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_MCS_STALL_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_MCS_STALL_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001048)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001048)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001048)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_INM(m) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_SEL_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001050)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_SEL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001050)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_SEL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001050)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_CLR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001058)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_CLR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001058)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_CLR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001058)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001060)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001060)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001060)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_RMSK 0x1ffff01
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_BMSK \
+	0x1000000
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_BMSK \
+	0xff0000
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_0_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001064)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_0_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001064)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_0_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001064)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_1_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001068)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_1_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001068)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_1_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001068)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_2_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000106c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_2_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000106c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_2_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000106c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001070 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001070 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001070 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_WRITE_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001080 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_WRITE_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001080 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_WRITE_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001080 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001100 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001100 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001100 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_MAXn 31
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_INI(n) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_INMI(n,	\
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RF_REG_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RF_REG_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_CH_k_VP_TABLE_ADDR(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001400 + 0x80 * (n) + 0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHYS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001400 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001400 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001600 + 0x80 * (n) + 0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHYS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001600 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001600 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK 0x3f
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXn 3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXk 19
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INI2(n, \
+							   k) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INMI2(n, k, \
+							    mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_BMSK 0x1f
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SDMA_TRANS_DB_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001800 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SDMA_TRANS_DB_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001800 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SDMA_TRANS_DB_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001800 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					      0x00000500)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000500)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000500)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_MSK_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000504)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_MSK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000504)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_MSK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000504)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_CLR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000508)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_CLR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000508)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_CLR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000508)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ARGS_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x0000050c + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ARGS_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000050c + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ARGS_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000050c + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ROUTINE_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					       0x00000524)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ROUTINE_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000524)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ROUTINE_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000524)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_GO_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					  0x00000528)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_GO_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					  0x00000528)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_GO_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					  0x00000528)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_STTS_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000052c)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_STTS_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000052c)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_STTS_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000052c)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000530)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000530)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000530)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000534)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000534)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000534)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_VLD_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000538)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_VLD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000538)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_VLD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000538)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_PC_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x0000053c)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_PC_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000053c)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_PC_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000053c)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_ARGS_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000540 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_ARGS_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000540 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_ARGS_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000540 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_VLD_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000558)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_VLD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000558)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_VLD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000558)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ROUTINE_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000055c)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ROUTINE_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000055c)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ROUTINE_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000055c)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ARGS_n_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000560 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ARGS_n_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000560 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ARGS_n_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000560 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f000 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_RMSK 0xfff7ffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK \
+	0xff000000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOPPED_FVAL 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOP_IN_PROC_FVAL \
+	0x4
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ERROR_FVAL 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK	\
+	0x2000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT	\
+	0xd
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_INBOUND_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_OUTBOUND_FVAL	\
+	0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MHI_FVAL	\
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XHCI_FVAL \
+	0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_GPI_FVAL	\
+	0x2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XDCI_FVAL \
+	0x3
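+/*
+ * Illustrative sketch only (not part of the generated header): the
+ * CNTXT_0 accessors above compose in the usual HWIO pattern, assuming
+ * in_dword_masked() is the standard HWIO read helper defined elsewhere
+ * in this file. For example, to decode the channel state of EE n,
+ * channel k:
+ *
+ *	u32 v = HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k);
+ *	u32 chstate =
+ *		(v & HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ *		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+ *
+ * chstate then compares against the *_CHSTATE_*_FVAL encodings above
+ * (e.g. HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL).
+ */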
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f004 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f008 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f00c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f00c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f00c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f010 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f014 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f018 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f01c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f01c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f01c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, \
+							      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f054 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_PHYS(n, \
+							      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f054 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(n, \
+							      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f054 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, \
+							      k) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INMI2(n, k, \
+							       mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTI2(n, k, \
+							       val) \
+	out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTMI2(n, \
+								k, \
+								mask, \
+								val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+							       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f058 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_PHYS(n, \
+							       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f058 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(n, \
+							       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f058 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, \
+							       k) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INMI2(n, k, \
+								mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTI2(n, k, \
+								val) \
+	out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTMI2(n, \
+								 k, \
+								 mask, \
+								 val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, \
+						k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f05c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PHYS(n, \
+						k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f05c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OFFS(n, \
+						k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f05c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_RMSK 0xff3f0f
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_INMI2(n, k, \
+						 mask) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, \
+							k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OUTMI2(n, k, mask, \
+						  val) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, \
+							k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK \
+	0xff0000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_ESCAPE_BUF_ONLY_FVAL \
+	0x1
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SMART_PRE_FETCH_FVAL \
+	0x2
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_FREE_PRE_FETCH_FVAL \
+	0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_ONE_PREFETCH_SEG_FVAL \
+	0x0
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_TWO_PREFETCH_SEG_FVAL \
+	0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
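+/*
+ * Illustrative sketch only (not part of the generated header): a
+ * read-modify-write of a single QOS field goes through OUTMI2, which,
+ * per its definition above, merges the new value with the current
+ * INI2 readback via out_dword_masked_ns(). For example, to set the
+ * WRR weight of EE n, channel k to w:
+ *
+ *	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OUTMI2(n, k,
+ *		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK,
+ *		w << HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT);
+ */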
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f060 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f060 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f060 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f064 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f064 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f064 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f068 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f068 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f068 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f06c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f06c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f06c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_ADDR(n,	\
+							     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f070 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_PHYS(n,	\
+							     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f070 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_OFFS(n,	\
+							     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f070 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010000 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_RMSK 0xfff1ffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_MSI_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_IRQ_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_MHI_EV_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XHCI_EV_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_GPI_EV_FVAL 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XDCI_FVAL 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010004 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010008 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001000c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001000c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001000c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010010 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010014 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010018 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001001c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001001c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001001c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010020 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010020 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010020 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010024 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010024 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010024 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010028 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010028 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010028 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001002c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001002c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001002c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010030 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010030 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010030 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010034 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010034 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010034 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010048 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_PHYS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010048 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_OFFS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010048 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_INMI2(n, k, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_OUTMI2(n, k, mask, \
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001004c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_PHYS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001004c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_OFFS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001004c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_INMI2(n, k, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_OUTMI2(n, k, mask, \
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_0_ADDR(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011000 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_0_PHYS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011000 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_0_OFFS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011000 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_1_ADDR(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011004 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_1_PHYS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011004 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_1_OFFS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011004 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_0_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011100 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_0_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011100 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_0_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011100 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_1_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011104 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_1_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011104 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_1_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011104 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00012000 + 0x4000 * \
+						  (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012000 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012000 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_RMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ENABLED_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ENABLED_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_CMD_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00012008 + 0x4000 * \
+						  (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012008 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012008 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_CMD_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00012010 + 0x4000 * \
+						 (n))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012010 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012010 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_EE_GENERIC_CMD_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012018 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_EE_GENERIC_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012018 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012018 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_0_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012038 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_0_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012038 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_0_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012038 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_1_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001203c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_1_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001203c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_1_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001203c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_2_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012040 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_2_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012040 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_2_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_SW_VERSION_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012044 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_SW_VERSION_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012044 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_SW_VERSION_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012044 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_MCS_CODE_VER_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012048 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_MCS_CODE_VER_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012048 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_MCS_CODE_VER_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012048 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_3_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001204c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_3_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001204c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_3_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001204c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012080 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012080 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012080 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_RMSK 0x7f
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0
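+/*
+ * Illustrative sketch: the IRQ type register is decoded by masking its
+ * value against the *_BMSK constants above. Assuming a hypothetical
+ * per-EE handler, an IEOB check would look roughly like:
+ *
+ *	u32 type = HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INI(ee);
+ *	if (type & HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK)
+ *		gsi_handle_ieob(ee);
+ */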
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012088 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012088 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012088 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK 0x7f
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_OUTMI(n, mask,	\
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK \
+	0x20
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK \
+	0x10
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012090 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012090 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012090 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012094 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012094 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012094 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_INMI(n, \
+						       mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012098 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012098 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012098 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK 0x7fffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR( \
+			n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INMI(n, \
+							    mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTI(n, \
+							    val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTMI(n, mask, \
+							     val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n))
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
+	0x7fffff
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001209c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001209c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001209c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK 0xfffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR( \
+				n), \
+			HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INMI(n, \
+							   mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTI(n, \
+							   val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTMI(n, mask, \
+							    val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n))
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
+	0xfffff
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120a0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120a0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120a0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OUTI(n, \
+							    val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120a4 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120a4 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120a4 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OUTI(n, \
+							   val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120b0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120b0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120b0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120b8 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120b8 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120b8 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK 0xfffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR( \
+				n), \
+			HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n))
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
+	0xfffff
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120c0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120c0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120c0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK	\
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT	\
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012100 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012100 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012100 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_INMI(n, \
+						       mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_EN_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012108 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_EN_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012108 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012108 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012110 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012110 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012110 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012118 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012118 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012118 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_RMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ADDR( \
+				n), \
+			mask)
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK \
+	0x8
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT \
+	0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK \
+	0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT \
+	0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_EN_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012120 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_EN_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012120 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012120 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012128 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012128 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012128 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012180 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012180 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012180 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_RMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OUTMI(n, mask, \
+						 val) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_MSI_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_IRQ_FVAL 0x1
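+/*
+ * Illustrative sketch: the *_FVAL constants enumerate the legal field
+ * values, so selecting IRQ (rather than MSI) delivery for EE "ee" could
+ * use the masked-write accessor above:
+ *
+ *	HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OUTMI(ee,
+ *		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_BMSK,
+ *		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_IRQ_FVAL <<
+ *		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_SHFT);
+ */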
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012188 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012188 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012188 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_OUTMI(n, mask,	\
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001218c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001218c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001218c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_OUTMI(n, mask,	\
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INT_VEC_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012190 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INT_VEC_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012190 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INT_VEC_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012190 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00012200 + 0x4000 * \
+						 (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012200 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012200 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_INI(n) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_OUTI(n, val) out_dword(	\
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_OUTMI(n, mask, \
+					      val) out_dword_masked_ns(	\
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ERROR_LOG_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ERROR_LOG_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012210 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012210 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012210 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_SHFT 0x0
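+/*
+ * Illustrative sketch of the log/clear pairing: ERROR_LOG has read and
+ * write accessors (ATTR 0x3) while ERROR_LOG_CLR is generated with only
+ * an OUTI accessor (ATTR 0x2, write-only), suggesting a read-then-clear
+ * sequence along these lines:
+ *
+ *	u32 err = HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_INI(ee);
+ *	if (err)
+ *		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_OUTI(ee, err);
+ */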
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012400 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012400 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012400 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_INMI(n, \
+						   mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_OUTMI(n, mask, \
+						    val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(	\
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012404 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012404 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012404 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_INMI(n, \
+						   mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_OUTMI(n, mask, \
+						    val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(	\
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_MCS_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					   0x0000b000)
+#define HWIO_IPA_GSI_TOP_GSI_MCS_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					   0x0000b000)
+#define HWIO_IPA_GSI_TOP_GSI_MCS_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					   0x0000b000)
+#define HWIO_IPA_GSI_TOP_GSI_TZ_FW_AUTH_LOCK_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000b008)
+#define HWIO_IPA_GSI_TOP_GSI_TZ_FW_AUTH_LOCK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000b008)
+#define HWIO_IPA_GSI_TOP_GSI_TZ_FW_AUTH_LOCK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000b008)
+#define HWIO_IPA_GSI_TOP_GSI_MSA_FW_AUTH_LOCK_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000b010)
+#define HWIO_IPA_GSI_TOP_GSI_MSA_FW_AUTH_LOCK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000b010)
+#define HWIO_IPA_GSI_TOP_GSI_MSA_FW_AUTH_LOCK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000b010)
+#define HWIO_IPA_GSI_TOP_GSI_SP_FW_AUTH_LOCK_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000b018)
+#define HWIO_IPA_GSI_TOP_GSI_SP_FW_AUTH_LOCK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000b018)
+#define HWIO_IPA_GSI_TOP_GSI_SP_FW_AUTH_LOCK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000b018)
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_ORIGINATOR_EE_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c000 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_ORIGINATOR_EE_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c000 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_ORIGINATOR_EE_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c000 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_GSI_CH_CMD_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c008 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_GSI_CH_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c008 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_GSI_CH_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c008 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_EV_CH_CMD_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c010 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_EV_CH_CMD_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c010 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_EV_CH_CMD_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c010 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c018 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c018 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c018 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c01c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c01c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c01c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c020 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c020 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c020 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_MSK_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c024 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_MSK_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c024 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c024 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c028 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c028 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c028 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_CLR_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c02c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_CLR_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c02c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c02c + 0x1000 * (n))
+#define IPA_CFG_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00040000)
+#define IPA_CFG_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00040000)
+#define IPA_CFG_REG_BASE_OFFS 0x00040000
+#define HWIO_IPA_COMP_HW_VERSION_ADDR (IPA_CFG_REG_BASE + 0x00000030)
+#define HWIO_IPA_COMP_HW_VERSION_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000030)
+#define HWIO_IPA_COMP_HW_VERSION_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000030)
+#define HWIO_IPA_COMP_HW_VERSION_RMSK 0xffffffff
+#define HWIO_IPA_COMP_HW_VERSION_ATTR 0x1
+#define HWIO_IPA_COMP_HW_VERSION_IN in_dword_masked( \
+		HWIO_IPA_COMP_HW_VERSION_ADDR, \
+		HWIO_IPA_COMP_HW_VERSION_RMSK)
+#define HWIO_IPA_COMP_HW_VERSION_INM(m) in_dword_masked( \
+		HWIO_IPA_COMP_HW_VERSION_ADDR, \
+		m)
+#define HWIO_IPA_COMP_HW_VERSION_MAJOR_BMSK 0xf0000000
+#define HWIO_IPA_COMP_HW_VERSION_MAJOR_SHFT 0x1c
+#define HWIO_IPA_COMP_HW_VERSION_MINOR_BMSK 0xfff0000
+#define HWIO_IPA_COMP_HW_VERSION_MINOR_SHFT 0x10
+#define HWIO_IPA_COMP_HW_VERSION_STEP_BMSK 0xffff
+#define HWIO_IPA_COMP_HW_VERSION_STEP_SHFT 0x0
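+/*
+ * Illustrative sketch: the no-argument HWIO_IPA_COMP_HW_VERSION_IN
+ * accessor returns the whole register, from which the version fields are
+ * extracted with the BMSK/SHFT pairs above:
+ *
+ *	u32 ver = HWIO_IPA_COMP_HW_VERSION_IN;
+ *	u32 major = (ver & HWIO_IPA_COMP_HW_VERSION_MAJOR_BMSK) >>
+ *		HWIO_IPA_COMP_HW_VERSION_MAJOR_SHFT;
+ *	u32 step = (ver & HWIO_IPA_COMP_HW_VERSION_STEP_BMSK) >>
+ *		HWIO_IPA_COMP_HW_VERSION_STEP_SHFT;
+ */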
+#define HWIO_IPA_VERSION_ADDR (IPA_CFG_REG_BASE + 0x00000034)
+#define HWIO_IPA_VERSION_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000034)
+#define HWIO_IPA_VERSION_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000034)
+#define HWIO_IPA_ENABLED_PIPES_ADDR (IPA_CFG_REG_BASE + 0x00000038)
+#define HWIO_IPA_ENABLED_PIPES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000038)
+#define HWIO_IPA_ENABLED_PIPES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000038)
+#define HWIO_IPA_COMP_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000003c)
+#define HWIO_IPA_COMP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_IPA_COMP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000003c)
+#define HWIO_IPA_COMP_CFG_RMSK 0x3fffee
+#define HWIO_IPA_COMP_CFG_ATTR 0x3
+#define HWIO_IPA_COMP_CFG_IN in_dword_masked(HWIO_IPA_COMP_CFG_ADDR, \
+					     HWIO_IPA_COMP_CFG_RMSK)
+#define HWIO_IPA_COMP_CFG_INM(m) in_dword_masked(HWIO_IPA_COMP_CFG_ADDR, m)
+#define HWIO_IPA_COMP_CFG_OUT(v) out_dword(HWIO_IPA_COMP_CFG_ADDR, v)
+#define HWIO_IPA_COMP_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_COMP_CFG_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_COMP_CFG_IN)
+#define HWIO_IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK 0x200000
+#define HWIO_IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT 0x15
+#define HWIO_IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK 0x1e0000
+#define HWIO_IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT 0x11
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK 0x10000
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT 0x10
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK 0x8000
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT 0xf
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK \
+	0x4000
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT 0xe
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK \
+	0x2000
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT \
+	0xd
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK 0x1000
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT 0xc
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK 0x800
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT 0xb
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK 0x400
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT 0xa
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK 0x200
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT 0x9
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK 0x100
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT 0x8
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK 0x80
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT 0x7
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK 0x40
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT 0x6
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK 0x20
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT 0x5
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK 0x8
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT 0x3
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK 0x4
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT 0x2
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK 0x2
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT 0x1
+#define HWIO_IPA_CLKON_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000044)
+#define HWIO_IPA_CLKON_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000044)
+#define HWIO_IPA_CLKON_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000044)
+#define HWIO_IPA_ROUTE_ADDR (IPA_CFG_REG_BASE + 0x00000048)
+#define HWIO_IPA_ROUTE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000048)
+#define HWIO_IPA_ROUTE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000048)
+#define HWIO_IPA_ROUTE_RMSK 0x13fffff
+#define HWIO_IPA_ROUTE_ATTR 0x3
+#define HWIO_IPA_ROUTE_IN in_dword_masked(HWIO_IPA_ROUTE_ADDR, \
+					  HWIO_IPA_ROUTE_RMSK)
+#define HWIO_IPA_ROUTE_INM(m) in_dword_masked(HWIO_IPA_ROUTE_ADDR, m)
+#define HWIO_IPA_ROUTE_OUT(v) out_dword(HWIO_IPA_ROUTE_ADDR, v)
+#define HWIO_IPA_ROUTE_OUTM(m, v) out_dword_masked_ns(HWIO_IPA_ROUTE_ADDR, \
+						      m, \
+						      v, \
+						      HWIO_IPA_ROUTE_IN)
+#define HWIO_IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000
+#define HWIO_IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
+#define HWIO_IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define HWIO_IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define HWIO_IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define HWIO_IPA_ROUTE_ROUTE_DIS_SHFT 0x0
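+/*
+ * Illustrative sketch of composing a ROUTE value from the field macros
+ * above; def_pipe and def_hdr_ofst are hypothetical inputs, and real
+ * code would likely fill the remaining fields as well:
+ *
+ *	u32 route = (def_pipe << HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_SHFT) &
+ *		HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_BMSK;
+ *	route |= (def_hdr_ofst << HWIO_IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT) &
+ *		HWIO_IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK;
+ *	HWIO_IPA_ROUTE_OUT(route);
+ */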
+#define HWIO_IPA_FILTER_ADDR (IPA_CFG_REG_BASE + 0x0000004c)
+#define HWIO_IPA_FILTER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000004c)
+#define HWIO_IPA_FILTER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000004c)
+#define HWIO_IPA_MASTER_PRIORITY_ADDR (IPA_CFG_REG_BASE + 0x00000050)
+#define HWIO_IPA_MASTER_PRIORITY_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000050)
+#define HWIO_IPA_MASTER_PRIORITY_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000050)
+#define HWIO_IPA_SHARED_MEM_SIZE_ADDR (IPA_CFG_REG_BASE + 0x00000054)
+#define HWIO_IPA_SHARED_MEM_SIZE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000054)
+#define HWIO_IPA_SHARED_MEM_SIZE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000054)
+#define HWIO_IPA_NAT_TIMER_ADDR (IPA_CFG_REG_BASE + 0x00000058)
+#define HWIO_IPA_NAT_TIMER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000058)
+#define HWIO_IPA_NAT_TIMER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000058)
+#define HWIO_IPA_TAG_TIMER_ADDR (IPA_CFG_REG_BASE + 0x00000060)
+#define HWIO_IPA_TAG_TIMER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000060)
+#define HWIO_IPA_TAG_TIMER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000060)
+#define HWIO_IPA_FRAG_RULES_CLR_ADDR (IPA_CFG_REG_BASE + 0x0000006c)
+#define HWIO_IPA_FRAG_RULES_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000006c)
+#define HWIO_IPA_FRAG_RULES_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000006c)
+#define HWIO_IPA_PROC_IPH_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000070)
+#define HWIO_IPA_PROC_IPH_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000070)
+#define HWIO_IPA_PROC_IPH_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000070)
+#define HWIO_IPA_PROC_IPH_CFG_RMSK 0x1ff0ff7
+#define HWIO_IPA_PROC_IPH_CFG_ATTR 0x3
+#define HWIO_IPA_PROC_IPH_CFG_IN in_dword_masked( \
+		HWIO_IPA_PROC_IPH_CFG_ADDR, \
+		HWIO_IPA_PROC_IPH_CFG_RMSK)
+#define HWIO_IPA_PROC_IPH_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_PROC_IPH_CFG_ADDR, \
+		m)
+#define HWIO_IPA_PROC_IPH_CFG_OUT(v) out_dword(HWIO_IPA_PROC_IPH_CFG_ADDR, \
+					       v)
+#define HWIO_IPA_PROC_IPH_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PROC_IPH_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_PROC_IPH_CFG_IN)
+#define HWIO_IPA_PROC_IPH_CFG_D_DCPH_MULTI_ENGINE_DISABLE_BMSK 0x1000000
+#define HWIO_IPA_PROC_IPH_CFG_D_DCPH_MULTI_ENGINE_DISABLE_SHFT 0x18
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_VALUE_BMSK \
+	0xff0000
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_VALUE_SHFT 0x10
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_IHL_TO_2ND_FRAG_EN_BMSK 0x800
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_IHL_TO_2ND_FRAG_EN_SHFT 0xb
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_DEST_BMSK 0x400
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_DEST_SHFT 0xa
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_HOP_BMSK 0x200
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_HOP_SHFT 0x9
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_ENABLE_BMSK \
+	0x100
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_ENABLE_SHFT 0x8
+#define HWIO_IPA_PROC_IPH_CFG_FTCH_DCPH_OVERLAP_ENABLE_BMSK 0x80
+#define HWIO_IPA_PROC_IPH_CFG_FTCH_DCPH_OVERLAP_ENABLE_SHFT 0x7
+#define HWIO_IPA_PROC_IPH_CFG_PIPESTAGE_OVERLAP_DISABLE_BMSK 0x40
+#define HWIO_IPA_PROC_IPH_CFG_PIPESTAGE_OVERLAP_DISABLE_SHFT 0x6
+#define HWIO_IPA_PROC_IPH_CFG_STATUS_FROM_IPH_FRST_ALWAYS_BMSK 0x10
+#define HWIO_IPA_PROC_IPH_CFG_STATUS_FROM_IPH_FRST_ALWAYS_SHFT 0x4
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PIPELINING_DISABLE_BMSK 0x4
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PIPELINING_DISABLE_SHFT 0x2
+#define HWIO_IPA_PROC_IPH_CFG_IPH_THRESHOLD_BMSK 0x3
+#define HWIO_IPA_PROC_IPH_CFG_IPH_THRESHOLD_SHFT 0x0
+#define HWIO_IPA_QSB_MAX_WRITES_ADDR (IPA_CFG_REG_BASE + 0x00000074)
+#define HWIO_IPA_QSB_MAX_WRITES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000074)
+#define HWIO_IPA_QSB_MAX_WRITES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000074)
+#define HWIO_IPA_QSB_MAX_READS_ADDR (IPA_CFG_REG_BASE + 0x00000078)
+#define HWIO_IPA_QSB_MAX_READS_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000078)
+#define HWIO_IPA_QSB_MAX_READS_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000078)
+#define HWIO_IPA_QSB_OUTSTANDING_COUNTER_ADDR (IPA_CFG_REG_BASE + \
+					       0x0000007c)
+#define HWIO_IPA_QSB_OUTSTANDING_COUNTER_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x0000007c)
+#define HWIO_IPA_QSB_OUTSTANDING_COUNTER_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x0000007c)
+#define HWIO_IPA_QSB_OUTSTANDING_BEATS_COUNTER_ADDR (IPA_CFG_REG_BASE +	\
+						     0x00000080)
+#define HWIO_IPA_QSB_OUTSTANDING_BEATS_COUNTER_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x00000080)
+#define HWIO_IPA_QSB_OUTSTANDING_BEATS_COUNTER_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x00000080)
+#define HWIO_IPA_QSB_READ_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000084)
+#define HWIO_IPA_QSB_READ_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000084)
+#define HWIO_IPA_QSB_READ_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000084)
+#define HWIO_IPA_DPL_TIMER_LSB_ADDR (IPA_CFG_REG_BASE + 0x00000088)
+#define HWIO_IPA_DPL_TIMER_LSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000088)
+#define HWIO_IPA_DPL_TIMER_LSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000088)
+#define HWIO_IPA_DPL_TIMER_LSB_RMSK 0xffffffff
+#define HWIO_IPA_DPL_TIMER_LSB_ATTR 0x3
+#define HWIO_IPA_DPL_TIMER_LSB_IN in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		HWIO_IPA_DPL_TIMER_LSB_RMSK)
+#define HWIO_IPA_DPL_TIMER_LSB_INM(m) in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		m)
+#define HWIO_IPA_DPL_TIMER_LSB_OUT(v) out_dword( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		v)
+#define HWIO_IPA_DPL_TIMER_LSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_DPL_TIMER_LSB_IN)
+#define HWIO_IPA_DPL_TIMER_LSB_TOD_LSB_BMSK 0xffffffff
+#define HWIO_IPA_DPL_TIMER_LSB_TOD_LSB_SHFT 0x0
+#define HWIO_IPA_DPL_TIMER_MSB_ADDR (IPA_CFG_REG_BASE + 0x0000008c)
+#define HWIO_IPA_DPL_TIMER_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000008c)
+#define HWIO_IPA_DPL_TIMER_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000008c)
+#define HWIO_IPA_DPL_TIMER_MSB_RMSK 0x8000ffff
+#define HWIO_IPA_DPL_TIMER_MSB_ATTR 0x3
+#define HWIO_IPA_DPL_TIMER_MSB_IN in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		HWIO_IPA_DPL_TIMER_MSB_RMSK)
+#define HWIO_IPA_DPL_TIMER_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		m)
+#define HWIO_IPA_DPL_TIMER_MSB_OUT(v) out_dword( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		v)
+#define HWIO_IPA_DPL_TIMER_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_DPL_TIMER_MSB_IN)
+#define HWIO_IPA_DPL_TIMER_MSB_TIMER_EN_BMSK 0x80000000
+#define HWIO_IPA_DPL_TIMER_MSB_TIMER_EN_SHFT 0x1f
+#define HWIO_IPA_DPL_TIMER_MSB_TOD_MSB_BMSK 0xffff
+#define HWIO_IPA_DPL_TIMER_MSB_TOD_MSB_SHFT 0x0
+#define HWIO_IPA_STATE_TX_WRAPPER_ADDR (IPA_CFG_REG_BASE + 0x00000090)
+#define HWIO_IPA_STATE_TX_WRAPPER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000090)
+#define HWIO_IPA_STATE_TX_WRAPPER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IPA_STATE_TX_WRAPPER_RMSK 0x1e01ffff
+#define HWIO_IPA_STATE_TX_WRAPPER_ATTR 0x1
+#define HWIO_IPA_STATE_TX_WRAPPER_IN in_dword_masked( \
+		HWIO_IPA_STATE_TX_WRAPPER_ADDR,	\
+		HWIO_IPA_STATE_TX_WRAPPER_RMSK)
+#define HWIO_IPA_STATE_TX_WRAPPER_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_TX_WRAPPER_ADDR,	\
+		m)
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK 0x1e000000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT 0x19
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK 0x10000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT 0x10
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK 0x6000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT 0xd
+#define HWIO_IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK 0x1800
+#define HWIO_IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT 0xb
+#define HWIO_IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK 0x200
+#define HWIO_IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT 0x9
+#define HWIO_IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK 0x180
+#define HWIO_IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT 0x7
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK 0x40
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT 0x6
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_BMSK 0x20
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK 0x10
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT 0x4
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK 0x8
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT 0x3
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK 0x4
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT 0x2
+#define HWIO_IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT 0x0
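+/*
+ * Illustrative sketch: STATE_TX_WRAPPER is generated with read accessors
+ * only (ATTR 0x1), so its idle bits can only be polled. A sketch that
+ * waits for both TX engines to drain (real code would add a timeout):
+ *
+ *	u32 idle = HWIO_IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK |
+ *		HWIO_IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK;
+ *	while ((HWIO_IPA_STATE_TX_WRAPPER_IN & idle) != idle)
+ *		;
+ */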
+#define HWIO_IPA_STATE_TX1_ADDR (IPA_CFG_REG_BASE + 0x00000094)
+#define HWIO_IPA_STATE_TX1_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000094)
+#define HWIO_IPA_STATE_TX1_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000094)
+#define HWIO_IPA_STATE_TX1_RMSK 0xffffffff
+#define HWIO_IPA_STATE_TX1_ATTR 0x1
+#define HWIO_IPA_STATE_TX1_IN in_dword_masked(HWIO_IPA_STATE_TX1_ADDR, \
+					      HWIO_IPA_STATE_TX1_RMSK)
+#define HWIO_IPA_STATE_TX1_INM(m) in_dword_masked(HWIO_IPA_STATE_TX1_ADDR, \
+						  m)
+#define HWIO_IPA_STATE_TX1_SUSPEND_REQ_EMPTY_BMSK 0x80000000
+#define HWIO_IPA_STATE_TX1_SUSPEND_REQ_EMPTY_SHFT 0x1f
+#define HWIO_IPA_STATE_TX1_LAST_CMD_PIPE_BMSK 0x7c000000
+#define HWIO_IPA_STATE_TX1_LAST_CMD_PIPE_SHFT 0x1a
+#define HWIO_IPA_STATE_TX1_CS_SNIF_IDLE_BMSK 0x2000000
+#define HWIO_IPA_STATE_TX1_CS_SNIF_IDLE_SHFT 0x19
+#define HWIO_IPA_STATE_TX1_SUSPEND_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_STATE_TX1_SUSPEND_EMPTY_SHFT 0x18
+#define HWIO_IPA_STATE_TX1_RSRCREL_IDLE_BMSK 0x800000
+#define HWIO_IPA_STATE_TX1_RSRCREL_IDLE_SHFT 0x17
+#define HWIO_IPA_STATE_TX1_HOLB_MASK_IDLE_BMSK 0x400000
+#define HWIO_IPA_STATE_TX1_HOLB_MASK_IDLE_SHFT 0x16
+#define HWIO_IPA_STATE_TX1_HOLB_IDLE_BMSK 0x200000
+#define HWIO_IPA_STATE_TX1_HOLB_IDLE_SHFT 0x15
+#define HWIO_IPA_STATE_TX1_ALIGNER_EMPTY_BMSK 0x100000
+#define HWIO_IPA_STATE_TX1_ALIGNER_EMPTY_SHFT 0x14
+#define HWIO_IPA_STATE_TX1_PF_EMPTY_BMSK 0x80000
+#define HWIO_IPA_STATE_TX1_PF_EMPTY_SHFT 0x13
+#define HWIO_IPA_STATE_TX1_PF_IDLE_BMSK 0x40000
+#define HWIO_IPA_STATE_TX1_PF_IDLE_SHFT 0x12
+#define HWIO_IPA_STATE_TX1_DMAW_LAST_OUTSD_IDLE_BMSK 0x20000
+#define HWIO_IPA_STATE_TX1_DMAW_LAST_OUTSD_IDLE_SHFT 0x11
+#define HWIO_IPA_STATE_TX1_DMAW_IDLE_BMSK 0x10000
+#define HWIO_IPA_STATE_TX1_DMAW_IDLE_SHFT 0x10
+#define HWIO_IPA_STATE_TX1_AR_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_TX1_AR_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_INJ_IDLE_BMSK 0x4000
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_INJ_IDLE_SHFT 0xe
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_ALOC_IDLE_BMSK 0x2000
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_ALOC_IDLE_SHFT 0xd
+#define HWIO_IPA_STATE_TX1_TX_CMD_SNIF_IDLE_BMSK 0x1000
+#define HWIO_IPA_STATE_TX1_TX_CMD_SNIF_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_TX1_TX_CMD_TRNSEQ_IDLE_BMSK 0x800
+#define HWIO_IPA_STATE_TX1_TX_CMD_TRNSEQ_IDLE_SHFT 0xb
+#define HWIO_IPA_STATE_TX1_TX_CMD_MAIN_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_TX1_TX_CMD_MAIN_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_TX1_PA_PUB_CNT_EMPTY_BMSK 0x200
+#define HWIO_IPA_STATE_TX1_PA_PUB_CNT_EMPTY_SHFT 0x9
+#define HWIO_IPA_STATE_TX1_PA_RST_IDLE_BMSK 0x100
+#define HWIO_IPA_STATE_TX1_PA_RST_IDLE_SHFT 0x8
+#define HWIO_IPA_STATE_TX1_PA_CTX_IDLE_BMSK 0x80
+#define HWIO_IPA_STATE_TX1_PA_CTX_IDLE_SHFT 0x7
+#define HWIO_IPA_STATE_TX1_PA_IDLE_BMSK 0x40
+#define HWIO_IPA_STATE_TX1_PA_IDLE_SHFT 0x6
+#define HWIO_IPA_STATE_TX1_ARBIT_TYPE_BMSK 0x38
+#define HWIO_IPA_STATE_TX1_ARBIT_TYPE_SHFT 0x3
+#define HWIO_IPA_STATE_TX1_FLOPPED_ARBIT_TYPE_BMSK 0x7
+#define HWIO_IPA_STATE_TX1_FLOPPED_ARBIT_TYPE_SHFT 0x0
+#define HWIO_IPA_STATE_FETCHER_ADDR (IPA_CFG_REG_BASE + 0x00000098)
+#define HWIO_IPA_STATE_FETCHER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000098)
+#define HWIO_IPA_STATE_FETCHER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000098)
+#define HWIO_IPA_STATE_FETCHER_RMSK 0xfffff
+#define HWIO_IPA_STATE_FETCHER_ATTR 0x1
+#define HWIO_IPA_STATE_FETCHER_IN in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_ADDR, \
+		HWIO_IPA_STATE_FETCHER_RMSK)
+#define HWIO_IPA_STATE_FETCHER_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_ADDR, \
+		m)
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_IMM_CMD_EXEC_STATE_IDLE_BMSK \
+	0x80000
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_IMM_CMD_EXEC_STATE_IDLE_SHFT 0x13
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_SLOT_STATE_IDLE_BMSK 0x7f000
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_SLOT_STATE_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_STATE_IDLE_BMSK 0xfe0
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_STATE_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_CMPLT_STATE_IDLE_BMSK 0x10
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_CMPLT_STATE_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_IMM_STATE_IDLE_BMSK 0x8
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_IMM_STATE_IDLE_SHFT 0x3
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_PKT_STATE_IDLE_BMSK 0x4
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_PKT_STATE_IDLE_SHFT 0x2
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_ALLOC_STATE_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_ALLOC_STATE_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_STATE_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_STATE_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_FETCHER_MASK_0_ADDR (IPA_CFG_REG_BASE + 0x0000009c)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x0000009c)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x0000009c)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_RMSK 0xffffffff
+#define HWIO_IPA_STATE_FETCHER_MASK_0_ATTR 0x1
+#define HWIO_IPA_STATE_FETCHER_MASK_0_IN in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_0_ADDR, \
+		HWIO_IPA_STATE_FETCHER_MASK_0_RMSK)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_0_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_HPS_DMAR_BMSK \
+	0xff000000
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_HPS_DMAR_SHFT \
+	0x18
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_CONTEXT_BMSK \
+	0xff0000
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_CONTEXT_SHFT \
+	0x10
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_IMM_EXEC_BMSK 0xff00
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_IMM_EXEC_SHFT 0x8
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_DMAR_USES_QUEUE_BMSK 0xff
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_DMAR_USES_QUEUE_SHFT 0x0
+#define HWIO_IPA_STATE_FETCHER_MASK_1_ADDR (IPA_CFG_REG_BASE + 0x000000cc)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x000000cc)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x000000cc)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_RMSK 0xffffffff
+#define HWIO_IPA_STATE_FETCHER_MASK_1_ATTR 0x1
+#define HWIO_IPA_STATE_FETCHER_MASK_1_IN in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_1_ADDR, \
+		HWIO_IPA_STATE_FETCHER_MASK_1_RMSK)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_1_ADDR, \
+		m)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_SPACE_DPL_FIFO_BMSK	\
+	0xff000000
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_SPACE_DPL_FIFO_SHFT	\
+	0x18
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_STEP_MODE_BMSK 0xff0000
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_STEP_MODE_SHFT 0x10
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_ARB_LOCK_BMSK 0xff00
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_ARB_LOCK_SHFT 0x8
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_RESOURCES_ACK_ENTRY_BMSK \
+	0xff
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_RESOURCES_ACK_ENTRY_SHFT \
+	0x0
+#define HWIO_IPA_STATE_DPL_FIFO_ADDR (IPA_CFG_REG_BASE + 0x000000d0)
+#define HWIO_IPA_STATE_DPL_FIFO_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000d0)
+#define HWIO_IPA_STATE_DPL_FIFO_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000d0)
+#define HWIO_IPA_STATE_DPL_FIFO_RMSK 0x7
+#define HWIO_IPA_STATE_DPL_FIFO_ATTR 0x1
+#define HWIO_IPA_STATE_DPL_FIFO_IN in_dword_masked( \
+		HWIO_IPA_STATE_DPL_FIFO_ADDR, \
+		HWIO_IPA_STATE_DPL_FIFO_RMSK)
+#define HWIO_IPA_STATE_DPL_FIFO_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_DPL_FIFO_ADDR, \
+		m)
+#define HWIO_IPA_STATE_DPL_FIFO_POP_FSM_STATE_BMSK 0x7
+#define HWIO_IPA_STATE_DPL_FIFO_POP_FSM_STATE_SHFT 0x0
+#define HWIO_IPA_STATE_COAL_MASTER_ADDR (IPA_CFG_REG_BASE + 0x000000d4)
+#define HWIO_IPA_STATE_COAL_MASTER_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x000000d4)
+#define HWIO_IPA_STATE_COAL_MASTER_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x000000d4)
+#define HWIO_IPA_STATE_COAL_MASTER_RMSK 0xffffffff
+#define HWIO_IPA_STATE_COAL_MASTER_ATTR 0x1
+#define HWIO_IPA_STATE_COAL_MASTER_IN in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_ADDR, \
+		HWIO_IPA_STATE_COAL_MASTER_RMSK)
+#define HWIO_IPA_STATE_COAL_MASTER_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_ADDR, \
+		m)
+#define HWIO_IPA_STATE_COAL_MASTER_VP_TIMER_EXPIRED_BMSK 0xf0000000
+#define HWIO_IPA_STATE_COAL_MASTER_VP_TIMER_EXPIRED_SHFT 0x1c
+#define HWIO_IPA_STATE_COAL_MASTER_LRU_VP_BMSK 0xf000000
+#define HWIO_IPA_STATE_COAL_MASTER_LRU_VP_SHFT 0x18
+#define HWIO_IPA_STATE_COAL_MASTER_INIT_VP_FSM_STATE_BMSK 0xf00000
+#define HWIO_IPA_STATE_COAL_MASTER_INIT_VP_FSM_STATE_SHFT 0x14
+#define HWIO_IPA_STATE_COAL_MASTER_CHECK_FIT_FSM_STATE_BMSK 0xf0000
+#define HWIO_IPA_STATE_COAL_MASTER_CHECK_FIT_FSM_STATE_SHFT 0x10
+#define HWIO_IPA_STATE_COAL_MASTER_HASH_CALC_FSM_STATE_BMSK 0xf000
+#define HWIO_IPA_STATE_COAL_MASTER_HASH_CALC_FSM_STATE_SHFT 0xc
+#define HWIO_IPA_STATE_COAL_MASTER_FIND_OPEN_FSM_STATE_BMSK 0xf00
+#define HWIO_IPA_STATE_COAL_MASTER_FIND_OPEN_FSM_STATE_SHFT 0x8
+#define HWIO_IPA_STATE_COAL_MASTER_MAIN_FSM_STATE_BMSK 0xf0
+#define HWIO_IPA_STATE_COAL_MASTER_MAIN_FSM_STATE_SHFT 0x4
+#define HWIO_IPA_STATE_COAL_MASTER_VP_VLD_BMSK 0xf
+#define HWIO_IPA_STATE_COAL_MASTER_VP_VLD_SHFT 0x0
+#define HWIO_IPA_STATE_DFETCHER_ADDR (IPA_CFG_REG_BASE + 0x000000a0)
+#define HWIO_IPA_STATE_DFETCHER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000a0)
+#define HWIO_IPA_STATE_DFETCHER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000a0)
+#define HWIO_IPA_STATE_DFETCHER_RMSK 0x3f3f3
+#define HWIO_IPA_STATE_DFETCHER_ATTR 0x1
+#define HWIO_IPA_STATE_DFETCHER_IN in_dword_masked( \
+		HWIO_IPA_STATE_DFETCHER_ADDR, \
+		HWIO_IPA_STATE_DFETCHER_RMSK)
+#define HWIO_IPA_STATE_DFETCHER_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_DFETCHER_ADDR, \
+		m)
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_SLOT_STATE_IDLE_BMSK 0x3f000
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_SLOT_STATE_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_STATE_IDLE_BMSK 0x3f0
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_STATE_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_CMPLT_STATE_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_CMPLT_STATE_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_PKT_STATE_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_PKT_STATE_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_ACL_ADDR (IPA_CFG_REG_BASE + 0x000000a4)
+#define HWIO_IPA_STATE_ACL_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000a4)
+#define HWIO_IPA_STATE_ACL_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000a4)
+#define HWIO_IPA_STATE_ACL_RMSK 0xffcffff
+#define HWIO_IPA_STATE_ACL_ATTR 0x1
+#define HWIO_IPA_STATE_ACL_IN in_dword_masked(HWIO_IPA_STATE_ACL_ADDR, \
+					      HWIO_IPA_STATE_ACL_RMSK)
+#define HWIO_IPA_STATE_ACL_INM(m) in_dword_masked(HWIO_IPA_STATE_ACL_ADDR, \
+						  m)
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_ACTIVE_BMSK 0x8000000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_ACTIVE_SHFT 0x1b
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_EMPTY_BMSK 0x4000000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_EMPTY_SHFT 0x1a
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_ACTIVE_BMSK 0x2000000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_ACTIVE_SHFT 0x19
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_EMPTY_SHFT 0x18
+#define HWIO_IPA_STATE_ACL_IPA_DPS_SEQUENCER_IDLE_BMSK 0x800000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_SEQUENCER_IDLE_SHFT 0x17
+#define HWIO_IPA_STATE_ACL_IPA_HPS_SEQUENCER_IDLE_BMSK 0x400000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_SEQUENCER_IDLE_SHFT 0x16
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_ACTIVE_BMSK 0x200000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_ACTIVE_SHFT 0x15
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_EMPTY_BMSK 0x100000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_EMPTY_SHFT 0x14
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_ACTIVE_BMSK 0x80000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_ACTIVE_SHFT 0x13
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_EMPTY_BMSK 0x40000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_EMPTY_SHFT 0x12
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_ACTIVE_BMSK 0x8000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_ACTIVE_SHFT 0xf
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_EMPTY_BMSK 0x4000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_EMPTY_SHFT 0xe
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_ACTIVE_BMSK 0x2000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_ACTIVE_SHFT 0xd
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_EMPTY_BMSK 0x1000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_EMPTY_SHFT 0xc
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_ACTIVE_BMSK 0x800
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_ACTIVE_SHFT 0xb
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_EMPTY_BMSK 0x400
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_EMPTY_SHFT 0xa
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_ACTIVE_BMSK 0x200
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_ACTIVE_SHFT 0x9
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_EMPTY_BMSK 0x100
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_EMPTY_SHFT 0x8
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_ACTIVE_BMSK 0x80
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_ACTIVE_SHFT 0x7
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_EMPTY_BMSK 0x40
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_EMPTY_SHFT 0x6
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_ACTIVE_BMSK 0x20
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_ACTIVE_SHFT 0x5
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_EMPTY_BMSK 0x10
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_EMPTY_SHFT 0x4
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_ACTIVE_BMSK 0x8
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_ACTIVE_SHFT 0x3
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_EMPTY_BMSK 0x4
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_EMPTY_SHFT 0x2
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_ACTIVE_BMSK 0x2
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_ACTIVE_SHFT 0x1
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_EMPTY_BMSK 0x1
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_EMPTY_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_TLV_ADDR (IPA_CFG_REG_BASE + 0x000000b8)
+#define HWIO_IPA_STATE_GSI_TLV_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000b8)
+#define HWIO_IPA_STATE_GSI_TLV_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000b8)
+#define HWIO_IPA_STATE_GSI_TLV_RMSK 0x1
+#define HWIO_IPA_STATE_GSI_TLV_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_TLV_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_TLV_ADDR, \
+		HWIO_IPA_STATE_GSI_TLV_RMSK)
+#define HWIO_IPA_STATE_GSI_TLV_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_TLV_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_TLV_IPA_GSI_TOGGLE_FSM_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_GSI_TLV_IPA_GSI_TOGGLE_FSM_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_AOS_ADDR (IPA_CFG_REG_BASE + 0x000000bc)
+#define HWIO_IPA_STATE_GSI_AOS_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000bc)
+#define HWIO_IPA_STATE_GSI_AOS_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000bc)
+#define HWIO_IPA_STATE_GSI_AOS_RMSK 0x1
+#define HWIO_IPA_STATE_GSI_AOS_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_AOS_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_AOS_ADDR, \
+		HWIO_IPA_STATE_GSI_AOS_RMSK)
+#define HWIO_IPA_STATE_GSI_AOS_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_AOS_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_AOS_IPA_GSI_AOS_FSM_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_GSI_AOS_IPA_GSI_AOS_FSM_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_IF_ADDR (IPA_CFG_REG_BASE + 0x000000c0)
+#define HWIO_IPA_STATE_GSI_IF_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000c0)
+#define HWIO_IPA_STATE_GSI_IF_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000c0)
+#define HWIO_IPA_STATE_GSI_IF_RMSK 0xff
+#define HWIO_IPA_STATE_GSI_IF_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_IF_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_ADDR, \
+		HWIO_IPA_STATE_GSI_IF_RMSK)
+#define HWIO_IPA_STATE_GSI_IF_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_1_BMSK 0xf0
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_1_SHFT 0x4
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_0_BMSK 0xf
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_0_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_SKIP_ADDR (IPA_CFG_REG_BASE + 0x000000c4)
+#define HWIO_IPA_STATE_GSI_SKIP_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000c4)
+#define HWIO_IPA_STATE_GSI_SKIP_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000c4)
+#define HWIO_IPA_STATE_GSI_SKIP_RMSK 0x3
+#define HWIO_IPA_STATE_GSI_SKIP_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_SKIP_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_SKIP_ADDR, \
+		HWIO_IPA_STATE_GSI_SKIP_RMSK)
+#define HWIO_IPA_STATE_GSI_SKIP_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_GSI_SKIP_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_SKIP_IPA_GSI_SKIP_FSM_BMSK 0x3
+#define HWIO_IPA_STATE_GSI_SKIP_IPA_GSI_SKIP_FSM_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_IF_CONS_ADDR (IPA_CFG_REG_BASE + 0x000000c8)
+#define HWIO_IPA_STATE_GSI_IF_CONS_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x000000c8)
+#define HWIO_IPA_STATE_GSI_IF_CONS_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x000000c8)
+#define HWIO_IPA_STATE_GSI_IF_CONS_RMSK 0x7ffffff
+#define HWIO_IPA_STATE_GSI_IF_CONS_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_IF_CONS_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_CONS_ADDR, \
+		HWIO_IPA_STATE_GSI_IF_CONS_RMSK)
+#define HWIO_IPA_STATE_GSI_IF_CONS_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_CONS_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_NO_ZERO_BMSK \
+	0x7fe0000
+#define	\
+	HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_NO_ZERO_SHFT \
+	0x11
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_BMSK \
+	0x1ff80
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_SHFT 0x7
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_CACHE_VLD_BMSK	\
+	0x7e
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_CACHE_VLD_SHFT	\
+	0x1
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_STATE_BMSK 0x1
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_STATE_SHFT 0x0
+#define HWIO_IPA_STATE_ADDR (IPA_CFG_REG_BASE + 0x000000a8)
+#define HWIO_IPA_STATE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000a8)
+#define HWIO_IPA_STATE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000a8)
+#define HWIO_IPA_STATE_RMSK 0xf7ffffff
+#define HWIO_IPA_STATE_ATTR 0x1
+#define HWIO_IPA_STATE_IN in_dword_masked(HWIO_IPA_STATE_ADDR, \
+					  HWIO_IPA_STATE_RMSK)
+#define HWIO_IPA_STATE_INM(m) in_dword_masked(HWIO_IPA_STATE_ADDR, m)
+#define HWIO_IPA_STATE_IPA_UC_RX_HND_CMDQ_EMPTY_BMSK 0x80000000
+#define HWIO_IPA_STATE_IPA_UC_RX_HND_CMDQ_EMPTY_SHFT 0x1f
+#define HWIO_IPA_STATE_IPA_DPS_TX_EMPTY_BMSK 0x40000000
+#define HWIO_IPA_STATE_IPA_DPS_TX_EMPTY_SHFT 0x1e
+#define HWIO_IPA_STATE_IPA_HPS_DPS_EMPTY_BMSK 0x20000000
+#define HWIO_IPA_STATE_IPA_HPS_DPS_EMPTY_SHFT 0x1d
+#define HWIO_IPA_STATE_IPA_RX_HPS_EMPTY_BMSK 0x10000000
+#define HWIO_IPA_STATE_IPA_RX_HPS_EMPTY_SHFT 0x1c
+#define HWIO_IPA_STATE_IPA_RX_SPLT_CMDQ_EMPTY_BMSK 0x7800000
+#define HWIO_IPA_STATE_IPA_RX_SPLT_CMDQ_EMPTY_SHFT 0x17
+#define HWIO_IPA_STATE_IPA_TX_COMMANDER_CMDQ_EMPTY_BMSK 0x400000
+#define HWIO_IPA_STATE_IPA_TX_COMMANDER_CMDQ_EMPTY_SHFT 0x16
+#define HWIO_IPA_STATE_IPA_RX_ACKQ_EMPTY_BMSK 0x200000
+#define HWIO_IPA_STATE_IPA_RX_ACKQ_EMPTY_SHFT 0x15
+#define HWIO_IPA_STATE_IPA_UC_ACKQ_EMPTY_BMSK 0x100000
+#define HWIO_IPA_STATE_IPA_UC_ACKQ_EMPTY_SHFT 0x14
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_EMPTY_BMSK 0x80000
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_EMPTY_SHFT 0x13
+#define HWIO_IPA_STATE_IPA_NTF_TX_EMPTY_BMSK 0x40000
+#define HWIO_IPA_STATE_IPA_NTF_TX_EMPTY_SHFT 0x12
+#define HWIO_IPA_STATE_IPA_FULL_IDLE_BMSK 0x20000
+#define HWIO_IPA_STATE_IPA_FULL_IDLE_SHFT 0x11
+#define HWIO_IPA_STATE_IPA_PROD_BRESP_IDLE_BMSK 0x10000
+#define HWIO_IPA_STATE_IPA_PROD_BRESP_IDLE_SHFT 0x10
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK 0x4000
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT 0xe
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_FULL_BMSK 0x2000
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_FULL_SHFT 0xd
+#define HWIO_IPA_STATE_IPA_ACKMNGR_STATE_IDLE_BMSK 0x1000
+#define HWIO_IPA_STATE_IPA_ACKMNGR_STATE_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_IPA_ACKMNGR_DB_EMPTY_BMSK 0x800
+#define HWIO_IPA_STATE_IPA_ACKMNGR_DB_EMPTY_SHFT 0xb
+#define HWIO_IPA_STATE_IPA_RSRC_STATE_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_IPA_RSRC_STATE_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_IPA_RSRC_MNGR_DB_EMPTY_BMSK 0x200
+#define HWIO_IPA_STATE_IPA_RSRC_MNGR_DB_EMPTY_SHFT 0x9
+#define HWIO_IPA_STATE_MBIM_AGGR_IDLE_BMSK 0x100
+#define HWIO_IPA_STATE_MBIM_AGGR_IDLE_SHFT 0x8
+#define HWIO_IPA_STATE_AGGR_IDLE_BMSK 0x80
+#define HWIO_IPA_STATE_AGGR_IDLE_SHFT 0x7
+#define HWIO_IPA_STATE_IPA_NOC_IDLE_BMSK 0x40
+#define HWIO_IPA_STATE_IPA_NOC_IDLE_SHFT 0x6
+#define HWIO_IPA_STATE_IPA_STATUS_SNIFFER_IDLE_BMSK 0x20
+#define HWIO_IPA_STATE_IPA_STATUS_SNIFFER_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_BAM_GSI_IDLE_BMSK 0x10
+#define HWIO_IPA_STATE_BAM_GSI_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_DPL_FIFO_IDLE_BMSK 0x8
+#define HWIO_IPA_STATE_DPL_FIFO_IDLE_SHFT 0x3
+#define HWIO_IPA_STATE_TX_IDLE_BMSK 0x4
+#define HWIO_IPA_STATE_TX_IDLE_SHFT 0x2
+#define HWIO_IPA_STATE_RX_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_RX_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_RX_WAIT_BMSK 0x1
+#define HWIO_IPA_STATE_RX_WAIT_SHFT 0x0
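+/*
+ * Illustrative sketch (not part of the generated register map): read-only
+ * state registers such as IPA_STATE are sampled through the generated
+ * _IN/_INM accessors, and a field is extracted with its matching
+ * _BMSK/_SHFT pair, e.g.:
+ *
+ *	u32 val = HWIO_IPA_STATE_IN;
+ *	u32 tx_idle = (val & HWIO_IPA_STATE_TX_IDLE_BMSK) >>
+ *		      HWIO_IPA_STATE_TX_IDLE_SHFT;
+ */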
+#define HWIO_IPA_STATE_RX_ACTIVE_ADDR (IPA_CFG_REG_BASE + 0x000000ac)
+#define HWIO_IPA_STATE_RX_ACTIVE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000ac)
+#define HWIO_IPA_STATE_RX_ACTIVE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000ac)
+#define HWIO_IPA_STATE_RX_ACTIVE_RMSK 0x1fff
+#define HWIO_IPA_STATE_RX_ACTIVE_ATTR 0x1
+#define HWIO_IPA_STATE_RX_ACTIVE_IN in_dword_masked( \
+		HWIO_IPA_STATE_RX_ACTIVE_ADDR, \
+		HWIO_IPA_STATE_RX_ACTIVE_RMSK)
+#define HWIO_IPA_STATE_RX_ACTIVE_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_RX_ACTIVE_ADDR, \
+		m)
+#define HWIO_IPA_STATE_RX_ACTIVE_ENDPOINTS_BMSK 0x1fff
+#define HWIO_IPA_STATE_RX_ACTIVE_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_STATE_TX0_ADDR (IPA_CFG_REG_BASE + 0x000000b0)
+#define HWIO_IPA_STATE_TX0_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000b0)
+#define HWIO_IPA_STATE_TX0_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000b0)
+#define HWIO_IPA_STATE_TX0_RMSK 0xfffffff
+#define HWIO_IPA_STATE_TX0_ATTR 0x1
+#define HWIO_IPA_STATE_TX0_IN in_dword_masked(HWIO_IPA_STATE_TX0_ADDR, \
+					      HWIO_IPA_STATE_TX0_RMSK)
+#define HWIO_IPA_STATE_TX0_INM(m) in_dword_masked(HWIO_IPA_STATE_TX0_ADDR, \
+						  m)
+#define HWIO_IPA_STATE_TX0_LAST_CMD_PIPE_BMSK 0xf800000
+#define HWIO_IPA_STATE_TX0_LAST_CMD_PIPE_SHFT 0x17
+#define HWIO_IPA_STATE_TX0_CS_SNIF_IDLE_BMSK 0x400000
+#define HWIO_IPA_STATE_TX0_CS_SNIF_IDLE_SHFT 0x16
+#define HWIO_IPA_STATE_TX0_SUSPEND_EMPTY_BMSK 0x200000
+#define HWIO_IPA_STATE_TX0_SUSPEND_EMPTY_SHFT 0x15
+#define HWIO_IPA_STATE_TX0_RSRCREL_IDLE_BMSK 0x100000
+#define HWIO_IPA_STATE_TX0_RSRCREL_IDLE_SHFT 0x14
+#define HWIO_IPA_STATE_TX0_HOLB_MASK_IDLE_BMSK 0x80000
+#define HWIO_IPA_STATE_TX0_HOLB_MASK_IDLE_SHFT 0x13
+#define HWIO_IPA_STATE_TX0_HOLB_IDLE_BMSK 0x40000
+#define HWIO_IPA_STATE_TX0_HOLB_IDLE_SHFT 0x12
+#define HWIO_IPA_STATE_TX0_ALIGNER_EMPTY_BMSK 0x20000
+#define HWIO_IPA_STATE_TX0_ALIGNER_EMPTY_SHFT 0x11
+#define HWIO_IPA_STATE_TX0_PF_EMPTY_BMSK 0x10000
+#define HWIO_IPA_STATE_TX0_PF_EMPTY_SHFT 0x10
+#define HWIO_IPA_STATE_TX0_PF_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_TX0_PF_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_TX0_DMAW_LAST_OUTSD_IDLE_BMSK 0x4000
+#define HWIO_IPA_STATE_TX0_DMAW_LAST_OUTSD_IDLE_SHFT 0xe
+#define HWIO_IPA_STATE_TX0_DMAW_IDLE_BMSK 0x2000
+#define HWIO_IPA_STATE_TX0_DMAW_IDLE_SHFT 0xd
+#define HWIO_IPA_STATE_TX0_AR_IDLE_BMSK 0x1000
+#define HWIO_IPA_STATE_TX0_AR_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_INJ_IDLE_BMSK 0x800
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_INJ_IDLE_SHFT 0xb
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_ALOC_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_ALOC_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_TX0_TX_CMD_SNIF_IDLE_BMSK 0x200
+#define HWIO_IPA_STATE_TX0_TX_CMD_SNIF_IDLE_SHFT 0x9
+#define HWIO_IPA_STATE_TX0_TX_CMD_TRNSEQ_IDLE_BMSK 0x100
+#define HWIO_IPA_STATE_TX0_TX_CMD_TRNSEQ_IDLE_SHFT 0x8
+#define HWIO_IPA_STATE_TX0_TX_CMD_MAIN_IDLE_BMSK 0x80
+#define HWIO_IPA_STATE_TX0_TX_CMD_MAIN_IDLE_SHFT 0x7
+#define HWIO_IPA_STATE_TX0_PA_PUB_CNT_EMPTY_BMSK 0x40
+#define HWIO_IPA_STATE_TX0_PA_PUB_CNT_EMPTY_SHFT 0x6
+#define HWIO_IPA_STATE_TX0_PA_CTX_IDLE_BMSK 0x20
+#define HWIO_IPA_STATE_TX0_PA_CTX_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_TX0_PA_IDLE_BMSK 0x10
+#define HWIO_IPA_STATE_TX0_PA_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_TX0_NEXT_ARBIT_TYPE_BMSK 0xc
+#define HWIO_IPA_STATE_TX0_NEXT_ARBIT_TYPE_SHFT 0x2
+#define HWIO_IPA_STATE_TX0_LAST_ARBIT_TYPE_BMSK 0x3
+#define HWIO_IPA_STATE_TX0_LAST_ARBIT_TYPE_SHFT 0x0
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ADDR (IPA_CFG_REG_BASE + 0x000000b4)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x000000b4)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x000000b4)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_RMSK 0x7fffffff
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ATTR 0x1
+#define HWIO_IPA_STATE_AGGR_ACTIVE_IN in_dword_masked( \
+		HWIO_IPA_STATE_AGGR_ACTIVE_ADDR, \
+		HWIO_IPA_STATE_AGGR_ACTIVE_RMSK)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_AGGR_ACTIVE_ADDR, \
+		m)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ENDPOINTS_SHFT 0x0
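+/*
+ * Illustrative note (an assumption, not stated by the generated header):
+ * the ENDPOINTS fields of RX_ACTIVE and AGGR_ACTIVE appear to be
+ * per-endpoint bitmaps, one bit per pipe, so a check for endpoint ep
+ * might look like:
+ *
+ *	bool aggr_active = HWIO_IPA_STATE_AGGR_ACTIVE_IN & BIT(ep);
+ */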
+#define HWIO_IPA_GENERIC_RAM_ARBITER_PRIORITY_ADDR (IPA_CFG_REG_BASE + \
+						    0x000000d8)
+#define HWIO_IPA_GENERIC_RAM_ARBITER_PRIORITY_PHYS (IPA_CFG_REG_BASE_PHYS \
+						    + 0x000000d8)
+#define HWIO_IPA_GENERIC_RAM_ARBITER_PRIORITY_OFFS (IPA_CFG_REG_BASE_OFFS \
+						    + 0x000000d8)
+#define HWIO_IPA_STATE_NLO_AGGR_ADDR (IPA_CFG_REG_BASE + 0x000000dc)
+#define HWIO_IPA_STATE_NLO_AGGR_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000dc)
+#define HWIO_IPA_STATE_NLO_AGGR_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000dc)
+#define HWIO_IPA_STATE_NLO_AGGR_RMSK 0xffffffff
+#define HWIO_IPA_STATE_NLO_AGGR_ATTR 0x1
+#define HWIO_IPA_STATE_NLO_AGGR_IN in_dword_masked( \
+		HWIO_IPA_STATE_NLO_AGGR_ADDR, \
+		HWIO_IPA_STATE_NLO_AGGR_RMSK)
+#define HWIO_IPA_STATE_NLO_AGGR_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_NLO_AGGR_ADDR, \
+		m)
+#define HWIO_IPA_STATE_NLO_AGGR_NLO_AGGR_STATE_BMSK 0xffffffff
+#define HWIO_IPA_STATE_NLO_AGGR_NLO_AGGR_STATE_SHFT 0x0
+#define HWIO_IPA_STATE_COAL_MASTER_1_ADDR (IPA_CFG_REG_BASE + 0x000000e0)
+#define HWIO_IPA_STATE_COAL_MASTER_1_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x000000e0)
+#define HWIO_IPA_STATE_COAL_MASTER_1_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x000000e0)
+#define HWIO_IPA_STATE_COAL_MASTER_1_RMSK 0x3fffffff
+#define HWIO_IPA_STATE_COAL_MASTER_1_ATTR 0x1
+#define HWIO_IPA_STATE_COAL_MASTER_1_IN in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_1_ADDR, \
+		HWIO_IPA_STATE_COAL_MASTER_1_RMSK)
+#define HWIO_IPA_STATE_COAL_MASTER_1_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_1_ADDR, \
+		m)
+#define HWIO_IPA_STATE_COAL_MASTER_1_ARBITER_STATE_BMSK 0x3c000000
+#define HWIO_IPA_STATE_COAL_MASTER_1_ARBITER_STATE_SHFT 0x1a
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_FSM_STATE_BMSK 0x3c00000
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_FSM_STATE_SHFT 0x16
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_RD_CTX_LINE_BMSK 0x3f0000
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_RD_CTX_LINE_SHFT 0x10
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_FSM_STATE_BMSK 0xf000
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_FSM_STATE_SHFT 0xc
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_RD_PKT_LINE_BMSK 0xfc0
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_RD_PKT_LINE_SHFT 0x6
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_WR_CTX_LINE_BMSK 0x3f
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_WR_CTX_LINE_SHFT 0x0
+#define HWIO_IPA_YELLOW_MARKER_BELOW_ADDR (IPA_CFG_REG_BASE + 0x00000110)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000110)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000110)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_EN_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000114)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000114)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000114)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_CLR_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000118)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000118)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000118)
+#define HWIO_IPA_RED_MARKER_BELOW_ADDR (IPA_CFG_REG_BASE + 0x0000011c)
+#define HWIO_IPA_RED_MARKER_BELOW_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IPA_RED_MARKER_BELOW_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IPA_RED_MARKER_BELOW_EN_ADDR (IPA_CFG_REG_BASE + 0x00000120)
+#define HWIO_IPA_RED_MARKER_BELOW_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000120)
+#define HWIO_IPA_RED_MARKER_BELOW_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000120)
+#define HWIO_IPA_RED_MARKER_BELOW_CLR_ADDR (IPA_CFG_REG_BASE + 0x00000124)
+#define HWIO_IPA_RED_MARKER_BELOW_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000124)
+#define HWIO_IPA_RED_MARKER_BELOW_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000124)
+#define HWIO_IPA_YELLOW_MARKER_SHADOW_ADDR (IPA_CFG_REG_BASE + 0x00000128)
+#define HWIO_IPA_YELLOW_MARKER_SHADOW_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000128)
+#define HWIO_IPA_YELLOW_MARKER_SHADOW_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000128)
+#define HWIO_IPA_RED_MARKER_SHADOW_ADDR (IPA_CFG_REG_BASE + 0x0000012c)
+#define HWIO_IPA_RED_MARKER_SHADOW_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x0000012c)
+#define HWIO_IPA_RED_MARKER_SHADOW_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x0000012c)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_ADDR (IPA_CFG_REG_BASE + 0x00000130)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000130)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000130)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_EN_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000134)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000134)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000134)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_CLR_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000138)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000138)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000138)
+#define HWIO_IPA_RED_MARKER_ABOVE_ADDR (IPA_CFG_REG_BASE + 0x0000013c)
+#define HWIO_IPA_RED_MARKER_ABOVE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000013c)
+#define HWIO_IPA_RED_MARKER_ABOVE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000013c)
+#define HWIO_IPA_RED_MARKER_ABOVE_EN_ADDR (IPA_CFG_REG_BASE + 0x00000140)
+#define HWIO_IPA_RED_MARKER_ABOVE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000140)
+#define HWIO_IPA_RED_MARKER_ABOVE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000140)
+#define HWIO_IPA_RED_MARKER_ABOVE_CLR_ADDR (IPA_CFG_REG_BASE + 0x00000144)
+#define HWIO_IPA_RED_MARKER_ABOVE_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000144)
+#define HWIO_IPA_RED_MARKER_ABOVE_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000144)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_ADDR (IPA_CFG_REG_BASE + 0x00000148)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x00000148)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x00000148)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_RMSK 0x1111
+#define HWIO_IPA_FILT_ROUT_HASH_EN_ATTR 0x3
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IN in_dword_masked( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		HWIO_IPA_FILT_ROUT_HASH_EN_RMSK)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_INM(m) in_dword_masked( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		m)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_OUT(v) out_dword( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		v)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_FILT_ROUT_HASH_EN_IN)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_FILTER_HASH_EN_BMSK 0x1000
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_FILTER_HASH_EN_SHFT 0xc
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_ROUTER_HASH_EN_BMSK 0x100
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_ROUTER_HASH_EN_SHFT 0x8
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_FILTER_HASH_EN_BMSK 0x10
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_FILTER_HASH_EN_SHFT 0x4
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_ROUTER_HASH_EN_BMSK 0x1
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_ROUTER_HASH_EN_SHFT 0x0
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_ADDR (IPA_CFG_REG_BASE + 0x0000014c)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x0000014c)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x0000014c)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_RMSK 0x1111
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_ATTR 0x2
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_OUT(v) out_dword(	\
+		HWIO_IPA_FILT_ROUT_HASH_FLUSH_ADDR, \
+		v)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_FILTER_HASH_FLUSH_BMSK 0x1000
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_FILTER_HASH_FLUSH_SHFT 0xc
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_ROUTER_HASH_FLUSH_BMSK 0x100
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_ROUTER_HASH_FLUSH_SHFT 0x8
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_FILTER_HASH_FLUSH_BMSK 0x10
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_FILTER_HASH_FLUSH_SHFT 0x4
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_ROUTER_HASH_FLUSH_BMSK 0x1
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_ROUTER_HASH_FLUSH_SHFT 0x0
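+/*
+ * Illustrative sketch (not part of the generated register map): the FLUSH
+ * register is write-only (only an _OUT accessor is generated, _ATTR 0x2),
+ * so flushing e.g. the IPv4 filter hash table would be requested with:
+ *
+ *	HWIO_IPA_FILT_ROUT_HASH_FLUSH_OUT(
+ *		HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_FILTER_HASH_FLUSH_BMSK);
+ */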
+#define HWIO_IPA_FILT_ROUT_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000150)
+#define HWIO_IPA_FILT_ROUT_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000150)
+#define HWIO_IPA_FILT_ROUT_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000150)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000160)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000160)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000160)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV4_FILTER_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV4_FILTER_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_INM(m) in_dword_masked( \
+		HWIO_IPA_IPV4_FILTER_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV4_FILTER_INIT_VALUES_IP_V4_FILTER_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define	\
+	HWIO_IPA_IPV4_FILTER_INIT_VALUES_IP_V4_FILTER_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000164)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000164)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000164)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV6_FILTER_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV6_FILTER_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_INM(m) in_dword_masked( \
+		HWIO_IPA_IPV6_FILTER_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV6_FILTER_INIT_VALUES_IP_V6_FILTER_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define	\
+	HWIO_IPA_IPV6_FILTER_INIT_VALUES_IP_V6_FILTER_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000178)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000178)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000178)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x0000017c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x0000017c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x0000017c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000180)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000180)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000180)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x00000184)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000184)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000184)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000188)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000188)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000188)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x0000018c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x0000018c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x0000018c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000190)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000190)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000190)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x00000194)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000194)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000194)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_4_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000198)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_4_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000198)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_4_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000198)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_5_ADDR (IPA_CFG_REG_BASE + \
+					      0x0000019c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_5_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x0000019c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_5_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x0000019c)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					      0x000001a0)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x000001a0)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x000001a0)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV4_ROUTE_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_INM(m) in_dword_masked(	\
+		HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_NON_HASHED_ADDR_BMSK \
+	0xffff0000
+#define	\
+	HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_NON_HASHED_ADDR_SHFT \
+	0x10
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					      0x000001a4)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x000001a4)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x000001a4)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV6_ROUTE_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_INM(m) in_dword_masked(	\
+		HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_NON_HASHED_ADDR_BMSK \
+	0xffff0000
+#define	\
+	HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_NON_HASHED_ADDR_SHFT \
+	0x10
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_ADDR (IPA_CFG_REG_BASE +	\
+						     0x000001a8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x000001a8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x000001a8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_MSB_ADDR (IPA_CFG_REG_BASE \
+							 + 0x000001ac)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_MSB_PHYS ( \
+		IPA_CFG_REG_BASE_PHYS + 0x000001ac)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_MSB_OFFS ( \
+		IPA_CFG_REG_BASE_OFFS + 0x000001ac)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_ADDR (IPA_CFG_REG_BASE +	\
+						     0x000001b0)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x000001b0)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x000001b0)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_MSB_ADDR (IPA_CFG_REG_BASE \
+							 + 0x000001b4)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_MSB_PHYS ( \
+		IPA_CFG_REG_BASE_PHYS + 0x000001b4)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_MSB_OFFS ( \
+		IPA_CFG_REG_BASE_OFFS + 0x000001b4)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_2_ADDR (IPA_CFG_REG_BASE +	\
+						     0x000001b8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_2_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x000001b8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_2_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x000001b8)
+#define HWIO_IPA_HDR_INIT_LOCAL_VALUES_ADDR (IPA_CFG_REG_BASE + 0x000001c0)
+#define HWIO_IPA_HDR_INIT_LOCAL_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x000001c0)
+#define HWIO_IPA_HDR_INIT_LOCAL_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x000001c0)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					      0x000001c4)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x000001c4)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x000001c4)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x000001c8)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x000001c8)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x000001c8)
+#define HWIO_IPA_IMM_CMD_ACCESS_PIPE_VALUES_ADDR (IPA_CFG_REG_BASE + \
+						  0x000001cc)
+#define HWIO_IPA_IMM_CMD_ACCESS_PIPE_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x000001cc)
+#define HWIO_IPA_IMM_CMD_ACCESS_PIPE_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x000001cc)
+#define HWIO_IPA_FRAG_VALUES_ADDR (IPA_CFG_REG_BASE + 0x000001d8)
+#define HWIO_IPA_FRAG_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001d8)
+#define HWIO_IPA_FRAG_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001d8)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ADDR (IPA_CFG_REG_BASE + 0x000001dc)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x000001dc)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x000001dc)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_RMSK 0x7fffffff
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ATTR 0x1
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_IN in_dword_masked( \
+		HWIO_IPA_BAM_ACTIVATED_PORTS_ADDR, \
+		HWIO_IPA_BAM_ACTIVATED_PORTS_RMSK)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_INM(m) in_dword_masked( \
+		HWIO_IPA_BAM_ACTIVATED_PORTS_ADDR, \
+		m)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR (IPA_CFG_REG_BASE + \
+					       0x000001e0)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x000001e0)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x000001e0)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_RMSK 0xffffffff
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ATTR 0x3
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_IN in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_RMSK)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_INM(m) in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		m)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_OUT(v) out_dword( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		v)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_IN)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR_BMSK 0xfffffff8
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR_SHFT 0x3
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ZERO_BMSK 0x7
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ZERO_SHFT 0x0
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR (IPA_CFG_REG_BASE + \
+						   0x000001e4)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						   0x000001e4)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						   0x000001e4)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_RMSK 0xffffffff
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ATTR 0x3
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_IN in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_RMSK)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		m)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_OUT(v) out_dword( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		v)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_OUTM(m, \
+						  v) out_dword_masked_ns( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_IN)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR_SHFT 0x0
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR (IPA_CFG_REG_BASE + \
+						 0x000001e8)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						 0x000001e8)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						 0x000001e8)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_RMSK 0x3ffff
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ATTR 0x3
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_IN in_dword_masked( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_RMSK)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_INM(m) in_dword_masked( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		m)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_OUT(v) out_dword( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		v)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_IN)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR_BMSK 0x3fff8
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR_SHFT 0x3
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ZERO_BMSK 0x7
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ZERO_SHFT 0x0
+#define HWIO_IPA_AGGR_FORCE_CLOSE_ADDR (IPA_CFG_REG_BASE + 0x000001ec)
+#define HWIO_IPA_AGGR_FORCE_CLOSE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001ec)
+#define HWIO_IPA_AGGR_FORCE_CLOSE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001ec)
+#define HWIO_IPA_SCND_FRAG_VALUES_ADDR (IPA_CFG_REG_BASE + 0x000001f4)
+#define HWIO_IPA_SCND_FRAG_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001f4)
+#define HWIO_IPA_SCND_FRAG_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001f4)
+#define HWIO_IPA_TX_CFG_ADDR (IPA_CFG_REG_BASE + 0x000001fc)
+#define HWIO_IPA_TX_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001fc)
+#define HWIO_IPA_TX_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001fc)
+#define HWIO_IPA_NAT_UC_EXTERNAL_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000200)
+#define HWIO_IPA_NAT_UC_EXTERNAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000200)
+#define HWIO_IPA_NAT_UC_EXTERNAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000200)
+#define HWIO_IPA_NAT_UC_LOCAL_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000204)
+#define HWIO_IPA_NAT_UC_LOCAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000204)
+#define HWIO_IPA_NAT_UC_LOCAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000204)
+#define HWIO_IPA_NAT_UC_SHARED_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000208)
+#define HWIO_IPA_NAT_UC_SHARED_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x00000208)
+#define HWIO_IPA_NAT_UC_SHARED_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x00000208)
+#define HWIO_IPA_RAM_INTLV_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000020c)
+#define HWIO_IPA_RAM_INTLV_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000020c)
+#define HWIO_IPA_RAM_INTLV_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000020c)
+#define HWIO_IPA_FLAVOR_0_ADDR (IPA_CFG_REG_BASE + 0x00000210)
+#define HWIO_IPA_FLAVOR_0_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000210)
+#define HWIO_IPA_FLAVOR_0_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000210)
+#define HWIO_IPA_FLAVOR_1_ADDR (IPA_CFG_REG_BASE + 0x00000214)
+#define HWIO_IPA_FLAVOR_1_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000214)
+#define HWIO_IPA_FLAVOR_1_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000214)
+#define HWIO_IPA_FLAVOR_2_ADDR (IPA_CFG_REG_BASE + 0x00000218)
+#define HWIO_IPA_FLAVOR_2_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000218)
+#define HWIO_IPA_FLAVOR_2_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000218)
+#define HWIO_IPA_FLAVOR_3_ADDR (IPA_CFG_REG_BASE + 0x0000021c)
+#define HWIO_IPA_FLAVOR_3_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000021c)
+#define HWIO_IPA_FLAVOR_3_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000021c)
+#define HWIO_IPA_FLAVOR_4_ADDR (IPA_CFG_REG_BASE + 0x00000220)
+#define HWIO_IPA_FLAVOR_4_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000220)
+#define HWIO_IPA_FLAVOR_4_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000220)
+#define HWIO_IPA_FLAVOR_5_ADDR (IPA_CFG_REG_BASE + 0x00000224)
+#define HWIO_IPA_FLAVOR_5_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000224)
+#define HWIO_IPA_FLAVOR_5_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000224)
+#define HWIO_IPA_FLAVOR_6_ADDR (IPA_CFG_REG_BASE + 0x00000228)
+#define HWIO_IPA_FLAVOR_6_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000228)
+#define HWIO_IPA_FLAVOR_6_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000228)
+#define HWIO_IPA_FLAVOR_7_ADDR (IPA_CFG_REG_BASE + 0x0000022c)
+#define HWIO_IPA_FLAVOR_7_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000022c)
+#define HWIO_IPA_FLAVOR_7_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000022c)
+#define HWIO_IPA_CONN_TRACK_UC_EXTERNAL_CFG_ADDR (IPA_CFG_REG_BASE + \
+						  0x00000230)
+#define HWIO_IPA_CONN_TRACK_UC_EXTERNAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000230)
+#define HWIO_IPA_CONN_TRACK_UC_EXTERNAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000230)
+#define HWIO_IPA_CONN_TRACK_UC_LOCAL_CFG_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000234)
+#define HWIO_IPA_CONN_TRACK_UC_LOCAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000234)
+#define HWIO_IPA_CONN_TRACK_UC_LOCAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000234)
+#define HWIO_IPA_CONN_TRACK_UC_SHARED_CFG_ADDR (IPA_CFG_REG_BASE + \
+						0x00000238)
+#define HWIO_IPA_CONN_TRACK_UC_SHARED_CFG_PHYS (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000238)
+#define HWIO_IPA_CONN_TRACK_UC_SHARED_CFG_OFFS (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000238)
+#define HWIO_IPA_IDLE_INDICATION_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000240)
+#define HWIO_IPA_IDLE_INDICATION_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000240)
+#define HWIO_IPA_IDLE_INDICATION_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000240)
+#define HWIO_IPA_QTIME_TIMESTAMP_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000024c)
+#define HWIO_IPA_QTIME_TIMESTAMP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x0000024c)
+#define HWIO_IPA_QTIME_TIMESTAMP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x0000024c)
+#define HWIO_IPA_TIMERS_XO_CLK_DIV_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000250)
+#define HWIO_IPA_TIMERS_XO_CLK_DIV_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000250)
+#define HWIO_IPA_TIMERS_XO_CLK_DIV_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000250)
+#define HWIO_IPA_TIMERS_PULSE_GRAN_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000254)
+#define HWIO_IPA_TIMERS_PULSE_GRAN_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000254)
+#define HWIO_IPA_TIMERS_PULSE_GRAN_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000254)
+#define HWIO_IPA_QTIME_SMP_ADDR (IPA_CFG_REG_BASE + 0x00000260)
+#define HWIO_IPA_QTIME_SMP_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000260)
+#define HWIO_IPA_QTIME_SMP_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000260)
+#define HWIO_IPA_QTIME_LSB_ADDR (IPA_CFG_REG_BASE + 0x00000264)
+#define HWIO_IPA_QTIME_LSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000264)
+#define HWIO_IPA_QTIME_LSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000264)
+#define HWIO_IPA_QTIME_MSB_ADDR (IPA_CFG_REG_BASE + 0x00000268)
+#define HWIO_IPA_QTIME_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000268)
+#define HWIO_IPA_QTIME_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000268)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_EN_ADDR (IPA_CFG_REG_BASE + \
+						 0x00000334)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000334)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000334)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_0_ADDR (IPA_CFG_REG_BASE + \
+						       0x00000338)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_0_PHYS (	\
+		IPA_CFG_REG_BASE_PHYS + 0x00000338)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_0_OFFS (	\
+		IPA_CFG_REG_BASE_OFFS + 0x00000338)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_1_ADDR (IPA_CFG_REG_BASE + \
+						       0x0000033c)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_1_PHYS (	\
+		IPA_CFG_REG_BASE_PHYS + 0x0000033c)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_1_OFFS (	\
+		IPA_CFG_REG_BASE_OFFS + 0x0000033c)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_EN_ADDR (IPA_CFG_REG_BASE + \
+						 0x00000340)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000340)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000340)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_VALUES_0_ADDR (IPA_CFG_REG_BASE + \
+						       0x00000344)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_VALUES_0_PHYS (	\
+		IPA_CFG_REG_BASE_PHYS + 0x00000344)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_VALUES_0_OFFS (	\
+		IPA_CFG_REG_BASE_OFFS + 0x00000344)
+#define HWIO_IPA_HPS_DPS_CMDQ_RED_IRQ_MASK_ENABLE_ADDR (IPA_CFG_REG_BASE + \
+							0x00000348)
+#define HWIO_IPA_HPS_DPS_CMDQ_RED_IRQ_MASK_ENABLE_PHYS ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000348)
+#define HWIO_IPA_HPS_DPS_CMDQ_RED_IRQ_MASK_ENABLE_OFFS ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000348)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000400 + 0x20 * \
+						      (n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000400 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000400 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MIN_LIMIT_SHFT \
+	0x0
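+/*
+ * Illustrative sketch (not part of the generated register map): the _n
+ * macros describe a register array indexed by resource type
+ * (n = 0.._MAXn), accessed via _INI/_INMI/_OUTI/_OUTMI. A
+ * read-modify-write of the group-0 min limit for type n could look like:
+ *
+ *	HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OUTMI(n,
+ *		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MIN_LIMIT_BMSK,
+ *		min << HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MIN_LIMIT_SHFT);
+ */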
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000404 + 0x20 * \
+						      (n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000404 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000404 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000408 + 0x20 * \
+						      (n))
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000408 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000408 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000410 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000410 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000410 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ATTR 0x1
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_3_CNT_BMSK \
+	0x3f000000
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_3_CNT_SHFT \
+	0x18
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_2_CNT_BMSK \
+	0x3f0000
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_2_CNT_SHFT \
+	0x10
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_1_CNT_BMSK \
+	0x3f00
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_1_CNT_SHFT \
+	0x8
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_0_CNT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_0_CNT_SHFT \
+	0x0
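
(A quick orientation note, since the generated macros above all follow the same Qualcomm HWIO idiom: `_ADDR`/`_PHYS`/`_OFFS` give the register location, `_RMSK` the readable bits, `_MAXn` the highest valid index, and the `_BMSK`/`_SHFT` pairs isolate individual fields. A minimal sketch of reading one field of the read-only counter register above — the helper name is hypothetical, and `u32`/`in_dword_masked()` are assumed to be supplied by the kernel headers and the HWIO framework this file is generated against:

	/* Sketch only: extract the group-0 count from the RO counter
	 * register (ATTR 0x1 marks it read-only). Mask first, then
	 * shift the field down to bit 0. */
	static inline u32 ipa_src_grp0_rsrc_cnt(u32 n)
	{
		u32 reg = HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INI(n);

		return (reg &
			HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_0_CNT_BMSK) >>
			HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_0_CNT_SHFT;
	}

)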
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000414 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000414 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000414 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_TYPE_AMOUNT_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						 0x00000418 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_TYPE_AMOUNT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000418 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_TYPE_AMOUNT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000418 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000500 + 0x20 * \
+						      (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000500 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000500 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000504 + 0x20 * \
+						      (n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000504 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000504 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000508 + 0x20 * \
+						      (n))
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000508 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000508 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000510 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000510 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000510 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ATTR 0x1
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_3_CNT_BMSK \
+	0x3f000000
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_3_CNT_SHFT \
+	0x18
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_2_CNT_BMSK \
+	0x3f0000
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_2_CNT_SHFT \
+	0x10
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_1_CNT_BMSK \
+	0x3f00
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_1_CNT_SHFT \
+	0x8
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_0_CNT_BMSK \
+	0x3f
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_0_CNT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000514 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000514 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000514 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_TYPE_AMOUNT_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						 0x00000518 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_TYPE_AMOUNT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000518 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_TYPE_AMOUNT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000518 + 0x20 * (n))
+#define HWIO_IPA_RSRC_GRP_CFG_ADDR (IPA_CFG_REG_BASE + 0x000005a0)
+#define HWIO_IPA_RSRC_GRP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000005a0)
+#define HWIO_IPA_RSRC_GRP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000005a0)
+#define HWIO_IPA_RSRC_GRP_CFG_RMSK 0x3f11f171
+#define HWIO_IPA_RSRC_GRP_CFG_ATTR 0x3
+#define HWIO_IPA_RSRC_GRP_CFG_IN in_dword_masked( \
+		HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+		HWIO_IPA_RSRC_GRP_CFG_RMSK)
+#define HWIO_IPA_RSRC_GRP_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+		m)
+#define HWIO_IPA_RSRC_GRP_CFG_OUT(v) out_dword(HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+					       v)
+#define HWIO_IPA_RSRC_GRP_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RSRC_GRP_CFG_IN)
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_INDEX_BMSK 0x3f000000
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_INDEX_SHFT 0x18
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_VALID_BMSK 0x100000
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_VALID_SHFT 0x14
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_INDEX_BMSK 0x1f000
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_INDEX_SHFT 0xc
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_VALID_BMSK 0x100
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_VALID_SHFT 0x8
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_INDEX_BMSK 0x70
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_INDEX_SHFT 0x4
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_VALID_BMSK 0x1
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_VALID_SHFT 0x0
+#define HWIO_IPA_PIPELINE_DISABLE_ADDR (IPA_CFG_REG_BASE + 0x000005a8)
+#define HWIO_IPA_PIPELINE_DISABLE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000005a8)
+#define HWIO_IPA_PIPELINE_DISABLE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000005a8)
+#define HWIO_IPA_PIPELINE_DISABLE_RMSK 0x8
+#define HWIO_IPA_PIPELINE_DISABLE_ATTR 0x3
+#define HWIO_IPA_PIPELINE_DISABLE_IN in_dword_masked( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		HWIO_IPA_PIPELINE_DISABLE_RMSK)
+#define HWIO_IPA_PIPELINE_DISABLE_INM(m) in_dword_masked( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		m)
+#define HWIO_IPA_PIPELINE_DISABLE_OUT(v) out_dword( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		v)
+#define HWIO_IPA_PIPELINE_DISABLE_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_PIPELINE_DISABLE_IN)
+#define HWIO_IPA_PIPELINE_DISABLE_RX_CMDQ_SPLITTER_DIS_BMSK 0x8
+#define HWIO_IPA_PIPELINE_DISABLE_RX_CMDQ_SPLITTER_DIS_SHFT 0x3
+#define HWIO_IPA_AXI_CFG_ADDR (IPA_CFG_REG_BASE + 0x000005ac)
+#define HWIO_IPA_AXI_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000005ac)
+#define HWIO_IPA_AXI_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000005ac)
+#define HWIO_IPA_STAT_QUOTA_BASE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					    0x00000700 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_BASE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000700 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_BASE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000700 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					    0x00000708 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_MASK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000708 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_MASK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000708 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_BASE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00000710 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_BASE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000710 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_BASE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000710 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00000718 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_MASK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000718 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_MASK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000718 + 0x4 * (n))
+#define HWIO_IPA_STAT_FILTER_IPV4_BASE_ADDR (IPA_CFG_REG_BASE + 0x00000720)
+#define HWIO_IPA_STAT_FILTER_IPV4_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000720)
+#define HWIO_IPA_STAT_FILTER_IPV4_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000720)
+#define HWIO_IPA_STAT_FILTER_IPV6_BASE_ADDR (IPA_CFG_REG_BASE + 0x00000724)
+#define HWIO_IPA_STAT_FILTER_IPV6_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000724)
+#define HWIO_IPA_STAT_FILTER_IPV6_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000724)
+#define HWIO_IPA_STAT_ROUTER_IPV4_BASE_ADDR (IPA_CFG_REG_BASE + 0x00000728)
+#define HWIO_IPA_STAT_ROUTER_IPV4_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000728)
+#define HWIO_IPA_STAT_ROUTER_IPV4_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000728)
+#define HWIO_IPA_STAT_ROUTER_IPV6_BASE_ADDR (IPA_CFG_REG_BASE + 0x0000072c)
+#define HWIO_IPA_STAT_ROUTER_IPV6_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x0000072c)
+#define HWIO_IPA_STAT_ROUTER_IPV6_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x0000072c)
+#define HWIO_IPA_STAT_DROP_CNT_BASE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000750 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_BASE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000750 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_BASE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000750 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000758 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_MASK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000758 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_MASK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000758 + 0x4 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000800 + \
+					   0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000800 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000800 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_RMSK 0x3
+#define HWIO_IPA_ENDP_INIT_CTRL_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_CTRL_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_CTRL_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_CTRL_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_CTRL_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_CTRL_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_CTRL_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
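
(The `_OUTMI` accessors wrap `out_dword_masked_ns()`, i.e. a masked read-modify-write: bits outside `mask` keep their current value. A sketch of toggling the suspend bit on an endpoint — the helper name is hypothetical; `bool` and the underlying `out_dword_masked_ns()` are assumed to come from the kernel/HWIO plumbing:

	/* Sketch only: assert or deassert ENDP_SUSPEND on endpoint `ep`
	 * without disturbing the neighbouring ENDP_DELAY bit. */
	static inline void ipa_ep_suspend(u32 ep, bool suspend)
	{
		HWIO_IPA_ENDP_INIT_CTRL_n_OUTMI(ep,
			HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK,
			suspend ? HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK : 0);
	}

)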
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00000804 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000804 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000804 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_RMSK 0x2
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n),	\
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n),	\
+		val)
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_OUTMI(n, mask, \
+					     val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_BMSK 0x2
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000808 + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x00000808 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x00000808 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_RMSK 0x17f
+#define HWIO_IPA_ENDP_INIT_CFG_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_CFG_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_CFG_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_CFG_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_CFG_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_CFG_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_CFG_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_CFG_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_GEN_QMB_MASTER_SEL_BMSK 0x100
+#define HWIO_IPA_ENDP_INIT_CFG_n_GEN_QMB_MASTER_SEL_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define HWIO_IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n) (IPA_CFG_REG_BASE + 0x0000080c + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x0000080c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x0000080c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_RMSK 0x3
+#define HWIO_IPA_ENDP_INIT_NAT_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_NAT_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_NAT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_NAT_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_NAT_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_NAT_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_NAT_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_NAT_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define HWIO_IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000810 + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x00000810 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x00000810 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_RMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HDR_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HDR_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_BMSK 0xc0000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_SHFT 0x1e
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_BMSK 0x30000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_SHFT 0x1c
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK 0x8000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT 0x1b
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					      0x00000814 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000814 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000814 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_RMSK 0x3f3fff
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_BMSK \
+	0x300000
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_SHFT \
+	0x14
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_BMSK 0xc0000
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_SHFT 0x12
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_BMSK \
+	0x30000
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_SHFT \
+	0x10
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK 0x3c00
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK \
+	0x3f0
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+							0x00000818 + \
+							0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000818 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000818 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n),	\
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_INMI(n, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n),	\
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_OUTMI(n, mask, \
+						     val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK \
+	0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						   0x0000081c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						   0x0000081c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						   0x0000081c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_RMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000820 + \
+					   0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000820 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000820 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_RMSK 0x3ffff1ff
+#define HWIO_IPA_ENDP_INIT_MODE_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_MODE_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_MODE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_MODE_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_MODE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_MODE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_MODE_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_MODE_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000
+#define HWIO_IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d
+#define HWIO_IPA_ENDP_INIT_MODE_n_PIPE_REPLICATE_EN_BMSK 0x10000000
+#define HWIO_IPA_ENDP_INIT_MODE_n_PIPE_REPLICATE_EN_SHFT 0x1c
+#define HWIO_IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000
+#define HWIO_IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc
+#define HWIO_IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0
+#define HWIO_IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4
+#define HWIO_IPA_ENDP_INIT_MODE_n_DCPH_ENABLE_BMSK 0x8
+#define HWIO_IPA_ENDP_INIT_MODE_n_DCPH_ENABLE_SHFT 0x3
+#define HWIO_IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7
+#define HWIO_IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000824 + \
+					   0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000824 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000824 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_RMSK 0xdfff7ff
+#define HWIO_IPA_ENDP_INIT_AGGR_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_AGGR_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_AGGR_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_AGGR_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_AGGR_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_AGGR_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_AGGR_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_AGGR_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_BMSK 0x8000000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_SHFT 0x1b
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK \
+	0x4000000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x1a
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x1000000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x18
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x800000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x17
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x7e0000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0x11
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x1f000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xc
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x7e0
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
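
(Writes go the opposite way to reads: shift the field value up into position, then mask to keep it in range. A sketch for the aggregation byte limit above — the helper is hypothetical, and `limit` is in the hardware's native units, which this header does not define:

	/* Sketch only: program AGGR_BYTE_LIMIT for endpoint `ep` via a
	 * masked read-modify-write, leaving the other AGGR fields
	 * untouched. */
	static inline void ipa_ep_set_aggr_byte_limit(u32 ep, u32 limit)
	{
		HWIO_IPA_ENDP_INIT_AGGR_n_OUTMI(ep,
			HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK,
			(limit << HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT) &
			HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK);
	}

)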
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						   0x0000082c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						   0x0000082c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						   0x0000082c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000830 + 0x70 * \
+						      (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000830 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000830 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_RMSK 0x11f
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_BMSK 0x100
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_BMSK 0x1f
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n) (IPA_CFG_REG_BASE +	\
+					     0x00000834 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000834 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000834 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_RMSK 0xffff7fff
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xffff0000
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_IGNORE_MIN_PKT_ERR_BMSK 0x4000
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_IGNORE_MIN_PKT_ERR_SHFT 0xe
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3f00
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_SYSPIPE_ERR_DETECTION_BMSK 0x40
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_SYSPIPE_ERR_DETECTION_SHFT 0x6
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3f
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000838 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000838 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000838 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RMSK 0x7
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_OUTMI(n, mask, \
+					    val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n) (IPA_CFG_REG_BASE + 0x0000083c + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x0000083c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x0000083c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_RMSK 0xffff
+#define HWIO_IPA_ENDP_INIT_SEQ_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_SEQ_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_SEQ_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_SEQ_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_SEQ_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_SEQ_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_SEQ_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_SEQ_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
+#define HWIO_IPA_ENDP_STATUS_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000840 +	\
+					0x70 * (n))
+#define HWIO_IPA_ENDP_STATUS_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+					0x00000840 + 0x70 * (n))
+#define HWIO_IPA_ENDP_STATUS_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+					0x00000840 + 0x70 * (n))
+#define HWIO_IPA_ENDP_STATUS_n_RMSK 0x23f
+#define HWIO_IPA_ENDP_STATUS_n_MAXn 30
+#define HWIO_IPA_ENDP_STATUS_n_ATTR 0x3
+#define HWIO_IPA_ENDP_STATUS_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		HWIO_IPA_ENDP_STATUS_n_RMSK)
+#define HWIO_IPA_ENDP_STATUS_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_ENDP_STATUS_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		val)
+#define HWIO_IPA_ENDP_STATUS_n_OUTMI(n, mask, val) out_dword_masked_ns(	\
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_STATUS_n_INI(n))
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
+#define HWIO_IPA_ENDP_SRC_ID_WRITE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					      0x00000848 + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_WRITE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000848 + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_WRITE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000848 + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_READ_n_ADDR(n) (IPA_CFG_REG_BASE +	\
+					     0x0000084c + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_READ_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					     0x0000084c + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_READ_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					     0x0000084c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CONN_TRACK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						 0x00000850 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CONN_TRACK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000850 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CONN_TRACK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000850 + 0x70 * (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						       0x0000085c + 0x70 * \
+						       (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_PHYS(n) (	\
+		IPA_CFG_REG_BASE_PHYS + 0x0000085c + 0x70 * (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFFS(n) (	\
+		IPA_CFG_REG_BASE_OFFS + 0x0000085c + 0x70 * (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_RMSK 0x7f007f
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_MAXn 31
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ATTR 0x3
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(n), \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_RMSK)
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_INMI(n, \
+						   mask) in_dword_masked( \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OUTMI(n, mask, \
+						    val) \
+	out_dword_masked_ns(HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(	\
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_INI(n))
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK \
+	0x400000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT \
+	0x16
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK \
+	0x200000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT \
+	0x15
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK \
+	0x100000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT \
+	0x14
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK \
+	0x80000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT \
+	0x13
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_ADD_BMSK \
+	0x40000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_ADD_SHFT \
+	0x12
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_ADD_BMSK \
+	0x20000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_ADD_SHFT \
+	0x11
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK \
+	0x10000
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT \
+	0x10
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK \
+	0x40
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT \
+	0x6
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK \
+	0x20
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT \
+	0x5
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK \
+	0x10
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT \
+	0x4
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK \
+	0x8
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT \
+	0x3
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_ADD_BMSK \
+	0x4
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_ADD_SHFT \
+	0x2
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_ADD_BMSK \
+	0x2
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_ADD_SHFT \
+	0x1
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK \
+	0x1
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT \
+	0x0
+#define HWIO_IPA_ENDP_YELLOW_RED_MARKER_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						       0x00000860 + 0x70 * \
+						       (n))
+#define HWIO_IPA_ENDP_YELLOW_RED_MARKER_CFG_n_PHYS(n) (	\
+		IPA_CFG_REG_BASE_PHYS + 0x00000860 + 0x70 * (n))
+#define HWIO_IPA_ENDP_YELLOW_RED_MARKER_CFG_n_OFFS(n) (	\
+		IPA_CFG_REG_BASE_OFFS + 0x00000860 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_STATUS_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						  0x00000864 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_STATUS_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000864 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_STATUS_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000864 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_PROD_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000868 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_PROD_CFG_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000868 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_PROD_CFG_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000868 + 0x70 * (n))
+#define HWIO_IPA_NLO_PP_CFG1_ADDR (IPA_CFG_REG_BASE + 0x00001680)
+#define HWIO_IPA_NLO_PP_CFG1_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001680)
+#define HWIO_IPA_NLO_PP_CFG1_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001680)
+#define HWIO_IPA_NLO_PP_CFG1_RMSK 0x3fffffff
+#define HWIO_IPA_NLO_PP_CFG1_ATTR 0x3
+#define HWIO_IPA_NLO_PP_CFG1_IN in_dword_masked(HWIO_IPA_NLO_PP_CFG1_ADDR, \
+						HWIO_IPA_NLO_PP_CFG1_RMSK)
+#define HWIO_IPA_NLO_PP_CFG1_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_CFG1_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_CFG1_OUT(v) out_dword(HWIO_IPA_NLO_PP_CFG1_ADDR, v)
+#define HWIO_IPA_NLO_PP_CFG1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_CFG1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_CFG1_IN)
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_MAX_VP_BMSK 0x3f000000
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_MAX_VP_SHFT 0x18
+#define HWIO_IPA_NLO_PP_CFG1_NLO_STATUS_PP_BMSK 0xff0000
+#define HWIO_IPA_NLO_PP_CFG1_NLO_STATUS_PP_SHFT 0x10
+#define HWIO_IPA_NLO_PP_CFG1_NLO_DATA_PP_BMSK 0xff00
+#define HWIO_IPA_NLO_PP_CFG1_NLO_DATA_PP_SHFT 0x8
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_PP_BMSK 0xff
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_PP_SHFT 0x0
+#define HWIO_IPA_NLO_PP_CFG2_ADDR (IPA_CFG_REG_BASE + 0x00001684)
+#define HWIO_IPA_NLO_PP_CFG2_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001684)
+#define HWIO_IPA_NLO_PP_CFG2_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001684)
+#define HWIO_IPA_NLO_PP_CFG2_RMSK 0x7ffff
+#define HWIO_IPA_NLO_PP_CFG2_ATTR 0x3
+#define HWIO_IPA_NLO_PP_CFG2_IN in_dword_masked(HWIO_IPA_NLO_PP_CFG2_ADDR, \
+						HWIO_IPA_NLO_PP_CFG2_RMSK)
+#define HWIO_IPA_NLO_PP_CFG2_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_CFG2_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_CFG2_OUT(v) out_dword(HWIO_IPA_NLO_PP_CFG2_ADDR, v)
+#define HWIO_IPA_NLO_PP_CFG2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_CFG2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_CFG2_IN)
+#define HWIO_IPA_NLO_PP_CFG2_NLO_STATUS_BUFFER_MODE_BMSK 0x40000
+#define HWIO_IPA_NLO_PP_CFG2_NLO_STATUS_BUFFER_MODE_SHFT 0x12
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_BUFFER_MODE_BMSK 0x20000
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_BUFFER_MODE_SHFT 0x11
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_BUFFER_MODE_BMSK 0x10000
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_BUFFER_MODE_SHFT 0x10
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_CLOSE_PADD_BMSK 0xff00
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_CLOSE_PADD_SHFT 0x8
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_CLOSE_PADD_BMSK 0xff
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_CLOSE_PADD_SHFT 0x0
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR (IPA_CFG_REG_BASE + 0x00001688)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00001688)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00001688)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_RMSK 0xffffffff
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ATTR 0x3
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_IN in_dword_masked( \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_RMSK)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_OUT(v) out_dword(	\
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		v)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_IN)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_UPPER_SIZE_BMSK 0xffff0000
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_UPPER_SIZE_SHFT 0x10
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_LOWER_SIZE_BMSK 0xffff
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_LOWER_SIZE_SHFT 0x0
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000168c)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x0000168c)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x0000168c)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_RMSK 0xffffffff
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ATTR 0x3
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_IN in_dword_masked( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_RMSK)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_OUT(v) out_dword( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		v)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_IN)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_UPPER_SIZE_BMSK 0xffff0000
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_UPPER_SIZE_SHFT 0x10
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_LOWER_SIZE_BMSK 0xffff
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_LOWER_SIZE_SHFT 0x0
+#define HWIO_IPA_NLO_MIN_DSM_CFG_ADDR (IPA_CFG_REG_BASE + 0x00001690)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001690)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001690)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_RMSK 0xffffffff
+#define HWIO_IPA_NLO_MIN_DSM_CFG_ATTR 0x3
+#define HWIO_IPA_NLO_MIN_DSM_CFG_IN in_dword_masked( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		HWIO_IPA_NLO_MIN_DSM_CFG_RMSK)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		m)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_OUT(v) out_dword( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		v)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_MIN_DSM_CFG_IN)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_DATA_MIN_DSM_LEN_BMSK 0xffff0000
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_DATA_MIN_DSM_LEN_SHFT 0x10
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_ACK_MIN_DSM_LEN_BMSK 0xffff
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_ACK_MIN_DSM_LEN_SHFT 0x0
+#define HWIO_IPA_NLO_VP_AGGR_CFG_LSB_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00001700 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_LSB_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00001700 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_LSB_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00001700 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_MSB_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00001704 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_MSB_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00001704 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_MSB_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00001704 + 0x8 * (n))
+#define HWIO_IPA_SNIFFER_QMB_SEL_ADDR (IPA_CFG_REG_BASE + 0x00001800)
+#define HWIO_IPA_SNIFFER_QMB_SEL_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001800)
+#define HWIO_IPA_SNIFFER_QMB_SEL_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001800)
+#define HWIO_IPA_COAL_EVICT_LRU_ADDR (IPA_CFG_REG_BASE + 0x0000180c)
+#define HWIO_IPA_COAL_EVICT_LRU_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000180c)
+#define HWIO_IPA_COAL_EVICT_LRU_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000180c)
+#define HWIO_IPA_COAL_QMAP_CFG_ADDR (IPA_CFG_REG_BASE + 0x00001810)
+#define HWIO_IPA_COAL_QMAP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001810)
+#define HWIO_IPA_COAL_QMAP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001810)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR (IPA_CFG_REG_BASE + 0x00001814)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001814)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001814)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_RMSK 0x80ff00ff
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_ATTR 0x3
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		HWIO_IPA_NLO_VP_FLUSH_REQ_RMSK)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		m)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_OUT(v) out_dword( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		v)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_IN)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_REQ_BMSK 0x80000000
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_REQ_SHFT 0x1f
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_VP_INDX_BMSK 0xff0000
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_VP_INDX_SHFT 0x10
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_PP_INDX_BMSK 0xff
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_PP_INDX_SHFT 0x0
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_ADDR (IPA_CFG_REG_BASE + 0x00001818)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00001818)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00001818)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_RMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_ATTR 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_COOKIE_ADDR, \
+		HWIO_IPA_NLO_VP_FLUSH_COOKIE_RMSK)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_COOKIE_ADDR, \
+		m)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_VP_FLUSH_COOKIE_BMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_VP_FLUSH_COOKIE_SHFT 0x0
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_ADDR (IPA_CFG_REG_BASE + 0x0000181c)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000181c)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000181c)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_RMSK 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_ATTR 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_ACK_ADDR,	\
+		HWIO_IPA_NLO_VP_FLUSH_ACK_RMSK)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_ACK_ADDR,	\
+		m)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_VP_FLUSH_ACK_BMSK 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_VP_FLUSH_ACK_SHFT 0x0
+#define HWIO_IPA_NLO_VP_DSM_OPEN_ADDR (IPA_CFG_REG_BASE + 0x00001820)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001820)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001820)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_RMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_DSM_OPEN_ATTR 0x1
+#define HWIO_IPA_NLO_VP_DSM_OPEN_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_DSM_OPEN_ADDR, \
+		HWIO_IPA_NLO_VP_DSM_OPEN_RMSK)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_DSM_OPEN_ADDR, \
+		m)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_VP_DSM_OPEN_BMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_DSM_OPEN_VP_DSM_OPEN_SHFT 0x0
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_ADDR (IPA_CFG_REG_BASE + 0x00001824)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001824)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001824)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_RMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_ATTR 0x1
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_QBAP_OPEN_ADDR,	\
+		HWIO_IPA_NLO_VP_QBAP_OPEN_RMSK)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_QBAP_OPEN_ADDR,	\
+		m)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_VP_QBAP_OPEN_BMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_VP_QBAP_OPEN_SHFT 0x0
+#define IPA_DEBUG_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00042000)
+#define IPA_DEBUG_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00042000)
+#define IPA_DEBUG_REG_BASE_OFFS 0x00042000
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000000)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000000)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000000)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000004)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_BLOCK_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000004)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_BLOCK_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000004)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000008)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000008)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000008)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x0000000c)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000000c)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000000c)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000010)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000010)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000010)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000014)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000014)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000014)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000018)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_BLOCK_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000018)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_BLOCK_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000018)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000001c)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000001c)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000001c)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000020)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000020)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000020)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000024)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000024)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000024)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000028)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000028)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000028)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000002c)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000002c)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000002c)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_MASK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000030)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000030)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000030)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000034)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000034)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000034)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000038)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CMD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000038)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CMD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000038)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000003c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000003c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000040)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000040)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000040)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_MASK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000044)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000044)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000044)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000048)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000048)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000048)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						     0x0000004c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CMD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000004c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CMD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000004c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000050)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000050)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000050)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000054)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000054)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000054)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_MASK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000058)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000058)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000058)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000005c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000005c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000005c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000060)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CMD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000060)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CMD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000060)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000064)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000064)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000064)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000068)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000068)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000068)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000006c)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000006c)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000006c)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000070)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000070)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000070)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000074)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000074)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000074)
+#define HWIO_IPA_TX_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000078)
+#define HWIO_IPA_TX_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000078)
+#define HWIO_IPA_TX_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000078)
+#define HWIO_IPA_TX_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x0000007c)
+#define HWIO_IPA_TX_ARB_DEBUG_BLOCK_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x0000007c)
+#define HWIO_IPA_TX_ARB_DEBUG_BLOCK_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x0000007c)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000080)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000080)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000080)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000084)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_BLOCK_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000084)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_BLOCK_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000084)
+#define HWIO_IPA_TX_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000088)
+#define HWIO_IPA_TX_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x00000088)
+#define HWIO_IPA_TX_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x00000088)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000008c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000008c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000008c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000090)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000090)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000094)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000094)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000094)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000098)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_BLOCK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000098)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_BLOCK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000098)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000009c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000009c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000009c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000100)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000100)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000100)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000104)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000104)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000104)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000108)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000108)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000108)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000010c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_BLOCK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000010c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_BLOCK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000010c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000110)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000110)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000110)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_ALLOC_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						     0x00000114)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_ALLOC_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000114)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_ALLOC_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000114)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_SRCH_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000118)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_SRCH_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_SRCH_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_REL_CFG_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000011c)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_REL_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000011c)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_REL_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000011c)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000120)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000120)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000120)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000124)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000124)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000124)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000128)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x00000128)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x00000128)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RMSK 0x3f77
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_ATTR 0x3
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_IN in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		HWIO_IPA_RSRC_MNGR_DB_CFG_RMSK)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		m)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_OUT(v) out_dword( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		v)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_IN)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_ID_SEL_BMSK 0x3f00
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_ID_SEL_SHFT 0x8
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_TYPE_SEL_BMSK 0x70
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_TYPE_SEL_SHFT 0x4
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_GRP_SEL_BMSK 0x7
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_GRP_SEL_SHFT 0x0
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x0000012c)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x0000012c)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x0000012c)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RMSK 0x3f3
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ATTR 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_IN in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ADDR, \
+		HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RMSK)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_INM(m) in_dword_masked(	\
+		HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ADDR, \
+		m)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_INDEX_BMSK 0x3f0
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_INDEX_SHFT 0x4
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_VALID_BMSK 0x2
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_VALID_SHFT 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_OCCUPIED_BMSK 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_OCCUPIED_SHFT 0x0
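+/*
+ * Usage sketch (the sequence is inferred from the field definitions
+ * above and the variable names are illustrative only): select a
+ * resource through IPA_RSRC_MNGR_DB_CFG, then read its state back
+ * from IPA_RSRC_MNGR_DB_RSRC_READ:
+ *
+ *   HWIO_IPA_RSRC_MNGR_DB_CFG_OUTM(
+ *           HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_ID_SEL_BMSK,
+ *           rsrc_id << HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_ID_SEL_SHFT);
+ *   occupied = HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_INM(
+ *           HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_OCCUPIED_BMSK);
+ */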
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000130)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000130)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000130)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RMSK 0x7f7f3f3
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ATTR 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_IN in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ADDR, \
+		HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RMSK)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_INM(m) in_dword_masked(	\
+		HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ADDR, \
+		m)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_ENTRY_CNT_BMSK 0x7f00000
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_ENTRY_CNT_SHFT 0x14
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_CNT_BMSK 0x7f000
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_CNT_SHFT 0xc
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_RSRC_BMSK 0x3f0
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_RSRC_SHFT 0x4
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HOLD_BMSK 0x2
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HOLD_SHFT 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_VALID_BMSK 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_VALID_SHFT 0x0
+#define HWIO_IPA_RSRC_MNGR_CONTEXTS_ADDR (IPA_DEBUG_REG_BASE + 0x00000134)
+#define HWIO_IPA_RSRC_MNGR_CONTEXTS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000134)
+#define HWIO_IPA_RSRC_MNGR_CONTEXTS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000134)
+#define HWIO_IPA_BRESP_DB_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000138)
+#define HWIO_IPA_BRESP_DB_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000138)
+#define HWIO_IPA_BRESP_DB_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000138)
+#define HWIO_IPA_BRESP_DB_DATA_ADDR (IPA_DEBUG_REG_BASE + 0x0000013c)
+#define HWIO_IPA_BRESP_DB_DATA_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000013c)
+#define HWIO_IPA_BRESP_DB_DATA_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000013c)
+#define HWIO_IPA_DEBUG_DATA_ADDR (IPA_DEBUG_REG_BASE + 0x00000204)
+#define HWIO_IPA_DEBUG_DATA_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000204)
+#define HWIO_IPA_DEBUG_DATA_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000204)
+#define HWIO_IPA_DEBUG_DATA_RMSK 0xffffffff
+#define HWIO_IPA_DEBUG_DATA_ATTR 0x1
+#define HWIO_IPA_DEBUG_DATA_IN in_dword_masked(HWIO_IPA_DEBUG_DATA_ADDR, \
+					       HWIO_IPA_DEBUG_DATA_RMSK)
+#define HWIO_IPA_DEBUG_DATA_INM(m) in_dword_masked( \
+		HWIO_IPA_DEBUG_DATA_ADDR, \
+		m)
+#define HWIO_IPA_DEBUG_DATA_DEBUG_DATA_BMSK 0xffffffff
+#define HWIO_IPA_DEBUG_DATA_DEBUG_DATA_SHFT 0x0
+#define HWIO_IPA_TESTBUS_SEL_ADDR (IPA_DEBUG_REG_BASE + 0x00000208)
+#define HWIO_IPA_TESTBUS_SEL_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000208)
+#define HWIO_IPA_TESTBUS_SEL_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000208)
+#define HWIO_IPA_TESTBUS_SEL_RMSK 0x1fffff1
+#define HWIO_IPA_TESTBUS_SEL_ATTR 0x3
+#define HWIO_IPA_TESTBUS_SEL_IN in_dword_masked(HWIO_IPA_TESTBUS_SEL_ADDR, \
+						HWIO_IPA_TESTBUS_SEL_RMSK)
+#define HWIO_IPA_TESTBUS_SEL_INM(m) in_dword_masked( \
+		HWIO_IPA_TESTBUS_SEL_ADDR, \
+		m)
+#define HWIO_IPA_TESTBUS_SEL_OUT(v) out_dword(HWIO_IPA_TESTBUS_SEL_ADDR, v)
+#define HWIO_IPA_TESTBUS_SEL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_TESTBUS_SEL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_TESTBUS_SEL_IN)
+#define HWIO_IPA_TESTBUS_SEL_PIPE_SELECT_BMSK 0x1f00000
+#define HWIO_IPA_TESTBUS_SEL_PIPE_SELECT_SHFT 0x14
+#define HWIO_IPA_TESTBUS_SEL_INTERNAL_BLOCK_SELECT_BMSK 0xff000
+#define HWIO_IPA_TESTBUS_SEL_INTERNAL_BLOCK_SELECT_SHFT 0xc
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_BMSK 0xff0
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_SHFT 0x4
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_FVAL 0x0
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_TX0_FVAL 0x1
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_FRAG_FVAL 0x2
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_UCP_FVAL 0x3
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_ENQUEUER_FVAL 0x4
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_ROUTER_FVAL 0x5
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_PKT_PARSER_FVAL 0x6
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_FILTER_NAT_FVAL 0x7
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_HDRI_RSRCREL_FVAL \
+	0x8
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_AHB2AHB_FVAL 0x9
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_MAXI2AXI_FVAL 0xa
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_DCMP_FVAL 0xb
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_DISPATCHER_FVAL 0xc
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_D_DCPH_FVAL 0xd
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_GSI_TEST_BUS_FVAL 0xe
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DEADBEAF_FVAL 0xf
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_MISC_FVAL 0x10
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_STTS_SNIFFER_FVAL \
+	0x11
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_QMB_0_FVAL 0x12
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_QMB_1_FVAL 0x13
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_UC_ACKQ_FVAL 0x14
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_ACKQ_FVAL 0x15
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_TX1_FVAL 0x16
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_H_DCPH_FVAL 0x17
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_RX_HPS_CMDQ_FVAL 0x18
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_DPS_CMDQ_FVAL 0x19
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_TX_CMDQ_FVAL 0x1a
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_CMDQ_L_FVAL 0x1b
+#define	\
+	HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_LEGACY_CMDQ_INT_FVAL \
+	0x1c
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_CTX_HANDLER_FVAL	\
+	0x1d
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_GSI_FVAL 0x1e
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ACK_MNGR_CMDQ_FVAL 0x1f
+#define	\
+	HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ENDP_INIT_CTRL_SUSPEND_FVAL \
+	0x20
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ACL_WRAPPER_FVAL 0x22
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_TX_WRAPPER_FVAL \
+	0x23
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_AHB2AHB_BRIDGE_FVAL \
+	0x24
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_RSRC_TYPE_FVAL 0x31
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_RSRC_FVAL 0x32
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ACKMNGR_FVAL 0x33
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_SEQ_FVAL 0x34
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_SEQ_FVAL 0x35
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_FTCH_FVAL 0x36
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_FTCH_FVAL 0x37
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_D_DCPH_2_FVAL 0x38
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_NTF_TX_CMDQ_FVAL 0x39
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_PROD_ACK_MNGR_CMDQ_FVAL \
+	0x3a
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_PROD_ACKMNGR_FVAL 0x3b
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_GSI_AHB2AHB_FVAL	\
+	0x3c
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_MAXI2AXI_PCIE_FVAL \
+	0x3d
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_QSB2AXI_FVAL 0x3e
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_UC_FVAL 0x3f
+#define HWIO_IPA_TESTBUS_SEL_TESTBUS_EN_BMSK 0x1
+#define HWIO_IPA_TESTBUS_SEL_TESTBUS_EN_SHFT 0x0
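+/*
+ * Sketch (register sequence inferred from the definitions above, not
+ * from hardware documentation): route a block onto the test bus by
+ * programming the select field together with the enable bit, then
+ * sample IPA_DEBUG_DATA:
+ *
+ *   HWIO_IPA_TESTBUS_SEL_OUT(
+ *           (HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_FVAL <<
+ *            HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_SHFT) |
+ *           HWIO_IPA_TESTBUS_SEL_TESTBUS_EN_BMSK);
+ *   val = HWIO_IPA_DEBUG_DATA_IN;
+ */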
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000020c)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000020c)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000020c)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_RMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_ATTR 0x3
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_IN in_dword_masked( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_RMSK)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_INM(m) in_dword_masked( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		m)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_OUT(v) out_dword( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		v)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_IN)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_HW_EN_BMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_HW_EN_SHFT 0x0
+#define HWIO_IPA_STEP_MODE_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000210)
+#define HWIO_IPA_STEP_MODE_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x00000210)
+#define HWIO_IPA_STEP_MODE_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x00000210)
+#define HWIO_IPA_STEP_MODE_STATUS_RMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_STATUS_ATTR 0x1
+#define HWIO_IPA_STEP_MODE_STATUS_IN in_dword_masked( \
+		HWIO_IPA_STEP_MODE_STATUS_ADDR,	\
+		HWIO_IPA_STEP_MODE_STATUS_RMSK)
+#define HWIO_IPA_STEP_MODE_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_STEP_MODE_STATUS_ADDR,	\
+		m)
+#define HWIO_IPA_STEP_MODE_STATUS_HW_EN_BMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_STATUS_HW_EN_SHFT 0x0
+#define HWIO_IPA_STEP_MODE_GO_ADDR (IPA_DEBUG_REG_BASE + 0x00000214)
+#define HWIO_IPA_STEP_MODE_GO_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000214)
+#define HWIO_IPA_STEP_MODE_GO_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000214)
+#define HWIO_IPA_HW_EVENTS_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000218)
+#define HWIO_IPA_HW_EVENTS_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000218)
+#define HWIO_IPA_HW_EVENTS_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000218)
+#define HWIO_IPA_LOG_ADDR (IPA_DEBUG_REG_BASE + 0x0000021c)
+#define HWIO_IPA_LOG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000021c)
+#define HWIO_IPA_LOG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000021c)
+#define HWIO_IPA_LOG_RMSK 0x3ff1f2
+#define HWIO_IPA_LOG_ATTR 0x3
+#define HWIO_IPA_LOG_IN in_dword_masked(HWIO_IPA_LOG_ADDR, \
+					HWIO_IPA_LOG_RMSK)
+#define HWIO_IPA_LOG_INM(m) in_dword_masked(HWIO_IPA_LOG_ADDR, m)
+#define HWIO_IPA_LOG_OUT(v) out_dword(HWIO_IPA_LOG_ADDR, v)
+#define HWIO_IPA_LOG_OUTM(m, v) out_dword_masked_ns(HWIO_IPA_LOG_ADDR, \
+						    m, \
+						    v, \
+						    HWIO_IPA_LOG_IN)
+#define HWIO_IPA_LOG_LOG_DPL_L2_REMOVE_EN_BMSK 0x200000
+#define HWIO_IPA_LOG_LOG_DPL_L2_REMOVE_EN_SHFT 0x15
+#define HWIO_IPA_LOG_LOG_REDUCTION_EN_BMSK 0x100000
+#define HWIO_IPA_LOG_LOG_REDUCTION_EN_SHFT 0x14
+#define HWIO_IPA_LOG_LOG_LENGTH_BMSK 0xff000
+#define HWIO_IPA_LOG_LOG_LENGTH_SHFT 0xc
+#define HWIO_IPA_LOG_LOG_PIPE_BMSK 0x1f0
+#define HWIO_IPA_LOG_LOG_PIPE_SHFT 0x4
+#define HWIO_IPA_LOG_LOG_EN_BMSK 0x2
+#define HWIO_IPA_LOG_LOG_EN_SHFT 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR (IPA_DEBUG_REG_BASE + 0x00000224)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000224)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000224)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_IN)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000228)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000228)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000228)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_IN)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000022c)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000022c)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000022c)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ADDR,	\
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ADDR,	\
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_WRITR_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_WRITR_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000230)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000230)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000230)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_WRITR_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_WRITR_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000234)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000234)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000234)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_RMSK 0x3ffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_IN in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_IN)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SKIP_DDR_DMA_BMSK 0x20000
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SKIP_DDR_DMA_SHFT 0x11
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ENABLE_BMSK 0x10000
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ENABLE_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SIZE_BMSK 0xffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SIZE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000238)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000238)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000238)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_RMSK 0xbfff3fff
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_INM(m) in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_SKIP_DDR_WRAP_HAPPENED_BMSK \
+	0x80000000
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_SKIP_DDR_WRAP_HAPPENED_SHFT 0x1f
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_WRITE_PTR_BMSK 0x3fff0000
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_WRITE_PTR_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_READ_PTR_BMSK 0x3fff
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_READ_PTR_SHFT 0x0
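+/*
+ * Note: per the masks above, IPA_LOG_BUF_HW_CMD_RAM_PTR packs the RAM
+ * read pointer in bits [13:0], the write pointer in bits [29:16], and
+ * a skip-DDR wrap-happened flag in bit 31; the register is read-only
+ * (_ATTR 0x1), so software only samples it.
+ */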
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_LSB_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000023c)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_LSB_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000023c)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_LSB_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000023c)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_MSB_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000240)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_MSB_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000240)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_MSB_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000240)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_RESULT_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000244)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_RESULT_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000244)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_RESULT_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000244)
+#define HWIO_IPA_STEP_MODE_HSEQ_BREAKPOINT_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000248)
+#define HWIO_IPA_STEP_MODE_HSEQ_BREAKPOINT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000248)
+#define HWIO_IPA_STEP_MODE_HSEQ_BREAKPOINT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000248)
+#define HWIO_IPA_STEP_MODE_HSEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000024c)
+#define HWIO_IPA_STEP_MODE_HSEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000024c)
+#define HWIO_IPA_STEP_MODE_HSEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000024c)
+#define HWIO_IPA_STEP_MODE_DSEQ_BREAKPOINT_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000250)
+#define HWIO_IPA_STEP_MODE_DSEQ_BREAKPOINT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000250)
+#define HWIO_IPA_STEP_MODE_DSEQ_BREAKPOINT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000250)
+#define HWIO_IPA_STEP_MODE_DSEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000254)
+#define HWIO_IPA_STEP_MODE_DSEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000254)
+#define HWIO_IPA_STEP_MODE_DSEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000254)
+#define HWIO_IPA_RX_ACKQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000258)
+#define HWIO_IPA_RX_ACKQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000258)
+#define HWIO_IPA_RX_ACKQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000258)
+#define HWIO_IPA_RX_ACKQ_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x0000025c)
+#define HWIO_IPA_RX_ACKQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000025c)
+#define HWIO_IPA_RX_ACKQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000025c)
+#define HWIO_IPA_RX_ACKQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000260)
+#define HWIO_IPA_RX_ACKQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000260)
+#define HWIO_IPA_RX_ACKQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000260)
+#define HWIO_IPA_RX_ACKQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000264)
+#define HWIO_IPA_RX_ACKQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000264)
+#define HWIO_IPA_RX_ACKQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000264)
+#define HWIO_IPA_RX_ACKQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000268)
+#define HWIO_IPA_RX_ACKQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000268)
+#define HWIO_IPA_RX_ACKQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000268)
+#define HWIO_IPA_UC_ACKQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x0000026c)
+#define HWIO_IPA_UC_ACKQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000026c)
+#define HWIO_IPA_UC_ACKQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000026c)
+#define HWIO_IPA_UC_ACKQ_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000270)
+#define HWIO_IPA_UC_ACKQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000270)
+#define HWIO_IPA_UC_ACKQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000270)
+#define HWIO_IPA_UC_ACKQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000274)
+#define HWIO_IPA_UC_ACKQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000274)
+#define HWIO_IPA_UC_ACKQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000274)
+#define HWIO_IPA_UC_ACKQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000278)
+#define HWIO_IPA_UC_ACKQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000278)
+#define HWIO_IPA_UC_ACKQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000278)
+#define HWIO_IPA_UC_ACKQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000027c)
+#define HWIO_IPA_UC_ACKQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000027c)
+#define HWIO_IPA_UC_ACKQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000027c)
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000280 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000280 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000280 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RMSK 0x7f
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_ATTR 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_CMD_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_ENHANCED_BMSK 0x40
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_ENHANCED_SHFT 0x6
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_PKT_BMSK 0x20
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_PKT_SHFT 0x5
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_BMSK 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_SHFT 0x4
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_CMD_BMSK 0x8
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_CMD_SHFT 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_CMD_BMSK 0x4
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_CMD_SHFT 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_POP_CMD_BMSK 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_POP_CMD_SHFT 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_WRITE_CMD_SHFT 0x0
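+/*
+ * Registers suffixed _n are replicated instances: _MAXn gives the
+ * highest valid index, and the _INI/_INMI/_OUTI/_OUTMI accessors take
+ * that index as their first argument. Write sketch (the index q and
+ * the chosen command bit are illustrative):
+ *
+ *   HWIO_IPA_RX_SPLT_CMDQ_CMD_n_OUTI(q,
+ *           HWIO_IPA_RX_SPLT_CMDQ_CMD_n_POP_CMD_BMSK);
+ */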
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000284 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000284 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000284 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_RMSK 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_WR_BMSK 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_WR_SHFT 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_RD_BMSK 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_RD_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000288 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000288 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000288 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_SRC_LEN_F_BMSK 0xffff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_SRC_LEN_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_PACKET_LEN_F_BMSK 0xffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_PACKET_LEN_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x0000028c + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000028c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000028c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_METADATA_F_BMSK 0xff000000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_METADATA_F_SHFT 0x18
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_OPCODE_F_BMSK 0xff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_OPCODE_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_FLAGS_F_BMSK 0xfc00
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_FLAGS_F_SHFT 0xa
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_ORDER_F_BMSK 0x300
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_ORDER_F_SHFT 0x8
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_SRC_PIPE_F_BMSK 0xff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_SRC_PIPE_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000290 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000290 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000290 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_CMDQ_ADDR_LSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_CMDQ_ADDR_LSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000294 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000294 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000294 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_CMDQ_ADDR_MSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_CMDQ_ADDR_MSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000298 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000298 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000298 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_SRC_LEN_F_BMSK 0xffff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_SRC_LEN_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_PACKET_LEN_F_BMSK 0xffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_PACKET_LEN_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x0000029c + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000029c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000029c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_METADATA_F_BMSK 0xff000000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_METADATA_F_SHFT 0x18
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_OPCODE_F_BMSK 0xff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_OPCODE_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_FLAGS_F_BMSK 0xfc00
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_FLAGS_F_SHFT 0xa
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_ORDER_F_BMSK 0x300
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_ORDER_F_SHFT 0x8
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_SRC_PIPE_F_BMSK 0xff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_SRC_PIPE_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x000002a0 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x000002a0 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x000002a0 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_CMDQ_ADDR_LSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_CMDQ_ADDR_LSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x000002a4 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x000002a4 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x000002a4 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_CMDQ_ADDR_MSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_CMDQ_ADDR_MSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+						0x000002a8 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+						0x000002a8 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+						0x000002a8 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_RMSK 0x7f
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ADDR(n),	\
+		HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_DEPTH_BMSK 0x60
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_DEPTH_SHFT 0x5
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_COUNT_BMSK 0x18
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_COUNT_SHFT 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_FULL_BMSK 0x4
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_FULL_SHFT 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_EMPTY_BMSK 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_EMPTY_SHFT 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_STATUS_BMSK 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_STATUS_SHFT 0x0
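+/*
+ * Polling sketch (the busy-wait loop is illustrative only): wait for
+ * one split command queue instance to drain by watching its empty bit:
+ *
+ *   while (!HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_INMI(q,
+ *           HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_EMPTY_BMSK))
+ *           ;
+ */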
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000035c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000035c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000035c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000360)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000360)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000360)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000364)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000364)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000364)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_1_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000368)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_1_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000368)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_1_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000368)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_2_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000036c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_2_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000036c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_2_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000036c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000370)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000370)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000370)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_1_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000374)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_1_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000374)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_1_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000374)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_2_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000378)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_2_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000378)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_2_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000378)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000037c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000037c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000037c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_RMSK 0x7
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ADDR,	\
+		HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ADDR,	\
+		m)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_FULL_BMSK 0x4
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_FULL_SHFT 0x2
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_EMPTY_BMSK 0x2
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_EMPTY_SHFT 0x1
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000380)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000380)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000380)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_RMSK 0x3f
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_RD_REQ_BMSK 0x20
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_RD_REQ_SHFT 0x5
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_CMD_CLIENT_BMSK 0x1c
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000384)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000384)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000384)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000388)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000388)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000388)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_BLOCK_WR_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_BLOCK_WR_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_BLOCK_RD_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_BLOCK_RD_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000394)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000394)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000394)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000398)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000398)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000398)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000039c)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000039c)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000039c)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003a0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003a0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003a0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003a4)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003a4)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003a4)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_DEST_LEN_F_BMSK 0xffff0000
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_DEST_LEN_F_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_PACKET_LEN_F_BMSK 0xffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_PACKET_LEN_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003a8)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003a8)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003a8)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_METADATA_F_BMSK 0xff000000
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_METADATA_F_SHFT 0x18
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_OPCODE_F_BMSK 0xff0000
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_OPCODE_F_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_FLAGS_F_BMSK 0xfc00
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_FLAGS_F_SHFT 0xa
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_ORDER_F_BMSK 0x300
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_ORDER_F_SHFT 0x8
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_SRC_PIPE_F_BMSK 0xff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_SRC_PIPE_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003ac)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003ac)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003ac)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_CMDQ_ADDR_LSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_CMDQ_ADDR_LSB_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003b0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003b0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003b0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_CMDQ_ADDR_MSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_CMDQ_ADDR_MSB_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000003b4)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x000003b4)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x000003b4)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						0x000003b8)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x000003b8)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x000003b8)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ADDR,	\
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ADDR,	\
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_RX_HPS_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x000003bc)
+#define HWIO_IPA_RX_HPS_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x000003bc)
+#define HWIO_IPA_RX_HPS_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x000003bc)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x000003c0)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x000003c0)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x000003c0)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x000003c4)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x000003c4)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x000003c4)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_RMSK 0xff0f0f0f
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_RMSK)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_IN)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_4_MIN_DEPTH_BMSK \
+	0xf0000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_4_MIN_DEPTH_SHFT 0x1c
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_3_MIN_DEPTH_BMSK \
+	0xf000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_3_MIN_DEPTH_SHFT 0x18
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_2_MIN_DEPTH_BMSK \
+	0xf0000
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_2_MIN_DEPTH_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_1_MIN_DEPTH_BMSK 0xf00
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_1_MIN_DEPTH_SHFT 0x8
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_0_MIN_DEPTH_BMSK 0xf
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_0_MIN_DEPTH_SHFT 0x0
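+/*
+ * Usage sketch (illustrative, not part of the generated map): a single
+ * field of a read/write register is typically updated through _OUTM,
+ * which expands to out_dword_masked_ns() with the field's _BMSK as the
+ * mask, e.g. for client 0's minimum depth:
+ *
+ *	HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OUTM(
+ *		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_0_MIN_DEPTH_BMSK,
+ *		depth <<
+ *		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_0_MIN_DEPTH_SHFT);
+ */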
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x000003cc)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x000003cc)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x000003cc)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_RMSK 0xff0f0f0f
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_RMSK)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_IN)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_4_MAX_DEPTH_BMSK \
+	0xf0000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_4_MAX_DEPTH_SHFT 0x1c
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_3_MAX_DEPTH_BMSK \
+	0xf000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_3_MAX_DEPTH_SHFT 0x18
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_2_MAX_DEPTH_BMSK \
+	0xf0000
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_2_MAX_DEPTH_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_1_MAX_DEPTH_BMSK 0xf00
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_1_MAX_DEPTH_SHFT 0x8
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_0_MAX_DEPTH_BMSK 0xf
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_0_MAX_DEPTH_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x000003d4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x000003d4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x000003d4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		v)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_IN)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000003d8)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000003d8)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000003d8)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000003dc)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000003dc)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000003dc)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x000003e0)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000003e0)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000003e0)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x000003e4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000003e4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000003e4)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x000003e8)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x000003e8)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x000003e8)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x000003ec)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x000003ec)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x000003ec)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_RMSK 0xfffff
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_INM(m) in_dword_masked(	\
+		HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_REP_F_BMSK 0x80000
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_REP_F_SHFT 0x13
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_BMSK 0x60000
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_SHFT 0x11
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_BMSK 0x1f000
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_SHFT 0xc
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_BMSK 0xff0
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_SHFT 0x4
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_BMSK 0xf
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000003f0)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000003f0)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000003f0)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_RMSK 0xff
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0xfc
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x000003f4)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x000003f4)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x000003f4)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_RMSK 0x7fffffff
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x7fffffff
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x000003f8)
+#define HWIO_IPA_HPS_DPS_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x000003f8)
+#define HWIO_IPA_HPS_DPS_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x000003f8)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x000003fc)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x000003fc)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x000003fc)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_RMSK 0x3f
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_IN in_dword_masked(	\
+		HWIO_IPA_HPS_DPS_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_FIFO_COUNT_BMSK 0x3f
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000400)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000400)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000400)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_RMSK 0xbf
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_IN)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_CMD_CLIENT_BMSK 0x3c
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000404)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000404)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000404)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000408)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000408)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000408)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x0000040c)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000040c)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000040c)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000410)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000410)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000410)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000414)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000414)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000414)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000418)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000418)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000418)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_RMSK 0xfffff
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_BMSK 0x80000
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_SHFT 0x13
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_BMSK 0x60000
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_SHFT 0x11
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_BMSK 0x1f000
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_SHFT 0xc
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_BMSK 0xff0
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_SHFT 0x4
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_BMSK 0xf
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000041c)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000041c)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000041c)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_IN in_dword_masked(	\
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000420)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000420)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000420)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_RMSK 0x3ff
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x3ff
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_DPS_TX_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x00000424)
+#define HWIO_IPA_DPS_TX_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000424)
+#define HWIO_IPA_DPS_TX_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000424)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x00000428)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000428)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000428)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000042c)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000042c)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000042c)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_RMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_BITMAP_BMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_BITMAP_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000430)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000430)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000430)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_RMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_INM(m) in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_OUTM(m,	\
+						     v)	\
+	out_dword_masked_ns(HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+			    m, \
+			    v, \
+			    HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_BITMAP_BMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_BITMAP_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000434)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000434)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000434)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_RMSK 0xfff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_IN in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ALL_CLI_MUX_CONCAT_BMSK 0xfff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ALL_CLI_MUX_CONCAT_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000438 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000438 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000438 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_VALUE_SHFT 0x0
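+/*
+ * Usage sketch (illustrative, not part of the generated map): the _n
+ * accessors take a client index, valid from 0 up to the matching _MAXn,
+ * e.g.:
+ *
+ *	for (n = 0; n <= HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_MAXn;
+ *	     n++)
+ *		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_OUTI(n, 0);
+ */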
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x0000043c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000043c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000043c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000440 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000440 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000440 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000444 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000444 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000444 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000468 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000468 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000468 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x0000046c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000046c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000046c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000470 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000470 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000470 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000474 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000474 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000474 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000498)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000498)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000498)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_RMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_SRC_GROUP_SEL_BMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_SRC_GROUP_SEL_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x000004a0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x000004a0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x000004a0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		v)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_IN)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000004a4)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000004a4)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000004a4)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000004a8)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000004a8)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000004a8)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x000004ac)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004ac)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004ac)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x000004b0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004b0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004b0)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					    0x000004b4)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					    0x000004b4)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					    0x000004b4)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					    0x000004b8)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					    0x000004b8)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					    0x000004b8)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_RMSK 0x7ffffff
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_BMSK 0x4000000
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_SHFT 0x1a
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_BMSK 0x2000000
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_SHFT 0x19
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_BMSK 0x1000000
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_SHFT 0x18
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_BMSK 0xffff00
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_SHFT 0x8
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_BMSK 0xff
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000004bc)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004bc)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004bc)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x000004c0)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x000004c0)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x000004c0)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK 0x1fff
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x1fff
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x000004c4)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x000004c4)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x000004c4)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_IN in_dword_masked(	\
+		HWIO_IPA_ACKMNGR_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR (IPA_DEBUG_REG_BASE + \
+					    0x000004c8)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					    0x000004c8)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					    0x000004c8)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_RMSK 0x3f
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_ATTR 0x3
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IN in_dword_masked( \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_RMSK)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		m)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_OUT(v) out_dword(	\
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		v)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_IN)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_EN_BMSK 0x20
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_EN_SHFT 0x5
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_PORT_SEL_BMSK	\
+	0x1f
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_PORT_SEL_SHFT	\
+	0x0
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000004cc)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004cc)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004cc)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_RMSK 0x7fffffff
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_ATTR 0x1
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_IN in_dword_masked( \
+		HWIO_IPA_GSI_TLV_FIFO_STATUS_ADDR, \
+		HWIO_IPA_GSI_TLV_FIFO_STATUS_RMSK)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TLV_FIFO_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_BMSK 0x40000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_SHFT 0x1e
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_PUB_BMSK 0x20000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_PUB_SHFT 0x1d
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_BMSK 0x10000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_SHFT 0x1c
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_BMSK 0x8000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_SHFT 0x1b
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_BMSK 0x4000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_SHFT 0x1a
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_PUB_BMSK 0x2000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_PUB_SHFT 0x19
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_SHFT 0x18
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PUB_PTR_BMSK 0xff0000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PUB_PTR_SHFT 0x10
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PTR_BMSK 0xff00
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PTR_SHFT 0x8
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_WR_PTR_BMSK 0xff
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_WR_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000004d0)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004d0)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004d0)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_RMSK 0x7fffffff
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_ATTR 0x1
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_IN in_dword_masked( \
+		HWIO_IPA_GSI_AOS_FIFO_STATUS_ADDR, \
+		HWIO_IPA_GSI_AOS_FIFO_STATUS_RMSK)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_AOS_FIFO_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_BMSK 0x40000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_SHFT 0x1e
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_PUB_BMSK 0x20000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_PUB_SHFT 0x1d
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_BMSK 0x10000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_SHFT 0x1c
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_BMSK 0x8000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_SHFT 0x1b
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_BMSK 0x4000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_SHFT 0x1a
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_PUB_BMSK 0x2000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_PUB_SHFT 0x19
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_SHFT 0x18
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PUB_PTR_BMSK 0xff0000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PUB_PTR_SHFT 0x10
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PTR_BMSK 0xff00
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PTR_SHFT 0x8
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_WR_PTR_BMSK 0xff
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_WR_PTR_SHFT 0x0
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_TLV_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000004d4)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_TLV_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000004d4)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_TLV_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000004d4)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_AOS_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000548)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_AOS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000548)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_AOS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000548)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004d8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004d8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004d8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004dc)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004dc)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004dc)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004e0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004e0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004e0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004e4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004e4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004e4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004e8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004e8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004e8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004ec)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004ec)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004ec)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004f0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004f0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004f0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004f4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004f4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004f4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004f8)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004f8)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004f8)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004fc)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004fc)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004fc)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000500)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000500)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000500)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000504)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000504)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000504)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000508)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000508)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000508)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000050c)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000050c)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000050c)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000510)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000510)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000510)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000514)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000514)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000514)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_VALUE_SHFT 0x0
+#define HWIO_IPA_UC_RX_HND_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000518)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000518)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000518)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x0000051c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000051c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000051c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000520)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000520)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000520)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_1_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000524)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000524)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000524)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_2_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000528)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000528)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000528)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_3_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000052c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000052c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000052c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000530)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000530)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000530)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_1_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000534)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000534)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000534)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_2_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000538)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000538)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000538)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_3_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000053c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000053c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000053c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000540)
+#define HWIO_IPA_UC_RX_HND_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000540)
+#define HWIO_IPA_UC_RX_HND_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000540)
+#define HWIO_IPA_RAM_HW_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x0000054c)
+#define HWIO_IPA_RAM_HW_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000054c)
+#define HWIO_IPA_RAM_HW_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000054c)
+#define HWIO_IPA_RAM_HW_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x00000550)
+#define HWIO_IPA_RAM_HW_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000550)
+#define HWIO_IPA_RAM_HW_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000550)
+#define HWIO_IPA_RAM_SNIFFER_BASE_OFFSET_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000554)
+#define HWIO_IPA_RAM_SNIFFER_BASE_OFFSET_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000554)
+#define HWIO_IPA_RAM_SNIFFER_BASE_OFFSET_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000554)
+#define HWIO_IPA_RAM_FRAG_FRST_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000558)
+#define HWIO_IPA_RAM_FRAG_FRST_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000558)
+#define HWIO_IPA_RAM_FRAG_FRST_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000558)
+#define HWIO_IPA_RAM_FRAG_SCND_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x0000055c)
+#define HWIO_IPA_RAM_FRAG_SCND_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x0000055c)
+#define HWIO_IPA_RAM_FRAG_SCND_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x0000055c)
+#define HWIO_IPA_RAM_GSI_TLV_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000560)
+#define HWIO_IPA_RAM_GSI_TLV_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000560)
+#define HWIO_IPA_RAM_GSI_TLV_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000560)
+#define HWIO_IPA_RAM_DCPH_KEYS_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x00000564)
+#define HWIO_IPA_RAM_DCPH_KEYS_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000564)
+#define HWIO_IPA_RAM_DCPH_KEYS_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000564)
+#define HWIO_IPA_RAM_DCPH_KEYS_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x00000568)
+#define HWIO_IPA_RAM_DCPH_KEYS_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000568)
+#define HWIO_IPA_RAM_DCPH_KEYS_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000568)
+#define HWIO_IPA_DPS_SEQUENCER_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x00000570)
+#define HWIO_IPA_DPS_SEQUENCER_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000570)
+#define HWIO_IPA_DPS_SEQUENCER_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000570)
+#define HWIO_IPA_DPS_SEQUENCER_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x00000574)
+#define HWIO_IPA_DPS_SEQUENCER_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000574)
+#define HWIO_IPA_DPS_SEQUENCER_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000574)
+#define HWIO_IPA_HPS_SEQUENCER_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x00000578)
+#define HWIO_IPA_HPS_SEQUENCER_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000578)
+#define HWIO_IPA_HPS_SEQUENCER_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000578)
+#define HWIO_IPA_HPS_SEQUENCER_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x0000057c)
+#define HWIO_IPA_HPS_SEQUENCER_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000057c)
+#define HWIO_IPA_HPS_SEQUENCER_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000057c)
+#define HWIO_IPA_RAM_PKT_CTX_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000650)
+#define HWIO_IPA_RAM_PKT_CTX_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000650)
+#define HWIO_IPA_RAM_PKT_CTX_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000650)
+#define HWIO_IPA_RAM_SW_AREA_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000654)
+#define HWIO_IPA_RAM_SW_AREA_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000654)
+#define HWIO_IPA_RAM_SW_AREA_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000654)
+#define HWIO_IPA_RAM_HDRI_TYPE1_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000658)
+#define HWIO_IPA_RAM_HDRI_TYPE1_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000658)
+#define HWIO_IPA_RAM_HDRI_TYPE1_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000658)
+#define HWIO_IPA_RAM_AGGR_NLO_COUNTERS_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x0000065c)
+#define HWIO_IPA_RAM_AGGR_NLO_COUNTERS_BASE_ADDR_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000065c)
+#define HWIO_IPA_RAM_AGGR_NLO_COUNTERS_BASE_ADDR_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000065c)
+#define HWIO_IPA_RAM_NLO_VP_CACHE_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000660)
+#define HWIO_IPA_RAM_NLO_VP_CACHE_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000660)
+#define HWIO_IPA_RAM_NLO_VP_CACHE_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000660)
+#define HWIO_IPA_RAM_COAL_VP_CACHE_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000664)
+#define HWIO_IPA_RAM_COAL_VP_CACHE_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000664)
+#define HWIO_IPA_RAM_COAL_VP_CACHE_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000664)
+#define HWIO_IPA_RAM_COAL_VP_FIFO_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000668)
+#define HWIO_IPA_RAM_COAL_VP_FIFO_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000668)
+#define HWIO_IPA_RAM_COAL_VP_FIFO_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000668)
+#define HWIO_IPA_RAM_GSI_IF_CONS_ACCUMS_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000066c)
+#define HWIO_IPA_RAM_GSI_IF_CONS_ACCUMS_BASE_ADDR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000066c)
+#define HWIO_IPA_RAM_GSI_IF_CONS_ACCUMS_BASE_ADDR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000066c)
+#define HWIO_IPA_RAM_AGGR_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + 0x00000670)
+#define HWIO_IPA_RAM_AGGR_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000670)
+#define HWIO_IPA_RAM_AGGR_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000670)
+#define HWIO_IPA_RAM_TX_COUNTERS_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000674)
+#define HWIO_IPA_RAM_TX_COUNTERS_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000674)
+#define HWIO_IPA_RAM_TX_COUNTERS_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000674)
+#define HWIO_IPA_RAM_DPL_FIFO_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000678)
+#define HWIO_IPA_RAM_DPL_FIFO_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000678)
+#define HWIO_IPA_RAM_DPL_FIFO_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000678)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_CTX_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000067c)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_CTX_BASE_ADDR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000067c)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_CTX_BASE_ADDR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000067c)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_AGGR_BASE_ADDR_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000680)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_AGGR_BASE_ADDR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000680)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_AGGR_BASE_ADDR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000680)
+#define HWIO_IPA_RAM_COAL_SLAVE_VP_CTX_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000684)
+#define HWIO_IPA_RAM_COAL_SLAVE_VP_CTX_BASE_ADDR_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000684)
+#define HWIO_IPA_RAM_COAL_SLAVE_VP_CTX_BASE_ADDR_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000684)
+#define HWIO_IPA_RAM_UL_NLO_AGGR_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000688)
+#define HWIO_IPA_RAM_UL_NLO_AGGR_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000688)
+#define HWIO_IPA_RAM_UL_NLO_AGGR_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000688)
+#define HWIO_IPA_RAM_UC_IRAM_ADDR_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x0000069c)
+#define HWIO_IPA_RAM_UC_IRAM_ADDR_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x0000069c)
+#define HWIO_IPA_RAM_UC_IRAM_ADDR_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x0000069c)
+#define HWIO_IPA_HPS_UC2SEQ_PUSH_ADDR (IPA_DEBUG_REG_BASE + 0x00000580)
+#define HWIO_IPA_HPS_UC2SEQ_PUSH_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000580)
+#define HWIO_IPA_HPS_UC2SEQ_PUSH_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000580)
+#define HWIO_IPA_HPS_UC2SEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000584)
+#define HWIO_IPA_HPS_UC2SEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000584)
+#define HWIO_IPA_HPS_UC2SEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000584)
+#define HWIO_IPA_HPS_SEQ2UC_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000588)
+#define HWIO_IPA_HPS_SEQ2UC_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000588)
+#define HWIO_IPA_HPS_SEQ2UC_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000588)
+#define HWIO_IPA_HPS_SEQ2UC_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000058c)
+#define HWIO_IPA_HPS_SEQ2UC_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x0000058c)
+#define HWIO_IPA_HPS_SEQ2UC_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x0000058c)
+#define HWIO_IPA_HPS_SEQ2UC_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000590)
+#define HWIO_IPA_HPS_SEQ2UC_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000590)
+#define HWIO_IPA_HPS_SEQ2UC_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000590)
+#define HWIO_IPA_DPS_UC2SEQ_PUSH_ADDR (IPA_DEBUG_REG_BASE + 0x00000594)
+#define HWIO_IPA_DPS_UC2SEQ_PUSH_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000594)
+#define HWIO_IPA_DPS_UC2SEQ_PUSH_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000594)
+#define HWIO_IPA_DPS_UC2SEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000598)
+#define HWIO_IPA_DPS_UC2SEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000598)
+#define HWIO_IPA_DPS_UC2SEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000598)
+#define HWIO_IPA_DPS_SEQ2UC_RD_ADDR (IPA_DEBUG_REG_BASE + 0x0000059c)
+#define HWIO_IPA_DPS_SEQ2UC_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000059c)
+#define HWIO_IPA_DPS_SEQ2UC_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000059c)
+#define HWIO_IPA_DPS_SEQ2UC_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000005a0)
+#define HWIO_IPA_DPS_SEQ2UC_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x000005a0)
+#define HWIO_IPA_DPS_SEQ2UC_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x000005a0)
+#define HWIO_IPA_DPS_SEQ2UC_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x000005a4)
+#define HWIO_IPA_DPS_SEQ2UC_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x000005a4)
+#define HWIO_IPA_DPS_SEQ2UC_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x000005a4)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000600)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000600)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000600)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_IN)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000604)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000604)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000604)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000608)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000608)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000608)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x0000060c)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000060c)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000060c)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000610)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000610)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000610)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000614)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000614)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000614)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000618)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000618)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000618)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_RMSK 0xfffff
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_BMSK 0x80000
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_SHFT 0x13
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_BMSK 0x60000
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_SHFT 0x11
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_BMSK 0x1f000
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_SHFT 0xc
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_BMSK 0xff0
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_SHFT 0x4
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_BMSK 0xf
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_SHFT 0x0
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000061c)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000061c)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000061c)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_IN in_dword_masked(	\
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000620)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000620)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000620)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_RMSK 0x7fffffff
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x7fffffff
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_NTF_TX_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x00000624)
+#define HWIO_IPA_NTF_TX_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000624)
+#define HWIO_IPA_NTF_TX_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000624)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x00000628)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000628)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000628)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000700)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000700)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000700)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_IN)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000704)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_WR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000704)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_WR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000704)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000708)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_RD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000708)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_RD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000708)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000070c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000070c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000070c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000710)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000710)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000710)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_WR_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000714)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000714)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000714)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000718)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000718)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000718)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_RMSK 0xffffffff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ATTR 0x3
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_OUT(v) out_dword( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		v)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_IN)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_USERDATA_BMSK 0xf8000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_USERDATA_SHFT 0x1b
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_BMSK \
+	0x4000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_SHFT 0x1a
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_BMSK 0x2000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_SHFT 0x19
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_BMSK 0x1000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_SHFT 0x18
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_BMSK 0xffff00
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_SHFT 0x8
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_BMSK 0xff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000071c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000071c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000071c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ADDR,	\
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ADDR,	\
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000720)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000720)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000720)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK 0x7fffffff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked(	\
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x7fffffff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000724)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000724)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000724)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000728)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000728)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000728)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000072c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000072c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000072c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000730)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000730)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000730)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000734)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000734)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000734)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_4_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000738)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_4_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000738)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_4_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000738)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_5_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000073c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_5_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000073c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_5_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000073c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_6_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000740)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_6_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000740)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_6_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000740)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKINJ_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000744)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKINJ_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000744)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKINJ_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000744)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKUPD_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000748)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKUPD_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000748)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKUPD_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000748)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000074c)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000074c)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000074c)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000750)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000750)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000750)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_0_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000754)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000754)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000754)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_1_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000758)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_1_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000758)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_1_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000758)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_2_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x0000075c)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_2_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x0000075c)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_2_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x0000075c)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_3_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000760)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_3_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000760)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_3_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000760)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_4_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000764)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_4_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000764)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_4_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000764)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_5_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000768)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_5_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000768)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_5_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000768)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000076c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000076c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000076c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKUPD_CFG_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000770)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKUPD_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000770)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKUPD_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000770)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000774)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000774)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000774)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						     0x00000778)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000778)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000778)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG1_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000077c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG1_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000077c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG1_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000077c)
+#define HWIO_IPA_SPARE_REG_1_ADDR (IPA_DEBUG_REG_BASE + 0x00000780)
+#define HWIO_IPA_SPARE_REG_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000780)
+#define HWIO_IPA_SPARE_REG_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000780)
+#define HWIO_IPA_SPARE_REG_1_RMSK 0xffffffff
+#define HWIO_IPA_SPARE_REG_1_ATTR 0x3
+#define HWIO_IPA_SPARE_REG_1_IN in_dword_masked(HWIO_IPA_SPARE_REG_1_ADDR, \
+						HWIO_IPA_SPARE_REG_1_RMSK)
+#define HWIO_IPA_SPARE_REG_1_INM(m) in_dword_masked( \
+		HWIO_IPA_SPARE_REG_1_ADDR, \
+		m)
+#define HWIO_IPA_SPARE_REG_1_OUT(v) out_dword(HWIO_IPA_SPARE_REG_1_ADDR, v)
+#define HWIO_IPA_SPARE_REG_1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_SPARE_REG_1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SPARE_REG_1_IN)
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT31_BMSK 0x80000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT31_SHFT 0x1f
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT30_BMSK 0x40000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT30_SHFT 0x1e
+#define HWIO_IPA_SPARE_REG_1_SPARE_ACKINJ_PIPE8_MASK_ENABLE_BMSK \
+	0x20000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_ACKINJ_PIPE8_MASK_ENABLE_SHFT 0x1d
+#define	\
+	HWIO_IPA_SPARE_REG_1_WARB_FORCE_ARB_ROUND_FINISH_SPECIAL_DISABLE_BMSK \
+	0x10000000
+#define	\
+	HWIO_IPA_SPARE_REG_1_WARB_FORCE_ARB_ROUND_FINISH_SPECIAL_DISABLE_SHFT \
+	0x1c
+#define HWIO_IPA_SPARE_REG_1_DCPH_RAM_RD_PREFETCH_DISABLE_BMSK 0x8000000
+#define HWIO_IPA_SPARE_REG_1_DCPH_RAM_RD_PREFETCH_DISABLE_SHFT 0x1b
+#define HWIO_IPA_SPARE_REG_1_RAM_SLAVEWAY_ACCESS_PROTECTION_DISABLE_BMSK \
+	0x4000000
+#define HWIO_IPA_SPARE_REG_1_RAM_SLAVEWAY_ACCESS_PROTECTION_DISABLE_SHFT \
+	0x1a
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT25_BMSK 0x2000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT25_SHFT 0x19
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT24_BMSK 0x1000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT24_SHFT 0x18
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT23_BMSK 0x800000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT23_SHFT 0x17
+#define HWIO_IPA_SPARE_REG_1_BAM_IDLE_IN_IPA_MISC_CGC_EN_BMSK 0x400000
+#define HWIO_IPA_SPARE_REG_1_BAM_IDLE_IN_IPA_MISC_CGC_EN_SHFT 0x16
+#define HWIO_IPA_SPARE_REG_1_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_BMSK \
+	0x200000
+#define HWIO_IPA_SPARE_REG_1_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_SHFT \
+	0x15
+#define HWIO_IPA_SPARE_REG_1_REVERT_WARB_FIX_BMSK 0x100000
+#define HWIO_IPA_SPARE_REG_1_REVERT_WARB_FIX_SHFT 0x14
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT19_BMSK 0x80000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT19_SHFT 0x13
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_GEN_DEAGGR_ERROR_BMSK 0x40000
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_GEN_DEAGGR_ERROR_SHFT 0x12
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_MBIM_DEAGGR_ERROR_BMSK 0x20000
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_MBIM_DEAGGR_ERROR_SHFT 0x11
+#define HWIO_IPA_SPARE_REG_1_QMB_RAM_RD_CACHE_DISABLE_BMSK 0x10000
+#define HWIO_IPA_SPARE_REG_1_QMB_RAM_RD_CACHE_DISABLE_SHFT 0x10
+#define	\
+	HWIO_IPA_SPARE_REG_1_RX_CMDQ_SPLITTER_CMDQ_PENDING_MUX_DISABLE_BMSK \
+	0x8000
+#define	\
+	HWIO_IPA_SPARE_REG_1_RX_CMDQ_SPLITTER_CMDQ_PENDING_MUX_DISABLE_SHFT \
+	0xf
+#define	\
+	HWIO_IPA_SPARE_REG_1_FRAG_MNGR_FAIRNESS_EVICTION_ON_CONSTRUCTING_BMSK \
+	0x4000
+#define	\
+	HWIO_IPA_SPARE_REG_1_FRAG_MNGR_FAIRNESS_EVICTION_ON_CONSTRUCTING_SHFT \
+	0xe
+#define HWIO_IPA_SPARE_REG_1_TX_BLOCK_AGGR_QUERY_ON_HOLB_PACKET_BMSK \
+	0x2000
+#define HWIO_IPA_SPARE_REG_1_TX_BLOCK_AGGR_QUERY_ON_HOLB_PACKET_SHFT 0xd
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT12_BMSK 0x1000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT12_SHFT 0xc
+#define HWIO_IPA_SPARE_REG_1_TX_GIVES_SSPND_ACK_ON_OPEN_AGGR_FRAME_BMSK	\
+	0x800
+#define HWIO_IPA_SPARE_REG_1_TX_GIVES_SSPND_ACK_ON_OPEN_AGGR_FRAME_SHFT	\
+	0xb
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_PKT_CHECK_DISABLE_BMSK 0x400
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_PKT_CHECK_DISABLE_SHFT 0xa
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT8_BMSK 0x100
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT8_SHFT 0x8
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_FRAG_NOTIF_CHECK_DISABLE_BMSK \
+	0x40
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_FRAG_NOTIF_CHECK_DISABLE_SHFT \
+	0x6
+#define HWIO_IPA_SPARE_REG_1_ACL_INORDER_MULTI_DISABLE_BMSK 0x20
+#define HWIO_IPA_SPARE_REG_1_ACL_INORDER_MULTI_DISABLE_SHFT 0x5
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT4_BMSK 0x10
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT4_SHFT 0x4
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT3_BMSK 0x8
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT3_SHFT 0x3
+#define HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_BMSK 0x4
+#define HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_SHFT 0x2
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT1_BMSK 0x2
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT1_SHFT 0x1
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT0_BMSK 0x1
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT0_SHFT 0x0
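+/*
+ * Usage sketch for the generated HWIO accessors above (a minimal example,
+ * assuming the in_dword_masked()/out_dword()/out_dword_masked_ns() helpers
+ * these macros expand to are supplied by the surrounding msm hwio headers):
+ * each register exposes *_IN/_INM for masked reads, *_OUT/_OUTM for plain
+ * and masked writes, and a *_BMSK/*_SHFT pair per field. Extracting one
+ * field and updating it via a masked write could look like:
+ *
+ *	u32 aooowr = (HWIO_IPA_SPARE_REG_1_IN &
+ *		      HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_BMSK) >>
+ *		     HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_SHFT;
+ *	HWIO_IPA_SPARE_REG_1_OUTM(HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_BMSK,
+ *				  1 << HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_SHFT);
+ */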
+#define HWIO_IPA_SPARE_REG_2_ADDR (IPA_DEBUG_REG_BASE + 0x00000784)
+#define HWIO_IPA_SPARE_REG_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000784)
+#define HWIO_IPA_SPARE_REG_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000784)
+#define HWIO_IPA_SPARE_REG_2_RMSK 0xffffffff
+#define HWIO_IPA_SPARE_REG_2_ATTR 0x3
+#define HWIO_IPA_SPARE_REG_2_IN in_dword_masked(HWIO_IPA_SPARE_REG_2_ADDR, \
+						HWIO_IPA_SPARE_REG_2_RMSK)
+#define HWIO_IPA_SPARE_REG_2_INM(m) in_dword_masked( \
+		HWIO_IPA_SPARE_REG_2_ADDR, \
+		m)
+#define HWIO_IPA_SPARE_REG_2_OUT(v) out_dword(HWIO_IPA_SPARE_REG_2_ADDR, v)
+#define HWIO_IPA_SPARE_REG_2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_SPARE_REG_2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SPARE_REG_2_IN)
+#define HWIO_IPA_SPARE_REG_2_SPARE_BITS_BMSK 0xfffffffc
+#define HWIO_IPA_SPARE_REG_2_SPARE_BITS_SHFT 0x2
+#define	\
+	HWIO_IPA_SPARE_REG_2_CMDQ_SPLIT_NOT_WAIT_DATA_DESC_PRIOR_HDR_PUSH_BMSK \
+	0x2
+#define	\
+	HWIO_IPA_SPARE_REG_2_CMDQ_SPLIT_NOT_WAIT_DATA_DESC_PRIOR_HDR_PUSH_SHFT \
+	0x1
+#define HWIO_IPA_SPARE_REG_2_TX_BRESP_INJ_WITH_FLOP_BMSK 0x1
+#define HWIO_IPA_SPARE_REG_2_TX_BRESP_INJ_WITH_FLOP_SHFT 0x0
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					  0x00000794 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000794 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000794 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_RMSK 0x80010000
+#define HWIO_IPA_ENDP_GSI_CFG1_n_MAXn 30
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ATTR 0x3
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		HWIO_IPA_ENDP_GSI_CFG1_n_RMSK)
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_GSI_CFG1_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_GSI_CFG1_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_GSI_CFG1_n_INI(n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INIT_ENDP_BMSK 0x80000000
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INIT_ENDP_SHFT 0x1f
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ENDP_EN_BMSK 0x10000
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ENDP_EN_SHFT 0x10
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_1_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000908)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_1_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000908)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_1_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000908)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_2_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x0000090c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_2_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000090c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_2_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000090c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_3_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000910)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_3_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000910)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_3_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000910)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_CTRL_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000914)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_CTRL_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000914)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_CTRL_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000914)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_RDY_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000918)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_RDY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000918)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_RDY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000918)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_1_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000091c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000091c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000091c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_2_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000920)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000920)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000920)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000924 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000924 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000924 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_RMSK 0xffffff
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_MAXn 30
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_ATTR 0x3
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n), \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_RMSK)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_INI(n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_SIZE_BMSK 0xff0000
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_SIZE_SHFT 0x10
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_BASE_ADDR_BMSK 0xffff
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_BASE_ADDR_SHFT 0x0
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x000009a8 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000009a8 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000009a8 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_RMSK 0xffffff
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_MAXn 30
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_ATTR 0x3
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n), \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_RMSK)
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_INI(n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_SIZE_BMSK 0xff0000
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_SIZE_SHFT 0x10
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_BASE_ADDR_BMSK 0xffff
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_BASE_ADDR_SHFT 0x0
+#define HWIO_IPA_COAL_VP_AOS_FIFO_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000a60 + 0x4 * (n))
+#define HWIO_IPA_COAL_VP_AOS_FIFO_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000a60 + 0x4 * (n))
+#define HWIO_IPA_COAL_VP_AOS_FIFO_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000a60 + 0x4 * (n))
+#define HWIO_IPA_CTXH_CTRL_ADDR (IPA_DEBUG_REG_BASE + 0x00000afc)
+#define HWIO_IPA_CTXH_CTRL_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000afc)
+#define HWIO_IPA_CTXH_CTRL_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000afc)
+#define HWIO_IPA_CTXH_CTRL_RMSK 0x8000000f
+#define HWIO_IPA_CTXH_CTRL_ATTR 0x3
+#define HWIO_IPA_CTXH_CTRL_IN in_dword_masked(HWIO_IPA_CTXH_CTRL_ADDR, \
+					      HWIO_IPA_CTXH_CTRL_RMSK)
+#define HWIO_IPA_CTXH_CTRL_INM(m) in_dword_masked(HWIO_IPA_CTXH_CTRL_ADDR, \
+						  m)
+#define HWIO_IPA_CTXH_CTRL_OUT(v) out_dword(HWIO_IPA_CTXH_CTRL_ADDR, v)
+#define HWIO_IPA_CTXH_CTRL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_CTXH_CTRL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_CTXH_CTRL_IN)
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_BMSK 0x80000000
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_SHFT 0x1f
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_ID_BMSK 0xf
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_ID_SHFT 0x0
+#define HWIO_IPA_CTX_ID_m_CTX_NUM_n_ADDR(m, n) (IPA_DEBUG_REG_BASE + \
+						0x00000b00 + 0x80 * (m) + \
+						0x4 * (n))
+#define HWIO_IPA_CTX_ID_m_CTX_NUM_n_PHYS(m, n) (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000b00 + 0x80 * (m) + \
+						0x4 * (n))
+#define HWIO_IPA_CTX_ID_m_CTX_NUM_n_OFFS(m, n) (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000b00 + 0x80 * (m) + \
+						0x4 * (n))
+#define IPA_EE_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00043000)
+#define IPA_EE_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00043000)
+#define IPA_EE_REG_BASE_OFFS 0x00043000
+#define HWIO_IPA_IRQ_STTS_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000008 + \
+					0x1000 * (n))
+#define HWIO_IPA_IRQ_STTS_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000008 + 0x1000 * (n))
+#define HWIO_IPA_IRQ_STTS_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000008 + 0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x0000000c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + 0x0000000c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + 0x0000000c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_RMSK 0x7bffffd
+#define HWIO_IPA_IRQ_EN_EE_n_MAXn 3
+#define HWIO_IPA_IRQ_EN_EE_n_ATTR 0x3
+#define HWIO_IPA_IRQ_EN_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		HWIO_IPA_IRQ_EN_EE_n_RMSK)
+#define HWIO_IPA_IRQ_EN_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_IRQ_EN_EE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_IRQ_EN_EE_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_IRQ_EN_EE_n_INI(n))
+#define HWIO_IPA_IRQ_EN_EE_n_TLV_LEN_MIN_DSM_IRQ_EN_BMSK 0x4000000
+#define HWIO_IPA_IRQ_EN_EE_n_TLV_LEN_MIN_DSM_IRQ_EN_SHFT 0x1a
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_UC_IRQ_EN_BMSK 0x2000000
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_UC_IRQ_EN_SHFT 0x19
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_IPA_IF_TLV_RCVD_IRQ_EN_BMSK 0x1000000
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_IPA_IF_TLV_RCVD_IRQ_EN_SHFT 0x18
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_EE_IRQ_EN_BMSK 0x800000
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_EE_IRQ_EN_SHFT 0x17
+#define HWIO_IPA_IRQ_EN_EE_n_UCP_IRQ_EN_BMSK 0x200000
+#define HWIO_IPA_IRQ_EN_EE_n_UCP_IRQ_EN_SHFT 0x15
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_ABOVE_IRQ_EN_BMSK 0x100000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_ABOVE_IRQ_EN_SHFT 0x14
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_ABOVE_IRQ_EN_BMSK 0x80000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_ABOVE_IRQ_EN_SHFT 0x13
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_BELOW_IRQ_EN_BMSK 0x40000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_BELOW_IRQ_EN_SHFT 0x12
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_BELOW_IRQ_EN_BMSK 0x20000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_BELOW_IRQ_EN_SHFT 0x11
+#define HWIO_IPA_IRQ_EN_EE_n_BAM_GSI_IDLE_IRQ_EN_BMSK 0x10000
+#define HWIO_IPA_IRQ_EN_EE_n_BAM_GSI_IDLE_IRQ_EN_SHFT 0x10
+#define HWIO_IPA_IRQ_EN_EE_n_TX_HOLB_DROP_IRQ_EN_BMSK 0x8000
+#define HWIO_IPA_IRQ_EN_EE_n_TX_HOLB_DROP_IRQ_EN_SHFT 0xf
+#define HWIO_IPA_IRQ_EN_EE_n_TX_SUSPEND_IRQ_EN_BMSK 0x4000
+#define HWIO_IPA_IRQ_EN_EE_n_TX_SUSPEND_IRQ_EN_SHFT 0xe
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_ERR_IRQ_EN_BMSK 0x2000
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_ERR_IRQ_EN_SHFT 0xd
+#define HWIO_IPA_IRQ_EN_EE_n_STEP_MODE_IRQ_EN_BMSK 0x1000
+#define HWIO_IPA_IRQ_EN_EE_n_STEP_MODE_IRQ_EN_SHFT 0xc
+#define HWIO_IPA_IRQ_EN_EE_n_TX_ERR_IRQ_EN_BMSK 0x800
+#define HWIO_IPA_IRQ_EN_EE_n_TX_ERR_IRQ_EN_SHFT 0xb
+#define HWIO_IPA_IRQ_EN_EE_n_DEAGGR_ERR_IRQ_EN_BMSK 0x400
+#define HWIO_IPA_IRQ_EN_EE_n_DEAGGR_ERR_IRQ_EN_SHFT 0xa
+#define HWIO_IPA_IRQ_EN_EE_n_RX_ERR_IRQ_EN_BMSK 0x200
+#define HWIO_IPA_IRQ_EN_EE_n_RX_ERR_IRQ_EN_SHFT 0x9
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ_EN_BMSK 0x100
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ_EN_SHFT 0x8
+#define HWIO_IPA_IRQ_EN_EE_n_UC_RX_CMD_Q_NOT_FULL_IRQ_EN_BMSK 0x80
+#define HWIO_IPA_IRQ_EN_EE_n_UC_RX_CMD_Q_NOT_FULL_IRQ_EN_SHFT 0x7
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IN_Q_NOT_EMPTY_IRQ_EN_BMSK 0x40
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IN_Q_NOT_EMPTY_IRQ_EN_SHFT 0x6
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_3_IRQ_EN_BMSK 0x20
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_3_IRQ_EN_SHFT 0x5
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_2_IRQ_EN_BMSK 0x10
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_2_IRQ_EN_SHFT 0x4
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_1_IRQ_EN_BMSK 0x8
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_1_IRQ_EN_SHFT 0x3
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_0_IRQ_EN_BMSK 0x4
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_0_IRQ_EN_SHFT 0x2
+#define HWIO_IPA_IRQ_EN_EE_n_BAD_SNOC_ACCESS_IRQ_EN_BMSK 0x1
+#define HWIO_IPA_IRQ_EN_EE_n_BAD_SNOC_ACCESS_IRQ_EN_SHFT 0x0
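/*
 * Illustrative aside, not part of the patch: arrayed registers such as
 * IPA_IRQ_EN_EE_n pair the per-field _BMSK/_SHFT values above with the
 * _OUTMI read-modify-write accessor. A hedged sketch (the helper name is
 * invented for illustration) that enables the TX_SUSPEND interrupt for
 * execution environment ee without disturbing the other enable bits:
 *
 *	static inline void ipa_enable_tx_suspend_irq(u32 ee)
 *	{
 *		HWIO_IPA_IRQ_EN_EE_n_OUTMI(ee,
 *			HWIO_IPA_IRQ_EN_EE_n_TX_SUSPEND_IRQ_EN_BMSK,
 *			1 << HWIO_IPA_IRQ_EN_EE_n_TX_SUSPEND_IRQ_EN_SHFT);
 *	}
 */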
+#define HWIO_IPA_IRQ_CLR_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000010 + \
+				       0x1000 * (n))
+#define HWIO_IPA_IRQ_CLR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + 0x00000010 + \
+				       0x1000 * (n))
+#define HWIO_IPA_IRQ_CLR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + 0x00000010 + \
+				       0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000018 + \
+					0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000018 + 0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000018 + 0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_RMSK 0x8000f1ff
+#define HWIO_IPA_SNOC_FEC_EE_n_MAXn 3
+#define HWIO_IPA_SNOC_FEC_EE_n_ATTR 0x1
+#define HWIO_IPA_SNOC_FEC_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SNOC_FEC_EE_n_ADDR(n),	\
+		HWIO_IPA_SNOC_FEC_EE_n_RMSK)
+#define HWIO_IPA_SNOC_FEC_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SNOC_FEC_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_SNOC_FEC_EE_n_READ_NOT_WRITE_BMSK 0x80000000
+#define HWIO_IPA_SNOC_FEC_EE_n_READ_NOT_WRITE_SHFT 0x1f
+#define HWIO_IPA_SNOC_FEC_EE_n_TID_BMSK 0xf000
+#define HWIO_IPA_SNOC_FEC_EE_n_TID_SHFT 0xc
+#define HWIO_IPA_SNOC_FEC_EE_n_QMB_INDEX_BMSK 0x100
+#define HWIO_IPA_SNOC_FEC_EE_n_QMB_INDEX_SHFT 0x8
+#define HWIO_IPA_SNOC_FEC_EE_n_CLIENT_BMSK 0xff
+#define HWIO_IPA_SNOC_FEC_EE_n_CLIENT_SHFT 0x0
+#define HWIO_IPA_IRQ_EE_UC_n_ADDR(n) (IPA_EE_REG_BASE + 0x0000001c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EE_UC_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + 0x0000001c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EE_UC_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + 0x0000001c + \
+				      0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000020 + \
+					0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000020 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000020 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_RMSK 0xffffffff
+#define HWIO_IPA_FEC_ADDR_EE_n_MAXn 3
+#define HWIO_IPA_FEC_ADDR_EE_n_ATTR 0x1
+#define HWIO_IPA_FEC_ADDR_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_FEC_ADDR_EE_n_ADDR(n),	\
+		HWIO_IPA_FEC_ADDR_EE_n_RMSK)
+#define HWIO_IPA_FEC_ADDR_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_FEC_ADDR_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_FEC_ADDR_EE_n_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_FEC_ADDR_EE_n_ADDR_SHFT 0x0
+#define HWIO_IPA_FEC_ADDR_MSB_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000024 + \
+					    0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_MSB_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					    0x00000024 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_MSB_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					    0x00000024 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000028 + \
+					0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000028 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000028 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_RMSK 0xffffffff
+#define HWIO_IPA_FEC_ATTR_EE_n_MAXn 3
+#define HWIO_IPA_FEC_ATTR_EE_n_ATTR 0x1
+#define HWIO_IPA_FEC_ATTR_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_FEC_ATTR_EE_n_ADDR(n),	\
+		HWIO_IPA_FEC_ATTR_EE_n_RMSK)
+#define HWIO_IPA_FEC_ATTR_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_FEC_ATTR_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_FEC_ATTR_EE_n_ERROR_INFO_BMSK 0xffffffc0
+#define HWIO_IPA_FEC_ATTR_EE_n_ERROR_INFO_SHFT 0x6
+#define HWIO_IPA_FEC_ATTR_EE_n_OPCODE_BMSK 0x3f
+#define HWIO_IPA_FEC_ATTR_EE_n_OPCODE_SHFT 0x0
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						0x00000030 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+						0x00000030 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+						0x00000030 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_RMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_MAXn 3
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ATTR 0x1
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ADDR(n),	\
+		HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_RMSK)
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n) (IPA_EE_REG_BASE +	\
+					      0x00000034 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					      0x00000034 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					      0x00000034 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_RMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_MAXn 3
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ATTR 0x3
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n), \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_RMSK)
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_INI(n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_SUSPEND_IRQ_CLR_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+					       0x00000038 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_CLR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					       0x00000038 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_CLR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					       0x00000038 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						  0x0000003c + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+						  0x0000003c + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+						  0x0000003c + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_RMSK 0x7fffe000
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_MAXn 3
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ATTR 0x1
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ADDR(n), \
+		HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_RMSK)
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ENDPOINTS_BMSK 0x7fffe000
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ENDPOINTS_SHFT 0xd
+#define HWIO_IPA_HOLB_DROP_IRQ_EN_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						0x00000040 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_EN_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+						0x00000040 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_EN_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+						0x00000040 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_CLR_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						 0x00000044 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_CLR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS +	\
+						 0x00000044 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_CLR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS +	\
+						 0x00000044 + 0x1000 * (n))
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR (IPA_EE_REG_BASE + 0x000010a0)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_PHYS (IPA_EE_REG_BASE_PHYS + \
+					   0x000010a0)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_OFFS (IPA_EE_REG_BASE_OFFS + \
+					   0x000010a0)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_IN)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR (IPA_EE_REG_BASE + \
+					       0x000010a4)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_PHYS (IPA_EE_REG_BASE_PHYS + \
+					       0x000010a4)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_OFFS (IPA_EE_REG_BASE_OFFS + \
+					       0x000010a4)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_IN)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ADDR (IPA_EE_REG_BASE + \
+						0x000010a8)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_PHYS (IPA_EE_REG_BASE_PHYS + \
+						0x000010a8)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_OFFS (IPA_EE_REG_BASE_OFFS + \
+						0x000010a8)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ADDR,	\
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ADDR,	\
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_WRITE_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_WRITE_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ADDR (IPA_EE_REG_BASE + \
+						    0x000010ac)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_PHYS (IPA_EE_REG_BASE_PHYS + \
+						    0x000010ac)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_OFFS (IPA_EE_REG_BASE_OFFS + \
+						    0x000010ac)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_WRITE_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_WRITE_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR (IPA_EE_REG_BASE + 0x000010b0)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_PHYS (IPA_EE_REG_BASE_PHYS + \
+					  0x000010b0)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_OFFS (IPA_EE_REG_BASE_OFFS + \
+					  0x000010b0)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_RMSK 0x1ffff
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_IN in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_IN)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ENABLE_BMSK 0x10000
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ENABLE_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_SIZE_BMSK 0xffff
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_SIZE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ADDR (IPA_EE_REG_BASE + 0x000010b4)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_PHYS (IPA_EE_REG_BASE_PHYS + \
+					      0x000010b4)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_OFFS (IPA_EE_REG_BASE_OFFS + \
+					      0x000010b4)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_INM(m) in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_WRITE_PTR_BMSK 0xffff0000
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_WRITE_PTR_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_READ_PTR_BMSK 0xffff
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_READ_PTR_SHFT 0x0
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_0_ADDR (IPA_EE_REG_BASE + \
+						0x000010c0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_0_PHYS (IPA_EE_REG_BASE_PHYS + \
+						0x000010c0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_0_OFFS (IPA_EE_REG_BASE_OFFS + \
+						0x000010c0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_1_ADDR (IPA_EE_REG_BASE + \
+						0x000010c4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_1_PHYS (IPA_EE_REG_BASE_PHYS + \
+						0x000010c4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_1_OFFS (IPA_EE_REG_BASE_OFFS + \
+						0x000010c4)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_0_ADDR (IPA_EE_REG_BASE + \
+						  0x000010c8)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_0_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000010c8)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_0_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000010c8)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_1_ADDR (IPA_EE_REG_BASE + \
+						  0x000010cc)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_1_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000010cc)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_1_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000010cc)
+#define HWIO_IPA_SECURED_PIPES_ADDR (IPA_EE_REG_BASE + 0x000010d0)
+#define HWIO_IPA_SECURED_PIPES_PHYS (IPA_EE_REG_BASE_PHYS + 0x000010d0)
+#define HWIO_IPA_SECURED_PIPES_OFFS (IPA_EE_REG_BASE_OFFS + 0x000010d0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_CFG_ADDR (IPA_EE_REG_BASE + \
+						  0x000010d4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_CFG_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000010d4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_CFG_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000010d4)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__CONTROL_ADDR (IPA_EE_REG_BASE + \
+						   0x00001200)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__CONTROL_PHYS (IPA_EE_REG_BASE_PHYS + \
+						   0x00001200)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__CONTROL_OFFS (IPA_EE_REG_BASE_OFFS + \
+						   0x00001200)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__NMI_ADDR (IPA_EE_REG_BASE + \
+					       0x00001204)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__NMI_PHYS (IPA_EE_REG_BASE_PHYS + \
+					       0x00001204)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__NMI_OFFS (IPA_EE_REG_BASE_OFFS + \
+					       0x00001204)
+#define HWIO_IPA_SET_UC_IRQ_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00002048 + \
+					  0x4 * (n))
+#define HWIO_IPA_SET_UC_IRQ_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					  0x00002048 + 0x4 * (n))
+#define HWIO_IPA_SET_UC_IRQ_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					  0x00002048 + 0x4 * (n))
+#define HWIO_IPA_SET_UC_IRQ_ALL_EES_ADDR (IPA_EE_REG_BASE + 0x00002058)
+#define HWIO_IPA_SET_UC_IRQ_ALL_EES_PHYS (IPA_EE_REG_BASE_PHYS + \
+					  0x00002058)
+#define HWIO_IPA_SET_UC_IRQ_ALL_EES_OFFS (IPA_EE_REG_BASE_OFFS + \
+					  0x00002058)
+#define HWIO_IPA_UCP_RESUME_ADDR (IPA_EE_REG_BASE + 0x000030a0)
+#define HWIO_IPA_UCP_RESUME_PHYS (IPA_EE_REG_BASE_PHYS + 0x000030a0)
+#define HWIO_IPA_UCP_RESUME_OFFS (IPA_EE_REG_BASE_OFFS + 0x000030a0)
+#define HWIO_IPA_PROC_UCP_CFG_ADDR (IPA_EE_REG_BASE + 0x000030a4)
+#define HWIO_IPA_PROC_UCP_CFG_PHYS (IPA_EE_REG_BASE_PHYS + 0x000030a4)
+#define HWIO_IPA_PROC_UCP_CFG_OFFS (IPA_EE_REG_BASE_OFFS + 0x000030a4)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_0_ADDR (IPA_EE_REG_BASE + \
+						  0x000030a8)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_0_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000030a8)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_0_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000030a8)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_1_ADDR (IPA_EE_REG_BASE + \
+						  0x000030ac)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_1_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000030ac)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_1_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000030ac)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_2_ADDR (IPA_EE_REG_BASE + \
+						  0x000030b0)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_2_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000030b0)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_2_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000030b0)
+#define IPA_UC_IPA_UC_PER_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x000c0000)
+#define IPA_UC_IPA_UC_PER_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + \
+					 0x000c0000)
+#define IPA_UC_IPA_UC_PER_REG_BASE_OFFS 0x000c0000
+#define HWIO_IPA_UC_STATUS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000000)
+#define HWIO_IPA_UC_STATUS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				 0x00000000)
+#define HWIO_IPA_UC_STATUS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				 0x00000000)
+#define HWIO_IPA_UC_CONTROL_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000004)
+#define HWIO_IPA_UC_CONTROL_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				  0x00000004)
+#define HWIO_IPA_UC_CONTROL_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				  0x00000004)
+#define HWIO_IPA_UC_BASE_ADDR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				    0x00000008)
+#define HWIO_IPA_UC_BASE_ADDR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				    0x00000008)
+#define HWIO_IPA_UC_BASE_ADDR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				    0x00000008)
+#define HWIO_IPA_UC_BASE_ADDR_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x0000000c)
+#define HWIO_IPA_UC_BASE_ADDR_MSB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x0000000c)
+#define HWIO_IPA_UC_BASE_ADDR_MSB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x0000000c)
+#define HWIO_IPA_UC_SYS_BUS_ATTRIB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					 0x00000010)
+#define HWIO_IPA_UC_SYS_BUS_ATTRIB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					 0x00000010)
+#define HWIO_IPA_UC_SYS_BUS_ATTRIB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					 0x00000010)
+#define HWIO_IPA_UC_PEND_IRQ_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000014)
+#define HWIO_IPA_UC_PEND_IRQ_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				   0x00000014)
+#define HWIO_IPA_UC_PEND_IRQ_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				   0x00000014)
+#define HWIO_IPA_UC_TRACE_BUFFER_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000018)
+#define HWIO_IPA_UC_TRACE_BUFFER_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000018)
+#define HWIO_IPA_UC_TRACE_BUFFER_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000018)
+#define HWIO_IPA_UC_PC_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x0000001c)
+#define HWIO_IPA_UC_PC_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_IPA_UC_PC_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x0000001c)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_LSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+					       + 0x00000024)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_LSB_PHYS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000024)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_LSB_OFFS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000024)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+					       + 0x00000028)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_MSB_PHYS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000028)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_MSB_OFFS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000028)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000100)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000100)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000100)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_RMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ATTR 0x3
+#define HWIO_IPA_UC_QMB_SYS_ADDR_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_RMSK)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_IN)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ADDR_SHFT 0x0
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE +	\
+					   0x00000104)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					   + 0x00000104)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					   + 0x00000104)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ATTR 0x3
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_RMSK)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_IN)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					 0x00000108)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					 0x00000108)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					 0x00000108)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_RMSK 0x3ffff
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ATTR 0x3
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_RMSK)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_IN)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR_BMSK 0x3ffff
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR_SHFT 0x0
+#define HWIO_IPA_UC_QMB_LENGTH_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				     0x0000010c)
+#define HWIO_IPA_UC_QMB_LENGTH_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				     0x0000010c)
+#define HWIO_IPA_UC_QMB_LENGTH_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				     0x0000010c)
+#define HWIO_IPA_UC_QMB_LENGTH_RMSK 0x7f
+#define HWIO_IPA_UC_QMB_LENGTH_ATTR 0x3
+#define HWIO_IPA_UC_QMB_LENGTH_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		HWIO_IPA_UC_QMB_LENGTH_RMSK)
+#define HWIO_IPA_UC_QMB_LENGTH_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_LENGTH_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_LENGTH_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_LENGTH_IN)
+#define HWIO_IPA_UC_QMB_LENGTH_LENGTH_BMSK 0x7f
+#define HWIO_IPA_UC_QMB_LENGTH_LENGTH_SHFT 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				      0x00000110)
+#define HWIO_IPA_UC_QMB_TRIGGER_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS +	\
+				      0x00000110)
+#define HWIO_IPA_UC_QMB_TRIGGER_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS +	\
+				      0x00000110)
+#define HWIO_IPA_UC_QMB_TRIGGER_RMSK 0x31
+#define HWIO_IPA_UC_QMB_TRIGGER_ATTR 0x3
+#define HWIO_IPA_UC_QMB_TRIGGER_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		HWIO_IPA_UC_QMB_TRIGGER_RMSK)
+#define HWIO_IPA_UC_QMB_TRIGGER_INM(m) in_dword_masked(	\
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_TRIGGER_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_TRIGGER_OUTM(m, v) out_dword_masked_ns(	\
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_TRIGGER_IN)
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_BMSK 0x30
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_SHFT 0x4
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_DATA_POSTED_FVAL 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_RESP_POSTED_FVAL 0x1
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_DATA_COMPLETE_FVAL 0x2
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_RESP_COMPLETE_FVAL 0x3
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_BMSK 0x1
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_SHFT 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_READ_FVAL 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_WRITE_FVAL 0x1
+#define HWIO_IPA_UC_QMB_PENDING_TID_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					  0x00000114)
+#define HWIO_IPA_UC_QMB_PENDING_TID_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					  + 0x00000114)
+#define HWIO_IPA_UC_QMB_PENDING_TID_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					  + 0x00000114)
+#define HWIO_IPA_UC_QMB_PENDING_TID_RMSK 0x11113f
+#define HWIO_IPA_UC_QMB_PENDING_TID_ATTR 0x1
+#define HWIO_IPA_UC_QMB_PENDING_TID_IN in_dword_masked(	\
+		HWIO_IPA_UC_QMB_PENDING_TID_ADDR, \
+		HWIO_IPA_UC_QMB_PENDING_TID_RMSK)
+#define HWIO_IPA_UC_QMB_PENDING_TID_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_PENDING_TID_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_SECURITY_BMSK 0x100000
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_SECURITY_SHFT 0x14
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_COMP_BMSK 0x10000
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_COMP_SHFT 0x10
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_OS_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_OS_SHFT 0xc
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_BUS_BMSK 0x100
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_BUS_SHFT 0x8
+#define HWIO_IPA_UC_QMB_PENDING_TID_TID_BMSK 0x3f
+#define HWIO_IPA_UC_QMB_PENDING_TID_TID_SHFT 0x0
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+						+ 0x00000118)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ADDR ( \
+		IPA_UC_IPA_UC_PER_REG_BASE + 0x0000011c)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_RMSK 0x113f
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ATTR 0x1
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ADDR, \
+		HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_RMSK)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_VALID_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_VALID_SHFT 0xc
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ERROR_BMSK 0x100
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ERROR_SHFT 0x8
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_TID_BMSK 0x3f
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_TID_SHFT 0x0
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+						+ 0x00000120)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000120)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000120)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ADDR ( \
+		IPA_UC_IPA_UC_PER_REG_BASE + 0x00000124)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000124)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000124)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_RMSK 0x113f
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ATTR 0x1
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ADDR, \
+		HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_RMSK)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_VALID_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_VALID_SHFT 0xc
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ERROR_BMSK 0x100
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ERROR_SHFT 0x8
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_TID_BMSK 0x3f
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_TID_SHFT 0x0
+#define HWIO_IPA_UC_QMB_MISC_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000128)
+#define HWIO_IPA_UC_QMB_MISC_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				   0x00000128)
+#define HWIO_IPA_UC_QMB_MISC_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				   0x00000128)
+#define HWIO_IPA_UC_QMB_MISC_RMSK 0xf11333ff
+#define HWIO_IPA_UC_QMB_MISC_ATTR 0x3
+#define HWIO_IPA_UC_QMB_MISC_IN in_dword_masked(HWIO_IPA_UC_QMB_MISC_ADDR, \
+						HWIO_IPA_UC_QMB_MISC_RMSK)
+#define HWIO_IPA_UC_QMB_MISC_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_MISC_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_MISC_OUT(v) out_dword(HWIO_IPA_UC_QMB_MISC_ADDR, v)
+#define HWIO_IPA_UC_QMB_MISC_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_MISC_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_MISC_IN)
+#define HWIO_IPA_UC_QMB_MISC_QMB_HREADY_BCR_BMSK 0x80000000
+#define HWIO_IPA_UC_QMB_MISC_QMB_HREADY_BCR_SHFT 0x1f
+#define HWIO_IPA_UC_QMB_MISC_POSTED_STALL_BMSK 0x40000000
+#define HWIO_IPA_UC_QMB_MISC_POSTED_STALL_SHFT 0x1e
+#define HWIO_IPA_UC_QMB_MISC_IRQ_COAL_BMSK 0x20000000
+#define HWIO_IPA_UC_QMB_MISC_IRQ_COAL_SHFT 0x1d
+#define HWIO_IPA_UC_QMB_MISC_SWAP_BMSK 0x10000000
+#define HWIO_IPA_UC_QMB_MISC_SWAP_SHFT 0x1c
+#define HWIO_IPA_UC_QMB_MISC_OOOWR_BMSK 0x1000000
+#define HWIO_IPA_UC_QMB_MISC_OOOWR_SHFT 0x18
+#define HWIO_IPA_UC_QMB_MISC_OOORD_BMSK 0x100000
+#define HWIO_IPA_UC_QMB_MISC_OOORD_SHFT 0x14
+#define HWIO_IPA_UC_QMB_MISC_WR_PRIORITY_BMSK 0x30000
+#define HWIO_IPA_UC_QMB_MISC_WR_PRIORITY_SHFT 0x10
+#define HWIO_IPA_UC_QMB_MISC_RD_PRIORITY_BMSK 0x3000
+#define HWIO_IPA_UC_QMB_MISC_RD_PRIORITY_SHFT 0xc
+#define HWIO_IPA_UC_QMB_MISC_USER_BMSK 0x3ff
+#define HWIO_IPA_UC_QMB_MISC_USER_SHFT 0x0
+#define HWIO_IPA_UC_QMB_STATUS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				     0x0000012c)
+#define HWIO_IPA_UC_QMB_STATUS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				     0x0000012c)
+#define HWIO_IPA_UC_QMB_STATUS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				     0x0000012c)
+#define HWIO_IPA_UC_QMB_STATUS_RMSK 0x1fff1fff
+#define HWIO_IPA_UC_QMB_STATUS_ATTR 0x1
+#define HWIO_IPA_UC_QMB_STATUS_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_STATUS_ADDR, \
+		HWIO_IPA_UC_QMB_STATUS_RMSK)
+#define HWIO_IPA_UC_QMB_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_FIFO_FULL_BMSK 0x10000000
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_FIFO_FULL_SHFT 0x1c
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_CNT_BMSK 0xf000000
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_CNT_SHFT 0x18
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_WR_CNT_BMSK 0xf00000
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_WR_CNT_SHFT 0x14
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_WR_BMSK 0xf0000
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_WR_SHFT 0x10
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_FIFO_FULL_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_FIFO_FULL_SHFT 0xc
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_CNT_BMSK 0xf00
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_CNT_SHFT 0x8
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_RD_CNT_BMSK 0xf0
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_RD_CNT_SHFT 0x4
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_RD_BMSK 0xf
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_RD_SHFT 0x0
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					 0x00000130)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					 0x00000130)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					 0x00000130)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_RMSK 0x1117
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_ATTR 0x3
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_RMSK)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_IN)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_SHARED_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_SHARED_SHFT 0xc
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_INNERSHARED_BMSK 0x100
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_INNERSHARED_SHFT 0x8
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_NOALLOCATE_BMSK 0x10
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_NOALLOCATE_SHFT 0x4
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_BMSK 0x7
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_SHFT 0x0
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_STRONGLY_ORDERED_FVAL 0x0
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_DEVICE_FVAL 0x1
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_NON_CACHEABLE_FVAL 0x2
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_COPYBACK_WRITEALLOCATE_FVAL 0x3
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_WRITETHROUGH_NOALLOCATE_FVAL	\
+	0x6
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_COPYBACK_NOALLOCATE_FVAL 0x7
+#define HWIO_IPA_UC_MBOX_INT_STTS_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					     0x00000200 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_STTS_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000200 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_STTS_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000200 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_EN_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE +	\
+					   0x00000204 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_EN_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					   + 0x00000204 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_EN_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					   + 0x00000204 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_CLR_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					    0x00000208 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_CLR_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000208 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_CLR_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000208 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_STTS_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					    0x00000300 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_STTS_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000300 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_STTS_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000300 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_EN_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					  0x00000304 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_EN_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					  + 0x00000304 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_EN_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					  + 0x00000304 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_CLR_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE +	\
+					   0x00000308 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_CLR_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					   + 0x00000308 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_CLR_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					   + 0x00000308 + 0x10 * (n))
+#define HWIO_IPA_UC_HWEV_INT_STTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x00000400)
+#define HWIO_IPA_UC_HWEV_INT_STTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x00000400)
+#define HWIO_IPA_UC_HWEV_INT_STTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x00000400)
+#define HWIO_IPA_UC_HWEV_INT_EN_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				      0x00000404)
+#define HWIO_IPA_UC_HWEV_INT_EN_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS +	\
+				      0x00000404)
+#define HWIO_IPA_UC_HWEV_INT_EN_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS +	\
+				      0x00000404)
+#define HWIO_IPA_UC_HWEV_INT_CLR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000408)
+#define HWIO_IPA_UC_HWEV_INT_CLR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000408)
+#define HWIO_IPA_UC_HWEV_INT_CLR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000408)
+#define HWIO_IPA_UC_SWEV_INT_STTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x00000410)
+#define HWIO_IPA_UC_SWEV_INT_STTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x00000410)
+#define HWIO_IPA_UC_SWEV_INT_STTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x00000410)
+#define HWIO_IPA_UC_SWEV_INT_EN_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				      0x00000414)
+#define HWIO_IPA_UC_SWEV_INT_EN_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS +	\
+				      0x00000414)
+#define HWIO_IPA_UC_SWEV_INT_EN_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS +	\
+				      0x00000414)
+#define HWIO_IPA_UC_SWEV_INT_CLR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000418)
+#define HWIO_IPA_UC_SWEV_INT_CLR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000418)
+#define HWIO_IPA_UC_SWEV_INT_CLR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000418)
+#define HWIO_IPA_UC_VUIC_INT_STTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x0000041c)
+#define HWIO_IPA_UC_VUIC_INT_STTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x0000041c)
+#define HWIO_IPA_UC_VUIC_INT_STTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x0000041c)
+#define HWIO_IPA_UC_VUIC_INT_CLR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000420)
+#define HWIO_IPA_UC_VUIC_INT_CLR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000420)
+#define HWIO_IPA_UC_VUIC_INT_CLR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000420)
+#define HWIO_IPA_UC_TIMER_CTRL_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					  0x00000500 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_CTRL_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					  + 0x00000500 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_CTRL_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					  + 0x00000500 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_STATUS_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					    0x00000508 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_STATUS_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000508 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_STATUS_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000508 + 0x10 * (n))
+#define HWIO_IPA_UC_EVENTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000600)
+#define HWIO_IPA_UC_EVENTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				 0x00000600)
+#define HWIO_IPA_UC_EVENTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				 0x00000600)
+#define HWIO_IPA_UC_VUIC_BUS_ADDR_TRANSLATE_EN_ADDR ( \
+		IPA_UC_IPA_UC_PER_REG_BASE + 0x00000710)
+#define HWIO_IPA_UC_VUIC_BUS_ADDR_TRANSLATE_EN_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000710)
+#define HWIO_IPA_UC_VUIC_BUS_ADDR_TRANSLATE_EN_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000710)
+#define HWIO_IPA_UC_SYS_ADDR_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000714)
+#define HWIO_IPA_UC_SYS_ADDR_MSB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000714)
+#define HWIO_IPA_UC_SYS_ADDR_MSB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000714)
+#define HWIO_IPA_UC_PC_RESTORE_WR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x00000718)
+#define HWIO_IPA_UC_PC_RESTORE_WR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x00000718)
+#define HWIO_IPA_UC_PC_RESTORE_WR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x00000718)
+#define HWIO_IPA_UC_PC_RESTORE_RD_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x0000071c)
+#define HWIO_IPA_UC_PC_RESTORE_RD_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x0000071c)
+#define HWIO_IPA_UC_PC_RESTORE_RD_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x0000071c)
+#define HWIO_IPA_UC_SPARE_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00001ffc)
+#define HWIO_IPA_UC_SPARE_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				0x00001ffc)
+#define HWIO_IPA_UC_SPARE_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				0x00001ffc)
+#endif
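The hwio register header above is mechanically generated and follows one pattern throughout: every register gets _ADDR/_PHYS/_OFFS address macros and an _RMSK read mask; scalar registers get _IN/_INM/_OUT/_OUTM accessors and arrayed registers the _INI/_INMI/_OUTI/_OUTMI equivalents, all built on the in_dword_masked()/out_dword()/out_dword_masked_ns() primitives; every field gets a _BMSK/_SHFT pair. As a minimal sketch of how such generated macros are conventionally consumed (the two helper names below are invented for illustration; only the HWIO_* macros come from the header above):

	/* Sketch: extract one field by masking with _BMSK and shifting
	 * down by _SHFT. The _IN form already applies the register's
	 * read mask (_RMSK).
	 */
	static inline u32 ipa_uc_qmb_outstanding_rd_cnt(void)
	{
		u32 val = HWIO_IPA_UC_QMB_STATUS_IN;

		return (val & HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_RD_CNT_BMSK) >>
		       HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_RD_CNT_SHFT;
	}

	/* Sketch: _OUTM is a read-modify-write -- out_dword_masked_ns()
	 * folds in the register's current value (its trailing _IN
	 * argument), so only the masked bits change.
	 */
	static inline void ipa_uc_qmb_set_length(u32 len)
	{
		HWIO_IPA_UC_QMB_LENGTH_OUTM(
			HWIO_IPA_UC_QMB_LENGTH_LENGTH_BMSK,
			len << HWIO_IPA_UC_QMB_LENGTH_LENGTH_SHFT);
	}
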
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h
new file mode 100644
index 0000000..306dfec
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h
@@ -0,0 +1,2925 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_HWIO_DEF_H_)
+#define _IPA_HWIO_DEF_H_
+struct ipa_hwio_def_ipa_gsi_top_gsi_cfg_s {
+	u32	gsi_enable : 1;
+	u32	mcs_enable : 1;
+	u32	double_mcs_clk_freq : 1;
+	u32	uc_is_mcs : 1;
+	u32	gsi_pwr_clps : 1;
+	u32	bp_mtrix_disable : 1;
+	u32	reserved0 : 2;
+	u32	sleep_clk_div : 4;
+	u32	reserved1 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_cfg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_cfg_s	def;
+	u32						value;
+};
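/*
 * Illustrative aside, not part of the patch: each *_s struct in this
 * header names the bitfields of one register, and the matching *_u
 * union overlays them on the raw dword, so a value read through the
 * hwio accessors can be decoded field by field. A hedged sketch; the
 * reader macro is assumed to exist in the companion hwio header and is
 * not confirmed by this patch:
 *
 *	union ipa_hwio_def_ipa_gsi_top_gsi_cfg_u cfg;
 *
 *	cfg.value = HWIO_IPA_GSI_TOP_GSI_CFG_IN;   (assumed accessor)
 *
 *	if (cfg.def.gsi_enable)
 *		pr_debug("sleep_clk_div=%u\n", cfg.def.sleep_clk_div);
 */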
+struct ipa_hwio_def_ipa_gsi_top_gsi_ree_cfg_s {
+	u32	move_to_esc_clr_mode_trsh : 1;
+	u32	channel_empty_int_enable : 1;
+	u32	reserved0 : 6;
+	u32	max_burst_size : 8;
+	u32	reserved1 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_ree_cfg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_ree_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_manager_ee_qos_n_s {
+	u32	ee_prio : 2;
+	u32	reserved0 : 6;
+	u32	max_ch_alloc : 5;
+	u32	reserved1 : 3;
+	u32	max_ev_alloc : 5;
+	u32	reserved2 : 11;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_manager_ee_qos_n_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_manager_ee_qos_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_s {
+	u32 inst_byte_0 : 8;
+	u32 inst_byte_1 : 8;
+	u32 inst_byte_2 : 8;
+	u32 inst_byte_3 : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_n_s {
+	u32 shram : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_n_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_map_ee_n_ch_k_vp_table_s {
+	u32	phy_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_map_ee_n_ch_k_vp_table_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_map_ee_n_ch_k_vp_table_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_sel_s {
+	u32	gsi_testbus_sel : 8;
+	u32	reserved0 : 8;
+	u32	gsi_hw_events_sel : 4;
+	u32	reserved1 : 12;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_test_bus_sel_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_sel_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_reg_s {
+	u32 gsi_testbus_reg : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_test_bus_reg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_reg_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_s {
+	u32	csr_busy : 1;
+	u32	ree_busy : 1;
+	u32	mcs_busy : 1;
+	u32	timer_busy : 1;
+	u32	rd_wr_busy : 1;
+	u32	ev_eng_busy : 1;
+	u32	int_eng_busy : 1;
+	u32	ree_pwr_clps_busy : 1;
+	u32	db_eng_busy : 1;
+	u32	dbg_cnt_busy : 1;
+	u32	uc_busy : 1;
+	u32	ic_busy : 1;
+	u32	sdma_busy : 1;
+	u32	reserved0 : 19;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_s {
+	u32 chid_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_s {
+	u32 chid_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_s {
+	u32 chid_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_countern_s {
+	u32	counter_value : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_countern_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_countern_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_s {
+	u32	mcs_stall : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_s {
+	u32	err_write : 1;
+	u32	reserved0 : 7;
+	u32	err_tid : 8;
+	u32	err_mid : 8;
+	u32	err_saved : 1;
+	u32	reserved1 : 7;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_rf_n_read_s {
+	u32 rf_reg : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_rf_n_read_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_rf_n_read_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_ee_n_ev_k_vp_table_s {
+	u32	phy_ev_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_ee_n_ev_k_vp_table_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_ee_n_ev_k_vp_table_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_0_s {
+	u32	chtype_protocol : 3;
+	u32	chtype_dir : 1;
+	u32	ee : 4;
+	u32	chid : 5;
+	u32	chtype_protocol_msb : 1;
+	u32	erindex : 5;
+	u32	reserved0 : 1;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_0_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_1_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_2_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_2_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_3_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_3_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_4_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_4_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_5_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_5_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_6_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_6_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_7_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_7_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_read_ptr_s {
+	u32	read_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_read_ptr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_read_ptr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_write_ptr_s {
+	u32	re_intr_db : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_write_ptr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_write_ptr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_qos_s {
+	u32	wrr_weight : 4;
+	u32	reserved0 : 4;
+	u32	max_prefetch : 1;
+	u32	use_db_eng : 1;
+	u32	prefetch_mode : 4;
+	u32	reserved1 : 2;
+	u32	empty_lvl_thrshold : 8;
+	u32	reserved2 : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_qos_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_qos_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_0_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_1_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_2_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_2_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_2_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_3_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_3_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_3_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_0_s {
+	u32	chtype : 4;
+	u32	ee : 4;
+	u32	evchid : 8;
+	u32	intype : 1;
+	u32	reserved0 : 3;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_1_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_2_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_2_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_3_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_3_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_4_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_4_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_5_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_5_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_6_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_6_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_7_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_7_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_8_s {
+	u32	int_modt : 16;
+	u32	int_modc : 8;
+	u32	int_mod_cnt : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_8_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_8_s	def;
+	u32							value;
+};
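+/*
+ * Illustrative sketch, not part of the generated header: the event-ring
+ * context word above packs the interrupt-moderation timer (int_modt) and
+ * counter (int_modc) into a single register, so the full word is most
+ * easily built through the union. The helper name and the interpretation
+ * of the arguments are assumptions, not taken from this file.
+ */
+static inline u32 ipa_hwio_example_ev_moderation(u32 modt, u32 modc)
+{
+	union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_8_u cn = { };
+
+	cn.def.int_modt = modt;	/* 16-bit moderation timer  */
+	cn.def.int_modc = modc;	/* 8-bit moderation counter */
+	return cn.value;	/* packed register word     */
+}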
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_9_s {
+	u32 intvec : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_9_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_9_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_10_s {
+	u32 msi_addr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_10_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_10_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_11_s {
+	u32 msi_addr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_11_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_11_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_12_s {
+	u32 rp_update_addr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_12_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_12_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_13_s {
+	u32 rp_update_addr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_13_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_13_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_0_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_1_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_status_s {
+	u32	enabled : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_status_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_status_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_msk_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_msk_s {
+	u32	gsi_ch_bit_map_msk : 23;
+	u32	reserved0 : 9;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_clr_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_clr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_clr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_clr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_glob_irq_stts_s {
+	u32	error_int : 1;
+	u32	gp_int1 : 1;
+	u32	gp_int2 : 1;
+	u32	gp_int3 : 1;
+	u32	reserved0 : 28;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_glob_irq_stts_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_glob_irq_stts_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_gsi_irq_stts_s {
+	u32	gsi_break_point : 1;
+	u32	gsi_bus_error : 1;
+	u32	gsi_cmd_fifo_ovrflow : 1;
+	u32	gsi_mcs_stack_ovrflow : 1;
+	u32	reserved0 : 28;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_gsi_irq_stts_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_gsi_irq_stts_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_intset_s {
+	u32	intype : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_intset_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_intset_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_lsb_s {
+	u32 msi_addr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_lsb_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_lsb_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_msb_s {
+	u32 msi_addr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_msb_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_msb_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_s {
+	u32 error_log : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_error_log_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_clr_s {
+	u32 error_log_clr : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_error_log_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_clr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_0_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_1_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_1_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_comp_hw_version_s {
+	u32	step : 16;
+	u32	minor : 12;
+	u32	major : 4;
+};
+union ipa_hwio_def_ipa_comp_hw_version_u {
+	struct ipa_hwio_def_ipa_comp_hw_version_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_comp_cfg_s {
+	u32	reserved0 : 1;
+	u32	gsi_snoc_bypass_dis : 1;
+	u32	gen_qmb_0_snoc_bypass_dis : 1;
+	u32	gen_qmb_1_snoc_bypass_dis : 1;
+	u32	reserved1 : 1;
+	u32	ipa_qmb_select_by_address_cons_en : 1;
+	u32	ipa_qmb_select_by_address_prod_en : 1;
+	u32	gsi_multi_inorder_rd_dis : 1;
+	u32	gsi_multi_inorder_wr_dis : 1;
+	u32	gen_qmb_0_multi_inorder_rd_dis : 1;
+	u32	gen_qmb_1_multi_inorder_rd_dis : 1;
+	u32	gen_qmb_0_multi_inorder_wr_dis : 1;
+	u32	gen_qmb_1_multi_inorder_wr_dis : 1;
+	u32	gen_qmb_0_snoc_cnoc_loop_protection_disable : 1;
+	u32	gsi_snoc_cnoc_loop_protection_disable : 1;
+	u32	gsi_multi_axi_masters_dis : 1;
+	u32	ipa_qmb_select_by_address_global_en : 1;
+	u32	ipa_atomic_fetcher_arb_lock_dis : 4;
+	u32	ipa_full_flush_wait_rsc_closure_en : 1;
+	u32	reserved2 : 10;
+};
+union ipa_hwio_def_ipa_comp_cfg_u {
+	struct ipa_hwio_def_ipa_comp_cfg_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_route_s {
+	u32	route_dis : 1;
+	u32	route_def_pipe : 5;
+	u32	route_def_hdr_table : 1;
+	u32	route_def_hdr_ofst : 10;
+	u32	route_frag_def_pipe : 5;
+	u32	reserved0 : 2;
+	u32	route_def_retain_hdr : 1;
+	u32	reserved1 : 7;
+};
+union ipa_hwio_def_ipa_route_u {
+	struct ipa_hwio_def_ipa_route_s def;
+	u32				value;
+};
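+/*
+ * Illustrative sketch, not part of the generated header: a typical
+ * read-modify-write goes through the union -- overlay the word read from
+ * the register, update the named fields, then write .value back. The
+ * helper name is hypothetical and the MMIO read/write itself is left to
+ * the driver's own accessors.
+ */
+static inline u32 ipa_hwio_example_route_to_pipe(u32 raw, u32 pipe)
+{
+	union ipa_hwio_def_ipa_route_u route;
+
+	route.value = raw;			/* word read from the route register */
+	route.def.route_dis = 0;		/* keep default routing enabled */
+	route.def.route_def_pipe = pipe;	/* 5-bit default pipe id */
+	return route.value;			/* word to write back */
+}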
+struct ipa_hwio_def_ipa_proc_iph_cfg_s {
+	u32	iph_threshold : 2;
+	u32	iph_pipelining_disable : 1;
+	u32	reserved0 : 1;
+	u32	status_from_iph_frst_always : 1;
+	u32	iph_nat_blind_invalidate_tport_offset_disable : 1;
+	u32	pipestage_overlap_disable : 1;
+	u32	ftch_dcph_overlap_enable : 1;
+	u32	iph_pkt_parser_protocol_stop_enable : 1;
+	u32	iph_pkt_parser_protocol_stop_hop : 1;
+	u32	iph_pkt_parser_protocol_stop_dest : 1;
+	u32	iph_pkt_parser_ihl_to_2nd_frag_en : 1;
+	u32	reserved1 : 4;
+	u32	iph_pkt_parser_protocol_stop_value : 8;
+	u32	d_dcph_multi_engine_disable : 1;
+	u32	reserved2 : 7;
+};
+union ipa_hwio_def_ipa_proc_iph_cfg_u {
+	struct ipa_hwio_def_ipa_proc_iph_cfg_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_dpl_timer_lsb_s {
+	u32 tod_lsb : 32;
+};
+union ipa_hwio_def_ipa_dpl_timer_lsb_u {
+	struct ipa_hwio_def_ipa_dpl_timer_lsb_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_dpl_timer_msb_s {
+	u32	tod_msb : 16;
+	u32	reserved0 : 15;
+	u32	timer_en : 1;
+};
+union ipa_hwio_def_ipa_dpl_timer_msb_u {
+	struct ipa_hwio_def_ipa_dpl_timer_msb_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_tx_wrapper_s {
+	u32	tx0_idle : 1;
+	u32	tx1_idle : 1;
+	u32	ipa_prod_ackmngr_db_empty : 1;
+	u32	ipa_prod_ackmngr_state_idle : 1;
+	u32	ipa_prod_bresp_empty : 1;
+	u32	ipa_prod_bresp_toggle_idle : 1;
+	u32	ipa_mbim_pkt_fms_idle : 1;
+	u32	mbim_direct_dma : 2;
+	u32	trnseq_force_valid : 1;
+	u32	pkt_drop_cnt_idle : 1;
+	u32	nlo_direct_dma : 2;
+	u32	coal_direct_dma : 2;
+	u32	coal_slave_idle : 1;
+	u32	coal_slave_ctx_idle : 1;
+	u32	reserved0 : 8;
+	u32	coal_slave_open_frame : 4;
+	u32	reserved1 : 3;
+};
+union ipa_hwio_def_ipa_state_tx_wrapper_u {
+	struct ipa_hwio_def_ipa_state_tx_wrapper_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_tx1_s {
+	u32	flopped_arbit_type : 3;
+	u32	arbit_type : 3;
+	u32	pa_idle : 1;
+	u32	pa_ctx_idle : 1;
+	u32	pa_rst_idle : 1;
+	u32	pa_pub_cnt_empty : 1;
+	u32	tx_cmd_main_idle : 1;
+	u32	tx_cmd_trnseq_idle : 1;
+	u32	tx_cmd_snif_idle : 1;
+	u32	tx_cmd_bresp_aloc_idle : 1;
+	u32	tx_cmd_bresp_inj_idle : 1;
+	u32	ar_idle : 1;
+	u32	dmaw_idle : 1;
+	u32	dmaw_last_outsd_idle : 1;
+	u32	pf_idle : 1;
+	u32	pf_empty : 1;
+	u32	aligner_empty : 1;
+	u32	holb_idle : 1;
+	u32	holb_mask_idle : 1;
+	u32	rsrcrel_idle : 1;
+	u32	suspend_empty : 1;
+	u32	cs_snif_idle : 1;
+	u32	last_cmd_pipe : 5;
+	u32	suspend_req_empty : 1;
+};
+union ipa_hwio_def_ipa_state_tx1_u {
+	struct ipa_hwio_def_ipa_state_tx1_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_fetcher_s {
+	u32	ipa_hps_ftch_state_idle : 1;
+	u32	ipa_hps_ftch_alloc_state_idle : 1;
+	u32	ipa_hps_ftch_pkt_state_idle : 1;
+	u32	ipa_hps_ftch_imm_state_idle : 1;
+	u32	ipa_hps_ftch_cmplt_state_idle : 1;
+	u32	ipa_hps_dmar_state_idle : 7;
+	u32	ipa_hps_dmar_slot_state_idle : 7;
+	u32	ipa_hps_imm_cmd_exec_state_idle : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_state_fetcher_u {
+	struct ipa_hwio_def_ipa_state_fetcher_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_fetcher_mask_0_s {
+	u32	mask_queue_dmar_uses_queue : 8;
+	u32	mask_queue_imm_exec : 8;
+	u32	mask_queue_no_resources_context : 8;
+	u32	mask_queue_no_resources_hps_dmar : 8;
+};
+union ipa_hwio_def_ipa_state_fetcher_mask_0_u {
+	struct ipa_hwio_def_ipa_state_fetcher_mask_0_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_fetcher_mask_1_s {
+	u32	mask_queue_no_resources_ack_entry : 8;
+	u32	mask_queue_arb_lock : 8;
+	u32	mask_queue_step_mode : 8;
+	u32	mask_queue_no_space_dpl_fifo : 8;
+};
+union ipa_hwio_def_ipa_state_fetcher_mask_1_u {
+	struct ipa_hwio_def_ipa_state_fetcher_mask_1_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_dpl_fifo_s {
+	u32	pop_fsm_state : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_state_dpl_fifo_u {
+	struct ipa_hwio_def_ipa_state_dpl_fifo_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_coal_master_s {
+	u32	vp_vld : 4;
+	u32	main_fsm_state : 4;
+	u32	find_open_fsm_state : 4;
+	u32	hash_calc_fsm_state : 4;
+	u32	check_fit_fsm_state : 4;
+	u32	init_vp_fsm_state : 4;
+	u32	lru_vp : 4;
+	u32	vp_timer_expired : 4;
+};
+union ipa_hwio_def_ipa_state_coal_master_u {
+	struct ipa_hwio_def_ipa_state_coal_master_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_dfetcher_s {
+	u32	ipa_dps_ftch_pkt_state_idle : 1;
+	u32	ipa_dps_ftch_cmplt_state_idle : 1;
+	u32	reserved0 : 2;
+	u32	ipa_dps_dmar_state_idle : 6;
+	u32	reserved1 : 2;
+	u32	ipa_dps_dmar_slot_state_idle : 6;
+	u32	reserved2 : 14;
+};
+union ipa_hwio_def_ipa_state_dfetcher_u {
+	struct ipa_hwio_def_ipa_state_dfetcher_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_acl_s {
+	u32	ipa_hps_h_dcph_empty : 1;
+	u32	ipa_hps_h_dcph_active : 1;
+	u32	ipa_hps_pkt_parser_empty : 1;
+	u32	ipa_hps_pkt_parser_active : 1;
+	u32	ipa_hps_filter_nat_empty : 1;
+	u32	ipa_hps_filter_nat_active : 1;
+	u32	ipa_hps_router_empty : 1;
+	u32	ipa_hps_router_active : 1;
+	u32	ipa_hps_hdri_empty : 1;
+	u32	ipa_hps_hdri_active : 1;
+	u32	ipa_hps_ucp_empty : 1;
+	u32	ipa_hps_ucp_active : 1;
+	u32	ipa_hps_enqueuer_empty : 1;
+	u32	ipa_hps_enqueuer_active : 1;
+	u32	ipa_dps_d_dcph_empty : 1;
+	u32	ipa_dps_d_dcph_active : 1;
+	u32	reserved0 : 2;
+	u32	ipa_dps_dispatcher_empty : 1;
+	u32	ipa_dps_dispatcher_active : 1;
+	u32	ipa_dps_d_dcph_2_empty : 1;
+	u32	ipa_dps_d_dcph_2_active : 1;
+	u32	ipa_hps_sequencer_idle : 1;
+	u32	ipa_dps_sequencer_idle : 1;
+	u32	ipa_dps_d_dcph_2nd_empty : 1;
+	u32	ipa_dps_d_dcph_2nd_active : 1;
+	u32	ipa_hps_coal_master_empty : 1;
+	u32	ipa_hps_coal_master_active : 1;
+	u32	reserved1 : 4;
+};
+union ipa_hwio_def_ipa_state_acl_u {
+	struct ipa_hwio_def_ipa_state_acl_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_tlv_s {
+	u32	ipa_gsi_toggle_fsm_idle : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_state_gsi_tlv_u {
+	struct ipa_hwio_def_ipa_state_gsi_tlv_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_aos_s {
+	u32	ipa_gsi_aos_fsm_idle : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_state_gsi_aos_u {
+	struct ipa_hwio_def_ipa_state_gsi_aos_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_if_s {
+	u32	ipa_gsi_prod_fsm_tx_0 : 4;
+	u32	ipa_gsi_prod_fsm_tx_1 : 4;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_state_gsi_if_u {
+	struct ipa_hwio_def_ipa_state_gsi_if_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_skip_s {
+	u32	ipa_gsi_skip_fsm : 2;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_state_gsi_skip_u {
+	struct ipa_hwio_def_ipa_state_gsi_skip_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_gsi_if_cons_s {
+	u32	state : 1;
+	u32	cache_vld : 6;
+	u32	rx_req : 10;
+	u32	rx_req_no_zero : 10;
+	u32	reserved0 : 5;
+};
+union ipa_hwio_def_ipa_state_gsi_if_cons_u {
+	struct ipa_hwio_def_ipa_state_gsi_if_cons_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_s {
+	u32	rx_wait : 1;
+	u32	rx_idle : 1;
+	u32	tx_idle : 1;
+	u32	dpl_fifo_idle : 1;
+	u32	bam_gsi_idle : 1;
+	u32	ipa_status_sniffer_idle : 1;
+	u32	ipa_noc_idle : 1;
+	u32	aggr_idle : 1;
+	u32	mbim_aggr_idle : 1;
+	u32	ipa_rsrc_mngr_db_empty : 1;
+	u32	ipa_rsrc_state_idle : 1;
+	u32	ipa_ackmngr_db_empty : 1;
+	u32	ipa_ackmngr_state_idle : 1;
+	u32	ipa_tx_ackq_full : 1;
+	u32	ipa_prod_ackmngr_db_empty : 1;
+	u32	ipa_prod_ackmngr_state_idle : 1;
+	u32	ipa_prod_bresp_idle : 1;
+	u32	ipa_full_idle : 1;
+	u32	ipa_ntf_tx_empty : 1;
+	u32	ipa_tx_ackq_empty : 1;
+	u32	ipa_uc_ackq_empty : 1;
+	u32	ipa_rx_ackq_empty : 1;
+	u32	ipa_tx_commander_cmdq_empty : 1;
+	u32	ipa_rx_splt_cmdq_empty : 4;
+	u32	reserved0 : 1;
+	u32	ipa_rx_hps_empty : 1;
+	u32	ipa_hps_dps_empty : 1;
+	u32	ipa_dps_tx_empty : 1;
+	u32	ipa_uc_rx_hnd_cmdq_empty : 1;
+};
+union ipa_hwio_def_ipa_state_u {
+	struct ipa_hwio_def_ipa_state_s def;
+	u32				value;
+};
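+/*
+ * Illustrative sketch, not part of the generated header: wide status
+ * registers such as the one above are usually polled field-by-field, and
+ * decoding through the union keeps the idle check readable. Which fields
+ * constitute "idle" here is an assumption for illustration only.
+ */
+static inline bool ipa_hwio_example_core_idle(u32 raw)
+{
+	union ipa_hwio_def_ipa_state_u st;
+
+	st.value = raw;
+	return st.def.rx_idle && st.def.tx_idle && st.def.aggr_idle;
+}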
+struct ipa_hwio_def_ipa_state_rx_active_s {
+	u32	endpoints : 13;
+	u32	reserved0 : 19;
+};
+union ipa_hwio_def_ipa_state_rx_active_u {
+	struct ipa_hwio_def_ipa_state_rx_active_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_tx0_s {
+	u32	last_arbit_type : 2;
+	u32	next_arbit_type : 2;
+	u32	pa_idle : 1;
+	u32	pa_ctx_idle : 1;
+	u32	pa_pub_cnt_empty : 1;
+	u32	tx_cmd_main_idle : 1;
+	u32	tx_cmd_trnseq_idle : 1;
+	u32	tx_cmd_snif_idle : 1;
+	u32	tx_cmd_bresp_aloc_idle : 1;
+	u32	tx_cmd_bresp_inj_idle : 1;
+	u32	ar_idle : 1;
+	u32	dmaw_idle : 1;
+	u32	dmaw_last_outsd_idle : 1;
+	u32	pf_idle : 1;
+	u32	pf_empty : 1;
+	u32	aligner_empty : 1;
+	u32	holb_idle : 1;
+	u32	holb_mask_idle : 1;
+	u32	rsrcrel_idle : 1;
+	u32	suspend_empty : 1;
+	u32	cs_snif_idle : 1;
+	u32	last_cmd_pipe : 5;
+	u32	reserved0 : 4;
+};
+union ipa_hwio_def_ipa_state_tx0_u {
+	struct ipa_hwio_def_ipa_state_tx0_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_aggr_active_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_state_aggr_active_u {
+	struct ipa_hwio_def_ipa_state_aggr_active_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_nlo_aggr_s {
+	u32 nlo_aggr_state : 32;
+};
+union ipa_hwio_def_ipa_state_nlo_aggr_u {
+	struct ipa_hwio_def_ipa_state_nlo_aggr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_coal_master_1_s {
+	u32	init_vp_wr_ctx_line : 6;
+	u32	init_vp_rd_pkt_line : 6;
+	u32	init_vp_fsm_state : 4;
+	u32	check_fit_rd_ctx_line : 6;
+	u32	check_fit_fsm_state : 4;
+	u32	arbiter_state : 4;
+	u32	reserved0 : 2;
+};
+union ipa_hwio_def_ipa_state_coal_master_1_u {
+	struct ipa_hwio_def_ipa_state_coal_master_1_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_filt_rout_hash_en_s {
+	u32	ipv6_router_hash_en : 1;
+	u32	reserved0 : 3;
+	u32	ipv6_filter_hash_en : 1;
+	u32	reserved1 : 3;
+	u32	ipv4_router_hash_en : 1;
+	u32	reserved2 : 3;
+	u32	ipv4_filter_hash_en : 1;
+	u32	reserved3 : 19;
+};
+union ipa_hwio_def_ipa_filt_rout_hash_en_u {
+	struct ipa_hwio_def_ipa_filt_rout_hash_en_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_filt_rout_hash_flush_s {
+	u32	ipv6_router_hash_flush : 1;
+	u32	reserved0 : 3;
+	u32	ipv6_filter_hash_flush : 1;
+	u32	reserved1 : 3;
+	u32	ipv4_router_hash_flush : 1;
+	u32	reserved2 : 3;
+	u32	ipv4_filter_hash_flush : 1;
+	u32	reserved3 : 19;
+};
+union ipa_hwio_def_ipa_filt_rout_hash_flush_u {
+	struct ipa_hwio_def_ipa_filt_rout_hash_flush_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ipv4_filter_init_values_s {
+	u32	ip_v4_filter_init_hashed_addr : 16;
+	u32	ip_v4_filter_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv4_filter_init_values_u {
+	struct ipa_hwio_def_ipa_ipv4_filter_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ipv6_filter_init_values_s {
+	u32	ip_v6_filter_init_hashed_addr : 16;
+	u32	ip_v6_filter_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv6_filter_init_values_u {
+	struct ipa_hwio_def_ipa_ipv6_filter_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ipv4_route_init_values_s {
+	u32	ip_v4_route_init_hashed_addr : 16;
+	u32	ip_v4_route_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv4_route_init_values_u {
+	struct ipa_hwio_def_ipa_ipv4_route_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ipv6_route_init_values_s {
+	u32	ip_v6_route_init_hashed_addr : 16;
+	u32	ip_v6_route_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv6_route_init_values_u {
+	struct ipa_hwio_def_ipa_ipv6_route_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_bam_activated_ports_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_bam_activated_ports_u {
+	struct ipa_hwio_def_ipa_bam_activated_ports_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_s {
+	u32	zero : 3;
+	u32	addr : 29;
+};
+union ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_u {
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_s {
+	u32 addr : 32;
+};
+union ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_u {
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_s {
+	u32	zero : 3;
+	u32	addr : 15;
+	u32	reserved0 : 14;
+};
+union ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_u {
+	struct ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_s {
+	u32	src_rsrc_grp_0_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	src_rsrc_grp_0_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	src_rsrc_grp_1_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	src_rsrc_grp_1_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_s {
+	u32	src_rsrc_grp_2_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	src_rsrc_grp_2_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	src_rsrc_grp_3_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	src_rsrc_grp_3_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_s {
+	u32	src_rsrc_grp_0_cnt : 6;
+	u32	reserved0 : 2;
+	u32	src_rsrc_grp_1_cnt : 6;
+	u32	reserved1 : 2;
+	u32	src_rsrc_grp_2_cnt : 6;
+	u32	reserved2 : 2;
+	u32	src_rsrc_grp_3_cnt : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_s {
+	u32	dst_rsrc_grp_0_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	dst_rsrc_grp_0_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	dst_rsrc_grp_1_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	dst_rsrc_grp_1_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_s {
+	u32	dst_rsrc_grp_2_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	dst_rsrc_grp_2_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	dst_rsrc_grp_3_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	dst_rsrc_grp_3_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_s {
+	u32	dst_rsrc_grp_0_cnt : 6;
+	u32	reserved0 : 2;
+	u32	dst_rsrc_grp_1_cnt : 6;
+	u32	reserved1 : 2;
+	u32	dst_rsrc_grp_2_cnt : 6;
+	u32	reserved2 : 2;
+	u32	dst_rsrc_grp_3_cnt : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_rsrc_grp_cfg_s {
+	u32	src_grp_special_valid : 1;
+	u32	reserved0 : 3;
+	u32	src_grp_special_index : 3;
+	u32	reserved1 : 1;
+	u32	dst_pipe_special_valid : 1;
+	u32	reserved2 : 3;
+	u32	dst_pipe_special_index : 5;
+	u32	reserved3 : 3;
+	u32	dst_grp_special_valid : 1;
+	u32	reserved4 : 3;
+	u32	dst_grp_special_index : 6;
+	u32	reserved5 : 2;
+};
+union ipa_hwio_def_ipa_rsrc_grp_cfg_u {
+	struct ipa_hwio_def_ipa_rsrc_grp_cfg_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_pipeline_disable_s {
+	u32	reserved0 : 3;
+	u32	rx_cmdq_splitter_dis : 1;
+	u32	reserved1 : 28;
+};
+union ipa_hwio_def_ipa_pipeline_disable_u {
+	struct ipa_hwio_def_ipa_pipeline_disable_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_ctrl_n_s {
+	u32	endp_suspend : 1;
+	u32	endp_delay : 1;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_endp_init_ctrl_n_u {
+	struct ipa_hwio_def_ipa_endp_init_ctrl_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_s {
+	u32	reserved0 : 1;
+	u32	endp_delay : 1;
+	u32	reserved1 : 30;
+};
+union ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_u {
+	struct ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_cfg_n_s {
+	u32	frag_offload_en : 1;
+	u32	cs_offload_en : 2;
+	u32	cs_metadata_hdr_offset : 4;
+	u32	reserved0 : 1;
+	u32	gen_qmb_master_sel : 1;
+	u32	reserved1 : 23;
+};
+union ipa_hwio_def_ipa_endp_init_cfg_n_u {
+	struct ipa_hwio_def_ipa_endp_init_cfg_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_nat_n_s {
+	u32	nat_en : 2;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_endp_init_nat_n_u {
+	struct ipa_hwio_def_ipa_endp_init_nat_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_n_s {
+	u32	hdr_len : 6;
+	u32	hdr_ofst_metadata_valid : 1;
+	u32	hdr_ofst_metadata : 6;
+	u32	hdr_additional_const_len : 6;
+	u32	hdr_ofst_pkt_size_valid : 1;
+	u32	hdr_ofst_pkt_size : 6;
+	u32	hdr_a5_mux : 1;
+	u32	hdr_len_inc_deagg_hdr : 1;
+	u32	hdr_len_msb : 2;
+	u32	hdr_ofst_metadata_msb : 2;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_ext_n_s {
+	u32	hdr_endianness : 1;
+	u32	hdr_total_len_or_pad_valid : 1;
+	u32	hdr_total_len_or_pad : 1;
+	u32	hdr_payload_len_inc_padding : 1;
+	u32	hdr_total_len_or_pad_offset : 6;
+	u32	hdr_pad_to_alignment : 4;
+	u32	reserved0 : 2;
+	u32	hdr_total_len_or_pad_offset_msb : 2;
+	u32	hdr_ofst_pkt_size_msb : 2;
+	u32	hdr_additional_const_len_msb : 2;
+	u32	reserved1 : 10;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_ext_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_ext_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_s {
+	u32 metadata_mask : 32;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_metadata_n_s {
+	u32 metadata : 32;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_metadata_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_mode_n_s {
+	u32	mode : 3;
+	u32	dcph_enable : 1;
+	u32	dest_pipe_index : 5;
+	u32	reserved0 : 3;
+	u32	byte_threshold : 16;
+	u32	pipe_replicate_en : 1;
+	u32	pad_en : 1;
+	u32	reserved1 : 2;
+};
+union ipa_hwio_def_ipa_endp_init_mode_n_u {
+	struct ipa_hwio_def_ipa_endp_init_mode_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_aggr_n_s {
+	u32	aggr_en : 2;
+	u32	aggr_type : 3;
+	u32	aggr_byte_limit : 6;
+	u32	reserved0 : 1;
+	u32	aggr_time_limit : 5;
+	u32	aggr_pkt_limit : 6;
+	u32	aggr_sw_eof_active : 1;
+	u32	aggr_force_close : 1;
+	u32	reserved1 : 1;
+	u32	aggr_hard_byte_limit_enable : 1;
+	u32	aggr_gran_sel : 1;
+	u32	reserved2 : 4;
+};
+union ipa_hwio_def_ipa_endp_init_aggr_n_u {
+	struct ipa_hwio_def_ipa_endp_init_aggr_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_hol_block_en_n_s {
+	u32	en : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_endp_init_hol_block_en_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hol_block_en_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_hol_block_timer_n_s {
+	u32	time_limit : 5;
+	u32	reserved0 : 3;
+	u32	gran_sel : 1;
+	u32	reserved1 : 23;
+};
+union ipa_hwio_def_ipa_endp_init_hol_block_timer_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hol_block_timer_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_deaggr_n_s {
+	u32	deaggr_hdr_len : 6;
+	u32	syspipe_err_detection : 1;
+	u32	packet_offset_valid : 1;
+	u32	packet_offset_location : 6;
+	u32	ignore_min_pkt_err : 1;
+	u32	reserved0 : 1;
+	u32	max_packet_len : 16;
+};
+union ipa_hwio_def_ipa_endp_init_deaggr_n_u {
+	struct ipa_hwio_def_ipa_endp_init_deaggr_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_rsrc_grp_n_s {
+	u32	rsrc_grp : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_endp_init_rsrc_grp_n_u {
+	struct ipa_hwio_def_ipa_endp_init_rsrc_grp_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_seq_n_s {
+	u32	hps_seq_type : 4;
+	u32	dps_seq_type : 4;
+	u32	hps_rep_seq_type : 4;
+	u32	dps_rep_seq_type : 4;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_endp_init_seq_n_u {
+	struct ipa_hwio_def_ipa_endp_init_seq_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_status_n_s {
+	u32	status_en : 1;
+	u32	status_endp : 5;
+	u32	reserved0 : 3;
+	u32	status_pkt_suppress : 1;
+	u32	reserved1 : 22;
+};
+union ipa_hwio_def_ipa_endp_status_n_u {
+	struct ipa_hwio_def_ipa_endp_status_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_s {
+	u32	filter_hash_msk_src_id : 1;
+	u32	filter_hash_msk_src_ip_add : 1;
+	u32	filter_hash_msk_dst_ip_add : 1;
+	u32	filter_hash_msk_src_port : 1;
+	u32	filter_hash_msk_dst_port : 1;
+	u32	filter_hash_msk_protocol : 1;
+	u32	filter_hash_msk_metadata : 1;
+	u32	reserved0 : 9;
+	u32	router_hash_msk_src_id : 1;
+	u32	router_hash_msk_src_ip_add : 1;
+	u32	router_hash_msk_dst_ip_add : 1;
+	u32	router_hash_msk_src_port : 1;
+	u32	router_hash_msk_dst_port : 1;
+	u32	router_hash_msk_protocol : 1;
+	u32	router_hash_msk_metadata : 1;
+	u32	reserved1 : 9;
+};
+union ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_u {
+	struct ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_nlo_pp_cfg1_s {
+	u32	nlo_ack_pp : 8;
+	u32	nlo_data_pp : 8;
+	u32	nlo_status_pp : 8;
+	u32	nlo_ack_max_vp : 6;
+	u32	reserved0 : 2;
+};
+union ipa_hwio_def_ipa_nlo_pp_cfg1_u {
+	struct ipa_hwio_def_ipa_nlo_pp_cfg1_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_nlo_pp_cfg2_s {
+	u32	nlo_ack_close_padd : 8;
+	u32	nlo_data_close_padd : 8;
+	u32	nlo_ack_buffer_mode : 1;
+	u32	nlo_data_buffer_mode : 1;
+	u32	nlo_status_buffer_mode : 1;
+	u32	reserved0 : 13;
+};
+union ipa_hwio_def_ipa_nlo_pp_cfg2_u {
+	struct ipa_hwio_def_ipa_nlo_pp_cfg2_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_s {
+	u32	nlo_ack_lower_size : 16;
+	u32	nlo_ack_upper_size : 16;
+};
+union ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_u {
+	struct ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_s {
+	u32	nlo_data_lower_size : 16;
+	u32	nlo_data_upper_size : 16;
+};
+union ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_u {
+	struct ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_min_dsm_cfg_s {
+	u32	nlo_ack_min_dsm_len : 16;
+	u32	nlo_data_min_dsm_len : 16;
+};
+union ipa_hwio_def_ipa_nlo_min_dsm_cfg_u {
+	struct ipa_hwio_def_ipa_nlo_min_dsm_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_flush_req_s {
+	u32	vp_flush_pp_indx : 8;
+	u32	reserved0 : 8;
+	u32	vp_flush_vp_indx : 8;
+	u32	reserved1 : 7;
+	u32	vp_flush_req : 1;
+};
+union ipa_hwio_def_ipa_nlo_vp_flush_req_u {
+	struct ipa_hwio_def_ipa_nlo_vp_flush_req_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_flush_cookie_s {
+	u32 vp_flush_cookie : 32;
+};
+union ipa_hwio_def_ipa_nlo_vp_flush_cookie_u {
+	struct ipa_hwio_def_ipa_nlo_vp_flush_cookie_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_flush_ack_s {
+	u32	vp_flush_ack : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_nlo_vp_flush_ack_u {
+	struct ipa_hwio_def_ipa_nlo_vp_flush_ack_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_dsm_open_s {
+	u32 vp_dsm_open : 32;
+};
+union ipa_hwio_def_ipa_nlo_vp_dsm_open_u {
+	struct ipa_hwio_def_ipa_nlo_vp_dsm_open_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_qbap_open_s {
+	u32 vp_qbap_open : 32;
+};
+union ipa_hwio_def_ipa_nlo_vp_qbap_open_u {
+	struct ipa_hwio_def_ipa_nlo_vp_qbap_open_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rsrc_mngr_db_cfg_s {
+	u32	rsrc_grp_sel : 3;
+	u32	reserved0 : 1;
+	u32	rsrc_type_sel : 3;
+	u32	reserved1 : 1;
+	u32	rsrc_id_sel : 6;
+	u32	reserved2 : 18;
+};
+union ipa_hwio_def_ipa_rsrc_mngr_db_cfg_u {
+	struct ipa_hwio_def_ipa_rsrc_mngr_db_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_s {
+	u32	rsrc_occupied : 1;
+	u32	rsrc_next_valid : 1;
+	u32	reserved0 : 2;
+	u32	rsrc_next_index : 6;
+	u32	reserved1 : 22;
+};
+union ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_u {
+	struct ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rsrc_mngr_db_list_read_s {
+	u32	rsrc_list_valid : 1;
+	u32	rsrc_list_hold : 1;
+	u32	reserved0 : 2;
+	u32	rsrc_list_head_rsrc : 6;
+	u32	reserved1 : 2;
+	u32	rsrc_list_head_cnt : 7;
+	u32	reserved2 : 1;
+	u32	rsrc_list_entry_cnt : 7;
+	u32	reserved3 : 5;
+};
+union ipa_hwio_def_ipa_rsrc_mngr_db_list_read_u {
+	struct ipa_hwio_def_ipa_rsrc_mngr_db_list_read_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_debug_data_s {
+	u32 debug_data : 32;
+};
+union ipa_hwio_def_ipa_debug_data_u {
+	struct ipa_hwio_def_ipa_debug_data_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_testbus_sel_s {
+	u32	testbus_en : 1;
+	u32	reserved0 : 3;
+	u32	external_block_select : 8;
+	u32	internal_block_select : 8;
+	u32	pipe_select : 5;
+	u32	reserved1 : 7;
+};
+union ipa_hwio_def_ipa_testbus_sel_u {
+	struct ipa_hwio_def_ipa_testbus_sel_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_step_mode_breakpoints_s {
+	u32 hw_en : 32;
+};
+union ipa_hwio_def_ipa_step_mode_breakpoints_u {
+	struct ipa_hwio_def_ipa_step_mode_breakpoints_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_step_mode_status_s {
+	u32 hw_en : 32;
+};
+union ipa_hwio_def_ipa_step_mode_status_u {
+	struct ipa_hwio_def_ipa_step_mode_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_s {
+	u32	reserved0 : 1;
+	u32	log_en : 1;
+	u32	reserved1 : 2;
+	u32	log_pipe : 5;
+	u32	reserved2 : 3;
+	u32	log_length : 8;
+	u32	log_reduction_en : 1;
+	u32	log_dpl_l2_remove_en : 1;
+	u32	reserved3 : 10;
+};
+union ipa_hwio_def_ipa_log_u {
+	struct ipa_hwio_def_ipa_log_s	def;
+	u32				value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_addr_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_msb_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_addr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_s {
+	u32 writr_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_msb_s {
+	u32 writr_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_s {
+	u32	size : 16;
+	u32	enable : 1;
+	u32	skip_ddr_dma : 1;
+	u32	reserved0 : 14;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_s {
+	u32	read_ptr : 14;
+	u32	reserved0 : 2;
+	u32	write_ptr : 14;
+	u32	reserved1 : 1;
+	u32	skip_ddr_wrap_happened : 1;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	release_rd_cmd : 1;
+	u32	release_wr_cmd : 1;
+	u32	release_rd_pkt : 1;
+	u32	release_wr_pkt : 1;
+	u32	release_rd_pkt_enhanced : 1;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_s {
+	u32	block_rd : 1;
+	u32	block_wr : 1;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_s {
+	u32	cmdq_packet_len_f : 16;
+	u32	cmdq_src_len_f : 16;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_s {
+	u32	cmdq_src_pipe_f : 8;
+	u32	cmdq_order_f : 2;
+	u32	cmdq_flags_f : 6;
+	u32	cmdq_opcode_f : 8;
+	u32	cmdq_metadata_f : 8;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_s {
+	u32 cmdq_addr_lsb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_s {
+	u32 cmdq_addr_msb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_s {
+	u32	cmdq_packet_len_f : 16;
+	u32	cmdq_src_len_f : 16;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_s {
+	u32	cmdq_src_pipe_f : 8;
+	u32	cmdq_order_f : 2;
+	u32	cmdq_flags_f : 6;
+	u32	cmdq_opcode_f : 8;
+	u32	cmdq_metadata_f : 8;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_s {
+	u32 cmdq_addr_lsb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_s {
+	u32 cmdq_addr_msb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s {
+	u32	status : 1;
+	u32	cmdq_empty : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_count : 2;
+	u32	cmdq_depth : 2;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_status_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_tx_commander_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_empty : 1;
+	u32	cmdq_full : 1;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_tx_commander_cmdq_status_u {
+	struct ipa_hwio_def_ipa_tx_commander_cmdq_status_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 3;
+	u32	rd_req : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_s {
+	u32	block_wr : 5;
+	u32	reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_s {
+	u32	block_rd : 5;
+	u32	reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_s {
+	u32	cmdq_packet_len_f : 16;
+	u32	cmdq_dest_len_f : 16;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_s {
+	u32	cmdq_src_pipe_f : 8;
+	u32	cmdq_order_f : 2;
+	u32	cmdq_flags_f : 6;
+	u32	cmdq_opcode_f : 8;
+	u32	cmdq_metadata_f : 8;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_s {
+	u32 cmdq_addr_lsb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_s {
+	u32 cmdq_addr_msb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_status_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_s {
+	u32	cmdq_empty : 5;
+	u32	reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_count_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_s {
+	u32	client_0_min_depth : 4;
+	u32	reserved0 : 4;
+	u32	client_1_min_depth : 4;
+	u32	reserved1 : 4;
+	u32	client_2_min_depth : 4;
+	u32	reserved2 : 4;
+	u32	client_3_min_depth : 4;
+	u32	client_4_min_depth : 4;
+};
+union ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_u {
+	struct ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_s {
+	u32	client_0_max_depth : 4;
+	u32	reserved0 : 4;
+	u32	client_1_max_depth : 4;
+	u32	reserved1 : 4;
+	u32	client_2_max_depth : 4;
+	u32	reserved2 : 4;
+	u32	client_3_max_depth : 4;
+	u32	client_4_max_depth : 4;
+};
+union ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_u {
+	struct ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_s {
+	u32	cmdq_ctx_id_f : 4;
+	u32	cmdq_src_id_f : 8;
+	u32	cmdq_src_pipe_f : 5;
+	u32	cmdq_opcode_f : 2;
+	u32	cmdq_rep_f : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 6;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_status_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_s {
+	u32	cmdq_empty : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_count_s {
+	u32	fifo_count : 6;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_count_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 4;
+	u32	reserved0 : 1;
+	u32	rd_req : 1;
+	u32	reserved1 : 24;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_s {
+	u32	cmdq_ctx_id_f : 4;
+	u32	cmdq_src_id_f : 8;
+	u32	cmdq_src_pipe_f : 5;
+	u32	cmdq_opcode_f : 2;
+	u32	cmdq_rep_f : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_status_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_s {
+	u32	cmdq_empty : 10;
+	u32	reserved0 : 22;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_count_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_en_s {
+	u32	bitmap : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_en_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_en_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_s {
+	u32	bitmap : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_s {
+	u32	all_cli_mux_concat : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_0_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_0_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_0_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_1_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_1_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_1_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_2_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_2_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_2_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_3_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_3_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_3_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_0_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_0_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_0_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_1_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_1_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_1_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_2_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_2_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_2_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_3_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_3_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_3_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_legacy_rx_s {
+	u32	src_group_sel : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_legacy_rx_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_legacy_rx_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_s {
+	u32	cmdq_src_id : 8;
+	u32	cmdq_length : 16;
+	u32	cmdq_origin : 1;
+	u32	cmdq_sent : 1;
+	u32	cmdq_src_id_valid : 1;
+	u32	reserved0 : 5;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_status_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_s {
+	u32	cmdq_empty : 13;
+	u32	reserved0 : 19;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_count_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_fifo_status_ctrl_s {
+	u32	ipa_gsi_fifo_status_port_sel : 5;
+	u32	ipa_gsi_fifo_status_en : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_gsi_fifo_status_ctrl_u {
+	struct ipa_hwio_def_ipa_gsi_fifo_status_ctrl_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_tlv_fifo_status_s {
+	u32	fifo_wr_ptr : 8;
+	u32	fifo_rd_ptr : 8;
+	u32	fifo_rd_pub_ptr : 8;
+	u32	fifo_empty : 1;
+	u32	fifo_empty_pub : 1;
+	u32	fifo_almost_full : 1;
+	u32	fifo_full : 1;
+	u32	fifo_almost_full_pub : 1;
+	u32	fifo_full_pub : 1;
+	u32	fifo_head_is_bubble : 1;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_gsi_tlv_fifo_status_u {
+	struct ipa_hwio_def_ipa_gsi_tlv_fifo_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_aos_fifo_status_s {
+	u32	fifo_wr_ptr : 8;
+	u32	fifo_rd_ptr : 8;
+	u32	fifo_rd_pub_ptr : 8;
+	u32	fifo_empty : 1;
+	u32	fifo_empty_pub : 1;
+	u32	fifo_almost_full : 1;
+	u32	fifo_full : 1;
+	u32	fifo_almost_full_pub : 1;
+	u32	fifo_full_pub : 1;
+	u32	fifo_head_is_bubble : 1;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_gsi_aos_fifo_status_u {
+	struct ipa_hwio_def_ipa_gsi_aos_fifo_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_0_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_0_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_1_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_1_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_1_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_2_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_2_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_2_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_3_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_3_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_3_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_4_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_4_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_4_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_5_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_5_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_5_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_6_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_6_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_6_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_7_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_7_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_7_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_0_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_0_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_1_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_1_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_1_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_2_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_2_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_2_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_3_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_3_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_3_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_4_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_4_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_4_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_5_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_5_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_5_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_6_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_6_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_6_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_7_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_7_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_7_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_s {
+	u32	cmdq_ctx_id_f : 4;
+	u32	cmdq_src_id_f : 8;
+	u32	cmdq_src_pipe_f : 5;
+	u32	cmdq_opcode_f : 2;
+	u32	cmdq_rep_f : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_status_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_s {
+	u32	cmdq_empty : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_count_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_s {
+	u32	cmdq_src_id : 8;
+	u32	cmdq_length : 16;
+	u32	cmdq_origin : 1;
+	u32	cmdq_sent : 1;
+	u32	cmdq_src_id_valid : 1;
+	u32	cmdq_userdata : 5;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_s {
+	u32	cmdq_empty : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_spare_reg_1_s {
+	u32	spare_bit0 : 1;
+	u32	spare_bit1 : 1;
+	u32	genqmb_aooowr : 1;
+	u32	spare_bit3 : 1;
+	u32	spare_bit4 : 1;
+	u32	acl_inorder_multi_disable : 1;
+	u32	acl_dispatcher_frag_notif_check_disable : 1;
+	u32	acl_dispatcher_frag_notif_check_each_cmd_disable : 1;
+	u32	spare_bit8 : 1;
+	u32	acl_dispatcher_frag_notif_check_notif_mid_disable : 1;
+	u32	acl_dispatcher_pkt_check_disable : 1;
+	u32	tx_gives_sspnd_ack_on_open_aggr_frame : 1;
+	u32	spare_bit12 : 1;
+	u32	tx_block_aggr_query_on_holb_packet : 1;
+	u32	frag_mngr_fairness_eviction_on_constructing : 1;
+	u32	rx_cmdq_splitter_cmdq_pending_mux_disable : 1;
+	u32	qmb_ram_rd_cache_disable : 1;
+	u32	rx_stall_on_mbim_deaggr_error : 1;
+	u32	rx_stall_on_gen_deaggr_error : 1;
+	u32	spare_bit19 : 1;
+	u32	revert_warb_fix : 1;
+	u32	gsi_if_out_of_buf_stop_reset_mask_enable : 1;
+	u32	bam_idle_in_ipa_misc_cgc_en : 1;
+	u32	spare_bit23 : 1;
+	u32	spare_bit24 : 1;
+	u32	spare_bit25 : 1;
+	u32	ram_slaveway_access_protection_disable : 1;
+	u32	dcph_ram_rd_prefetch_disable : 1;
+	u32	warb_force_arb_round_finish_special_disable : 1;
+	u32	spare_ackinj_pipe8_mask_enable : 1;
+	u32	spare_bit30 : 1;
+	u32	spare_bit31 : 1;
+};
+union ipa_hwio_def_ipa_spare_reg_1_u {
+	struct ipa_hwio_def_ipa_spare_reg_1_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_spare_reg_2_s {
+	u32	tx_bresp_inj_with_flop : 1;
+	u32	cmdq_split_not_wait_data_desc_prior_hdr_push : 1;
+	u32	spare_bits : 30;
+};
+union ipa_hwio_def_ipa_spare_reg_2_u {
+	struct ipa_hwio_def_ipa_spare_reg_2_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_endp_gsi_cfg1_n_s {
+	u32	reserved0 : 16;
+	u32	endp_en : 1;
+	u32	reserved1 : 14;
+	u32	init_endp : 1;
+};
+union ipa_hwio_def_ipa_endp_gsi_cfg1_n_u {
+	struct ipa_hwio_def_ipa_endp_gsi_cfg1_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_s {
+	u32	fifo_base_addr : 16;
+	u32	fifo_size : 8;
+	u32	reserved0 : 8;
+};
+union ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_u {
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_s {
+	u32	fifo_base_addr : 16;
+	u32	fifo_size : 8;
+	u32	reserved0 : 8;
+};
+union ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_u {
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ctxh_ctrl_s {
+	u32	ctxh_lock_id : 4;
+	u32	reserved0 : 27;
+	u32	ctxh_lock : 1;
+};
+union ipa_hwio_def_ipa_ctxh_ctrl_u {
+	struct ipa_hwio_def_ipa_ctxh_ctrl_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_irq_stts_ee_n_s {
+	u32	bad_snoc_access_irq : 1;
+	u32	reserved0 : 1;
+	u32	uc_irq_0 : 1;
+	u32	uc_irq_1 : 1;
+	u32	uc_irq_2 : 1;
+	u32	uc_irq_3 : 1;
+	u32	uc_in_q_not_empty_irq : 1;
+	u32	uc_rx_cmd_q_not_full_irq : 1;
+	u32	proc_to_uc_ack_q_not_empty_irq : 1;
+	u32	rx_err_irq : 1;
+	u32	deaggr_err_irq : 1;
+	u32	tx_err_irq : 1;
+	u32	step_mode_irq : 1;
+	u32	proc_err_irq : 1;
+	u32	tx_suspend_irq : 1;
+	u32	tx_holb_drop_irq : 1;
+	u32	bam_gsi_idle_irq : 1;
+	u32	pipe_yellow_marker_below_irq : 1;
+	u32	pipe_red_marker_below_irq : 1;
+	u32	pipe_yellow_marker_above_irq : 1;
+	u32	pipe_red_marker_above_irq : 1;
+	u32	ucp_irq : 1;
+	u32	reserved1 : 1;
+	u32	gsi_ee_irq : 1;
+	u32	gsi_ipa_if_tlv_rcvd_irq : 1;
+	u32	gsi_uc_irq : 1;
+	u32	tlv_len_min_dsm_irq : 1;
+	u32	reserved2 : 5;
+};
+union ipa_hwio_def_ipa_irq_stts_ee_n_u {
+	struct ipa_hwio_def_ipa_irq_stts_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_irq_en_ee_n_s {
+	u32	bad_snoc_access_irq_en : 1;
+	u32	reserved0 : 1;
+	u32	uc_irq_0_irq_en : 1;
+	u32	uc_irq_1_irq_en : 1;
+	u32	uc_irq_2_irq_en : 1;
+	u32	uc_irq_3_irq_en : 1;
+	u32	uc_in_q_not_empty_irq_en : 1;
+	u32	uc_rx_cmd_q_not_full_irq_en : 1;
+	u32	proc_to_uc_ack_q_not_empty_irq_en : 1;
+	u32	rx_err_irq_en : 1;
+	u32	deaggr_err_irq_en : 1;
+	u32	tx_err_irq_en : 1;
+	u32	step_mode_irq_en : 1;
+	u32	proc_err_irq_en : 1;
+	u32	tx_suspend_irq_en : 1;
+	u32	tx_holb_drop_irq_en : 1;
+	u32	bam_gsi_idle_irq_en : 1;
+	u32	pipe_yellow_marker_below_irq_en : 1;
+	u32	pipe_red_marker_below_irq_en : 1;
+	u32	pipe_yellow_marker_above_irq_en : 1;
+	u32	pipe_red_marker_above_irq_en : 1;
+	u32	ucp_irq_en : 1;
+	u32	reserved1 : 1;
+	u32	gsi_ee_irq_en : 1;
+	u32	gsi_ipa_if_tlv_rcvd_irq_en : 1;
+	u32	gsi_uc_irq_en : 1;
+	u32	tlv_len_min_dsm_irq_en : 1;
+	u32	reserved2 : 5;
+};
+union ipa_hwio_def_ipa_irq_en_ee_n_u {
+	struct ipa_hwio_def_ipa_irq_en_ee_n_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_snoc_fec_ee_n_s {
+	u32	client : 8;
+	u32	qmb_index : 1;
+	u32	reserved0 : 3;
+	u32	tid : 4;
+	u32	reserved1 : 15;
+	u32	read_not_write : 1;
+};
+union ipa_hwio_def_ipa_snoc_fec_ee_n_u {
+	struct ipa_hwio_def_ipa_snoc_fec_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_fec_addr_ee_n_s {
+	u32 addr : 32;
+};
+union ipa_hwio_def_ipa_fec_addr_ee_n_u {
+	struct ipa_hwio_def_ipa_fec_addr_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_fec_attr_ee_n_s {
+	u32	opcode : 6;
+	u32	error_info : 26;
+};
+union ipa_hwio_def_ipa_fec_attr_ee_n_u {
+	struct ipa_hwio_def_ipa_fec_attr_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_suspend_irq_info_ee_n_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_suspend_irq_info_ee_n_u {
+	struct ipa_hwio_def_ipa_suspend_irq_info_ee_n_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_suspend_irq_en_ee_n_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_suspend_irq_en_ee_n_u {
+	struct ipa_hwio_def_ipa_suspend_irq_en_ee_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_s {
+	u32	reserved0 : 13;
+	u32	endpoints : 18;
+	u32	reserved1 : 1;
+};
+union ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_u {
+	struct ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_addr_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_addr_u {
+	struct ipa_hwio_def_ipa_log_buf_status_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_addr_msb_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_addr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_status_addr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_write_ptr_s {
+	u32 write_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_write_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_status_write_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_write_ptr_msb_s {
+	u32 write_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_write_ptr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_status_write_ptr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_cfg_s {
+	u32	size : 16;
+	u32	enable : 1;
+	u32	reserved0 : 15;
+};
+union ipa_hwio_def_ipa_log_buf_status_cfg_u {
+	struct ipa_hwio_def_ipa_log_buf_status_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_ram_ptr_s {
+	u32	read_ptr : 16;
+	u32	write_ptr : 16;
+};
+union ipa_hwio_def_ipa_log_buf_status_ram_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_status_ram_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_sys_addr_s {
+	u32 addr : 32;
+};
+union ipa_hwio_def_ipa_uc_qmb_sys_addr_u {
+	struct ipa_hwio_def_ipa_uc_qmb_sys_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_sys_addr_msb_s {
+	u32 addr_msb : 32;
+};
+union ipa_hwio_def_ipa_uc_qmb_sys_addr_msb_u {
+	struct ipa_hwio_def_ipa_uc_qmb_sys_addr_msb_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_local_addr_s {
+	u32	addr : 18;
+	u32	reserved0 : 14;
+};
+union ipa_hwio_def_ipa_uc_qmb_local_addr_u {
+	struct ipa_hwio_def_ipa_uc_qmb_local_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_length_s {
+	u32	length : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_uc_qmb_length_u {
+	struct ipa_hwio_def_ipa_uc_qmb_length_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_trigger_s {
+	u32	direction : 1;
+	u32	reserved0 : 3;
+	u32	posting : 2;
+	u32	reserved1 : 26;
+};
+union ipa_hwio_def_ipa_uc_qmb_trigger_u {
+	struct ipa_hwio_def_ipa_uc_qmb_trigger_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_pending_tid_s {
+	u32	tid : 6;
+	u32	reserved0 : 2;
+	u32	error_bus : 1;
+	u32	reserved1 : 3;
+	u32	error_max_os : 1;
+	u32	reserved2 : 3;
+	u32	error_max_comp : 1;
+	u32	reserved3 : 3;
+	u32	error_security : 1;
+	u32	reserved4 : 11;
+};
+union ipa_hwio_def_ipa_uc_qmb_pending_tid_u {
+	struct ipa_hwio_def_ipa_uc_qmb_pending_tid_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_s {
+	u32	tid : 6;
+	u32	reserved0 : 2;
+	u32	error : 1;
+	u32	reserved1 : 3;
+	u32	valid : 1;
+	u32	reserved2 : 19;
+};
+union ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_u {
+	struct ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_s {
+	u32	tid : 6;
+	u32	reserved0 : 2;
+	u32	error : 1;
+	u32	reserved1 : 3;
+	u32	valid : 1;
+	u32	reserved2 : 19;
+};
+union ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_u {
+	struct ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_misc_s {
+	u32	user : 10;
+	u32	reserved0 : 2;
+	u32	rd_priority : 2;
+	u32	reserved1 : 2;
+	u32	wr_priority : 2;
+	u32	reserved2 : 2;
+	u32	ooord : 1;
+	u32	reserved3 : 3;
+	u32	ooowr : 1;
+	u32	reserved4 : 3;
+	u32	swap : 1;
+	u32	irq_coal : 1;
+	u32	posted_stall : 1;
+	u32	qmb_hready_bcr : 1;
+};
+union ipa_hwio_def_ipa_uc_qmb_misc_u {
+	struct ipa_hwio_def_ipa_uc_qmb_misc_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_status_s {
+	u32	max_outstanding_rd : 4;
+	u32	outstanding_rd_cnt : 4;
+	u32	completed_rd_cnt : 4;
+	u32	completed_rd_fifo_full : 1;
+	u32	reserved0 : 3;
+	u32	max_outstanding_wr : 4;
+	u32	outstanding_wr_cnt : 4;
+	u32	completed_wr_cnt : 4;
+	u32	completed_wr_fifo_full : 1;
+	u32	reserved1 : 3;
+};
+union ipa_hwio_def_ipa_uc_qmb_status_u {
+	struct ipa_hwio_def_ipa_uc_qmb_status_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_bus_attrib_s {
+	u32	memtype : 3;
+	u32	reserved0 : 1;
+	u32	noallocate : 1;
+	u32	reserved1 : 3;
+	u32	innershared : 1;
+	u32	reserved2 : 3;
+	u32	shared : 1;
+	u32	reserved3 : 19;
+};
+union ipa_hwio_def_ipa_uc_qmb_bus_attrib_u {
+	struct ipa_hwio_def_ipa_uc_qmb_bus_attrib_s	def;
+	u32						value;
+};
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_pkt_cntxt.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_pkt_cntxt.h
new file mode 100644
index 0000000..ab31a4f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_pkt_cntxt.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_PKT_CNTXT_H_)
+#define _IPA_PKT_CNTXT_H_
+
+#define IPA_HW_PKT_CTNTX_MAX        0x10
+#define IPA_HW_NUM_SAVE_PKT_CTNTX   0x8
+#define IPA_HW_PKT_CTNTX_START_ADDR 0xE434CA00
+#define IPA_HW_PKT_CTNTX_SIZE       (sizeof(ipa_pkt_ctntx_opcode_state_s) + \
+				     sizeof(ipa_pkt_ctntx_u))
+
+/*
+ * Packet Context States
+ */
+enum ipa_hw_pkt_cntxt_state_e {
+	IPA_HW_PKT_CNTXT_STATE_HFETCHER_INIT = 1,
+	IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR,
+	IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR_REP,
+	IPA_HW_PKT_CNTXT_STATE_H_DCPH,
+	IPA_HW_PKT_CNTXT_STATE_PKT_PARSER,
+	IPA_HW_PKT_CNTXT_STATE_FILTER_NAT,
+	IPA_HW_PKT_CNTXT_STATE_ROUTER,
+	IPA_HW_PKT_CNTXT_STATE_HDRI,
+	IPA_HW_PKT_CNTXT_STATE_UCP,
+	IPA_HW_PKT_CNTXT_STATE_ENQUEUER,
+	IPA_HW_PKT_CNTXT_STATE_DFETCHER,
+	IPA_HW_PKT_CNTXT_STATE_D_DCPH,
+	IPA_HW_PKT_CNTXT_STATE_DISPATCHER,
+	IPA_HW_PKT_CNTXT_STATE_TX,
+	IPA_HW_PKT_CNTXT_STATE_TX_ZLT,
+	IPA_HW_PKT_CNTXT_STATE_DFETCHER_DMAR,
+	IPA_HW_PKT_CNTXT_STATE_DCMP,
+};
+
+/*
+ * Packet Context fields as received from VI/Design
+ */
+struct ipa_pkt_ctntx_s {
+	u64	opcode                           : 8;
+	u64	state                            : 5;
+	u64	not_used_1                       : 2;
+	u64	tx_pkt_dma_done                  : 1;
+	u64	exc_deagg                        : 1;
+	u64	exc_pkt_version                  : 1;
+	u64	exc_pkt_len                      : 1;
+	u64	exc_threshold                    : 1;
+	u64	exc_sw                           : 1;
+	u64	exc_nat                          : 1;
+	u64	exc_frag_miss                    : 1;
+	u64	filter_bypass                    : 1;
+	u64	router_bypass                    : 1;
+	u64	nat_bypass                       : 1;
+	u64	hdri_bypass                      : 1;
+	u64	dcph_bypass                      : 1;
+	u64	security_credentials_select      : 1;
+	u64	pkt_2nd_pass                     : 1;
+	u64	xlat_bypass                      : 1;
+	u64	dcph_valid                       : 1;
+	u64	ucp_on                           : 1;
+	u64	replication                      : 1;
+	u64	src_status_en                    : 1;
+	u64	dest_status_en                   : 1;
+	u64	frag_status_en                   : 1;
+	u64	eot_dest                         : 1;
+	u64	eot_notif                        : 1;
+	u64	prev_eot_dest                    : 1;
+	u64	src_hdr_len                      : 8;
+	u64	tx_valid_sectors                 : 8;
+	u64	rx_flags                         : 8;
+	u64	rx_packet_length                 : 16;
+	u64	revised_packet_length            : 16;
+	u64	frag_en                          : 1;
+	u64	frag_bypass                      : 1;
+	u64	frag_process                     : 1;
+	u64	notif_pipe                       : 5;
+	u64	src_id                           : 8;
+	u64	tx_pkt_transferred               : 1;
+	u64	src_pipe                         : 5;
+	u64	dest_pipe                        : 5;
+	u64	frag_pipe                        : 5;
+	u64	ihl_offset                       : 8;
+	u64	protocol                         : 8;
+	u64	tos                              : 8;
+	u64	id                               : 16;
+	u64	v6_reserved                      : 4;
+	u64	ff                               : 1;
+	u64	mf                               : 1;
+	u64	pkt_israg                        : 1;
+	u64	tx_holb_timer_overflow           : 1;
+	u64	tx_holb_timer_running            : 1;
+	u64	trnseq_0                         : 3;
+	u64	trnseq_1                         : 3;
+	u64	trnseq_2                         : 3;
+	u64	trnseq_3                         : 3;
+	u64	trnseq_4                         : 3;
+	u64	trnseq_ex_length                 : 8;
+	u64	trnseq_4_length                  : 8;
+	u64	trnseq_4_offset                  : 8;
+	u64	dps_tx_pop_cnt                   : 2;
+	u64	dps_tx_push_cnt                  : 2;
+	u64	vol_ic_dcph_cfg                  : 1;
+	u64	vol_ic_tag_stts                  : 1;
+	u64	vol_ic_pxkt_init_e               : 1;
+	u64	vol_ic_pkt_init                  : 1;
+	u64	tx_holb_counter                  : 32;
+	u64	trnseq_0_length                  : 8;
+	u64	trnseq_0_offset                  : 8;
+	u64	trnseq_1_length                  : 8;
+	u64	trnseq_1_offset                  : 8;
+	u64	trnseq_2_length                  : 8;
+	u64	trnseq_2_offset                  : 8;
+	u64	trnseq_3_length                  : 8;
+	u64	trnseq_3_offset                  : 8;
+	u64	dmar_valid_length                : 16;
+	u64	dcph_valid_length                : 16;
+	u64	frag_hdr_offset                  : 9;
+	u64	ip_payload_offset                : 9;
+	u64	frag_rule                        : 4;
+	u64	frag_table                       : 1;
+	u64	frag_hit                         : 1;
+	u64	data_cmdq_ptr                    : 8;
+	u64	filter_result                    : 6;
+	u64	router_result                    : 6;
+	u64	nat_result                       : 6;
+	u64	hdri_result                      : 6;
+	u64	dcph_result                      : 6;
+	u64	dcph_result_valid                : 1;
+	u64	not_used_2                       : 4;
+	u64	tx_pkt_suspended                 : 1;
+	u64	tx_pkt_dropped                   : 1;
+	u64	not_used_3                       : 3;
+	u64	metadata_valid                   : 1;
+	u64	metadata_type                    : 4;
+	u64	ul_cs_start_diff                 : 9;
+	u64	cs_disable_trlr_vld_bit          : 1;
+	u64	cs_required                      : 1;
+	u64	dest_hdr_len                     : 8;
+	u64	fr_l                             : 1;
+	u64	fl_h                             : 1;
+	u64	fr_g                             : 1;
+	u64	fr_ret                           : 1;
+	u64	fr_rule_id                       : 10;
+	u64	rt_l                             : 1;
+	u64	rt_h                             : 1;
+	u64	rtng_tbl_index                   : 5;
+	u64	rt_match                         : 1;
+	u64	rt_rule_id                       : 10;
+	u64	nat_tbl_index                    : 13;
+	u64	nat_type                         : 2;
+	u64	hdr_l                            : 1;
+	u64	header_offset                    : 10;
+	u64	not_used_4                       : 1;
+	u64	filter_result_valid              : 1;
+	u64	router_result_valid              : 1;
+	u64	nat_result_valid                 : 1;
+	u64	hdri_result_valid                : 1;
+	u64	not_used_5                       : 1;
+	u64	stream_id                        : 8;
+	u64	not_used_6                       : 6;
+	u64	dcph_context_index               : 2;
+	u64	dcph_cfg_size                    : 16;
+	u64	dcph_cfg_count                   : 32;
+	u64	tag_info                         : 48;
+	u64	ucp_cmd_id                       : 16;
+	u64	metadata                         : 32;
+	u64	ucp_cmd_params                   : 32;
+	u64	nat_ip_address                   : 32;
+	u64	nat_ip_cs_diff                   : 16;
+	u64	frag_dest_pipe                   : 5;
+	u64	frag_nat_type                    : 2;
+	u64	fragr_ret                        : 1;
+	u64	frag_protocol                    : 8;
+	u64	src_ip_address                   : 32;
+	u64	dest_ip_address                  : 32;
+	u64	not_used_7                       : 37;
+	u64	frag_hdr_l                       : 1;
+	u64	frag_header_offset               : 10;
+	u64	frag_id                          : 16;
+} __packed;
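+
+/*
+ * Illustrative sketch (not part of this patch): the 5-bit state field
+ * above can be matched against enum ipa_hw_pkt_cntxt_state_e to tell
+ * where a packet was in the processing pipeline when its context was
+ * captured, e.g.:
+ *
+ *	const struct ipa_pkt_ctntx_s *ctx = <saved context>;
+ *
+ *	if (ctx->state == IPA_HW_PKT_CNTXT_STATE_ROUTER)
+ *		<packet was in the routing block>
+ */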
+
+#endif /* #if !defined(_IPA_PKT_CNTXT_H_) */
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
new file mode 100644
index 0000000..831f9c8
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
@@ -0,0 +1,1510 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#include "ipa_reg_dump.h"
+#include "ipa_access_control.h"
+
+/* Total size required for test bus */
+#define IPA_MEM_OVERLAY_SIZE     0x66000
+
+/*
+ * The following structure contains a hierarchy of structures that
+ * ultimately leads to a series of leaves. The leaves are structures
+ * containing detailed, bit-level register definitions.
+ */
+static struct regs_save_hierarchy_s ipa_reg_save;
+
+static unsigned int ipa_testbus_mem[IPA_MEM_OVERLAY_SIZE];
+
+/*
+ * The following data structure contains a list of the registers
+ * (whose data are to be copied) and the locations (within
+ * ipa_reg_save above) into which the registers' values need to be
+ * copied.
+ */
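+/*
+ * Conceptually (a sketch; the real macro and struct definitions live
+ * in the ipa_reg_dump.h header included above), each entry pairs a
+ * register offset with the address of its slot in ipa_reg_save,
+ * roughly:
+ *
+ *	{ .src_addr = <register offset>,
+ *	  .dst_addr = &ipa_reg_save.<grouping>.<field> }
+ *
+ * which lets the save loop do *dst_addr = in_dword(src_addr)
+ * generically for every entry.
+ */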
+static struct map_src_dst_addr_s ipa_regs_to_save_array[] = {
+	/*
+	 * =====================================================================
+	 * IPA register definitions begin here...
+	 * =====================================================================
+	 */
+
+	/* IPA General Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE,
+			     ipa.gen,
+			     ipa_state),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_RX_ACTIVE,
+			     ipa.gen,
+			     ipa_state_rx_active),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_TX_WRAPPER,
+			     ipa.gen,
+			     ipa_state_tx_wrapper),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_TX0,
+			     ipa.gen,
+			     ipa_state_tx0),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_TX1,
+			     ipa.gen,
+			     ipa_state_tx1),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_AGGR_ACTIVE,
+			     ipa.gen,
+			     ipa_state_aggr_active),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_DFETCHER,
+			     ipa.gen,
+			     ipa_state_dfetcher),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_FETCHER_MASK_0,
+			     ipa.gen,
+			     ipa_state_fetcher_mask_0),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_FETCHER_MASK_1,
+			     ipa.gen,
+			     ipa_state_fetcher_mask_1),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_AOS,
+			     ipa.gen,
+			     ipa_state_gsi_aos),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_IF,
+			     ipa.gen,
+			     ipa_state_gsi_if),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_SKIP,
+			     ipa.gen,
+			     ipa_state_gsi_skip),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_TLV,
+			     ipa.gen,
+			     ipa_state_gsi_tlv),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPL_TIMER_LSB,
+			     ipa.gen,
+			     ipa_dpl_timer_lsb),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPL_TIMER_MSB,
+			     ipa.gen,
+			     ipa_dpl_timer_msb),
+	GEN_SRC_DST_ADDR_MAP(IPA_PROC_IPH_CFG,
+			     ipa.gen,
+			     ipa_proc_iph_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_ROUTE,
+			     ipa.gen,
+			     ipa_route),
+	GEN_SRC_DST_ADDR_MAP(IPA_SPARE_REG_1,
+			     ipa.gen,
+			     ipa_spare_reg_1),
+	GEN_SRC_DST_ADDR_MAP(IPA_SPARE_REG_2,
+			     ipa.gen,
+			     ipa_spare_reg_2),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG,
+			     ipa.gen,
+			     ipa_log),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_CFG,
+			     ipa.gen,
+			     ipa_log_buf_status_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_ADDR,
+			     ipa.gen,
+			     ipa_log_buf_status_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_WRITE_PTR,
+			     ipa.gen,
+			     ipa_log_buf_status_write_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_RAM_PTR,
+			     ipa.gen,
+			     ipa_log_buf_status_ram_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_CFG,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_ADDR,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_WRITE_PTR,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_write_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_RAM_PTR,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_ram_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_DPL_FIFO,
+			     ipa.gen,
+			     ipa_state_dpl_fifo),
+	GEN_SRC_DST_ADDR_MAP(IPA_COMP_HW_VERSION,
+			     ipa.gen,
+			     ipa_comp_hw_version),
+	GEN_SRC_DST_ADDR_MAP(IPA_FILT_ROUT_HASH_EN,
+			     ipa.gen,
+			     ipa_filt_rout_hash_en),
+	GEN_SRC_DST_ADDR_MAP(IPA_FILT_ROUT_HASH_FLUSH,
+			     ipa.gen,
+			     ipa_filt_rout_hash_flush),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_FETCHER,
+			     ipa.gen,
+			     ipa_state_fetcher),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV4_FILTER_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv4_filter_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV6_FILTER_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv6_filter_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV4_ROUTE_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv4_route_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV6_ROUTE_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv6_route_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_BAM_ACTIVATED_PORTS,
+			     ipa.gen,
+			     ipa_bam_activated_ports),
+	GEN_SRC_DST_ADDR_MAP(IPA_TX_COMMANDER_CMDQ_STATUS,
+			     ipa.gen,
+			     ipa_tx_commander_cmdq_status),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_SNIF_EL_EN,
+			     ipa.gen,
+			     ipa_log_buf_hw_snif_el_en),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL,
+			     ipa.gen,
+			     ipa_log_buf_hw_snif_el_wr_n_rd_sel),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX,
+			     ipa.gen,
+			     ipa_log_buf_hw_snif_el_cli_mux),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_ACL,
+			     ipa.gen,
+			     ipa_state_acl),
+	GEN_SRC_DST_ADDR_MAP(IPA_SYS_PKT_PROC_CNTXT_BASE,
+			     ipa.gen,
+			     ipa_sys_pkt_proc_cntxt_base),
+	GEN_SRC_DST_ADDR_MAP(IPA_SYS_PKT_PROC_CNTXT_BASE_MSB,
+			     ipa.gen,
+			     ipa_sys_pkt_proc_cntxt_base_msb),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOCAL_PKT_PROC_CNTXT_BASE,
+			     ipa.gen,
+			     ipa_local_pkt_proc_cntxt_base),
+	GEN_SRC_DST_ADDR_MAP(IPA_RSRC_GRP_CFG,
+			     ipa.gen,
+			     ipa_rsrc_grp_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_PIPELINE_DISABLE,
+			     ipa.gen,
+			     ipa_pipeline_disable),
+	GEN_SRC_DST_ADDR_MAP(IPA_COMP_CFG,
+			     ipa.gen,
+			     ipa_comp_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_NLO_AGGR,
+			     ipa.gen,
+			     ipa_state_nlo_aggr),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_CFG1,
+			     ipa.gen,
+			     ipa_nlo_pp_cfg1),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_CFG2,
+			     ipa.gen,
+			     ipa_nlo_pp_cfg2),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_ACK_LIMIT_CFG,
+			     ipa.gen,
+			     ipa_nlo_pp_ack_limit_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_DATA_LIMIT_CFG,
+			     ipa.gen,
+			     ipa_nlo_pp_data_limit_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_MIN_DSM_CFG,
+			     ipa.gen,
+			     ipa_nlo_min_dsm_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_FLUSH_REQ,
+			     ipa.gen,
+			     ipa_nlo_vp_flush_req),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_FLUSH_COOKIE,
+			     ipa.gen,
+			     ipa_nlo_vp_flush_cookie),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_FLUSH_ACK,
+			     ipa.gen,
+			     ipa_nlo_vp_flush_ack),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_DSM_OPEN,
+			     ipa.gen,
+			     ipa_nlo_vp_dsm_open),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_QBAP_OPEN,
+			     ipa.gen,
+			     ipa_nlo_vp_qbap_open),
+
+	/* Debug Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_DEBUG_DATA,
+			     ipa.dbg,
+			     ipa_debug_data),
+	GEN_SRC_DST_ADDR_MAP(IPA_STEP_MODE_BREAKPOINTS,
+			     ipa.dbg,
+			     ipa_step_mode_breakpoints),
+	GEN_SRC_DST_ADDR_MAP(IPA_STEP_MODE_STATUS,
+			     ipa.dbg,
+			     ipa_step_mode_status),
+
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_CMD_n, ipa_rx_splt_cmdq_cmd_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_CFG_n, ipa_rx_splt_cmdq_cfg_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_0_n, ipa_rx_splt_cmdq_data_wr_0_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_1_n, ipa_rx_splt_cmdq_data_wr_1_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_2_n, ipa_rx_splt_cmdq_data_wr_2_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_3_n, ipa_rx_splt_cmdq_data_wr_3_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_0_n, ipa_rx_splt_cmdq_data_rd_0_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_1_n, ipa_rx_splt_cmdq_data_rd_1_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_2_n, ipa_rx_splt_cmdq_data_rd_2_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_3_n, ipa_rx_splt_cmdq_data_rd_3_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_STATUS_n, ipa_rx_splt_cmdq_status_n),
+
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CFG_WR,
+				  ipa.dbg,
+				  ipa_rx_hps_cmdq_cfg_wr),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CFG_RD,
+				  ipa.dbg,
+				  ipa_rx_hps_cmdq_cfg_rd),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_rx_hps_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_rx_hps_cmdq_status_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0,
+			     ipa.dbg,
+			     ipa_rx_hps_clients_min_depth_0),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+			     ipa.dbg,
+			     ipa_rx_hps_clients_max_depth_0),
+	GEN_SRC_DST_ADDR_MAP(IPA_HPS_DPS_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_hps_dps_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_HPS_DPS_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_hps_dps_cmdq_status_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPS_TX_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_dps_tx_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPS_TX_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_dps_tx_cmdq_status_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_ACKMNGR_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_ackmngr_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_ACKMNGR_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_ackmngr_cmdq_status_empty),
+
+	/*
+	 * NOTE: GEN_SRC_DST_ADDR_MAP() is not used below, because the
+	 *       following registers are not scalar; rather, they are
+	 *       register arrays...
+	 */
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_IRQ_STTS_EE_n,
+				      ipa_irq_stts_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_IRQ_EN_EE_n,
+				      ipa_irq_en_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_FEC_ADDR_EE_n,
+				      ipa_fec_addr_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_FEC_ATTR_EE_n,
+				      ipa_fec_attr_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_SNOC_FEC_EE_n,
+				      ipa_snoc_fec_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_HOLB_DROP_IRQ_INFO_EE_n,
+				      ipa_holb_drop_irq_info_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_SUSPEND_IRQ_INFO_EE_n,
+				      ipa_suspend_irq_info_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_SUSPEND_IRQ_EN_EE_n,
+				      ipa_suspend_irq_en_ee_n),
+
+	/* Pipe Endp Registers */
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CTRL_n,
+					 ipa_endp_init_ctrl_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CTRL_SCND_n,
+					 ipa_endp_init_ctrl_scnd_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CFG_n,
+					 ipa_endp_init_cfg_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_NAT_n,
+					 ipa_endp_init_nat_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_n,
+					 ipa_endp_init_hdr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_EXT_n,
+					 ipa_endp_init_hdr_ext_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+					 ipa_endp_init_hdr_metadata_mask_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_METADATA_n,
+					 ipa_endp_init_hdr_metadata_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_MODE_n,
+					 ipa_endp_init_mode_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_AGGR_n,
+					 ipa_endp_init_aggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+					 ipa_endp_init_hol_block_en_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+					 ipa_endp_init_hol_block_timer_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_DEAGGR_n,
+					 ipa_endp_init_deaggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_STATUS_n,
+					 ipa_endp_status_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_RSRC_GRP_n,
+					 ipa_endp_init_rsrc_grp_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_SEQ_n,
+					 ipa_endp_init_seq_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_GSI_CFG_TLV_n,
+					 ipa_endp_gsi_cfg_tlv_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_GSI_CFG_AOS_n,
+					 ipa_endp_gsi_cfg_aos_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_GSI_CFG1_n,
+					 ipa_endp_gsi_cfg1_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+					 ipa_endp_filter_router_hsh_cfg_n),
+
+	/* Source Resource Group Config Registers */
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_GRP(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					    ipa_src_rsrc_grp_01_rsrc_type_n),
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_GRP(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					    ipa_src_rsrc_grp_23_rsrc_type_n),
+
+	/* Destination Resource Group Config Registers */
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_GRP(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					    ipa_dst_rsrc_grp_01_rsrc_type_n),
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_GRP(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					    ipa_dst_rsrc_grp_23_rsrc_type_n),
+
+	/* Source Resource Group Count Registers */
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_CNT_GRP(
+		IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n,
+		ipa_src_rsrc_grp_0123_rsrc_type_cnt_n),
+
+	/* Destination Resource Group Count Registers */
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_CNT_GRP(
+		IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n,
+		ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n),
+
+	/*
+	 * =====================================================================
+	 * GSI register definitions begin here...
+	 * =====================================================================
+	 */
+
+	/* GSI General Registers */
+	GEN_SRC_DST_ADDR_MAP(GSI_CFG,
+			     gsi.gen,
+			     gsi_cfg),
+	GEN_SRC_DST_ADDR_MAP(GSI_REE_CFG,
+			     gsi.gen,
+			     gsi_ree_cfg),
+	IPA_REG_SAVE_GSI_VER(
+			     IPA_GSI_TOP_GSI_INST_RAM_n,
+			     ipa_gsi_top_gsi_inst_ram_n),
+
+	/* GSI Debug Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_BUSY_REG,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_busy_reg),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_event_pending),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_timer_pending),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_rd_wr_pending),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_pc_from_sw),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_SW_STALL,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_sw_stall),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_pc_for_debug),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_qsb_log_err_trns_id),
+
+	/* GSI IRAM pointers Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_db),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ev_db),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_new_re),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_dis_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_event_gen_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_timer_expired),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_write_eng_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_read_eng_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_uc_gp_int),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_int_mod_stopped),
+
+	/* GSI SHRAM pointers Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr),
+
+	/*
+	 * NOTE: GEN_SRC_DST_ADDR_MAP() is not used below, because the
+	 *       following registers are not scalar; rather, they are
+	 *       register arrays...
+	 */
+
+	/* GSI General EE Registers */
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(GSI_MANAGER_EE_QOS_n,
+					      gsi_manager_ee_qos_n),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_GSI_STATUS,
+					      ee_n_gsi_status),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_TYPE_IRQ,
+					      ee_n_cntxt_type_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_TYPE_IRQ_MSK,
+					      ee_n_cntxt_type_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_GSI_CH_IRQ,
+					      ee_n_cntxt_src_gsi_ch_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_EV_CH_IRQ,
+					      ee_n_cntxt_src_ev_ch_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK,
+					      ee_n_cntxt_src_gsi_ch_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_EV_CH_IRQ_MSK,
+					      ee_n_cntxt_src_ev_ch_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_IEOB_IRQ,
+					      ee_n_cntxt_src_ieob_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_IEOB_IRQ_MSK,
+					      ee_n_cntxt_src_ieob_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_GSI_IRQ_STTS,
+					      ee_n_cntxt_gsi_irq_stts),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_GLOB_IRQ_STTS,
+					      ee_n_cntxt_glob_irq_stts),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_ERROR_LOG,
+					      ee_n_error_log),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SCRATCH_0,
+					      ee_n_cntxt_scratch_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SCRATCH_1,
+					      ee_n_cntxt_scratch_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_INTSET,
+					      ee_n_cntxt_intset),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_MSI_BASE_LSB,
+					      ee_n_cntxt_msi_base_lsb),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_MSI_BASE_MSB,
+					      ee_n_cntxt_msi_base_msb),
+
+	/* GSI Channel Context Registers */
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_0,
+					    ee_n_gsi_ch_k_cntxt_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_1,
+					    ee_n_gsi_ch_k_cntxt_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_2,
+					    ee_n_gsi_ch_k_cntxt_2),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_3,
+					    ee_n_gsi_ch_k_cntxt_3),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_4,
+					    ee_n_gsi_ch_k_cntxt_4),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_5,
+					    ee_n_gsi_ch_k_cntxt_5),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_6,
+					    ee_n_gsi_ch_k_cntxt_6),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_7,
+					    ee_n_gsi_ch_k_cntxt_7),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
+					    ee_n_gsi_ch_k_re_fetch_read_ptr),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
+					    ee_n_gsi_ch_k_re_fetch_write_ptr),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_QOS,
+					    ee_n_gsi_ch_k_qos),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_0,
+					    ee_n_gsi_ch_k_scratch_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_1,
+					    ee_n_gsi_ch_k_scratch_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_2,
+					    ee_n_gsi_ch_k_scratch_2),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_3,
+					    ee_n_gsi_ch_k_scratch_3),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(GSI_MAP_EE_n_CH_k_VP_TABLE,
+					    gsi_map_ee_n_ch_k_vp_table),
+
+	/* GSI Channel Event Context Registers */
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_0,
+					     ee_n_ev_ch_k_cntxt_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_1,
+					     ee_n_ev_ch_k_cntxt_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_2,
+					     ee_n_ev_ch_k_cntxt_2),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_3,
+					     ee_n_ev_ch_k_cntxt_3),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_4,
+					     ee_n_ev_ch_k_cntxt_4),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_5,
+					     ee_n_ev_ch_k_cntxt_5),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_6,
+					     ee_n_ev_ch_k_cntxt_6),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_7,
+					     ee_n_ev_ch_k_cntxt_7),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_8,
+					     ee_n_ev_ch_k_cntxt_8),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_9,
+					     ee_n_ev_ch_k_cntxt_9),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_10,
+					     ee_n_ev_ch_k_cntxt_10),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_11,
+					     ee_n_ev_ch_k_cntxt_11),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_12,
+					     ee_n_ev_ch_k_cntxt_12),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_13,
+					     ee_n_ev_ch_k_cntxt_13),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_SCRATCH_0,
+					     ee_n_ev_ch_k_scratch_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_SCRATCH_1,
+					     ee_n_ev_ch_k_scratch_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(GSI_DEBUG_EE_n_EV_k_VP_TABLE,
+					     gsi_debug_ee_n_ev_k_vp_table),
+
+#if defined(CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS) && \
+	CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS > 0
+	/* Endp Registers for remaining pipes */
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_CTRL_n,
+					       ipa_endp_init_ctrl_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_CTRL_SCND_n,
+					       ipa_endp_init_ctrl_scnd_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_CFG_n,
+					       ipa_endp_init_cfg_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_NAT_n,
+					       ipa_endp_init_nat_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HDR_n,
+					       ipa_endp_init_hdr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HDR_EXT_n,
+					       ipa_endp_init_hdr_ext_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA
+		(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+		ipa_endp_init_hdr_metadata_mask_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HDR_METADATA_n,
+					       ipa_endp_init_hdr_metadata_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_MODE_n,
+					       ipa_endp_init_mode_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_AGGR_n,
+					       ipa_endp_init_aggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+					       ipa_endp_init_hol_block_en_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+					       ipa_endp_init_hol_block_timer_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_DEAGGR_n,
+					       ipa_endp_init_deaggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_STATUS_n,
+					       ipa_endp_status_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_RSRC_GRP_n,
+					       ipa_endp_init_rsrc_grp_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_SEQ_n,
+					       ipa_endp_init_seq_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_GSI_CFG_TLV_n,
+					       ipa_endp_gsi_cfg_tlv_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_GSI_CFG_AOS_n,
+					       ipa_endp_gsi_cfg_aos_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_GSI_CFG1_n,
+					       ipa_endp_gsi_cfg1_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA
+		(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		 ipa_endp_filter_router_hsh_cfg_n),
+#endif
+};
+
+/* IPA uC PER registers save Cfg array */
+static struct map_src_dst_addr_s ipa_uc_regs_to_save_array[] = {
+	/* HWP registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_SYS_ADDR,
+			     ipa.hwp,
+			     ipa_uc_qmb_sys_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_LOCAL_ADDR,
+			     ipa.hwp,
+			     ipa_uc_qmb_local_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_LENGTH,
+			     ipa.hwp,
+			     ipa_uc_qmb_length),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_TRIGGER,
+			     ipa.hwp,
+			     ipa_uc_qmb_trigger),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_PENDING_TID,
+			     ipa.hwp,
+			     ipa_uc_qmb_pending_tid),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK,
+			     ipa.hwp,
+			     ipa_uc_qmb_completed_rd_fifo_peek),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK,
+			     ipa.hwp,
+			     ipa_uc_qmb_completed_wr_fifo_peek),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_MISC,
+			     ipa.hwp,
+			     ipa_uc_qmb_misc),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_STATUS,
+			     ipa.hwp,
+			     ipa_uc_qmb_status),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_BUS_ATTRIB,
+			     ipa.hwp,
+			     ipa_uc_qmb_bus_attrib),
+};
+
+static void ipa_hal_save_regs_save_ipa_testbus(void);
+static void ipa_reg_save_gsi_fifo_status(void);
+static void ipa_reg_save_rsrc_cnts(void);
+static void ipa_hal_save_regs_ipa_cmdq(void);
+static void ipa_hal_save_regs_rsrc_db(void);
+static void ipa_reg_save_anomaly_check(void);
+
+static struct reg_access_funcs_s *get_access_funcs(u32 addr)
+{
+	u32 i, asub = ipa3_ctx->sd_state;
+
+	for (i = 0; i < ARRAY_SIZE(mem_access_map); i++) {
+		if (addr >= mem_access_map[i].addr_range_begin &&
+			addr <= mem_access_map[i].addr_range_end) {
+			return mem_access_map[i].access[asub];
+		}
+	}
+
+	IPAERR("Unknown register offset(0x%08X). Using dflt access methods\n",
+		   addr);
+
+	return &io_matrix[AA_COMBO];
+}
+
+static u32 in_dword(
+	u32 addr)
+{
+	struct reg_access_funcs_s *io = get_access_funcs(addr);
+
+	return io->read(ipa3_ctx->reg_collection_base + addr);
+}
+
+static u32 in_dword_masked(
+	u32 addr,
+	u32 mask)
+{
+	struct reg_access_funcs_s *io = get_access_funcs(addr);
+	u32 val;
+
+	val = io->read(ipa3_ctx->reg_collection_base + addr);
+
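+	/*
+	 * The mask is applied only when the offset was genuinely read
+	 * from hardware (act_read); other access methods return their
+	 * value unmasked.
+	 */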
+	if (io->read == act_read)
+		return val & mask;
+
+	return val;
+}
+
+static void out_dword(
+	u32 addr,
+	u32 val)
+{
+	struct reg_access_funcs_s *io = get_access_funcs(addr);
+
+	io->write(ipa3_ctx->reg_collection_base + addr, val);
+}
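+
+/*
+ * Illustrative sketch (not part of this patch): combining the
+ * accessors above with the ipa_hwio_def_*_u unions gives a
+ * read/modify/write on a register; the offset macro named here is a
+ * hypothetical stand-in for the real HWIO offset definitions:
+ *
+ *	union ipa_hwio_def_ipa_log_buf_status_cfg_u cfg;
+ *
+ *	cfg.value = in_dword(HWIO_IPA_LOG_BUF_STATUS_CFG_OFFS);
+ *	cfg.def.enable = 1;
+ *	out_dword(HWIO_IPA_LOG_BUF_STATUS_CFG_OFFS, cfg.value);
+ */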
+
+/*
+ * FUNCTION:  ipa_save_gsi_ver
+ *
+ * Saves the GSI firmware version
+ *
+ * @return
+ * None
+ */
+void ipa_save_gsi_ver(void)
+{
+	if (!ipa3_ctx->do_register_collection_on_crash)
+		return;
+
+	ipa_reg_save.gsi.fw_ver =
+		IPA_READ_1xVECTOR_REG(IPA_GSI_TOP_GSI_INST_RAM_n, 0);
+}
+
+/*
+ * FUNCTION:  ipa_save_registers
+ *
+ * Saves all the IPA register values that are configured for collection
+ *
+ * @return
+ * None
+ */
+void ipa_save_registers(void)
+{
+	u32 i = 0;
+	/* Fetch the number of registers configured to be saved */
+	u32 num_regs = ARRAY_SIZE(ipa_regs_to_save_array);
+	u32 num_uc_per_regs = ARRAY_SIZE(ipa_uc_regs_to_save_array);
+	union ipa_hwio_def_ipa_rsrc_mngr_db_cfg_u for_cfg;
+	union ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_u for_read;
+
+	if (!ipa3_ctx->do_register_collection_on_crash)
+		return;
+
+	IPAERR("Commencing\n");
+
+	/*
+	 * Exclude the GSI FIFO and the extra-pipe endp registers from
+	 * the count for now.  They are saved later.
+	 */
+	num_regs -= (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+		     IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS);
+
+	memset(&for_cfg, 0, sizeof(for_cfg));
+	memset(&for_read, 0, sizeof(for_read));
+
+	/* Now save all the configured registers */
+	for (i = 0; i < num_regs; i++) {
+		/* Copy reg value to our data struct */
+		*(ipa_regs_to_save_array[i].dst_addr) =
+			in_dword(ipa_regs_to_save_array[i].src_addr);
+	}
+
+	/*
+	 * Set the active flag for all active pipe indexed registers.
+	 */
+	for (i = 0; i < IPA_HW_PIPE_ID_MAX; i++)
+		ipa_reg_save.ipa.pipes[i].active = true;
+
+	/* Now save the per endp registers for the remaining pipes */
+	for (i = 0; i < (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+			 IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS); i++) {
+		/* Copy reg value to our data struct */
+		*(ipa_regs_to_save_array[num_regs + i].dst_addr) =
+			in_dword(ipa_regs_to_save_array[num_regs + i].src_addr);
+	}
+
+	IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA_ACTIVE();
+
+	num_regs += (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+		     IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS);
+
+	/* Saving GSI FIFO Status registers */
+	ipa_reg_save_gsi_fifo_status();
+
+	/*
+	 * On targets that support SSR, we generally want to disable
+	 * the following reg save functionality as it may cause stalls
+	 * in IPA after the SSR.
+	 *
+	 * To override this, set do_non_tn_collection_on_crash to
+	 * true, via dtsi, and the collection will be done.
+	 */
+	if (ipa3_ctx->do_non_tn_collection_on_crash) {
+		/* Save all the uC PER configured registers */
+		for (i = 0; i < num_uc_per_regs; i++) {
+			/* Copy reg value to our data struct */
+			*(ipa_uc_regs_to_save_array[i].dst_addr) =
+			    in_dword(ipa_uc_regs_to_save_array[i].src_addr);
+		}
+
+		/* Saving CMD Queue registers */
+		ipa_hal_save_regs_ipa_cmdq();
+
+		/* Collecting resource DB information */
+		ipa_hal_save_regs_rsrc_db();
+
+		/* Save IPA testbus */
+		if (ipa3_ctx->do_testbus_collection_on_crash)
+			ipa_hal_save_regs_save_ipa_testbus();
+	}
+
+	/* GSI test bus and QSB log */
+	for (i = 0;
+	     i < ARRAY_SIZE(ipa_reg_save_gsi_ch_test_bus_selector_array);
+	     i++) {
+		/* Write test bus selector */
+		IPA_WRITE_SCALER_REG(
+			GSI_TEST_BUS_SEL,
+			ipa_reg_save_gsi_ch_test_bus_selector_array[i]);
+
+		ipa_reg_save.gsi.debug.gsi_test_bus.test_bus_reg[
+		    i].gsi_testbus_reg =
+		    (u32) IPA_READ_SCALER_REG(GSI_TEST_BUS_REG);
+	}
+
+	ipa_reg_save_rsrc_cnts();
+
+	for (i = 0; i < HWIO_GSI_DEBUG_SW_RF_n_READ_MAXn + 1; i++)
+		ipa_reg_save.gsi.debug.gsi_mcs_regs.mcs_reg[i].rf_reg =
+			IPA_READ_1xVECTOR_REG(GSI_DEBUG_SW_RF_n_READ, i);
+
+	for (i = 0; i < HWIO_GSI_DEBUG_COUNTERn_MAXn + 1; i++)
+		ipa_reg_save.gsi.debug.gsi_cnt_regs.cnt[i].counter_value =
+			(u16)IPA_READ_1xVECTOR_REG(GSI_DEBUG_COUNTERn, i);
+
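+	/*
+	 * For each valid A7 channel, recover the MCS channel scratch4
+	 * and scratch5 words from the last two entries of that
+	 * channel's region in GSI SHRAM.
+	 */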
+	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7; i++) {
+		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.a7[
+			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
+		u32 n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+
+		if (!ipa_reg_save.gsi.ch_cntxt.a7[
+				i].gsi_map_ee_n_ch_k_vp_table.valid)
+			continue;
+		ipa_reg_save.gsi.ch_cntxt.a7[
+			i].mcs_channel_scratch.scratch4.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 2);
+		ipa_reg_save.gsi.ch_cntxt.a7[
+			i].mcs_channel_scratch.scratch5.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
+	}
+
+	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC; i++) {
+		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.uc[
+			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
+		u32 n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+
+		if (!ipa_reg_save.gsi.ch_cntxt.uc[
+				i].gsi_map_ee_n_ch_k_vp_table.valid)
+			continue;
+		ipa_reg_save.gsi.ch_cntxt.uc[
+			i].mcs_channel_scratch.scratch4.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 2);
+		ipa_reg_save.gsi.ch_cntxt.uc[
+			i].mcs_channel_scratch.scratch5.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
+	}
+
+	/*
+	 * On targets that support SSR, we generally want to disable
+	 * the following reg save functionality as it may cause stalls
+	 * in IPA after the SSR.
+	 *
+	 * To override this, set do_non_tn_collection_on_crash to
+	 * true, via dtsi, and the collection will be done.
+	 */
+	if (ipa3_ctx->do_non_tn_collection_on_crash) {
+		u32 ofst = GEN_2xVECTOR_REG_OFST(IPA_CTX_ID_m_CTX_NUM_n, 0, 0);
+		struct reg_access_funcs_s *io = get_access_funcs(ofst);
+		/*
+		 * If the memory is accessible, copy pkt context directly from
+		 * IPA_CTX_ID register space
+		 */
+		if (io->read == act_read) {
+			memcpy((void *)ipa_reg_save.pkt_ctntx,
+				   (const void *)
+				   (ipa3_ctx->reg_collection_base + ofst),
+				   sizeof(ipa_reg_save.pkt_ctntx));
+
+			for_cfg.value =
+				IPA_READ_SCALER_REG(IPA_RSRC_MNGR_DB_CFG);
+
+			for_cfg.def.rsrc_type_sel = 0;
+
+			IPA_MASKED_WRITE_SCALER_REG(
+				IPA_RSRC_MNGR_DB_CFG,
+				for_cfg.value);
+
+			for (i = 0; i < IPA_HW_PKT_CTNTX_MAX; i++) {
+				for_cfg.def.rsrc_id_sel = i;
+
+				IPA_MASKED_WRITE_SCALER_REG(
+					IPA_RSRC_MNGR_DB_CFG,
+					for_cfg.value);
+
+				for_read.value =
+					IPA_READ_SCALER_REG(
+						IPA_RSRC_MNGR_DB_RSRC_READ);
+
+				if (for_read.def.rsrc_occupied) {
+					ipa_reg_save.pkt_ctntx_active[i] = true;
+					ipa_reg_save.pkt_cntxt_state[i] =
+						(enum ipa_hw_pkt_cntxt_state_e)
+						ipa_reg_save.pkt_ctntx[i].state;
+				}
+			}
+		} else {
+			IPAERR("IPA_CTX_ID is not currently accessible\n");
+		}
+	}
+
+	ipa_reg_save_anomaly_check();
+
+	IPAERR("Completed\n");
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_gsi_fifo_status
+ *
+ * This function saves the GSI FIFO Status registers for all endpoints
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_reg_save_gsi_fifo_status(void)
+{
+	union ipa_hwio_def_ipa_gsi_fifo_status_ctrl_u gsi_fifo_status_ctrl;
+	u8 i;
+
+	memset(&gsi_fifo_status_ctrl, 0, sizeof(gsi_fifo_status_ctrl));
+
+	for (i = 0; i < IPA_HW_PIPE_ID_MAX; i++) {
+		gsi_fifo_status_ctrl.def.ipa_gsi_fifo_status_en = 1;
+		gsi_fifo_status_ctrl.def.ipa_gsi_fifo_status_port_sel = i;
+
+		IPA_MASKED_WRITE_SCALER_REG(IPA_GSI_FIFO_STATUS_CTRL,
+				     gsi_fifo_status_ctrl.value);
+
+		ipa_reg_save.gsi_fifo_status[i].gsi_fifo_status_ctrl.value =
+			IPA_READ_SCALER_REG(IPA_GSI_FIFO_STATUS_CTRL);
+		ipa_reg_save.gsi_fifo_status[i].gsi_tlv_fifo_status.value =
+			IPA_READ_SCALER_REG(IPA_GSI_TLV_FIFO_STATUS);
+		ipa_reg_save.gsi_fifo_status[i].gsi_aos_fifo_status.value =
+			IPA_READ_SCALER_REG(IPA_GSI_AOS_FIFO_STATUS);
+	}
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_rsrc_cnts
+ *
+ * This function saves the resource counts for all PCIE and DDR
+ * resource groups.
+ *
+ * @param
+ * @return
+ */
+static void ipa_reg_save_rsrc_cnts(void)
+{
+	union ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_u
+		src_0123_rsrc_cnt;
+	union ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_u
+		dst_0123_rsrc_cnt;
+
+	ipa_reg_save.rsrc_cnts.pcie.resource_group = IPA_HW_PCIE_SRC_RSRP_GRP;
+	ipa_reg_save.rsrc_cnts.ddr.resource_group = IPA_HW_DDR_SRC_RSRP_GRP;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 0);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.pkt_cntxt =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.pkt_cntxt =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 1);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.descriptor_list =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.descriptor_list =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 2);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.data_descriptor_buffer =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.data_descriptor_buffer =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 3);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.hps_dmars =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.hps_dmars =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 4);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.reserved_acks =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.reserved_acks =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	dst_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 0);
+
+	ipa_reg_save.rsrc_cnts.pcie.dst.reserved_sectors =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.dst.reserved_sectors =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_1_cnt;
+
+	dst_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 1);
+
+	ipa_reg_save.rsrc_cnts.pcie.dst.dps_dmars =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.dst.dps_dmars =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_1_cnt;
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_rsrc_cnts_test_bus
+ *
+ * This function saves the resource counts for all PCIE and DDR
+ * resource groups collected from test bus.
+ *
+ * @param
+ *
+ * @return
+ */
+void ipa_reg_save_rsrc_cnts_test_bus(void)
+{
+	int32_t rsrc_type = 0;
+
+	ipa_reg_save.rsrc_cnts.pcie.resource_group = IPA_HW_PCIE_SRC_RSRP_GRP;
+	ipa_reg_save.rsrc_cnts.ddr.resource_group = IPA_HW_DDR_SRC_RSRP_GRP;
+
+	rsrc_type = 0;
+	ipa_reg_save.rsrc_cnts.pcie.src.pkt_cntxt =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.pkt_cntxt =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 1;
+	ipa_reg_save.rsrc_cnts.pcie.src.descriptor_list =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.descriptor_list =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 2;
+	ipa_reg_save.rsrc_cnts.pcie.src.data_descriptor_buffer =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.data_descriptor_buffer =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 3;
+	ipa_reg_save.rsrc_cnts.pcie.src.hps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.hps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 4;
+	ipa_reg_save.rsrc_cnts.pcie.src.reserved_acks =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.reserved_acks =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 5;
+	ipa_reg_save.rsrc_cnts.pcie.dst.reserved_sectors =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_DEST_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.dst.reserved_sectors =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_DEST_RSRP_GRP);
+
+	rsrc_type = 6;
+	ipa_reg_save.rsrc_cnts.pcie.dst.dps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_DEST_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.dst.dps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_DEST_RSRP_GRP);
+}
+
+/*
+ * FUNCTION:  ipa_hal_save_regs_ipa_cmdq
+ *
+ * This function saves the various IPA CMDQ registers
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_hal_save_regs_ipa_cmdq(void)
+{
+	int32_t i;
+	union ipa_hwio_def_ipa_rx_hps_cmdq_cmd_u rx_hps_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_hps_dps_cmdq_cmd_u hps_dps_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_dps_tx_cmdq_cmd_u dps_tx_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_ackmngr_cmdq_cmd_u ackmngr_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_u
+		prod_ackmngr_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_u ntf_tx_cmdq_cmd = { { 0 } };
+
+	/* Save RX_HPS CMDQ   */
+	for (i = 0; i < IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS; i++) {
+		rx_hps_cmdq_cmd.def.rd_req = 0;
+		rx_hps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_RX_HPS_CMDQ_CMD,
+				     rx_hps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_COUNT);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_STATUS);
+		rx_hps_cmdq_cmd.def.rd_req = 1;
+		rx_hps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_RX_HPS_CMDQ_CMD,
+				     rx_hps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_0);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_1_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_1);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_2_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_2);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_3_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_3);
+	}
+
+	/* Save HPS_DPS CMDQ   */
+	for (i = 0; i < IPA_TESTBUS_SEL_EP_MAX + 1; i++) {
+		hps_dps_cmdq_cmd.def.rd_req = 0;
+		hps_dps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_HPS_DPS_CMDQ_CMD,
+				     hps_dps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_hps_dps_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_HPS_DPS_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_hps_dps_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_HPS_DPS_CMDQ_COUNT);
+
+		hps_dps_cmdq_cmd.def.rd_req = 1;
+		hps_dps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_HPS_DPS_CMDQ_CMD,
+				     hps_dps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_hps_dps_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_HPS_DPS_CMDQ_DATA_RD_0);
+	}
+
+	/* Save DPS_TX CMDQ   */
+	for (i = 0; i < IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS; i++) {
+		dps_tx_cmdq_cmd.def.cmd_client = i;
+		dps_tx_cmdq_cmd.def.rd_req = 0;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_DPS_TX_CMDQ_CMD,
+				     dps_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_dps_tx_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_DPS_TX_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_dps_tx_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_DPS_TX_CMDQ_COUNT);
+
+		dps_tx_cmdq_cmd.def.cmd_client = i;
+		dps_tx_cmdq_cmd.def.rd_req = 1;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_DPS_TX_CMDQ_CMD,
+				     dps_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_dps_tx_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_DPS_TX_CMDQ_DATA_RD_0);
+	}
+
+	/* Save ACKMNGR CMDQ   */
+	for (i = 0; i < IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS; i++) {
+		ackmngr_cmdq_cmd.def.rd_req = 0;
+		ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_ACKMNGR_CMDQ_CMD,
+				     ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ackmngr_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_ACKMNGR_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_ackmngr_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_ACKMNGR_CMDQ_COUNT);
+
+		ackmngr_cmdq_cmd.def.rd_req = 1;
+		ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_ACKMNGR_CMDQ_CMD,
+				     ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ackmngr_cmdq_data_rd_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_ACKMNGR_CMDQ_DATA_RD);
+	}
+
+	/* Save PROD ACKMNGR CMDQ   */
+	for (i = 0; i < IPA_TESTBUS_SEL_EP_MAX + 1; i++) {
+		prod_ackmngr_cmdq_cmd.def.rd_req = 0;
+		prod_ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_PROD_ACKMNGR_CMDQ_CMD,
+				     prod_ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_prod_ackmngr_cmdq_status_arr[i].value
+			= IPA_READ_SCALER_REG(
+				IPA_PROD_ACKMNGR_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_prod_ackmngr_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_PROD_ACKMNGR_CMDQ_COUNT);
+		prod_ackmngr_cmdq_cmd.def.rd_req = 1;
+		prod_ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_PROD_ACKMNGR_CMDQ_CMD,
+				     prod_ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_prod_ackmngr_cmdq_data_rd_arr[
+			i].value =
+			IPA_READ_SCALER_REG(
+				IPA_PROD_ACKMNGR_CMDQ_DATA_RD);
+	}
+
+	/* Save NTF_TX CMDQ   */
+	for (i = 0; i < IPA_TESTBUS_SEL_EP_MAX + 1; i++) {
+		ntf_tx_cmdq_cmd.def.rd_req = 0;
+		ntf_tx_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_NTF_TX_CMDQ_CMD,
+				     ntf_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ntf_tx_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_NTF_TX_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_ntf_tx_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_NTF_TX_CMDQ_COUNT);
+		ntf_tx_cmdq_cmd.def.rd_req = 1;
+		ntf_tx_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_NTF_TX_CMDQ_CMD,
+				     ntf_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ntf_tx_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_NTF_TX_CMDQ_DATA_RD_0);
+	}
+}
+
+/*
+ * FUNCTION:  ipa_hal_save_regs_save_ipa_testbus
+ *
+ * This function saves the IPA testbus
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_hal_save_regs_save_ipa_testbus(void)
+{
+	s32 sel_internal, sel_external, sel_ep;
+	union ipa_hwio_def_ipa_testbus_sel_u testbus_sel = { { 0 } };
+
+	if (ipa_reg_save.ipa.testbus == NULL) {
+		/*
+		 * Test-bus structure not allocated - exit test-bus collection
+		 */
+		IPADBG("ipa_reg_save.ipa.testbus was not allocated\n");
+		return;
+	}
+
+	/* Enable Test-bus */
+	testbus_sel.value = 0;
+	testbus_sel.def.testbus_en = true;
+
+	IPA_WRITE_SCALER_REG(IPA_TESTBUS_SEL, testbus_sel.value);
+
+	for (sel_external = 0;
+		 sel_external <= IPA_TESTBUS_SEL_EXTERNAL_MAX;
+		 sel_external++) {
+
+		for (sel_internal = 0;
+			 sel_internal <= IPA_TESTBUS_SEL_INTERNAL_MAX;
+			 sel_internal++) {
+
+			testbus_sel.def.pipe_select = 0;
+			testbus_sel.def.external_block_select =
+				sel_external;
+			testbus_sel.def.internal_block_select =
+				sel_internal;
+
+			IPA_MASKED_WRITE_SCALER_REG(
+				IPA_TESTBUS_SEL,
+				testbus_sel.value);
+
+			ipa_reg_save.ipa.testbus->global.global[
+				sel_internal][sel_external].testbus_sel.value =
+				testbus_sel.value;
+
+			ipa_reg_save.ipa.testbus->global.global[
+				sel_internal][sel_external].testbus_data.value =
+				IPA_READ_SCALER_REG(IPA_DEBUG_DATA);
+		}
+	}
+
+	/* Collect per EP test bus */
+	for (sel_ep = 0;
+		 sel_ep <= IPA_TESTBUS_SEL_EP_MAX;
+		 sel_ep++) {
+
+		for (sel_external = 0;
+			 sel_external <=
+				 IPA_TESTBUS_SEL_EXTERNAL_MAX;
+			 sel_external++) {
+
+			for (sel_internal = 0;
+				 sel_internal <=
+					 IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX;
+				 sel_internal++) {
+
+				testbus_sel.def.pipe_select = sel_ep;
+				testbus_sel.def.external_block_select =
+					sel_external;
+				testbus_sel.def.internal_block_select =
+					sel_internal;
+
+				IPA_MASKED_WRITE_SCALER_REG(
+					IPA_TESTBUS_SEL,
+					testbus_sel.value);
+
+				ipa_reg_save.ipa.testbus->ep[sel_ep].entry_ep[
+					sel_internal][sel_external].
+					testbus_sel.value =
+					testbus_sel.value;
+
+				ipa_reg_save.ipa.testbus->ep[sel_ep].entry_ep[
+					sel_internal][sel_external].
+					testbus_data.value =
+					IPA_READ_SCALER_REG(
+						IPA_DEBUG_DATA);
+			}
+		}
+	}
+
+	/* Disable Test-bus */
+	testbus_sel.value = 0;
+
+	IPA_WRITE_SCALER_REG(
+		IPA_TESTBUS_SEL,
+		testbus_sel.value);
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_init
+ *
+ * This function initializes and memsets the register save struct.
+ *
+ * @param
+ *
+ * @return
+ */
+int ipa_reg_save_init(u32 value)
+{
+	u32 i, num_regs = ARRAY_SIZE(ipa_regs_to_save_array);
+
+	if (!ipa3_ctx->do_register_collection_on_crash)
+		return 0;
+
+	memset(&ipa_reg_save, value, sizeof(ipa_reg_save));
+
+	ipa_reg_save.ipa.testbus = NULL;
+
+	if (ipa3_ctx->do_testbus_collection_on_crash) {
+		ipa_reg_save.ipa.testbus =
+		    (struct ipa_reg_save_ipa_testbus_s *) ipa_testbus_mem;
+	}
+
+	/* setup access for register collection/dump on crash */
+	IPADBG("Mapping 0x%x bytes starting at 0x%x\n",
+	       ipa3_ctx->entire_ipa_block_size,
+	       ipa3_ctx->ipa_wrapper_base);
+
+	ipa3_ctx->reg_collection_base =
+		ioremap(ipa3_ctx->ipa_wrapper_base,
+			ipa3_ctx->entire_ipa_block_size);
+
+	if (!ipa3_ctx->reg_collection_base) {
+		IPAERR(":register collection ioremap err\n");
+		return -EFAULT;
+	}
+
+	num_regs -= (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+		     IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS);
+
+	for (i = 0; i < (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+			 IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS); i++)
+		*(ipa_regs_to_save_array[num_regs + i].dst_addr) = 0x0;
+
+	return 0;
+}
+
+/*
+ * FUNCTION:  ipa_hal_save_regs_rsrc_db
+ *
+ * This function saves the various IPA RSRC_MNGR_DB registers
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_hal_save_regs_rsrc_db(void)
+{
+	u32 rsrc_type = 0;
+	u32 rsrc_id = 0;
+	u32 rsrc_group = 0;
+	union ipa_hwio_def_ipa_rsrc_mngr_db_cfg_u
+		ipa_rsrc_mngr_db_cfg = { { 0 } };
+
+	ipa_rsrc_mngr_db_cfg.def.rsrc_grp_sel = rsrc_group;
+
+	for (rsrc_type = 0; rsrc_type <= IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX;
+	     rsrc_type++) {
+		for (rsrc_id = 0; rsrc_id <= IPA_RSCR_MNGR_DB_RSRC_ID_MAX;
+		     rsrc_id++) {
+			ipa_rsrc_mngr_db_cfg.def.rsrc_id_sel = rsrc_id;
+			ipa_rsrc_mngr_db_cfg.def.rsrc_type_sel = rsrc_type;
+			IPA_MASKED_WRITE_SCALER_REG(IPA_RSRC_MNGR_DB_CFG,
+					     ipa_rsrc_mngr_db_cfg.value);
+			ipa_reg_save.ipa.dbg.ipa_rsrc_mngr_db_rsrc_read_arr
+			    [rsrc_type][rsrc_id].value =
+			    IPA_READ_SCALER_REG(
+					IPA_RSRC_MNGR_DB_RSRC_READ);
+			ipa_reg_save.ipa.dbg.ipa_rsrc_mngr_db_list_read_arr
+			    [rsrc_type][rsrc_id].value =
+			    IPA_READ_SCALER_REG(
+					IPA_RSRC_MNGR_DB_LIST_READ);
+		}
+	}
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_anomaly_check
+ *
+ * Checks RX state and TX state upon crash dump collection and prints
+ * anomalies.
+ *
+ * TBD: Add more anomaly checks in the future.
+ *
+ * @return
+ */
+static void ipa_reg_save_anomaly_check(void)
+{
+	if ((ipa_reg_save.ipa.gen.ipa_state.rx_wait != 0)
+	    || (ipa_reg_save.ipa.gen.ipa_state.rx_idle != 1)) {
+		IPADBG(
+		    "RX ACTIVITY, ipa_state.rx_wait = %d, ipa_state.rx_idle = %d, ipa_state_rx_active.endpoints = %d (bitmask)\n",
+		    ipa_reg_save.ipa.gen.ipa_state.rx_wait,
+		    ipa_reg_save.ipa.gen.ipa_state.rx_idle,
+		    ipa_reg_save.ipa.gen.ipa_state_rx_active.endpoints);
+
+		if (ipa_reg_save.ipa.gen.ipa_state.tx_idle != 1) {
+			IPADBG(
+			    "TX ACTIVITY, ipa_state.idle = %d, ipa_state_tx_wrapper.tx0_idle = %d, ipa_state_tx_wrapper.tx1_idle = %d\n",
+			    ipa_reg_save.ipa.gen.ipa_state.tx_idle,
+			    ipa_reg_save.ipa.gen.ipa_state_tx_wrapper.tx0_idle,
+			    ipa_reg_save.ipa.gen.ipa_state_tx_wrapper.tx1_idle);
+
+			IPADBG(
+			    "ipa_state_tx0.last_cmd_pipe = %d, ipa_state_tx1.last_cmd_pipe = %d\n",
+			    ipa_reg_save.ipa.gen.ipa_state_tx0.last_cmd_pipe,
+			    ipa_reg_save.ipa.gen.ipa_state_tx1.last_cmd_pipe);
+		}
+	}
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
new file mode 100644
index 0000000..8707e9c
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
@@ -0,0 +1,1367 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_REG_DUMP_H_)
+#define _IPA_REG_DUMP_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include "ipa_i.h"
+
+#include "ipa_pkt_cntxt.h"
+#include "ipa_hw_common_ex.h"
+
+/*
+ * The following macros are used to peek and poke register values and
+ * are required by some of the macros and include files that follow...
+ */
+#define my_in_dword(addr) \
+	(readl(addr))
+
+#define my_out_dword(addr, val) \
+	({ __iowmb(); writel_relaxed((val), (addr)); })
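+
+/*
+ * Note: my_out_dword pairs an explicit I/O write barrier (__iowmb)
+ * with the relaxed writel variant, which carries no barrier of its
+ * own; on arm64 this is effectively what a plain writel() expands to.
+ */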
+
+#define IPA_0_IPA_WRAPPER_BASE 0 /* required by following includes */
+
+#include "ipa_hwio.h"
+#include "gsi_hwio.h"
+#include "ipa_gcc_hwio.h"
+
+#include "ipa_hwio_def.h"
+#include "gsi_hwio_def.h"
+#include "ipa_gcc_hwio_def.h"
+
+#define IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS     0x6
+#define IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS     0x4
+#define IPA_DEBUG_TESTBUS_RSRC_NUM_EP            7
+#define IPA_DEBUG_TESTBUS_RSRC_NUM_GRP           3
+#define IPA_TESTBUS_SEL_EP_MAX                   0x1F
+#define IPA_TESTBUS_SEL_EXTERNAL_MAX             0x40
+#define IPA_TESTBUS_SEL_INTERNAL_MAX             0xFF
+#define IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX        0x40
+#define IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS     0x9
+#define IPA_RSCR_MNGR_DB_RSRC_ID_MAX             0x3F
+#define IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX           0xA
+
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_ZEROS   (0x0)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_0   (0x1)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_1   (0x2)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_2   (0x3)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_3   (0x4)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_4   (0x5)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_DB_ENG  (0x9)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_0   (0xB)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_1   (0xC)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_2   (0xD)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_3   (0xE)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_0   (0x13)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_1   (0x14)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_2   (0x15)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_3   (0x16)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_4   (0x17)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_5   (0x18)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_0    (0x1B)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_1    (0x1C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_0    (0x1F)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_1    (0x20)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_2    (0x21)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_3    (0x22)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_4    (0x23)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_0  (0x27)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_1  (0x28)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_2  (0x29)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_3  (0x2A)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_0   (0x2B)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_1   (0x2C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_2   (0x2D)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_3   (0x2E)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_0 (0x33)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_1 (0x34)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_2 (0x35)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_3 (0x36)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR     (0x3A)
+
+#define IPA_DEBUG_TESTBUS_DEF_EXTERNAL           50
+#define IPA_DEBUG_TESTBUS_DEF_INTERNAL           6
+
+#define IPA_REG_SAVE_GSI_NUM_EE                  3
+
+#define IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS         22
+
+#define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_BIT_MASK 0x7E000
+#define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_SHIFT    13
+
+#define IPA_REG_SAVE_HWP_GSI_EE                  2
+
+/*
+ * A structure used to map a source address to a destination address...
+ */
+struct map_src_dst_addr_s {
+	u32  src_addr; /* register offset to copy value from */
+	u32 *dst_addr; /* memory address to copy register value to */
+};
+
+/*
+ * A macro to generate the names of scalar (i.e. non-vector) registers
+ * that reside in the *hwio.h files (said files contain the manifest
+ * constants for the registers' offsets in the register memory map).
+ */
+#define GEN_SCALER_REG_OFST(reg_name) \
+	(HWIO_ ## reg_name ## _ADDR)
+/*
+ * A macro designed to generate the rmsk associated with reg_name
+ */
+#define GEN_SCALER_REG_RMSK(reg_name) \
+	(HWIO_ ## reg_name ## _RMSK)
+
+/*
+ * A macro to generate the names of vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate access to registers that are
+ * addressed via one dimension.
+ */
+#define GEN_1xVECTOR_REG_OFST(reg_name, row) \
+	(HWIO_ ## reg_name ## _ADDR(row))
+
+/*
+ * A macro to generate the names of vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate access to registers that are
+ * addressed via two dimensions.
+ */
+#define GEN_2xVECTOR_REG_OFST(reg_name, row, col) \
+	(HWIO_ ## reg_name ## _ADDR(row, col))
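+
+/*
+ * Illustration (REG_n/REG_m_n are hypothetical register names; the
+ * real names come from the *hwio.h files):
+ *
+ *   GEN_1xVECTOR_REG_OFST(REG_n, 2)      => HWIO_REG_n_ADDR(2)
+ *   GEN_2xVECTOR_REG_OFST(REG_m_n, 0, 2) => HWIO_REG_m_n_ADDR(0, 2)
+ */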
+
+/*
+ * A macro to generate the access to scalar registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate read access from a scalar
+ * register.
+ */
+#define IPA_READ_SCALER_REG(reg_name) \
+	HWIO_ ## reg_name ## _IN
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate read access from a one
+ * dimensional vector register...
+ */
+#define IPA_READ_1xVECTOR_REG(reg_name, row) \
+	HWIO_ ## reg_name ## _INI(row)
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate read access from a two
+ * dimensional vector register...
+ */
+#define IPA_READ_2xVECTOR_REG(reg_name, row, col) \
+	HWIO_ ## reg_name ## _INI2(row, col)
+
+/*
+ * A macro to generate the access to scalar registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate write access to a scalar
+ * register.
+ */
+#define IPA_WRITE_SCALER_REG(reg_name, val) \
+	HWIO_ ## reg_name ## _OUT(val)
+
+/*
+ * Similar to the above, but with val masked by the register's rmsk...
+ */
+#define IPA_MASKED_WRITE_SCALER_REG(reg_name, val) \
+	out_dword(GEN_SCALER_REG_OFST(reg_name), \
+			  (GEN_SCALER_REG_RMSK(reg_name) & val))
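+
+/*
+ * Illustration (FOO is a hypothetical register name; real names and
+ * their HWIO_..._ADDR/_RMSK constants come from the *hwio.h files):
+ *
+ *   IPA_MASKED_WRITE_SCALER_REG(FOO, val)
+ *     => out_dword(HWIO_FOO_ADDR, (HWIO_FOO_RMSK & val))
+ *
+ * i.e. bits outside the register's valid-bit mask are cleared before
+ * the write is issued.
+ */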
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate write access to a one
+ * dimensional vector register...
+ */
+#define IPA_WRITE_1xVECTOR_REG(reg_name, row, val) \
+	HWIO_ ## reg_name ## _OUTI(row, val)
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate write access to a two
+ * dimensional vector register...
+ */
+#define IPA_WRITE_2xVECTOR_REG(reg_name, row, col, val) \
+	HWIO_ ## reg_name ## _OUTI2(row, col, val)
+
+/*
+ * Macro that helps generate a mapping between a register's address
+ * and where the register's value will get stored (ie. source and
+ * destination address mapping) upon dump...
+ */
+#define GEN_SRC_DST_ADDR_MAP(reg_name, sub_struct, field_name) \
+	{ GEN_SCALER_REG_OFST(reg_name), \
+	  (u32 *)&ipa_reg_save.sub_struct.field_name }
+
+/*
+ * Macro to get the value of bits 18:13, used to get rsrc cnts from
+ * IPA_DEBUG_DATA
+ */
+#define IPA_DEBUG_TESTBUS_DATA_GET_RSRC_CNT_BITS_FROM_DEBUG_DATA(x) \
+	((x & IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_BIT_MASK) >> \
+	 IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_SHIFT)
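+
+/*
+ * Worked example: for a debug-data word of 0x5A000,
+ *   (0x5A000 & 0x7E000) >> 13 = 0x2D,
+ * i.e. the resource count is the 6-bit field in bits 18:13.
+ */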
+
+/*
+ * Macro to get rsrc cnt of specific rsrc type and rsrc grp from test
+ * bus collected data
+ */
+#define IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type, rsrc_grp) \
+	IPA_DEBUG_TESTBUS_DATA_GET_RSRC_CNT_BITS_FROM_DEBUG_DATA( \
+		ipa_reg_save.ipa.testbus->ep_rsrc[rsrc_type].entry_ep \
+		[rsrc_grp].testbus_data.value)
+
+/*
+ * Macro to pluck the GSI version from RAM.
+ */
+#define IPA_REG_SAVE_GSI_VER(reg_name, var_name)	\
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.gsi.gen.var_name }
+/*
+ * Macro to define a particular register cfg entry for an EE-indexed
+ * register, covering all 3 EEs
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_GEN_EE(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE), \
+		(u32 *)&ipa_reg_save.ipa.gen_ee[IPA_HW_Q6_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE), \
+		(u32 *)&ipa_reg_save.ipa.gen_ee[IPA_HW_A7_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_HWP_EE), \
+		(u32 *)&ipa_reg_save.ipa.gen_ee[IPA_HW_HWP_EE].var_name }
+
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_FIFO(reg_name, var_name, index) \
+	{ GEN_SCALER_REG_OFST(reg_name), \
+		(u32 *)&ipa_reg_save.ipa.gsi_fifo_status[index].var_name }
+
+/*
+ * Macro to define a particular register cfg entry for the extra
+ * pipe-indexed registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(reg_name, var_name) \
+	{ 0, 0 }
+
+/*
+ * Macro to define a particular register cfg entry for all source
+ * resource group registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_GRP(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[1].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[2].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[3].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[4].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 5), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[5].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 6), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[6].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 7), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[7].var_name }
+
+/*
+ * Macro to define a particular register cfg entry for all destination
+ * resource group registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_GRP(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_grp[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_grp[1].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_grp[2].var_name }
+
+/*
+ * Macro to define a particular register cfg entry for all source
+ * resource group count registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_CNT_GRP(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[1].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[2].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[3].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[4].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 5), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[5].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 6), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[6].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 7), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[7].var_name }
+
+/*
+ * Macro to define a particular register cfg entry for all destination
+ * resource group count registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_CNT_GRP(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_cnt[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_cnt[1].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_cnt[2].var_name }
+
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE), \
+		(u32 *)&ipa_reg_save.gsi.gen_ee[IPA_HW_A7_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE), \
+		(u32 *)&ipa_reg_save.gsi.gen_ee[IPA_HW_Q6_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE), \
+		(u32 *)&ipa_reg_save.gsi.gen_ee[IPA_REG_SAVE_HWP_GSI_EE].\
+			var_name }
+
+/*
+ * Macro to define a particular register cfg entry for all GSI EE
+ * registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(reg_name, var_name) \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 0), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[1].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 2), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[2].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[3].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 4), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[4].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 5), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[5].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 6), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[6].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 7), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[7].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 8), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[8].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 9), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[9].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 10), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[10].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 11), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[11].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 12), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[12].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 13), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[13].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 14), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[14].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 15), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[15].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 16), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[16].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 17), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[17].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 18), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[18].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[1].var_name }
+
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(reg_name, var_name) \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 0), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[1].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 2), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[2].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[3].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 4), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[4].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 5), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[5].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 6), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[6].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 7), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[7].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 8), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[8].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 9), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[9].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 10), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[10].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 11), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[11].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 12), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[12].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 13), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[13].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 14), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[14].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 15), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[15].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 16), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[16].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 17), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[17].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 18), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[18].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[0].var_name }
+
+/*
+ * Macro to define a particular register cfg entry for GSI QSB debug
+ * registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_QSB_DEBUG(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[0] }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[1] }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[2] }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[3] }
+
+#define IPA_REG_SAVE_RX_SPLT_CMDQ(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[0]}, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[1]}, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[2]}, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[3]}
+
+/*
+ * IPA HW Execution Environment (EE) IDs
+ */
+enum ipa_hw_ee_e {
+	IPA_HW_A7_EE  = 0, /* A7's execution environment */
+	IPA_HW_Q6_EE  = 1, /* Q6's execution environment */
+	IPA_HW_HWP_EE = 3, /* HWP's execution environment */
+	IPA_HW_EE_MAX,     /* Max EE to support */
+};
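+
+/*
+ * Note: on the IPA side the HWP uses EE 3 (IPA_HW_HWP_EE above),
+ * while on the GSI side its EE index is IPA_REG_SAVE_HWP_GSI_EE (2);
+ * compare IPA_REG_SAVE_CFG_ENTRY_GEN_EE with
+ * IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE.
+ */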
+
+/*
+ * General IPA register save data struct (i.e. this is where register
+ * values, once read, get placed)
+ */
+struct ipa_gen_regs_s {
+	struct ipa_hwio_def_ipa_state_s
+	  ipa_state;
+	struct ipa_hwio_def_ipa_state_rx_active_s
+	  ipa_state_rx_active;
+	struct ipa_hwio_def_ipa_state_tx_wrapper_s
+	  ipa_state_tx_wrapper;
+	struct ipa_hwio_def_ipa_state_tx0_s
+	  ipa_state_tx0;
+	struct ipa_hwio_def_ipa_state_tx1_s
+	  ipa_state_tx1;
+	struct ipa_hwio_def_ipa_state_aggr_active_s
+	  ipa_state_aggr_active;
+	struct ipa_hwio_def_ipa_state_dfetcher_s
+	  ipa_state_dfetcher;
+	struct ipa_hwio_def_ipa_state_fetcher_mask_0_s
+	  ipa_state_fetcher_mask_0;
+	struct ipa_hwio_def_ipa_state_fetcher_mask_1_s
+	  ipa_state_fetcher_mask_1;
+	struct ipa_hwio_def_ipa_state_gsi_aos_s
+	  ipa_state_gsi_aos;
+	struct ipa_hwio_def_ipa_state_gsi_if_s
+	  ipa_state_gsi_if;
+	struct ipa_hwio_def_ipa_state_gsi_skip_s
+	  ipa_state_gsi_skip;
+	struct ipa_hwio_def_ipa_state_gsi_tlv_s
+	  ipa_state_gsi_tlv;
+	struct ipa_hwio_def_ipa_dpl_timer_lsb_s
+	  ipa_dpl_timer_lsb;
+	struct ipa_hwio_def_ipa_dpl_timer_msb_s
+	  ipa_dpl_timer_msb;
+	struct ipa_hwio_def_ipa_proc_iph_cfg_s
+	  ipa_proc_iph_cfg;
+	struct ipa_hwio_def_ipa_route_s
+	  ipa_route;
+	struct ipa_hwio_def_ipa_spare_reg_1_s
+	  ipa_spare_reg_1;
+	struct ipa_hwio_def_ipa_spare_reg_2_s
+	  ipa_spare_reg_2;
+	struct ipa_hwio_def_ipa_log_s
+	  ipa_log;
+	struct ipa_hwio_def_ipa_log_buf_status_cfg_s
+	  ipa_log_buf_status_cfg;
+	struct ipa_hwio_def_ipa_log_buf_status_addr_s
+	  ipa_log_buf_status_addr;
+	struct ipa_hwio_def_ipa_log_buf_status_write_ptr_s
+	  ipa_log_buf_status_write_ptr;
+	struct ipa_hwio_def_ipa_log_buf_status_ram_ptr_s
+	  ipa_log_buf_status_ram_ptr;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_s
+	  ipa_log_buf_hw_cmd_cfg;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_s
+	  ipa_log_buf_hw_cmd_addr;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_s
+	  ipa_log_buf_hw_cmd_write_ptr;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_s
+	  ipa_log_buf_hw_cmd_ram_ptr;
+	struct ipa_hwio_def_ipa_comp_hw_version_s
+	  ipa_comp_hw_version;
+	struct ipa_hwio_def_ipa_filt_rout_hash_en_s
+	  ipa_filt_rout_hash_en;
+	struct ipa_hwio_def_ipa_filt_rout_hash_flush_s
+	  ipa_filt_rout_hash_flush;
+	struct ipa_hwio_def_ipa_state_fetcher_s
+	  ipa_state_fetcher;
+	struct ipa_hwio_def_ipa_ipv4_filter_init_values_s
+	  ipa_ipv4_filter_init_values;
+	struct ipa_hwio_def_ipa_ipv6_filter_init_values_s
+	  ipa_ipv6_filter_init_values;
+	struct ipa_hwio_def_ipa_ipv4_route_init_values_s
+	  ipa_ipv4_route_init_values;
+	struct ipa_hwio_def_ipa_ipv6_route_init_values_s
+	  ipa_ipv6_route_init_values;
+	struct ipa_hwio_def_ipa_bam_activated_ports_s
+	  ipa_bam_activated_ports;
+	struct ipa_hwio_def_ipa_tx_commander_cmdq_status_s
+	  ipa_tx_commander_cmdq_status;
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_en_s
+	  ipa_log_buf_hw_snif_el_en;
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_s
+	  ipa_log_buf_hw_snif_el_wr_n_rd_sel;
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_s
+	  ipa_log_buf_hw_snif_el_cli_mux;
+	struct ipa_hwio_def_ipa_state_acl_s
+	  ipa_state_acl;
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_s
+	  ipa_sys_pkt_proc_cntxt_base;
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_s
+	  ipa_sys_pkt_proc_cntxt_base_msb;
+	struct ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_s
+	  ipa_local_pkt_proc_cntxt_base;
+	struct ipa_hwio_def_ipa_rsrc_grp_cfg_s
+	  ipa_rsrc_grp_cfg;
+	struct ipa_hwio_def_ipa_comp_cfg_s
+	  ipa_comp_cfg;
+	struct ipa_hwio_def_ipa_state_dpl_fifo_s
+	  ipa_state_dpl_fifo;
+	struct ipa_hwio_def_ipa_pipeline_disable_s
+	  ipa_pipeline_disable;
+	struct ipa_hwio_def_ipa_state_nlo_aggr_s
+	  ipa_state_nlo_aggr;
+	struct ipa_hwio_def_ipa_nlo_pp_cfg1_s
+	  ipa_nlo_pp_cfg1;
+	struct ipa_hwio_def_ipa_nlo_pp_cfg2_s
+	  ipa_nlo_pp_cfg2;
+	struct ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_s
+	  ipa_nlo_pp_ack_limit_cfg;
+	struct ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_s
+	  ipa_nlo_pp_data_limit_cfg;
+	struct ipa_hwio_def_ipa_nlo_min_dsm_cfg_s
+	  ipa_nlo_min_dsm_cfg;
+	struct ipa_hwio_def_ipa_nlo_vp_flush_req_s
+	  ipa_nlo_vp_flush_req;
+	struct ipa_hwio_def_ipa_nlo_vp_flush_cookie_s
+	  ipa_nlo_vp_flush_cookie;
+	struct ipa_hwio_def_ipa_nlo_vp_flush_ack_s
+	  ipa_nlo_vp_flush_ack;
+	struct ipa_hwio_def_ipa_nlo_vp_dsm_open_s
+	  ipa_nlo_vp_dsm_open;
+	struct ipa_hwio_def_ipa_nlo_vp_qbap_open_s
+	  ipa_nlo_vp_qbap_open;
+};
+
+/*
+ * Per-EE general IPA register save data struct
+ */
+struct ipa_reg_save_gen_ee_s {
+	struct ipa_hwio_def_ipa_irq_stts_ee_n_s
+	  ipa_irq_stts_ee_n;
+	struct ipa_hwio_def_ipa_irq_en_ee_n_s
+	  ipa_irq_en_ee_n;
+	struct ipa_hwio_def_ipa_fec_addr_ee_n_s
+	  ipa_fec_addr_ee_n;
+	struct ipa_hwio_def_ipa_fec_attr_ee_n_s
+	  ipa_fec_attr_ee_n;
+	struct ipa_hwio_def_ipa_snoc_fec_ee_n_s
+	  ipa_snoc_fec_ee_n;
+	struct ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_s
+	  ipa_holb_drop_irq_info_ee_n;
+	struct ipa_hwio_def_ipa_suspend_irq_info_ee_n_s
+	  ipa_suspend_irq_info_ee_n;
+	struct ipa_hwio_def_ipa_suspend_irq_en_ee_n_s
+	  ipa_suspend_irq_en_ee_n;
+};
+
+/*
+ * Pipe Endp IPA register save data struct
+ */
+struct ipa_reg_save_pipe_endp_s {
+	struct ipa_hwio_def_ipa_endp_init_ctrl_n_s
+	  ipa_endp_init_ctrl_n;
+	struct ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_s
+	  ipa_endp_init_ctrl_scnd_n;
+	struct ipa_hwio_def_ipa_endp_init_cfg_n_s
+	  ipa_endp_init_cfg_n;
+	struct ipa_hwio_def_ipa_endp_init_nat_n_s
+	  ipa_endp_init_nat_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_n_s
+	  ipa_endp_init_hdr_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_ext_n_s
+	  ipa_endp_init_hdr_ext_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_s
+	  ipa_endp_init_hdr_metadata_mask_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_n_s
+	  ipa_endp_init_hdr_metadata_n;
+	struct ipa_hwio_def_ipa_endp_init_mode_n_s
+	  ipa_endp_init_mode_n;
+	struct ipa_hwio_def_ipa_endp_init_aggr_n_s
+	  ipa_endp_init_aggr_n;
+	struct ipa_hwio_def_ipa_endp_init_hol_block_en_n_s
+	  ipa_endp_init_hol_block_en_n;
+	struct ipa_hwio_def_ipa_endp_init_hol_block_timer_n_s
+	  ipa_endp_init_hol_block_timer_n;
+	struct ipa_hwio_def_ipa_endp_init_deaggr_n_s
+	  ipa_endp_init_deaggr_n;
+	struct ipa_hwio_def_ipa_endp_status_n_s
+	  ipa_endp_status_n;
+	struct ipa_hwio_def_ipa_endp_init_rsrc_grp_n_s
+	  ipa_endp_init_rsrc_grp_n;
+	struct ipa_hwio_def_ipa_endp_init_seq_n_s
+	  ipa_endp_init_seq_n;
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_s
+	  ipa_endp_gsi_cfg_tlv_n;
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_s
+	  ipa_endp_gsi_cfg_aos_n;
+	struct ipa_hwio_def_ipa_endp_gsi_cfg1_n_s
+	  ipa_endp_gsi_cfg1_n;
+	struct ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_s
+	  ipa_endp_filter_router_hsh_cfg_n;
+};
+
+/*
+ * Pipe IPA register save data struct
+ */
+struct ipa_reg_save_pipe_s {
+	u8				active;
+	struct ipa_reg_save_pipe_endp_s endp;
+};
+
+/*
+ * HWP IPA register save data struct
+ */
+struct ipa_reg_save_hwp_s {
+	struct ipa_hwio_def_ipa_uc_qmb_sys_addr_s
+	  ipa_uc_qmb_sys_addr;
+	struct ipa_hwio_def_ipa_uc_qmb_local_addr_s
+	  ipa_uc_qmb_local_addr;
+	struct ipa_hwio_def_ipa_uc_qmb_length_s
+	  ipa_uc_qmb_length;
+	struct ipa_hwio_def_ipa_uc_qmb_trigger_s
+	  ipa_uc_qmb_trigger;
+	struct ipa_hwio_def_ipa_uc_qmb_pending_tid_s
+	  ipa_uc_qmb_pending_tid;
+	struct ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_s
+	  ipa_uc_qmb_completed_rd_fifo_peek;
+	struct ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_s
+	  ipa_uc_qmb_completed_wr_fifo_peek;
+	struct ipa_hwio_def_ipa_uc_qmb_misc_s
+	  ipa_uc_qmb_misc;
+	struct ipa_hwio_def_ipa_uc_qmb_status_s
+	  ipa_uc_qmb_status;
+	struct ipa_hwio_def_ipa_uc_qmb_bus_attrib_s
+	  ipa_uc_qmb_bus_attrib;
+};
+
+/*
+ * IPA TESTBUS entry struct
+ */
+struct ipa_reg_save_ipa_testbus_entry_s {
+	union ipa_hwio_def_ipa_testbus_sel_u testbus_sel;
+	union ipa_hwio_def_ipa_debug_data_u testbus_data;
+};
+
+/* IPA TESTBUS global struct */
+struct ipa_reg_save_ipa_testbus_global_s {
+	struct ipa_reg_save_ipa_testbus_entry_s
+	global[IPA_TESTBUS_SEL_INTERNAL_MAX + 1]
+	[IPA_TESTBUS_SEL_EXTERNAL_MAX + 1];
+};
+
+/* IPA TESTBUS per EP struct */
+struct ipa_reg_save_ipa_testbus_ep_s {
+	struct ipa_reg_save_ipa_testbus_entry_s
+	entry_ep[IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX + 1]
+	[IPA_TESTBUS_SEL_EXTERNAL_MAX + 1];
+};
+
+/* IPA TESTBUS per EP resource struct */
+struct ipa_reg_save_ipa_testbus_ep_rsrc_s {
+	struct ipa_reg_save_ipa_testbus_entry_s
+	  entry_ep[IPA_DEBUG_TESTBUS_RSRC_NUM_GRP];
+};
+
+/* IPA TESTBUS save data struct */
+struct ipa_reg_save_ipa_testbus_s {
+	struct ipa_reg_save_ipa_testbus_global_s global;
+	struct ipa_reg_save_ipa_testbus_ep_s
+	  ep[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_reg_save_ipa_testbus_ep_rsrc_s
+	  ep_rsrc[IPA_DEBUG_TESTBUS_RSRC_NUM_EP];
+};
+
+/*
+ * Debug IPA register save data struct
+ */
+struct ipa_reg_save_dbg_s {
+	struct ipa_hwio_def_ipa_debug_data_s
+	  ipa_debug_data;
+	struct ipa_hwio_def_ipa_step_mode_status_s
+	  ipa_step_mode_status;
+	struct ipa_hwio_def_ipa_step_mode_breakpoints_s
+	  ipa_step_mode_breakpoints;
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_s
+	  ipa_rx_splt_cmdq_cmd_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_s
+	  ipa_rx_splt_cmdq_cfg_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_s
+	  ipa_rx_splt_cmdq_data_wr_0_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_s
+	  ipa_rx_splt_cmdq_data_wr_1_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_s
+	  ipa_rx_splt_cmdq_data_wr_2_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_s
+	  ipa_rx_splt_cmdq_data_wr_3_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_s
+	  ipa_rx_splt_cmdq_data_rd_0_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_s
+	  ipa_rx_splt_cmdq_data_rd_1_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_s
+	  ipa_rx_splt_cmdq_data_rd_2_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_s
+	  ipa_rx_splt_cmdq_data_rd_3_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s
+	  ipa_rx_splt_cmdq_status_n[IPA_RX_SPLT_CMDQ_MAX];
+
+	union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_u
+		ipa_rx_hps_cmdq_cfg_wr;
+	union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_u
+		ipa_rx_hps_cmdq_cfg_rd;
+
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s
+	  ipa_rx_hps_cmdq_cmd;
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_u
+		ipa_rx_hps_cmdq_data_rd_0_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_u
+		ipa_rx_hps_cmdq_data_rd_1_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_u
+		ipa_rx_hps_cmdq_data_rd_2_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_u
+		ipa_rx_hps_cmdq_data_rd_3_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_count_u
+	  ipa_rx_hps_cmdq_count_arr[IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_status_u
+	  ipa_rx_hps_cmdq_status_arr[IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_s
+	  ipa_rx_hps_cmdq_status_empty;
+	struct ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_s
+	  ipa_rx_hps_clients_min_depth_0;
+	struct ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_s
+	  ipa_rx_hps_clients_max_depth_0;
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_cmd_s
+	  ipa_hps_dps_cmdq_cmd;
+	union ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_u
+		ipa_hps_dps_cmdq_data_rd_0_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_hps_dps_cmdq_count_u
+		ipa_hps_dps_cmdq_count_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_hps_dps_cmdq_status_u
+		ipa_hps_dps_cmdq_status_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_s
+	  ipa_hps_dps_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_cmd_s
+	  ipa_dps_tx_cmdq_cmd;
+	union ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_u
+		ipa_dps_tx_cmdq_data_rd_0_arr[
+		IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_dps_tx_cmdq_count_u
+		ipa_dps_tx_cmdq_count_arr[IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_dps_tx_cmdq_status_u
+		ipa_dps_tx_cmdq_status_arr[IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS];
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_s
+	  ipa_dps_tx_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_cmd_s
+	  ipa_ackmngr_cmdq_cmd;
+	union ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_u
+		ipa_ackmngr_cmdq_data_rd_arr[
+		IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_ackmngr_cmdq_count_u
+	  ipa_ackmngr_cmdq_count_arr[IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_ackmngr_cmdq_status_u
+		ipa_ackmngr_cmdq_status_arr[
+		IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS];
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_s
+	  ipa_ackmngr_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_s
+	  ipa_prod_ackmngr_cmdq_cmd;
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_u
+		ipa_prod_ackmngr_cmdq_data_rd_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_u
+		ipa_prod_ackmngr_cmdq_count_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_u
+		ipa_prod_ackmngr_cmdq_status_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_s
+	  ipa_prod_ackmngr_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_s
+	  ipa_ntf_tx_cmdq_cmd;
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_u
+		ipa_ntf_tx_cmdq_data_rd_0_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_count_u
+		ipa_ntf_tx_cmdq_count_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_status_u
+		ipa_ntf_tx_cmdq_status_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_s
+	  ipa_ntf_tx_cmdq_status_empty;
+
+	union ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_u
+		ipa_rsrc_mngr_db_rsrc_read_arr[IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX +
+					       1][IPA_RSCR_MNGR_DB_RSRC_ID_MAX
+						  + 1];
+	union ipa_hwio_def_ipa_rsrc_mngr_db_list_read_u
+		ipa_rsrc_mngr_db_list_read_arr[IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX +
+					       1][IPA_RSCR_MNGR_DB_RSRC_ID_MAX
+						  + 1];
+};
+
+/* Source Resource Group IPA register save data struct */
+struct ipa_reg_save_src_rsrc_grp_s {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_s
+	  ipa_src_rsrc_grp_01_rsrc_type_n;
+	struct ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_s
+	  ipa_src_rsrc_grp_23_rsrc_type_n;
+};
+
+/* Destination Resource Group IPA register save data struct */
+struct ipa_reg_save_dst_rsrc_grp_s {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_s
+	  ipa_dst_rsrc_grp_01_rsrc_type_n;
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_s
+	  ipa_dst_rsrc_grp_23_rsrc_type_n;
+};
+
+/* Source Resource Group Count IPA register save data struct */
+struct ipa_reg_save_src_rsrc_cnt_s {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_s
+	  ipa_src_rsrc_grp_0123_rsrc_type_cnt_n;
+};
+
+/* Destination Resource Group Count IPA register save data struct */
+struct ipa_reg_save_dst_rsrc_cnt_s {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_s
+	  ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n;
+};
+
+/* GSI General register save data struct */
+struct ipa_reg_save_gsi_gen_s {
+	struct gsi_hwio_def_gsi_cfg_s
+	  gsi_cfg;
+	struct gsi_hwio_def_gsi_ree_cfg_s
+	  gsi_ree_cfg;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_s
+	  ipa_gsi_top_gsi_inst_ram_n;
+};
+
+/* GSI General EE register save data struct */
+struct ipa_reg_save_gsi_gen_ee_s {
+	struct gsi_hwio_def_gsi_manager_ee_qos_n_s
+	  gsi_manager_ee_qos_n;
+	struct gsi_hwio_def_ee_n_gsi_status_s
+	  ee_n_gsi_status;
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_s
+	  ee_n_cntxt_type_irq;
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s
+	  ee_n_cntxt_type_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s
+	  ee_n_cntxt_src_gsi_ch_irq;
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s
+	  ee_n_cntxt_src_ev_ch_irq;
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s
+	  ee_n_cntxt_src_gsi_ch_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s
+	  ee_n_cntxt_src_ev_ch_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s
+	  ee_n_cntxt_src_ieob_irq;
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s
+	  ee_n_cntxt_src_ieob_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s
+	  ee_n_cntxt_gsi_irq_stts;
+	struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s
+	  ee_n_cntxt_glob_irq_stts;
+	struct gsi_hwio_def_ee_n_error_log_s
+	  ee_n_error_log;
+	struct gsi_hwio_def_ee_n_cntxt_scratch_0_s
+	  ee_n_cntxt_scratch_0;
+	struct gsi_hwio_def_ee_n_cntxt_scratch_1_s
+	  ee_n_cntxt_scratch_1;
+	struct gsi_hwio_def_ee_n_cntxt_intset_s
+	  ee_n_cntxt_intset;
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s
+	  ee_n_cntxt_msi_base_lsb;
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s
+	  ee_n_cntxt_msi_base_msb;
+};
+
+static u32 ipa_reg_save_gsi_ch_test_bus_selector_array[] = {
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_ZEROS,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_DB_ENG,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_5,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR,
+};
+
+/*
+ * GSI QSB debug bus register save data struct
+ */
+struct ipa_reg_save_gsi_test_bus_s {
+	struct
+	  gsi_hwio_def_gsi_test_bus_reg_s
+	  test_bus_reg[ARRAY_SIZE(ipa_reg_save_gsi_ch_test_bus_selector_array)];
+};
+
+/* GSI debug MCS registers save data struct */
+struct ipa_reg_save_gsi_mcs_regs_s {
+	struct
+	  gsi_hwio_def_gsi_debug_sw_rf_n_read_s
+		mcs_reg[HWIO_GSI_DEBUG_SW_RF_n_READ_MAXn + 1];
+};
+
+/* GSI debug counters save data struct */
+struct ipa_reg_save_gsi_debug_cnt_s {
+	struct
+	  gsi_hwio_def_gsi_debug_countern_s
+		cnt[HWIO_GSI_DEBUG_COUNTERn_MAXn + 1];
+};
+
+/* GSI IRAM pointers (IEP) save data struct */
+struct ipa_reg_save_gsi_iram_ptr_regs_s {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_cmd;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_s
+	  ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_db;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_s
+	  ipa_gsi_top_gsi_iram_ptr_ev_db;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_s
+	  ipa_gsi_top_gsi_iram_ptr_new_re;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_dis_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_empty;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_event_gen_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_s
+	  ipa_gsi_top_gsi_iram_ptr_timer_expired;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_write_eng_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_read_eng_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_s
+	  ipa_gsi_top_gsi_iram_ptr_uc_gp_int;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_s
+	  ipa_gsi_top_gsi_iram_ptr_int_mod_stopped;
+};
+
+/* GSI SHRAM pointers save data struct */
+struct ipa_reg_save_gsi_shram_ptr_regs_s {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr;
+};
+
+/* GSI debug register save data struct */
+struct ipa_reg_save_gsi_debug_s {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_s
+	  ipa_gsi_top_gsi_debug_busy_reg;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_s
+	  ipa_gsi_top_gsi_debug_event_pending;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_s
+	  ipa_gsi_top_gsi_debug_timer_pending;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_s
+	  ipa_gsi_top_gsi_debug_rd_wr_pending;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_s
+	  ipa_gsi_top_gsi_debug_pc_from_sw;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_s
+	  ipa_gsi_top_gsi_debug_sw_stall;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_s
+	  ipa_gsi_top_gsi_debug_pc_for_debug;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_s
+	  ipa_gsi_top_gsi_debug_qsb_log_err_trns_id;
+	struct ipa_reg_save_gsi_test_bus_s		gsi_test_bus;
+	struct ipa_reg_save_gsi_mcs_regs_s		gsi_mcs_regs;
+	struct ipa_reg_save_gsi_debug_cnt_s		gsi_cnt_regs;
+	struct ipa_reg_save_gsi_iram_ptr_regs_s		gsi_iram_ptrs;
+	struct ipa_reg_save_gsi_shram_ptr_regs_s	gsi_shram_ptrs;
+};
+
+/* GSI MCS channel scratch registers save data struct */
+struct ipa_reg_save_gsi_mcs_channel_scratch_regs_s {
+	struct gsi_hwio_def_gsi_shram_n_s
+	  scratch4;
+	struct gsi_hwio_def_gsi_shram_n_s
+	  scratch5;
+};
+
+/* GSI Channel Context register save data struct */
+struct ipa_reg_save_gsi_ch_cntxt_per_ep_s {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s
+	  ee_n_gsi_ch_k_cntxt_0;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s
+	  ee_n_gsi_ch_k_cntxt_1;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s
+	  ee_n_gsi_ch_k_cntxt_2;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s
+	  ee_n_gsi_ch_k_cntxt_3;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s
+	  ee_n_gsi_ch_k_cntxt_4;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s
+	  ee_n_gsi_ch_k_cntxt_5;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s
+	  ee_n_gsi_ch_k_cntxt_6;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s
+	  ee_n_gsi_ch_k_cntxt_7;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s
+	  ee_n_gsi_ch_k_re_fetch_read_ptr;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s
+	  ee_n_gsi_ch_k_re_fetch_write_ptr;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s
+	  ee_n_gsi_ch_k_qos;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s
+	  ee_n_gsi_ch_k_scratch_0;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s
+	  ee_n_gsi_ch_k_scratch_1;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s
+	  ee_n_gsi_ch_k_scratch_2;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s
+	  ee_n_gsi_ch_k_scratch_3;
+	struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s
+	  gsi_map_ee_n_ch_k_vp_table;
+	struct ipa_reg_save_gsi_mcs_channel_scratch_regs_s mcs_channel_scratch;
+};
+
+/* GSI Event Context register save data struct */
+struct ipa_reg_save_gsi_evt_cntxt_per_ep_s {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s
+	  ee_n_ev_ch_k_cntxt_0;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s
+	  ee_n_ev_ch_k_cntxt_1;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s
+	  ee_n_ev_ch_k_cntxt_2;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s
+	  ee_n_ev_ch_k_cntxt_3;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s
+	  ee_n_ev_ch_k_cntxt_4;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s
+	  ee_n_ev_ch_k_cntxt_5;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s
+	  ee_n_ev_ch_k_cntxt_6;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s
+	  ee_n_ev_ch_k_cntxt_7;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s
+	  ee_n_ev_ch_k_cntxt_8;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s
+	  ee_n_ev_ch_k_cntxt_9;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s
+	  ee_n_ev_ch_k_cntxt_10;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s
+	  ee_n_ev_ch_k_cntxt_11;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s
+	  ee_n_ev_ch_k_cntxt_12;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s
+	  ee_n_ev_ch_k_cntxt_13;
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s
+	  ee_n_ev_ch_k_scratch_0;
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s
+	  ee_n_ev_ch_k_scratch_1;
+	struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s
+	  gsi_debug_ee_n_ev_k_vp_table;
+};
+
+/* GSI FIFO status register save data struct */
+struct ipa_reg_save_gsi_fifo_status_s {
+	union ipa_hwio_def_ipa_gsi_fifo_status_ctrl_u
+		gsi_fifo_status_ctrl;
+	union ipa_hwio_def_ipa_gsi_tlv_fifo_status_u
+		gsi_tlv_fifo_status;
+	union ipa_hwio_def_ipa_gsi_aos_fifo_status_u
+		gsi_aos_fifo_status;
+};
+
+/* GSI Channel Context register save top level data struct */
+struct ipa_reg_save_gsi_ch_cntxt_s {
+	struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
+		a7[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7];
+	struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
+		uc[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC];
+};
+
+/* GSI Event Context register save top level data struct */
+struct ipa_reg_save_gsi_evt_cntxt_s {
+	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
+		a7[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7];
+	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
+		uc[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC];
+};
+
+/* Top level IPA register save data struct */
+struct ipa_regs_save_hierarchy_s {
+	struct ipa_gen_regs_s
+		gen;
+	struct ipa_reg_save_gen_ee_s
+		gen_ee[IPA_HW_EE_MAX];
+	struct ipa_reg_save_hwp_s
+		hwp;
+	struct ipa_reg_save_dbg_s
+		dbg;
+	struct ipa_reg_save_ipa_testbus_s
+		*testbus;
+	struct ipa_reg_save_pipe_s
+		pipes[IPA_HW_PIPE_ID_MAX];
+	struct ipa_reg_save_src_rsrc_grp_s
+		src_rsrc_grp[IPA_HW_SRC_RSRP_TYPE_MAX];
+	struct ipa_reg_save_dst_rsrc_grp_s
+		dst_rsrc_grp[IPA_HW_DST_RSRP_TYPE_MAX];
+	struct ipa_reg_save_src_rsrc_cnt_s
+		src_rsrc_cnt[IPA_HW_SRC_RSRP_TYPE_MAX];
+	struct ipa_reg_save_dst_rsrc_cnt_s
+		dst_rsrc_cnt[IPA_HW_DST_RSRP_TYPE_MAX];
+};
+
+/* Top level GSI register save data struct */
+struct gsi_regs_save_hierarchy_s {
+	u32 fw_ver;
+	struct ipa_reg_save_gsi_gen_s		gen;
+	struct ipa_reg_save_gsi_gen_ee_s	gen_ee[IPA_REG_SAVE_GSI_NUM_EE];
+	struct ipa_reg_save_gsi_ch_cntxt_s	ch_cntxt;
+	struct ipa_reg_save_gsi_evt_cntxt_s	evt_cntxt;
+	struct ipa_reg_save_gsi_debug_s		debug;
+};
+
+/* Source resources for a resource group */
+struct ipa_reg_save_src_rsrc_cnts_s {
+	u8 pkt_cntxt;
+	u8 descriptor_list;
+	u8 data_descriptor_buffer;
+	u8 hps_dmars;
+	u8 reserved_acks;
+};
+
+/* Destination resources for a resource group */
+struct ipa_reg_save_dst_rsrc_cnts_s {
+	u8 reserved_sectors;
+	u8 dps_dmars;
+};
+
+/* Resource count structure for a resource group */
+struct ipa_reg_save_rsrc_cnts_per_grp_s {
+	/* Resource group number */
+	u8 resource_group;
+	/* Source resources for a resource group */
+	struct ipa_reg_save_src_rsrc_cnts_s src;
+	/* Destination resources for a resource group */
+	struct ipa_reg_save_dst_rsrc_cnts_s dst;
+};
+
+/* Top level resource count structure */
+struct ipa_reg_save_rsrc_cnts_s {
+	/* Resource count structure for PCIE group */
+	struct ipa_reg_save_rsrc_cnts_per_grp_s pcie;
+	/* Resource count structure for DDR group */
+	struct ipa_reg_save_rsrc_cnts_per_grp_s ddr;
+};
+
+/*
+ * Top level IPA and GSI registers save data struct
+ */
+struct regs_save_hierarchy_s {
+	struct ipa_regs_save_hierarchy_s
+		ipa;
+	struct gsi_regs_save_hierarchy_s
+		gsi;
+	bool
+		pkt_ctntx_active[IPA_HW_PKT_CTNTX_MAX];
+	union ipa_hwio_def_ipa_ctxh_ctrl_u
+		pkt_ctntxt_lock;
+	enum ipa_hw_pkt_cntxt_state_e
+		pkt_cntxt_state[IPA_HW_PKT_CTNTX_MAX];
+	struct ipa_pkt_ctntx_s
+		pkt_ctntx[IPA_HW_PKT_CTNTX_MAX];
+	struct ipa_reg_save_rsrc_cnts_s
+		rsrc_cnts;
+	struct ipa_reg_save_gsi_fifo_status_s
+		gsi_fifo_status[IPA_HW_PIPE_ID_MAX];
+};
+
+/*
+ * The following section deals with handling IPA registers' memory
+ * access relative to pre-defined memory protection schemes
+ * (ie. "access control").
+ *
+ * In a nut shell, the intent of the data stuctures below is to allow
+ * higher level register accessors to be unaware of what really is
+ * going on at the lowest level (ie. real vs non-real access).  This
+ * methodology is also designed to allow for platform specific "access
+ * maps."
+ */
+
+/*
+ * Function for doing an actual read
+ */
+static inline u32
+act_read(void __iomem *addr)
+{
+	u32 val = my_in_dword(addr);
+
+	return val;
+}
+
+/*
+ * Function for doing an actual write
+ */
+static inline void
+act_write(void __iomem *addr, u32 val)
+{
+	my_out_dword(addr, val);
+}
+
+/*
+ * Function that pretends to do a read
+ */
+static inline u32
+nop_read(void __iomem *addr)
+{
+	return IPA_MEM_INIT_VAL;
+}
+
+/*
+ * Function that pretends to do a write
+ */
+static inline void
+nop_write(void __iomem *addr, u32 val)
+{
+}
+
+/*
+ * The following are used to define struct reg_access_funcs_s below...
+ */
+typedef u32 (*reg_read_func_t)(
+	void __iomem *addr);
+typedef void (*reg_write_func_t)(
+	void __iomem *addr,
+	u32 val);
+
+/*
+ * The following is used to define io_matrix[] below...
+ */
+struct reg_access_funcs_s {
+	reg_read_func_t  read;
+	reg_write_func_t write;
+};
+
+/*
+ * The following will be used to appropriately index into the
+ * read/write combos defined in io_matrix[] below...
+ */
+#define AA_COMBO 0 /* actual read, actual write */
+#define AN_COMBO 1 /* actual read, no-op write  */
+#define NA_COMBO 2 /* no-op read,  actual write */
+#define NN_COMBO 3 /* no-op read,  no-op write  */
+
+/*
+ * The following will be used to dictate registers' access methods
+ * relative to the state of secure debug...whether it's enabled or
+ * disabled.
+ *
+ * NOTE: The table below defines all access combinations.
+ */
+static struct reg_access_funcs_s io_matrix[] = {
+	{ act_read, act_write }, /* the AA_COMBO */
+	{ act_read, nop_write }, /* the AN_COMBO */
+	{ nop_read, act_write }, /* the NA_COMBO */
+	{ nop_read, nop_write }, /* the NN_COMBO */
+};
+
+/*
+ * The following will be used to define and drive IPA's register
+ * access rules.
+ */
+struct reg_mem_access_map_t {
+	u32 addr_range_begin;
+	u32 addr_range_end;
+	struct reg_access_funcs_s *access[2];
+};
+
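+/*
+ * Illustrative sketch only (not part of this change): one plausible
+ * way a higher-level accessor could consult a reg_mem_access_map_t
+ * table.  The map array/length arguments and the use of the secure
+ * debug state (SD_ENABLED/SD_DISABLED) as the index into access[2]
+ * are assumptions made for illustration.
+ */
+static inline u32
+example_mapped_read(struct reg_mem_access_map_t *map, u32 num_entries,
+		    u32 sd_state, void __iomem *base, u32 ofst)
+{
+	u32 i;
+
+	for (i = 0; i < num_entries; i++)
+		if (ofst >= map[i].addr_range_begin &&
+		    ofst <= map[i].addr_range_end)
+			return map[i].access[sd_state]->read(base + ofst);
+
+	/* Treat unmapped ranges as a no-op read */
+	return IPA_MEM_INIT_VAL;
+}
+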
+#endif /* #if !defined(_IPA_REG_DUMP_H_) */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index d575a2e..1bf724c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4411,8 +4411,10 @@
 	if (res)
 		IPAERR("uC panic handler failed %d\n", res);
 
-	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) != 0)
+	if (atomic_read(&ipa3_ctx->ipa_clk_vote)) {
 		ipahal_print_all_regs(false);
+		ipa_save_registers();
+	}
 
 	return NOTIFY_DONE;
 }
@@ -4582,6 +4584,9 @@
 	if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
 		ipa3_proxy_clk_vote();
 
+	/* The following will retrieve and save the gsi fw version */
+	ipa_save_gsi_ver();
+
 	if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
 		ipa3_ctx->pdev)) {
 		IPAERR("fail to init ipahal\n");
@@ -5166,6 +5171,32 @@
 	return 0;
 }
 
+/*
+ * SCM call to check if secure dump is allowed.
+ *
+ * Returns true if secure dump is allowed, false otherwise.
+ */
+#define TZ_UTIL_GET_SEC_DUMP_STATE  0x10
+static bool ipa_is_mem_dump_allowed(void)
+{
+	struct scm_desc desc = {0};
+	int ret = 0;
+
+	desc.args[0] = 0;
+	desc.arginfo = 0;
+
+	ret = scm_call2(
+		SCM_SIP_FNID(SCM_SVC_UTIL, TZ_UTIL_GET_SEC_DUMP_STATE),
+		&desc);
+	if (ret) {
+		IPAERR("SCM DUMP_STATE call failed\n");
+		return false;
+	}
+
+	return (desc.ret[0] == 1);
+}
+
 /**
  * ipa3_pre_init() - Initialize the IPA Driver.
  * This part contains all initialization which doesn't require IPA HW, such
@@ -5248,6 +5279,37 @@
 	ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config;
 	ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0];
 	ipa3_ctx->mhi_evid_limits[1] = resource_p->mhi_evid_limits[1];
+	ipa3_ctx->entire_ipa_block_size = resource_p->entire_ipa_block_size;
+	ipa3_ctx->do_register_collection_on_crash =
+	    resource_p->do_register_collection_on_crash;
+	ipa3_ctx->do_testbus_collection_on_crash =
+	    resource_p->do_testbus_collection_on_crash;
+	ipa3_ctx->do_non_tn_collection_on_crash =
+	    resource_p->do_non_tn_collection_on_crash;
+	ipa3_ctx->secure_debug_check_action =
+		resource_p->secure_debug_check_action;
+
+	if (ipa3_ctx->secure_debug_check_action == USE_SCM) {
+		if (ipa_is_mem_dump_allowed())
+			ipa3_ctx->sd_state = SD_ENABLED;
+		else
+			ipa3_ctx->sd_state = SD_DISABLED;
+	} else {
+		if (ipa3_ctx->secure_debug_check_action == OVERRIDE_SCM_TRUE)
+			ipa3_ctx->sd_state = SD_ENABLED;
+		else
+			/* secure_debug_check_action == OVERRIDE_SCM_FALSE */
+			ipa3_ctx->sd_state = SD_DISABLED;
+	}
+
+	if (ipa3_ctx->sd_state == SD_ENABLED) {
+		/* secure debug is enabled. */
+		IPADBG("secure debug enabled\n");
+	} else {
+		/* secure debug is disabled. */
+		IPADBG("secure debug disabled\n");
+		ipa3_ctx->do_testbus_collection_on_crash = false;
+	}
 
 	WARN(ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL,
 		"Non NORMAL IPA HW mode, is this emulation platform ?");
@@ -5366,6 +5428,14 @@
 	    resource_p->ipa_mem_size);
 
 	/*
+	 * Setup access for register collection/dump on crash
+	 */
+	if (ipa_reg_save_init(IPA_MEM_INIT_VAL) != 0) {
+		result = -EFAULT;
+		goto fail_gsi_map;
+	}
+
+	/*
 	 * Since we now know where the transport's registers live,
 	 * let's set up access to them.  This is done since subsequent
 	 * functions, that deal with the transport, require the
@@ -5683,6 +5753,8 @@
 fail_init_hw:
 	gsi_unmap_base();
 fail_gsi_map:
+	if (ipa3_ctx->reg_collection_base)
+		iounmap(ipa3_ctx->reg_collection_base);
 	iounmap(ipa3_ctx->mmio);
 fail_remap:
 	ipa3_disable_clks();
@@ -6151,6 +6223,59 @@
 		       ipa_drv_res->emulator_intcntrlr_mem_size);
 	}
 
+	ipa_drv_res->entire_ipa_block_size = 0x100000;
+	result = of_property_read_u32(pdev->dev.of_node,
+				      "qcom,entire-ipa-block-size",
+				      &ipa_drv_res->entire_ipa_block_size);
+	IPADBG(": entire_ipa_block_size = %d\n",
+	       ipa_drv_res->entire_ipa_block_size);
+
+	/*
+	 * We'll read register-collection-on-crash here, but log it
+	 * later below because its value may change based on other
+	 * subsequent dtsi reads.
+	 */
+	ipa_drv_res->do_register_collection_on_crash =
+	    of_property_read_bool(pdev->dev.of_node,
+				  "qcom,register-collection-on-crash");
+	/*
+	 * We'll read testbus-collection-on-crash here...
+	 */
+	ipa_drv_res->do_testbus_collection_on_crash =
+	    of_property_read_bool(pdev->dev.of_node,
+				  "qcom,testbus-collection-on-crash");
+	IPADBG(": doing testbus collection on crash = %u\n",
+	       ipa_drv_res->do_testbus_collection_on_crash);
+
+	/*
+	 * We'll read non-tn-collection-on-crash here...
+	 */
+	ipa_drv_res->do_non_tn_collection_on_crash =
+	    of_property_read_bool(pdev->dev.of_node,
+				  "qcom,non-tn-collection-on-crash");
+	IPADBG(": doing non-tn collection on crash = %u\n",
+	       ipa_drv_res->do_non_tn_collection_on_crash);
+
+	if (ipa_drv_res->do_testbus_collection_on_crash ||
+		ipa_drv_res->do_non_tn_collection_on_crash)
+		ipa_drv_res->do_register_collection_on_crash = true;
+
+	IPADBG(": doing register collection on crash = %u\n",
+	       ipa_drv_res->do_register_collection_on_crash);
+
+	result = of_property_read_u32(
+		pdev->dev.of_node,
+		"qcom,secure-debug-check-action",
+		&ipa_drv_res->secure_debug_check_action);
+	if (result ||
+		(ipa_drv_res->secure_debug_check_action != 0 &&
+		 ipa_drv_res->secure_debug_check_action != 1 &&
+		 ipa_drv_res->secure_debug_check_action != 2))
+		ipa_drv_res->secure_debug_check_action = USE_SCM;
+
+	IPADBG(": secure-debug-check-action = %d\n",
+		   ipa_drv_res->secure_debug_check_action);
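+
+	/*
+	 * Illustrative devicetree fragment (an assumption added for
+	 * documentation, not part of this change) showing how the
+	 * properties read above might be specified:
+	 *
+	 *	qcom,entire-ipa-block-size = <0x100000>;
+	 *	qcom,register-collection-on-crash;
+	 *	qcom,testbus-collection-on-crash;
+	 *	qcom,non-tn-collection-on-crash;
+	 *	qcom,secure-debug-check-action = <0>;
+	 */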
+
 	return 0;
 }
 
@@ -6560,6 +6685,7 @@
 
 	smmu_info.present[IPA_SMMU_CB_AP] = true;
 	ipa3_ctx->pdev = dev;
+	cb->next_addr = cb->va_end;
 
 	return 0;
 }
@@ -6948,8 +7074,18 @@
 
 	switch (in->smmu_client) {
 	case IPA_SMMU_WLAN_CLIENT:
-		is_smmu_enable = !(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] |
-			ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
+		if (ipa3_ctx->ipa_wdi3_over_gsi)
+			is_smmu_enable =
+				!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] |
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
+		else
+			is_smmu_enable =
+				!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] |
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
+		break;
+	case IPA_SMMU_AP_CLIENT:
+		is_smmu_enable =
+			!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]);
 		break;
 	default:
 		is_smmu_enable = false;
@@ -6981,7 +7117,9 @@
 		ret = PTR_ERR(ipa3_ctx->mbox);
 		if (ret != -EPROBE_DEFER)
 			IPAERR("mailbox channel request failed, ret=%d\n", ret);
-		goto cleanup;
+
+		ipa3_ctx->mbox = NULL;
+		return;
 	}
 
 	/* prepare the QMP packet to send */
@@ -6996,8 +7134,10 @@
 	}
 
 cleanup:
-	ipa3_ctx->mbox = NULL;
-	mbox_free_channel(ipa3_ctx->mbox);
+	if (ipa3_ctx->mbox) {
+		mbox_free_channel(ipa3_ctx->mbox);
+		ipa3_ctx->mbox = NULL;
+	}
 }
 
 /**************************************************************
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 27b724f..e4daeb6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <asm/barrier.h>
@@ -1015,7 +1015,7 @@
 	return result;
 }
 
-static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
+int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
 	unsigned long chan_hdl)
 {
 	enum gsi_status gsi_res;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 4aaa9c9..cb932cb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -34,7 +34,7 @@
 };
 
 
-const char *ipa3_event_name[] = {
+const char *ipa3_event_name[IPA_EVENT_MAX_NUM] = {
 	__stringify(WLAN_CLIENT_CONNECT),
 	__stringify(WLAN_CLIENT_DISCONNECT),
 	__stringify(WLAN_CLIENT_POWER_SAVE_MODE),
@@ -1556,7 +1556,7 @@
 	int cnt = 0;
 	int i;
 
-	for (i = 0; i < IPA_EVENT_MAX_NUM; i++) {
+	for (i = 0; i < ARRAY_SIZE(ipa3_event_name); i++) {
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 				"msg[%u:%27s] W:%u R:%u\n", i,
 				ipa3_event_name[i],
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index f8abb5c..1a3da78 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -768,7 +768,8 @@
 	ipa3_dec_release_wakelock();
 	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
 		GSI_CHAN_MODE_CALLBACK);
-	if (ret != GSI_STATUS_SUCCESS) {
+	if ((ret != GSI_STATUS_SUCCESS) &&
+		!atomic_read(&sys->curr_polling_state)) {
 		if (ret == -GSI_STATUS_PENDING_IRQ) {
 			ipa3_inc_acquire_wakelock();
 			atomic_set(&sys->curr_polling_state, 1);
@@ -2836,8 +2837,7 @@
 
 		IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts);
 		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
-			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes ||
-			status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
+			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
 			IPAERR("status fields invalid\n");
 			WARN_ON(1);
 			goto bail;
@@ -3036,7 +3036,9 @@
 	INIT_LIST_HEAD(&rx_pkt->link);
 	list_add_tail(&rx_pkt->link, head);
 
-	if (notify->evt_id == GSI_CHAN_EVT_EOT) {
+	/* Check added for handling LAN consumer packet without EOT flag */
+	if (notify->evt_id == GSI_CHAN_EVT_EOT ||
+		sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
 	/* go over the list backward to save computations on updating length */
 		list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
 			rx_skb = rx_pkt->data.skb;
@@ -3066,6 +3068,10 @@
 	struct ipa3_sys_context *coal_sys;
 	int ipa_ep_idx;
 
+	if (!notify) {
+		IPAERR_RL("gsi_chan_xfer_notify is null\n");
+		return;
+	}
 	rx_skb = handle_skb_completion(notify, true);
 
 	if (rx_skb) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c
index 9d6ea9e..06b6bfc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/errno.h>
@@ -80,6 +80,9 @@
 	{"qcom,use-rg10-limitation-mitigation", false},
 	{"qcom,do-not-use-ch-gsi-20",           false},
 	{"qcom,use-ipa-pm",                     true},
+	{"qcom,register-collection-on-crash",   true},
+	{"qcom,testbus-collection-on-crash",    true},
+	{"qcom,non-tn-collection-on-crash",     true},
 };
 
 static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_0[] = {
@@ -93,6 +96,9 @@
 	{"qcom,use-rg10-limitation-mitigation", false},
 	{"qcom,do-not-use-ch-gsi-20",           false},
 	{"qcom,use-ipa-pm",                     false},
+	{"qcom,register-collection-on-crash",   true},
+	{"qcom,testbus-collection-on-crash",    true},
+	{"qcom,non-tn-collection-on-crash",     true},
 };
 
 static struct dtsi_replacement_bool ipa3_plat_drv_bool_3_5_1[] = {
@@ -106,6 +112,9 @@
 	{"qcom,use-rg10-limitation-mitigation", false},
 	{"qcom,do-not-use-ch-gsi-20",           false},
 	{"qcom,use-ipa-pm",                     false},
+	{"qcom,register-collection-on-crash",   true},
+	{"qcom,testbus-collection-on-crash",    true},
+	{"qcom,non-tn-collection-on-crash",     true},
 };
 
 static struct dtsi_replacement_bool_table
@@ -126,6 +135,7 @@
 	{"qcom,ee",                             0},
 	{"qcom,msm-bus,num-cases",              5},
 	{"emulator-bar0-offset",                0x01C00000},
+	{"qcom,entire-ipa-block-size",          0x00100000},
 };
 
 static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_0[] = {
@@ -135,6 +145,7 @@
 	{"qcom,lan-rx-ring-size",               192},
 	{"qcom,ee",                             0},
 	{"emulator-bar0-offset",                0x01C00000},
+	{"qcom,entire-ipa-block-size",          0x00100000},
 };
 
 static struct dtsi_replacement_u32 ipa3_plat_drv_u32_3_5_1[] = {
@@ -144,6 +155,7 @@
 	{"qcom,lan-rx-ring-size",               192},
 	{"qcom,ee",                             0},
 	{"emulator-bar0-offset",                0x01C00000},
+	{"qcom,entire-ipa-block-size",          0x00100000},
 };
 
 static struct dtsi_replacement_u32_table ipa3_plat_drv_u32_table[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 6ce614f..d1422db 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -54,7 +54,7 @@
 }
 
 static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
-	u32 hdr_base_addr)
+	u64 hdr_base_addr)
 {
 	struct ipa3_hdr_proc_ctx_entry *entry;
 	int ret;
@@ -92,7 +92,8 @@
 				entry->hdr->phys_base,
 				hdr_base_addr,
 				entry->hdr->offset_entry,
-				entry->l2tp_params);
+				entry->l2tp_params,
+				ipa3_ctx->use_64_bit_dma_mask);
 		if (ret)
 			return ret;
 	}
@@ -109,10 +110,10 @@
  *
  * Returns:	0 on success, negative on failure
  */
-static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
+static int ipa3_generate_hdr_proc_ctx_hw_tbl(u64 hdr_sys_addr,
 	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
 {
-	u32 hdr_base_addr;
+	u64 hdr_base_addr;
 
 	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 7eb6421..ccd5655 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -235,8 +235,8 @@
 #define IPA_PIPE_MEM_START_OFST (0x0)
 #define IPA_PIPE_MEM_SIZE (0x0)
 #define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
-			       x == IPA_MODE_MOBILE_AP_WAN || \
-			       x == IPA_MODE_MOBILE_AP_WLAN)
+				   x == IPA_MODE_MOBILE_AP_WAN || \
+				   x == IPA_MODE_MOBILE_AP_WLAN)
 #define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
 #define IPA_A5_MUX_HEADER_LENGTH (8)
 
@@ -289,6 +289,19 @@
 #define IPA_FWS_PATH_3_5_1   "ipa/3.5.1/ipa_fws.elf"
 #define IPA_FWS_PATH_4_5     "ipa/4.5/ipa_fws.elf"
 
+/*
+ * The following will be used for determining/using access control
+ * policy.
+ */
+#define USE_SCM            0 /* use scm call to determine policy */
+#define OVERRIDE_SCM_TRUE  1 /* override scm call with true */
+#define OVERRIDE_SCM_FALSE 2 /* override scm call with false */
+
+#define SD_ENABLED  0 /* secure debug enabled. */
+#define SD_DISABLED 1 /* secure debug disabled. */
+
+#define IPA_MEM_INIT_VAL 0xFFFFFFFF
+
 #ifdef CONFIG_COMPAT
 #define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_HDR, \
@@ -1664,6 +1677,13 @@
 	bool use_ipa_pm;
 	bool vlan_mode_iface[IPA_VLAN_IF_MAX];
 	bool wdi_over_pcie;
+	u32 entire_ipa_block_size;
+	bool do_register_collection_on_crash;
+	bool do_testbus_collection_on_crash;
+	bool do_non_tn_collection_on_crash;
+	u32 secure_debug_check_action;
+	u32 sd_state;
+	void __iomem *reg_collection_base;
 	struct ipa3_wdi2_ctx wdi2_ctx;
 	struct mbox_client mbox_client;
 	struct mbox_chan *mbox;
@@ -1707,6 +1727,11 @@
 	bool use_ipa_pm;
 	struct ipa_pm_init_params pm_init;
 	bool wdi_over_pcie;
+	u32 entire_ipa_block_size;
+	bool do_register_collection_on_crash;
+	bool do_testbus_collection_on_crash;
+	bool do_non_tn_collection_on_crash;
+	u32 secure_debug_check_action;
 };
 
 /**
@@ -2701,4 +2726,55 @@
 	unsigned long *size_ptr);
 irq_handler_t ipa3_get_isr(void);
 void ipa_pc_qmp_enable(void);
+#if defined(CONFIG_IPA3_REGDUMP)
+int ipa_reg_save_init(u32 value);
+void ipa_save_registers(void);
+void ipa_save_gsi_ver(void);
+#else
+static inline int ipa_reg_save_init(u32 value) { return 0; }
+static inline void ipa_save_registers(void) { }
+static inline void ipa_save_gsi_ver(void) { }
+#endif
+
+#ifdef CONFIG_IPA_ETH
+int ipa_eth_init(void);
+void ipa_eth_exit(void);
+#else
+static inline int ipa_eth_init(void) { return 0; }
+static inline void ipa_eth_exit(void) { }
+#endif /* CONFIG_IPA_ETH */
+int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
+	unsigned long chan_hdl);
+#ifdef CONFIG_IPA3_MHI_PRIME_MANAGER
+int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot prot);
+int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot);
+int ipa_mpm_notify_wan_state(void);
+int ipa_mpm_mhip_ul_data_stop(enum ipa_usb_teth_prot xdci_teth_prot);
+int ipa3_is_mhip_offload_enabled(void);
+#else
+static inline int ipa_mpm_mhip_xdci_pipe_enable(
+	enum ipa_usb_teth_prot prot)
+{
+	return 0;
+}
+static inline int ipa_mpm_mhip_xdci_pipe_disable(
+	enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	return 0;
+}
+static inline int ipa_mpm_notify_wan_state(void)
+{
+	return 0;
+}
+static inline int ipa_mpm_mhip_ul_data_stop(
+	enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	return 0;
+}
+static inline int ipa3_is_mhip_offload_enabled(void)
+{
+	return 0;
+}
+#endif /* CONFIG_IPA3_MHI_PRIME_MANAGER */
+
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index b522776..df9aceb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/fs.h>
@@ -392,6 +392,8 @@
 	uint8_t mac[IPA_MAC_ADDR_SIZE];
 	uint8_t mac2[IPA_MAC_ADDR_SIZE];
 
+	if (!buff)
+		return -EINVAL;
 	if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) {
 		/* debug print */
 		event_ex_cur_con = buff;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
index ebe62c2..bc0f891 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
@@ -1072,7 +1072,6 @@
 		}
 	}
 
-	imp_ctx->state = IMP_PROBED;
 	mutex_unlock(&imp_ctx->mutex);
 
 	IMP_FUNC_EXIT();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
new file mode 100644
index 0000000..3bb865d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -0,0 +1,2397 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mhi.h>
+#include <linux/msm_gsi.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include "../ipa_common_i.h"
+#include "ipa_i.h"
+
+#define IPA_MPM_DRV_NAME "ipa_mpm"
+
+#define IPA_MPM_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_MPM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MPM_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_MPM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MPM_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_MPM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MPM_FUNC_ENTRY() \
+	IPA_MPM_DBG("ENTRY\n")
+#define IPA_MPM_FUNC_EXIT() \
+	IPA_MPM_DBG("EXIT\n")
+
+#define IPA_MPM_MAX_MHIP_CHAN 3
+
+#define IPA_MPM_NUM_RING_DESC 0x400
+#define IPA_MPM_RING_LEN (IPA_MPM_NUM_RING_DESC - 10)
+
+#define IPA_MPM_MHI_HOST_UL_CHANNEL 4
+#define IPA_MPM_MHI_HOST_DL_CHANNEL  5
+#define DEFAULT_AGGR_TIME_LIMIT 1000 /* 1ms */
+#define DEFAULT_AGGR_PKT_LIMIT 0
+#define TRE_BUFF_SIZE 32768
+#define IPA_HOLB_TMR_EN 0x1
+#define IPA_HOLB_TMR_DIS 0x0
+#define RNDIS_IPA_DFLT_RT_HDL 0
+#define IPA_POLL_FOR_EMPTINESS_NUM 50
+#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
+#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
+#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
+
+enum mhip_re_type {
+	MHIP_RE_XFER = 0x2,
+	MHIP_RE_NOP = 0x4,
+};
+
+enum ipa_mpm_mhi_ch_id_type {
+	IPA_MPM_MHIP_CH_ID_0,
+	IPA_MPM_MHIP_CH_ID_1,
+	IPA_MPM_MHIP_CH_ID_2,
+	IPA_MPM_MHIP_CH_ID_MAX,
+};
+
+enum ipa_mpm_dma_data_direction {
+	DMA_HIPA_BIDIRECTIONAL = 0,
+	DMA_TO_HIPA = 1,
+	DMA_FROM_HIPA = 2,
+	DMA_HIPA_NONE = 3,
+};
+
+enum ipa_mpm_ipa_teth_client_type {
+	IPA_MPM_MHIP_USB,
+	IPA_MPM_MHIP_WIFI,
+};
+
+enum ipa_mpm_mhip_client_type {
+	IPA_MPM_MHIP_INIT,
+	/* USB RMNET CLIENT */
+	IPA_MPM_MHIP_USB_RMNET,
+	/* USB RNDIS / WIFI CLIENT */
+	IPA_MPM_MHIP_TETH,
+	/* USB DPL CLIENT */
+	IPA_MPM_MHIP_USB_DPL,
+	IPA_MPM_MHIP_NONE,
+};
+
+enum ipa_mpm_start_stop_type {
+	STOP,
+	START,
+};
+
+enum ipa_mpm_clk_vote_type {
+	CLK_ON,
+	CLK_OFF,
+};
+
+enum mhip_status_type {
+	MHIP_STATUS_SUCCESS,
+	MHIP_STATUS_NO_OP,
+	MHIP_STATUS_FAIL,
+	MHIP_STATUS_BAD_STATE,
+	MHIP_STATUS_EP_NOT_FOUND,
+	MHIP_STATUS_EP_NOT_READY,
+};
+
+enum mhip_smmu_domain_type {
+	MHIP_SMMU_DOMAIN_IPA,
+	MHIP_SMMU_DOMAIN_PCIE,
+	MHIP_SMMU_DOMAIN_NONE,
+};
+
+/* Each pair of UL/DL channels is defined below */
+static const struct mhi_device_id mhi_driver_match_table[] = {
+	{ .chan = "IP_HW_MHIP_0" }, // for rmnet pipes
+	{ .chan = "IP_HW_MHIP_1" }, // for MHIP teth pipes - rndis/wifi
+	{ .chan = "IP_HW_ADPL" }, // DPL/ODL DL pipe
+};
+
+/*
+ * MHI PRIME GSI Descriptor format that Host IPA uses.
+ */
+struct __packed mhi_p_desc {
+	uint64_t buffer_ptr;
+	uint16_t buff_len;
+	uint16_t resvd1;
+	uint16_t chain : 1;
+	uint16_t resvd4 : 7;
+	uint16_t ieob : 1;
+	uint16_t ieot : 1;
+	uint16_t bei : 1;
+	uint16_t sct : 1;
+	uint16_t resvd3 : 4;
+	uint8_t re_type;
+	uint8_t resvd2;
+};
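+
+/*
+ * Illustrative compile-time check (an assumption, not part of this
+ * change): the descriptor above is expected to pack to the 16-byte
+ * GSI ring-element size (GSI_CHAN_RE_SIZE_16B/GSI_EVT_RING_RE_SIZE_16B)
+ * used when the rings are sized later in this file.
+ */
+static inline void ipa_mpm_check_desc_size(void)
+{
+	BUILD_BUG_ON(sizeof(struct mhi_p_desc) != 16);
+}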
+
+/*
+ * MHI PRIME Channel Context and Event Context Array
+ * Information that is sent to Device IPA.
+ */
+struct ipa_mpm_channel_context_type {
+	u32 chstate : 8;
+	u32 reserved1 : 24;
+	u32 chtype;
+	u32 erindex;
+	u64 rbase;
+	u64 rlen;
+	u64 reserved2;
+	u64 reserved3;
+} __packed;
+
+struct ipa_mpm_event_context_type {
+	u32 reserved1 : 8;
+	u32 update_rp_modc : 8;
+	u32 update_rp_intmodt : 16;
+	u32 ertype;
+	u32 update_rp_addr;
+	u64 rbase;
+	u64 rlen;
+	u32 buff_size : 16;
+	u32 reserved2 : 16;
+	u32 reserved3;
+	u64 reserved4;
+} __packed;
+
+struct ipa_mpm_pipes_info_type {
+	enum ipa_client_type ipa_client;
+	struct ipa_ep_cfg ep_cfg;
+};
+
+struct ipa_mpm_channel_type {
+	struct ipa_mpm_pipes_info_type dl_cons;
+	struct ipa_mpm_pipes_info_type ul_prod;
+	enum ipa_mpm_mhip_client_type mhip_client;
+};
+
+static struct ipa_mpm_channel_type ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_MAX];
+
+/* For configuring IPA_CLIENT_MHI_PRIME_TETH_CONS */
+static struct ipa_ep_cfg mhip_dl_teth_ep_cfg = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst = IPA_CLIENT_MHI_PRIME_TETH_CONS,
+	},
+};
+
+static struct ipa_ep_cfg mhip_ul_teth_ep_cfg = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst = IPA_CLIENT_MHI_PRIME_TETH_PROD,
+	},
+};
+
+/* WARNING!! Temporary for rndis integration only */
+
+
+/* For configuring IPA_CLIENT_MHIP_RMNET_PROD */
+static struct ipa_ep_cfg mhip_dl_rmnet_ep_cfg = {
+	.mode = {
+		.mode = IPA_DMA,
+		.dst = IPA_CLIENT_USB_CONS,
+	},
+};
+
+/* For configuring IPA_CLIENT_MHIP_RMNET_CONS */
+static struct ipa_ep_cfg mhip_ul_rmnet_ep_cfg = {
+	.mode = {
+		.mode = IPA_DMA,
+		.dst = IPA_CLIENT_USB_CONS,
+	},
+};
+
+/* For configuring IPA_CLIENT_MHIP_DPL_PROD */
+static struct ipa_ep_cfg mhip_dl_dpl_ep_cfg = {
+	.mode = {
+		.mode = IPA_DMA,
+		.dst = IPA_CLIENT_USB_CONS,
+	},
+};
+
+
+struct ipa_mpm_iova_addr {
+	dma_addr_t base;
+	unsigned int size;
+};
+
+struct ipa_mpm_dev_info {
+	struct platform_device *pdev;
+	struct device *dev;
+	bool ipa_smmu_enabled;
+	bool pcie_smmu_enabled;
+	struct ipa_mpm_iova_addr ctrl;
+	struct ipa_mpm_iova_addr data;
+	u32 chdb_base;
+	u32 erdb_base;
+};
+
+struct ipa_mpm_event_props {
+	u16 id;
+	phys_addr_t device_db;
+	struct ipa_mpm_event_context_type ev_ctx;
+};
+
+struct ipa_mpm_channel_props {
+	u16 id;
+	phys_addr_t device_db;
+	struct ipa_mpm_channel_context_type ch_ctx;
+};
+
+struct ipa_mpm_channel {
+	struct ipa_mpm_channel_props chan_props;
+	struct ipa_mpm_event_props evt_props;
+};
+
+enum ipa_mpm_gsi_state {
+	GSI_ERR,
+	GSI_INIT,
+	GSI_ALLOCATED,
+	GSI_STARTED,
+	GSI_STOPPED,
+};
+
+enum ipa_mpm_teth_state {
+	IPA_MPM_TETH_INIT = 0,
+	IPA_MPM_TETH_INPROGRESS,
+	IPA_MPM_TETH_CONNECTED,
+};
+
+enum ipa_mpm_mhip_chan {
+	IPA_MPM_MHIP_CHAN_UL,
+	IPA_MPM_MHIP_CHAN_DL,
+	IPA_MPM_MHIP_CHAN_BOTH,
+};
+
+struct producer_rings {
+	struct mhi_p_desc *tr_va;
+	struct mhi_p_desc *er_va;
+	dma_addr_t tr_pa;
+	dma_addr_t er_pa;
+	void *tre_buff[IPA_MPM_RING_LEN];
+	/*
+	 * The iova generated for AP CB,
+	 * used only for dma_map_single to flush the cache.
+	 */
+	dma_addr_t ap_iova_er;
+	dma_addr_t ap_iova_tr;
+	dma_addr_t ap_iova_buff[IPA_MPM_RING_LEN];
+};
+
+struct ipa_mpm_mhi_driver {
+	struct mhi_device *mhi_dev;
+	struct producer_rings ul_prod_ring;
+	struct producer_rings dl_prod_ring;
+	struct ipa_mpm_channel ul_prod;
+	struct ipa_mpm_channel dl_cons;
+	enum ipa_mpm_mhip_client_type mhip_client;
+	enum ipa_mpm_gsi_state gsi_state;
+	enum ipa_mpm_teth_state teth_state;
+	struct mutex mutex;
+	bool init_complete;
+};
+
+struct ipa_mpm_context {
+	struct ipa_mpm_dev_info dev_info;
+	struct ipa_mpm_mhi_driver md[IPA_MPM_MAX_MHIP_CHAN];
+	struct mutex mutex;
+	atomic_t ipa_clk_ref_cnt;
+	atomic_t pcie_clk_ref_cnt;
+	struct device *parent_pdev;
+	struct ipa_smmu_cb_ctx carved_smmu_cb;
+};
+
+#define IPA_MPM_DESC_SIZE (sizeof(struct mhi_p_desc))
+#define IPA_MPM_RING_TOTAL_SIZE (IPA_MPM_RING_LEN * IPA_MPM_DESC_SIZE)
+#define IPA_MPM_PAGE_SIZE roundup_pow_of_two(IPA_MPM_RING_TOTAL_SIZE)
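+/*
+ * Sizing arithmetic, spelled out for reference: with
+ * IPA_MPM_NUM_RING_DESC = 0x400, IPA_MPM_RING_LEN is 0x400 - 10 = 1014
+ * elements; at 16 bytes per mhi_p_desc that gives a total ring size of
+ * 1014 * 16 = 16224 bytes, which roundup_pow_of_two() rounds up to a
+ * 16 KB IPA_MPM_PAGE_SIZE.
+ */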
+
+
+static struct ipa_mpm_context *ipa_mpm_ctx;
+static struct platform_device *m_pdev;
+static int ipa_mpm_mhi_probe_cb(struct mhi_device *,
+	const struct mhi_device_id *);
+static void ipa_mpm_mhi_remove_cb(struct mhi_device *);
+static void ipa_mpm_mhi_status_cb(struct mhi_device *, enum MHI_CB);
+static void ipa_mpm_change_teth_state(int probe_id,
+	enum ipa_mpm_teth_state ip_state);
+static void ipa_mpm_change_gsi_state(int probe_id,
+	enum ipa_mpm_gsi_state next_state);
+static int ipa_mpm_start_stop_mhip_data_path(int probe_id,
+	enum ipa_mpm_start_stop_type start);
+static int ipa_mpm_probe(struct platform_device *pdev);
+static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
+	int probe_id);
+static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote);
+static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
+	enum ipa_mpm_mhip_chan mhip_chan,
+	int probe_id,
+	enum ipa_mpm_start_stop_type start_stop);
+
+static struct mhi_driver mhi_driver = {
+	.id_table = mhi_driver_match_table,
+	.probe = ipa_mpm_mhi_probe_cb,
+	.remove = ipa_mpm_mhi_remove_cb,
+	.status_cb = ipa_mpm_mhi_status_cb,
+	.driver = {
+		.name = IPA_MPM_DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static void ipa_mpm_ipa3_delayed_probe(struct work_struct *work)
+{
+	(void)ipa_mpm_probe(m_pdev);
+}
+
+static DECLARE_WORK(ipa_mpm_ipa3_scheduled_probe, ipa_mpm_ipa3_delayed_probe);
+
+static void ipa_mpm_ipa3_ready_cb(void *user_data)
+{
+	struct platform_device *pdev = (struct platform_device *)(user_data);
+
+	m_pdev = pdev;
+
+	IPA_MPM_DBG("IPA ready callback has been triggered\n");
+
+	schedule_work(&ipa_mpm_ipa3_scheduled_probe);
+}
+
+void ipa_mpm_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *err_data)
+{
+	IPA_MPM_ERR("GSI EVT RING ERROR, not expected..\n");
+	ipa_assert();
+}
+
+void ipa_mpm_gsi_chan_err_cb(struct gsi_chan_err_notify *err_data)
+{
+	IPA_MPM_ERR("GSI CHAN ERROR, not expected..\n");
+	ipa_assert();
+}
+
+/**
+ * ipa_mpm_smmu_map() - SMMU maps ring and the buffer pointer.
+ * @va_addr: virtual address that needs to be mapped
+ * @sz: size of the address to be mapped
+ * @dir: ipa_mpm_dma_data_direction
+ * @ap_cb_iova: iova for AP context bank
+ *
+ * This function SMMU maps both ring and the buffer pointer.
+ * The ring pointers will be aligned to ring size and
+ * the buffer pointers should be aligned to buffer size.
+ *
+ * Returns: iova of the mapped address
+ */
+static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
+	int sz,
+	int dir,
+	dma_addr_t *ap_cb_iova)
+{
+	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
+	phys_addr_t phys_addr;
+	dma_addr_t iova;
+	int smmu_enabled;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
+	unsigned long carved_iova = roundup(cb->next_addr, IPA_MPM_PAGE_SIZE);
+	int ret = 0;
+
+	if (carved_iova >= cb->va_end) {
+		IPA_MPM_ERR("running out of carved_iova %x\n", carved_iova);
+		ipa_assert();
+	}
+	/*
+	 * Both the Host IPA and PCIE SMMUs must be enabled or disabled
+	 * together to proceed.
+	 * If SMMU Enabled => iova == iommu mapped iova
+	 * If SMMU Disabled => iova == pa
+	 * dma_map_single ensures the cache is flushed and the memory is
+	 * not touched again until dma_unmap_single() is called.
+	 */
+	smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
+
+	if (smmu_enabled) {
+		/* Map the phys addr to both PCIE and IPA AP CB
+		 * from the carved out common iova range.
+		 */
+		ipa_smmu_domain = ipa3_get_smmu_domain();
+
+		if (!ipa_smmu_domain) {
+			IPA_MPM_ERR("invalid IPA smmu domain\n");
+			ipa_assert();
+		}
+
+		if (!ipa_mpm_ctx->md[0].mhi_dev->dev.parent) {
+			IPA_MPM_ERR("invalid PCIE SMMU domain\n");
+			ipa_assert();
+		}
+
+		phys_addr = virt_to_phys((void *) va_addr);
+		IPA_SMMU_ROUND_TO_PAGE(carved_iova, phys_addr, sz,
+					iova_p, pa_p, size_p);
+
+		/* Flush the cache with dma_map_single for IPA AP CB */
+		*ap_cb_iova = dma_map_single(ipa3_ctx->pdev, va_addr,
+						sz, dir);
+		ret = ipa3_iommu_map(ipa_smmu_domain, iova_p,
+					pa_p, size_p, prot);
+		if (ret) {
+			IPA_MPM_ERR("IPA IOMMU returned failure, ret = %d\n",
+					ret);
+			ipa_assert();
+		}
+
+		pcie_smmu_domain = iommu_get_domain_for_dev(
+			ipa_mpm_ctx->md[0].mhi_dev->dev.parent);
+		ret = iommu_map(pcie_smmu_domain, iova_p, pa_p, size_p, prot);
+
+		if (ret) {
+			IPA_MPM_ERR("PCIe IOMMU returned failure, ret = %d\n",
+				ret);
+			ipa_assert();
+		}
+
+		iova = iova_p;
+		cb->next_addr = iova_p + size_p;
+	} else {
+		iova = dma_map_single(ipa3_ctx->pdev, va_addr, sz, dir);
+		*ap_cb_iova = iova;
+	}
+	return iova;
+}
+
+/**
+ * ipa_mpm_smmu_unmap() - SMMU unmaps ring and the buffer pointer.
+ * @carved_iova: iova (from the carved-out range) to be unmapped
+ * @sz: size of the mapping to be unmapped
+ * @dir: ipa_mpm_dma_data_direction
+ * @ap_cb_iova: iova for AP context bank
+ *
+ * This function SMMU unmaps both the ring and the buffer pointer.
+ * The ring pointers will be aligned to ring size and
+ * the buffer pointers should be aligned to buffer size.
+ *
+ * Return: none
+ */
+static void ipa_mpm_smmu_unmap(dma_addr_t carved_iova, int sz, int dir,
+	dma_addr_t ap_cb_iova)
+{
+	int ret;
+	unsigned long iova_p;
+	unsigned long pa_p;
+	u32 size_p = 0;
+	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
+	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
+	int smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
+
+	if (smmu_enabled) {
+		ipa_smmu_domain = ipa3_get_smmu_domain();
+		if (!ipa_smmu_domain) {
+			IPA_MPM_ERR("invalid IPA smmu domain\n");
+			ipa_assert();
+		}
+
+		if (!ipa_mpm_ctx->md[0].mhi_dev->dev.parent) {
+			IPA_MPM_ERR("invalid PCIE SMMU domain\n");
+			ipa_assert();
+		}
+
+		IPA_SMMU_ROUND_TO_PAGE(carved_iova, carved_iova, sz,
+					iova_p, pa_p, size_p);
+
+		ret = iommu_unmap(ipa_smmu_domain, carved_iova, size_p);
+		if (ret) {
+			IPA_MPM_ERR("IPA IOMMU Unmap failure, ret = %d\n",
+					ret);
+			ipa_assert();
+		}
+		pcie_smmu_domain = iommu_get_domain_for_dev(
+			ipa_mpm_ctx->md[0].mhi_dev->dev.parent);
+
+		ret = iommu_unmap(pcie_smmu_domain, carved_iova, size_p);
+
+		if (ret) {
+			IPA_MPM_ERR("PCIe IOMMU Unmap failure, ret = %d\n",
+				ret);
+			ipa_assert();
+		}
+		cb->next_addr -= size_p;
+		dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova, size_p, dir);
+	} else {
+		dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova, sz, dir);
+	}
+}
+
+static u32 ipa_mpm_smmu_map_doorbell(enum mhip_smmu_domain_type smmu_domain,
+	u32 pa_addr)
+{
+	/*
+	 * Doorbells are already in PA, map these to
+	 * PCIE/IPA domain if SMMUs are enabled.
+	 */
+	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
+	int smmu_enabled;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+	int ret = 0;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
+	unsigned long carved_iova = roundup(cb->next_addr, PAGE_SIZE);
+	u32 iova = 0;
+	u64 offset = 0;
+
+	if (carved_iova >= cb->va_end) {
+		IPA_MPM_ERR("running out of carved_iova %x\n", carved_iova);
+		ipa_assert();
+	}
+
+	smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
+
+	if (smmu_enabled) {
+		IPA_SMMU_ROUND_TO_PAGE(carved_iova, pa_addr, PAGE_SIZE,
+					iova_p, pa_p, size_p);
+		if (smmu_domain == MHIP_SMMU_DOMAIN_IPA) {
+			ipa_smmu_domain = ipa3_get_smmu_domain();
+			ret = ipa3_iommu_map(ipa_smmu_domain,
+				iova_p, pa_p, size_p, prot);
+			if (ret) {
+				IPA_MPM_ERR("IPA doorbell mapping failed\n");
+				ipa_assert();
+			}
+			offset = pa_addr - pa_p;
+		} else if (smmu_domain == MHIP_SMMU_DOMAIN_PCIE) {
+			pcie_smmu_domain = iommu_get_domain_for_dev(
+				ipa_mpm_ctx->md[0].mhi_dev->dev.parent);
+			ret = iommu_map(pcie_smmu_domain,
+				iova_p, pa_p, size_p, prot);
+			if (ret) {
+				IPA_MPM_ERR("PCIe doorbell mapping failed\n");
+				ipa_assert();
+			}
+			offset = pa_addr - pa_p;
+		}
+		iova = iova_p + offset;
+		cb->next_addr = iova_p + PAGE_SIZE;
+	} else {
+		iova = pa_addr;
+	}
+	return iova;
+}
+
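+/*
+ * Index of @id within mhi_driver_match_table, obtained by pointer
+ * arithmetic against the table entry the MHI core matched.
+ */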
+int get_idx_from_id(const struct mhi_device_id *id)
+{
+	return (id - mhi_driver_match_table);
+}
+
+static void get_ipa3_client(int id,
+	enum ipa_client_type *ul_prod,
+	enum ipa_client_type *dl_cons)
+{
+	IPA_MPM_FUNC_ENTRY();
+
+	if (id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		*ul_prod = IPA_CLIENT_MAX;
+		*dl_cons = IPA_CLIENT_MAX;
+	} else {
+		*ul_prod = ipa_mpm_pipes[id].ul_prod.ipa_client;
+		*dl_cons = ipa_mpm_pipes[id].dl_cons.ipa_client;
+	}
+	IPA_MPM_FUNC_EXIT();
+}
+
+static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
+	int mhi_idx, struct ipa_req_chan_out_params *out_params)
+{
+	int ipa_ep_idx;
+	int res;
+	struct mhi_p_desc *ev_ring;
+	struct mhi_p_desc *tr_ring;
+	int tr_ring_sz, ev_ring_sz;
+	dma_addr_t ev_ring_iova, tr_ring_iova;
+	dma_addr_t ap_cb_iova;
+	struct ipa_request_gsi_channel_params gsi_params;
+	int dir;
+	int i;
+	void *buff;
+	int result;
+	int k;
+	struct ipa3_ep_context *ep;
+
+	if (mhip_client == IPA_CLIENT_MAX)
+		goto fail_gen;
+
+	if (mhi_idx == IPA_MPM_MHIP_CH_ID_MAX)
+		goto fail_gen;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPA_MPM_ERR("fail to find channel EP.\n");
+		goto fail_gen;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid == 1) {
+		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
+		return 0;
+	}
+
+	IPA_MPM_DBG("connecting client %d (ep: %d)\n", mhip_client, ipa_ep_idx);
+
+	IPA_MPM_FUNC_ENTRY();
+
+	ev_ring_sz = IPA_MPM_RING_TOTAL_SIZE;
+	ev_ring = kzalloc(ev_ring_sz, GFP_KERNEL);
+	if (!ev_ring)
+		goto fail_evt_alloc;
+
+	tr_ring_sz = IPA_MPM_RING_TOTAL_SIZE;
+	tr_ring = kzalloc(tr_ring_sz, GFP_KERNEL);
+	if (!tr_ring)
+		goto fail_tr_alloc;
+
+	tr_ring[0].re_type = MHIP_RE_NOP;
+
+	dir = IPA_CLIENT_IS_PROD(mhip_client) ?
+		DMA_TO_HIPA : DMA_FROM_HIPA;
+
+	/* allocate transfer ring elements */
+	for (i = 1, k = 0; i < IPA_MPM_RING_LEN; i++, k++) {
+		buff = kzalloc(TRE_BUFF_SIZE, GFP_KERNEL);
+
+		if (!buff)
+			goto fail_buff_alloc;
+
+		if (IPA_CLIENT_IS_PROD(mhip_client))
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff[k] =
+									buff;
+		else
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff[k] =
+									buff;
+
+		tr_ring[i].buffer_ptr =
+			ipa_mpm_smmu_map(buff, TRE_BUFF_SIZE, dir,
+				&ap_cb_iova);
+
+		if (!tr_ring[i].buffer_ptr)
+			goto fail_smmu_map_ring;
+
+		tr_ring[i].buff_len = TRE_BUFF_SIZE;
+		tr_ring[i].chain = 0;
+		tr_ring[i].ieob = 0;
+		tr_ring[i].ieot = 0;
+		tr_ring[i].bei = 0;
+		tr_ring[i].sct = 0;
+		tr_ring[i].re_type = MHIP_RE_XFER;
+
+		if (IPA_CLIENT_IS_PROD(mhip_client))
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[k] =
+				ap_cb_iova;
+		else
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[k] =
+				ap_cb_iova;
+	}
+
+	tr_ring_iova = ipa_mpm_smmu_map(tr_ring, tr_ring_sz, dir,
+		&ap_cb_iova);
+	if (!tr_ring_iova)
+		goto fail_smmu_map_ring;
+
+	ev_ring_iova = ipa_mpm_smmu_map(ev_ring, ev_ring_sz, dir,
+		&ap_cb_iova);
+	if (!ev_ring_iova)
+		goto fail_smmu_map_ring;
+
+	/* Store Producer channel rings */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		/* Device UL */
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = ev_ring;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = tr_ring;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa = ev_ring_iova;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa = tr_ring_iova;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr =
+			ap_cb_iova;
+	} else {
+		/* Host UL */
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = ev_ring;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = tr_ring;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = ev_ring_iova;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = tr_ring_iova;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr =
+			ap_cb_iova;
+	}
+
+	memset(&gsi_params, 0, sizeof(struct ipa_request_gsi_channel_params));
+
+	if (IPA_CLIENT_IS_PROD(mhip_client))
+		gsi_params.ipa_ep_cfg =
+		ipa_mpm_pipes[mhi_idx].dl_cons.ep_cfg;
+	else
+		gsi_params.ipa_ep_cfg =
+		ipa_mpm_pipes[mhi_idx].ul_prod.ep_cfg;
+
+	gsi_params.client = mhip_client;
+	gsi_params.skip_ep_cfg = false;
+
+	/*
+	 * RP update address = Device channel DB address
+	 * CLIENT_PROD -> Host DL
+	 * CLIENT_CONS -> Host UL
+	 */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		gsi_params.evt_ring_params.rp_update_addr =
+			ipa_mpm_smmu_map_doorbell(
+			MHIP_SMMU_DOMAIN_IPA,
+			ipa_mpm_ctx->md[mhi_idx].dl_cons.chan_props.device_db);
+		if (gsi_params.evt_ring_params.rp_update_addr == 0)
+			goto fail_smmu_map_db;
+		gsi_params.evt_ring_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa;
+		gsi_params.chan_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa;
+	} else {
+		gsi_params.evt_ring_params.rp_update_addr =
+			ipa_mpm_smmu_map_doorbell(
+			MHIP_SMMU_DOMAIN_IPA,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod.chan_props.device_db);
+		if (gsi_params.evt_ring_params.rp_update_addr == 0)
+			goto fail_smmu_map_db;
+		gsi_params.evt_ring_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa;
+		gsi_params.chan_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa;
+	}
+
+	/* Fill Event ring params */
+	gsi_params.evt_ring_params.intf = GSI_EVT_CHTYPE_MHIP_EV;
+	gsi_params.evt_ring_params.intr = GSI_INTR_MSI;
+	gsi_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	gsi_params.evt_ring_params.ring_len =
+		(IPA_MPM_RING_LEN) * GSI_EVT_RING_RE_SIZE_16B;
+	gsi_params.evt_ring_params.ring_base_vaddr = NULL;
+	gsi_params.evt_ring_params.int_modt = 0;
+	gsi_params.evt_ring_params.int_modc = 0;
+	gsi_params.evt_ring_params.intvec = 0;
+	gsi_params.evt_ring_params.msi_addr = 0;
+	gsi_params.evt_ring_params.exclusive = true;
+	gsi_params.evt_ring_params.err_cb = ipa_mpm_gsi_evt_ring_err_cb;
+	gsi_params.evt_ring_params.user_data = NULL;
+
+	/* Evt Scratch Params */
+	/* Disable the Moderation for ringing doorbells */
+	gsi_params.evt_scratch.mhip.rp_mod_threshold = 1;
+	gsi_params.evt_scratch.mhip.rp_mod_timer = 0;
+	gsi_params.evt_scratch.mhip.rp_mod_counter = 0;
+	gsi_params.evt_scratch.mhip.rp_mod_timer_id = 0;
+	gsi_params.evt_scratch.mhip.rp_mod_timer_running = 0;
+	gsi_params.evt_scratch.mhip.fixed_buffer_sz = TRE_BUFF_SIZE;
+
+	if (IPA_CLIENT_IS_PROD(mhip_client))
+		gsi_params.evt_scratch.mhip.rp_mod_threshold = 4;
+
+	/* Channel Params */
+	gsi_params.chan_params.prot = GSI_CHAN_PROT_MHIP;
+	gsi_params.chan_params.dir = IPA_CLIENT_IS_PROD(mhip_client) ?
+		GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
+	/* chan_id is set in ipa3_request_gsi_channel() */
+	gsi_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
+	gsi_params.chan_params.ring_len =
+		(IPA_MPM_RING_LEN) * GSI_CHAN_RE_SIZE_16B;
+	gsi_params.chan_params.ring_base_vaddr = NULL;
+	gsi_params.chan_params.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	gsi_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_params.chan_params.low_weight = 1;
+	gsi_params.chan_params.xfer_cb = NULL;
+	gsi_params.chan_params.err_cb = ipa_mpm_gsi_chan_err_cb;
+	gsi_params.chan_params.chan_user_data = NULL;
+
+	/* Channel scratch */
+	gsi_params.chan_scratch.mhip.assert_bit_40 = 0;
+	gsi_params.chan_scratch.mhip.host_channel = 1;
+
+	res = ipa3_request_gsi_channel(&gsi_params, out_params);
+	if (res) {
+		IPA_MPM_ERR("failed to allocate GSI channel res=%d\n", res);
+		goto fail_alloc_channel;
+	}
+
+	ipa_mpm_change_gsi_state(mhi_idx, GSI_ALLOCATED);
+
+	result = ipa3_start_gsi_channel(ipa_ep_idx);
+	if (result) {
+		IPA_MPM_ERR("start MHIP channel %d failed\n", mhip_client);
+		ipa_mpm_ctx->md[mhi_idx].gsi_state = GSI_ERR;
+		goto fail_start_channel;
+	}
+	ipa_mpm_change_gsi_state(mhi_idx, GSI_STARTED);
+
+	/* Fill in the Device Context params */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		/* This is the DL channel :: Device -> Host */
+		ipa_mpm_ctx->md[mhi_idx].dl_cons.evt_props.ev_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa;
+		ipa_mpm_ctx->md[mhi_idx].dl_cons.chan_props.ch_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa;
+	} else {
+		ipa_mpm_ctx->md[mhi_idx].ul_prod.evt_props.ev_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod.chan_props.ch_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa;
+	}
+
+	IPA_MPM_FUNC_EXIT();
+
+	return 0;
+
+fail_start_channel:
+	ipa3_disable_data_path(ipa_ep_idx);
+	ipa3_stop_gsi_channel(ipa_ep_idx);
+fail_alloc_channel:
+	ipa3_release_gsi_channel(ipa_ep_idx);
+fail_smmu_map_db:
+fail_smmu_map_ring:
+fail_tr_alloc:
+fail_evt_alloc:
+fail_buff_alloc:
+	ipa_assert();
+fail_gen:
+	return -EFAULT;
+}
+
+void ipa_mpm_clean_mhip_chan(int mhi_idx, enum ipa_client_type mhip_client)
+{
+	int dir;
+	int i;
+	int result;
+	int ipa_ep_idx;
+	struct mhi_p_desc *ev_ring;
+	struct mhi_p_desc *tr_ring;
+	int tr_ring_sz, ev_ring_sz;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (mhip_client == IPA_CLIENT_MAX)
+		return;
+	if (mhi_idx == IPA_MPM_MHIP_CH_ID_MAX)
+		return;
+
+	dir = IPA_CLIENT_IS_PROD(mhip_client) ?
+		DMA_TO_HIPA : DMA_FROM_HIPA;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPA_MPM_ERR("fail to find channel EP.\n");
+		return;
+	}
+
+	/* Release channel */
+	result = ipa3_release_gsi_channel(ipa_ep_idx);
+	if (result) {
+		IPA_MPM_ERR("start MHIP channel %d failed\n", mhip_client);
+		ipa_mpm_ctx->md[mhi_idx].gsi_state = GSI_ERR;
+	}
+
+	ipa_mpm_change_gsi_state(mhi_idx, GSI_INIT);
+
+
+	/* deallocate transfer ring buffers  */
+	for (i = 0; i < IPA_MPM_RING_LEN; i++) {
+		if (IPA_CLIENT_IS_PROD(mhip_client)) {
+			ipa_mpm_smmu_unmap(
+			(dma_addr_t)
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff[i],
+			TRE_BUFF_SIZE, dir,
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]);
+
+			kfree(
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff[i]);
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff[i]
+								= NULL;
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]
+								= 0;
+		} else {
+			ipa_mpm_smmu_unmap(
+			(dma_addr_t)
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff[i],
+			TRE_BUFF_SIZE, dir,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
+			);
+
+			kfree(
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff[i]);
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff[i]
+								= NULL;
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
+								= 0;
+		}
+	}
+
+	tr_ring_sz = sizeof(*tr_ring) * (IPA_MPM_RING_LEN);
+	ev_ring_sz = sizeof(*ev_ring) * (IPA_MPM_RING_LEN);
+
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa,
+			ev_ring_sz, dir,
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er);
+
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa,
+			tr_ring_sz, dir,
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
+
+		kfree(ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va);
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = NULL;
+
+		kfree(ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va);
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = NULL;
+
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er = 0;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr = 0;
+	} else {
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa,
+			ev_ring_sz, dir,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er);
+
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa,
+			tr_ring_sz, dir,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr);
+
+		kfree(ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va);
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = NULL;
+
+		kfree(ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va);
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = NULL;
+
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er = 0;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr = 0;
+	}
+
+	IPA_MPM_FUNC_EXIT();
+}
+
+/* round addresses to the nearest page boundaries, per SMMU requirements */
+static inline void ipa_mpm_smmu_round_to_page(uint64_t iova, uint64_t pa,
+	uint64_t size, unsigned long *iova_p, phys_addr_t *pa_p, u32 *size_p)
+{
+	*iova_p = rounddown(iova, PAGE_SIZE);
+	*pa_p = rounddown(pa, PAGE_SIZE);
+	*size_p = roundup(size + pa - *pa_p, PAGE_SIZE);
+}
+
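+/*
+ * Worked example for ipa_mpm_smmu_round_to_page() above (illustrative
+ * only, assuming a 4KB PAGE_SIZE): for iova = 0x1010, pa = 0x2010 and
+ * size = 0x100, the helper yields *iova_p = 0x1000, *pa_p = 0x2000 and
+ * *size_p = 0x1000, i.e. the mapping is widened to cover the whole page
+ * containing the buffer.
+ */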
+
+static int __ipa_mpm_configure_mhi_device(struct ipa_mpm_channel *ch,
+	int mhi_idx, int dir)
+{
+	struct mhi_buf ch_config[2];
+	int ret;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (ch == NULL) {
+		IPA_MPM_ERR("ch config is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Populate CCA */
+	ch_config[0].buf = &ch->chan_props.ch_ctx;
+	ch_config[0].len = sizeof(ch->chan_props.ch_ctx);
+	ch_config[0].name = "CCA";
+
+	/* populate ECA */
+	ch_config[1].buf = &ch->evt_props.ev_ctx;
+	ch_config[1].len = sizeof(ch->evt_props.ev_ctx);
+	ch_config[1].name = "ECA";
+
+	IPA_MPM_DBG("Configuring MHI PRIME device for mhi_idx %d\n", mhi_idx);
+
+	ret = mhi_device_configure(ipa_mpm_ctx->md[mhi_idx].mhi_dev, dir,
+			ch_config, 2);
+	if (ret) {
+		IPA_MPM_ERR("mhi_device_configure failed\n");
+		return -EINVAL;
+	}
+
+	IPA_MPM_FUNC_EXIT();
+
+	return 0;
+}
+
+static void ipa_mpm_mhip_shutdown(void)
+{
+	int mhip_idx;
+	enum ipa_client_type ul_chan, dl_chan;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	for (mhip_idx = 0; mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
+		if (ipa_mpm_ctx->md[mhip_idx].gsi_state >= GSI_ALLOCATED) {
+			get_ipa3_client(mhip_idx, &ul_chan, &dl_chan);
+			IPA_MPM_DBG("Stopping chan = %d\n", mhip_idx);
+			/* MHIP PROD: Enable HOLB and Stop the GSI UL channel */
+			ipa_mpm_start_stop_mhip_data_path(mhip_idx, STOP);
+			ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+							mhip_idx, STOP);
+			ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_DL,
+							mhip_idx, STOP);
+
+			/* Clean up the GSI UL and DL channels */
+			if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+				ipa_mpm_ctx->dev_info.pcie_smmu_enabled) {
+				IPA_MPM_DBG("Cleaning SMMU entries..\n");
+			}
+
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, mhip_idx);
+			ipa_mpm_vote_unvote_ipa_clk(CLK_OFF);
+			if (ul_chan != IPA_CLIENT_MAX)
+				ipa_mpm_clean_mhip_chan(mhip_idx, ul_chan);
+			if (dl_chan != IPA_CLIENT_MAX)
+				ipa_mpm_clean_mhip_chan(mhip_idx, dl_chan);
+		}
+	}
+	IPA_MPM_FUNC_EXIT();
+}
+
+/*
+ * Turning the PCIe clock ON/OFF is done once for all clients.
+ * Always vote with Probe_ID 0 as the standard.
+ */
+static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
+	int probe_id)
+{
+	int result = 0;
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("probe_id not found\n");
+		return -EINVAL;
+	}
+
+	if (vote > CLK_OFF) {
+		IPA_MPM_ERR("Invalid vote\n");
+		return -EINVAL;
+	}
+
+	if (ipa_mpm_ctx->md[probe_id].mhi_dev == NULL) {
+		IPA_MPM_ERR("MHI not initialized yet\n");
+		return 0;
+	}
+	if (vote == CLK_ON) {
+		if (atomic_read(&ipa_mpm_ctx->pcie_clk_ref_cnt) == 0) {
+			result = mhi_device_get_sync(
+				ipa_mpm_ctx->md[probe_id].mhi_dev);
+			if (result) {
+				IPA_MPM_ERR("mhi_sync_get failed %d\n",
+					result);
+				return result;
+			}
+			IPA_MPM_DBG("PCIE clock now ON\n");
+		}
+		atomic_inc(&ipa_mpm_ctx->pcie_clk_ref_cnt);
+	} else {
+		if ((atomic_read(&ipa_mpm_ctx->pcie_clk_ref_cnt) == 1)) {
+			mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev);
+			IPA_MPM_DBG("PCIE clock off ON\n");
+		}
+		atomic_dec(&ipa_mpm_ctx->pcie_clk_ref_cnt);
+	}
+
+	return result;
+}
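+
+/*
+ * Reference-counting sketch (illustrative, not a real call site): votes
+ * and unvotes are expected to be balanced so the MHI vote is held exactly
+ * while any client needs PCIe:
+ *
+ *	ipa_mpm_vote_unvote_pcie_clk(CLK_ON, 0);	ref 0 -> 1, get_sync
+ *	ipa_mpm_vote_unvote_pcie_clk(CLK_ON, 0);	ref 1 -> 2
+ *	ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, 0);	ref 2 -> 1
+ *	ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, 0);	ref 1 -> 0, put
+ *
+ * Only the 0 -> 1 transition calls mhi_device_get_sync() and only the
+ * 1 -> 0 transition calls mhi_device_put().
+ */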
+
+/*
+ * Turning the IPA clock ON/OFF is done only once, for all clients.
+ */
+static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote)
+{
+	if (vote > CLK_OFF)
+		return;
+
+	if (vote == CLK_ON) {
+		if (!atomic_read(&ipa_mpm_ctx->ipa_clk_ref_cnt)) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("ipa_mpm");
+			IPA_MPM_DBG("IPA clock now ON\n");
+		}
+		atomic_inc(&ipa_mpm_ctx->ipa_clk_ref_cnt);
+	} else {
+		if (atomic_read(&ipa_mpm_ctx->ipa_clk_ref_cnt) == 1) {
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("ipa_mpm");
+			IPA_MPM_DBG("IPA clock now OFF\n");
+		}
+		atomic_dec(&ipa_mpm_ctx->ipa_clk_ref_cnt);
+	}
+}
+
+static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
+	enum ipa_mpm_mhip_chan mhip_chan,
+	int probe_id,
+	enum ipa_mpm_start_stop_type start_stop)
+{
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	bool is_start;
+	enum ipa_client_type ul_chan, dl_chan;
+	u32 source_pipe_bitmask = 0;
+	enum gsi_status gsi_res = GSI_STATUS_SUCCESS;
+	int result;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (mhip_chan > IPA_MPM_MHIP_CHAN_BOTH) {
+		IPA_MPM_ERR("MHI not initialized yet\n");
+		return MHIP_STATUS_FAIL;
+	}
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("MHI not initialized yet\n");
+		return MHIP_STATUS_FAIL;
+	}
+
+	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
+
+	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
+	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
+		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
+	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_BOTH) {
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
+		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
+	}
+
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPA_MPM_ERR("fail to get EP# for idx %d\n", ipa_ep_idx);
+		return MHIP_STATUS_EP_NOT_FOUND;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	IPA_MPM_DBG("current GSI state = %d, action = %d\n",
+		ipa_mpm_ctx->md[probe_id].gsi_state, start_stop);
+
+	if (ipa_mpm_ctx->md[probe_id].gsi_state < GSI_ALLOCATED) {
+		IPA_MPM_ERR("GSI chan is not allocated yet..\n");
+		return MHIP_STATUS_EP_NOT_READY;
+	}
+
+	is_start = (start_stop == START) ? true : false;
+
+	if (is_start) {
+		if (ipa_mpm_ctx->md[probe_id].gsi_state == GSI_STARTED) {
+			IPA_MPM_ERR("GSI chan is already started\n");
+			return MHIP_STATUS_NO_OP;
+		}
+
+		/* Start GSI channel */
+		gsi_res = ipa3_start_gsi_channel(ipa_ep_idx);
+		if (gsi_res != GSI_STATUS_SUCCESS) {
+			IPA_MPM_ERR("Error starting channel: err = %d\n",
+					gsi_res);
+			goto gsi_chan_fail;
+		} else {
+			ipa_mpm_change_gsi_state(probe_id, GSI_STARTED);
+		}
+	} else {
+		if (ipa_mpm_ctx->md[probe_id].gsi_state == GSI_STOPPED) {
+			IPA_MPM_ERR("GSI chan is already stopped\n");
+			return MHIP_STATUS_NO_OP;
+		} else if (ipa_mpm_ctx->md[probe_id].gsi_state !=
+							GSI_STARTED) {
+			IPA_MPM_ERR("GSI chan is not previously started\n");
+			return MHIP_STATUS_BAD_STATE;
+		}
+
+		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
+			source_pipe_bitmask = 1 <<
+				ipa3_get_ep_mapping(ep->client);
+
+			/* First Stop UL GSI channel before unvote PCIe clock */
+			result = ipa3_stop_gsi_channel(ipa_ep_idx);
+
+			if (result) {
+				IPA_MPM_ERR("UL chan stop failed\n");
+				goto gsi_chan_fail;
+			} else {
+				ipa_mpm_change_gsi_state(probe_id,
+							GSI_STOPPED);
+			}
+		}
+
+		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
+			result = ipa3_stop_gsi_channel(ipa_ep_idx);
+			if (result) {
+				IPA_MPM_ERR("Fail to stop DL channel\n");
+				goto gsi_chan_fail;
+			} else {
+				ipa_mpm_change_gsi_state(probe_id, GSI_STOPPED);
+			}
+		}
+	}
+	IPA_MPM_FUNC_EXIT();
+
+	return MHIP_STATUS_SUCCESS;
+gsi_chan_fail:
+	ipa3_disable_data_path(ipa_ep_idx);
+	ipa_mpm_change_gsi_state(probe_id, GSI_ERR);
+	ipa_assert();
+
+	return MHIP_STATUS_FAIL;
+}
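+
+/*
+ * Caller sketch (illustrative; handle_status() is hypothetical): the
+ * returned mhip_status_type is meant to be switched on, with
+ * MHIP_STATUS_NO_OP treated as success:
+ *
+ *	status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+ *						probe_id, START);
+ *	if (status != MHIP_STATUS_SUCCESS && status != MHIP_STATUS_NO_OP)
+ *		handle_status(status);
+ */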
+
+int ipa_mpm_notify_wan_state(void)
+{
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int i;
+	enum mhip_status_type status;
+	int ret = 0;
+	enum ipa_client_type ul_chan, dl_chan;
+	enum ipa_mpm_mhip_client_type mhip_client = IPA_MPM_MHIP_TETH;
+
+	if (!ipa3_is_mhip_offload_enabled())
+		return -EPERM;
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
+			probe_id = i;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return -EPERM;
+	}
+
+	IPA_MPM_DBG("WAN backhaul available for probe_id = %d\n", probe_id);
+	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
+
+	/* Start UL MHIP channel for offloading the tethering connection */
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
+
+	if (ret) {
+		IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
+		return ret;
+	}
+
+	status = ipa_mpm_start_stop_mhip_chan(
+				IPA_MPM_MHIP_CHAN_UL, probe_id, START);
+	switch (status) {
+	case MHIP_STATUS_SUCCESS:
+	case MHIP_STATUS_NO_OP:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
+		ret = ipa_mpm_start_stop_mhip_data_path(probe_id, START);
+
+		if (ret) {
+			IPA_MPM_ERR("Couldnt start UL GSI channel");
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+			return ret;
+		}
+
+		if (status == MHIP_STATUS_NO_OP) {
+			/* Channels have already been started,
+			 * so we can unvote the PCIe clock
+			 */
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		}
+		break;
+	case MHIP_STATUS_EP_NOT_READY:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INPROGRESS);
+		break;
+	case MHIP_STATUS_FAIL:
+	case MHIP_STATUS_BAD_STATE:
+	case MHIP_STATUS_EP_NOT_FOUND:
+		IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
+		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		ret = -EFAULT;
+		break;
+	default:
+		IPA_MPM_ERR("Err not found\n");
+		break;
+	}
+
+	return ret;
+}
+
+static void ipa_mpm_change_gsi_state(int probe_id,
+	enum ipa_mpm_gsi_state next_state)
+{
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX)
+		return;
+
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mutex);
+	ipa_mpm_ctx->md[probe_id].gsi_state = next_state;
+	IPA_MPM_DBG("GSI next_state = %d\n",
+		ipa_mpm_ctx->md[probe_id].gsi_state);
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mutex);
+}
+
+static void ipa_mpm_change_teth_state(int probe_id,
+	enum ipa_mpm_teth_state next_state)
+{
+	enum ipa_mpm_teth_state curr_state;
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return;
+	}
+
+	curr_state = ipa_mpm_ctx->md[probe_id].teth_state;
+
+	IPA_MPM_DBG("curr_state = %d, ip_state = %d mhip_s\n",
+		curr_state, next_state);
+
+	switch (curr_state) {
+	case IPA_MPM_TETH_INIT:
+		if (next_state == IPA_MPM_TETH_CONNECTED)
+			next_state = IPA_MPM_TETH_INPROGRESS;
+		break;
+	case IPA_MPM_TETH_INPROGRESS:
+		break;
+	case IPA_MPM_TETH_CONNECTED:
+		break;
+	default:
+		IPA_MPM_ERR("No change in state\n");
+		break;
+	}
+
+	ipa_mpm_ctx->md[probe_id].teth_state = next_state;
+	IPA_MPM_DBG("next_state = %d\n", next_state);
+}
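+
+/*
+ * Transition example (illustrative): a request for IPA_MPM_TETH_CONNECTED
+ * while still in IPA_MPM_TETH_INIT is downgraded to
+ * IPA_MPM_TETH_INPROGRESS, so the first notification moves
+ * INIT -> INPROGRESS and a later one moves INPROGRESS -> CONNECTED.
+ */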
+
+static void ipa_mpm_read_channel(enum ipa_client_type chan)
+{
+	struct gsi_chan_info chan_info;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	int res;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(chan);
+
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPAERR("failed to get idx");
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	IPA_MPM_ERR("Reading channel for chan %d, ep = %d, gsi_chan_hdl = %d\n",
+		chan, ipa_ep_idx, ep->gsi_chan_hdl);
+
+	res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
+	if (res)
+		IPA_MPM_ERR("Reading of channel failed for ep %d\n", ep);
+}
+
+static int ipa_mpm_start_stop_mhip_data_path(int probe_id,
+	enum ipa_mpm_start_stop_type start)
+{
+	int ipa_ep_idx;
+	int res = 0;
+	enum ipa_client_type ul_chan, dl_chan;
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return 0;
+	}
+	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
+	IPA_MPM_DBG("Start/Stop Data Path ? = %d\n", start);
+
+	/* Defensive check: start/stop the MHIP channels only if
+	 * the MHIP channels have been allocated.
+	 */
+
+	if (ipa_mpm_ctx->md[probe_id].gsi_state < GSI_ALLOCATED) {
+		IPA_MPM_ERR("Cant start/stop data, GSI state = %d\n",
+			ipa_mpm_ctx->md[probe_id].gsi_state);
+		return -EFAULT;
+	}
+
+	/* MHIP Start Data path:
+	 * IPA MHIP Producer: remove HOLB
+	 * IPA MHIP Consumer : no op as there is no delay on these pipes.
+	 */
+	if (start) {
+		IPA_MPM_DBG("Enabling data path\n");
+		if (ul_chan != IPA_CLIENT_MAX) {
+			/* Remove HOLB on the producer pipe */
+			IPA_MPM_DBG("Removing HOLB on ep = %s\n",
+				__stringify(ul_chan));
+			ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
+
+			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+				IPAERR("failed to get idx");
+				return ipa_ep_idx;
+			}
+
+			res = ipa3_enable_data_path(ipa_ep_idx);
+			if (res)
+				IPA_MPM_ERR("Enable data path failed res=%d\n",
+					res);
+		}
+	} else {
+		IPA_MPM_DBG("Disabling data path\n");
+		if (ul_chan != IPA_CLIENT_MAX) {
+			/* Set HOLB on the producer pipe */
+			ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
+
+			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+				IPAERR("failed to get idx");
+				return ipa_ep_idx;
+			}
+
+			res = ipa3_disable_data_path(ipa_ep_idx);
+			if (res)
+				IPA_MPM_ERR("disable data path failed res=%d\n",
+					res);
+		}
+	}
+
+	return res;
+}
+
+/* ipa_mpm_mhi_probe_cb is invoked once per MHI' (MHI Prime) channel probe.
+ * Currently we have 4 MHI channels.
+ */
+static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
+	const struct mhi_device_id *mhi_id)
+{
+	struct ipa_mpm_channel *ch;
+	int ret;
+	enum ipa_client_type ul_prod, dl_cons;
+	int probe_id;
+	struct ipa_req_chan_out_params ul_out_params, dl_out_params;
+	void __iomem  *db_addr;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
+	u32 wp_addr;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("ipa_mpm_ctx is NULL not expected, returning..\n");
+		return -ENOMEM;
+	}
+
+	probe_id = get_idx_from_id(mhi_id);
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("chan=%s is not supported for now\n", mhi_id);
+		return -EPERM;
+	}
+
+	if (ipa_mpm_ctx->md[probe_id].init_complete) {
+		IPA_MPM_ERR("Probe initialization already done, returning\n");
+		return -EPERM;
+	}
+
+	IPA_MPM_DBG("Received probe for id=%d\n", probe_id);
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_2) {
+		/* NOTE :: DPL not supported yet , remove later */
+		IPA_MPM_DBG("DPL not supported yet - returning for DPL..\n");
+		return 0;
+	}
+
+	get_ipa3_client(probe_id, &ul_prod, &dl_cons);
+
+	/* Vote for IPA clock for the first time in the initialization seq.
+	 * IPA clock will be unvoted when MHI enters LPM.
+	 * PCIe clock will be voted / unvoted with every channel probe
+	 * we receive.
+	 * ul_prod = Host -> Device
+	 * dl_cons = Device -> Host
+	 */
+	ipa_mpm_ctx->md[probe_id].mhi_dev = mhi_dev;
+
+	ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
+	ipa_mpm_vote_unvote_ipa_clk(CLK_ON);
+	/* NOTE :: Duplicate IPA vote - just for BU, remove later */
+	ipa_mpm_vote_unvote_ipa_clk(CLK_ON);
+
+	IPA_MPM_DBG("ul chan = %d, dl_chan = %d\n", ul_prod, dl_cons);
+
+	/*
+	 * Set up MHI' pipes for the Device IPA, filling in the
+	 * Channel Context and Event Context.
+	 * These params will be sent to the Device side.
+	 * UL CHAN = HOST -> Device
+	 * DL CHAN = Device -> HOST
+	 * per channel a TRE and EV is allocated.
+	 * for a UL channel -
+	 * IPA HOST PROD TRE -> IPA DEVICE CONS EV
+	 * IPA HOST PROD EV ->  IPA DEVICE CONS TRE
+	 * for a DL channel -
+	 * IPA Device PROD TRE -> IPA HOST CONS EV
+	 * IPA Device PROD EV ->  IPA HOST CONS TRE
+	 */
+	if (probe_id != IPA_MPM_MHIP_CH_ID_2) {
+		if (ul_prod != IPA_CLIENT_MAX) {
+			/* store UL properties */
+			ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
+			/* Store Channel properties */
+			ch->chan_props.id = mhi_dev->ul_chan_id;
+			ch->chan_props.device_db =
+				ipa_mpm_ctx->dev_info.chdb_base +
+				ch->chan_props.id * 8;
+			/* Fill Channel Context to be sent to Device side */
+			ch->chan_props.ch_ctx.chtype =
+				IPA_MPM_MHI_HOST_UL_CHANNEL;
+			ch->chan_props.ch_ctx.erindex =
+				mhi_dev->ul_event_id;
+			ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
+				GSI_EVT_RING_RE_SIZE_16B;
+			/* Store Event properties */
+			ch->evt_props.ev_ctx.update_rp_modc = 0;
+			ch->evt_props.ev_ctx.update_rp_intmodt = 0;
+			ch->evt_props.ev_ctx.ertype = 1;
+			ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
+				GSI_EVT_RING_RE_SIZE_16B;
+			ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
+			ch->evt_props.device_db =
+				ipa_mpm_ctx->dev_info.erdb_base +
+				ch->chan_props.ch_ctx.erindex * 8;
+		}
+	}
+	if (dl_cons != IPA_CLIENT_MAX) {
+		/* store DL channel properties */
+		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
+		/* Store Channel properties */
+		ch->chan_props.id = mhi_dev->dl_chan_id;
+		ch->chan_props.device_db =
+			ipa_mpm_ctx->dev_info.chdb_base +
+			ch->chan_props.id * 8;
+		/* Fill Channel Context to be sent to Device side */
+		ch->chan_props.ch_ctx.chstate = 1;
+		ch->chan_props.ch_ctx.chtype =
+			IPA_MPM_MHI_HOST_DL_CHANNEL;
+		ch->chan_props.ch_ctx.erindex = mhi_dev->dl_event_id;
+		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
+			GSI_EVT_RING_RE_SIZE_16B;
+		/* Store Event properties */
+		ch->evt_props.ev_ctx.update_rp_modc = 0;
+		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
+		ch->evt_props.ev_ctx.ertype = 1;
+		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
+			GSI_EVT_RING_RE_SIZE_16B;
+		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
+		ch->evt_props.device_db =
+			ipa_mpm_ctx->dev_info.erdb_base +
+			ch->chan_props.ch_ctx.erindex * 8;
+	}
+	/* connect Host GSI pipes with MHI' protocol */
+	if (probe_id != IPA_MPM_MHIP_CH_ID_2)  {
+		ret = ipa_mpm_connect_mhip_gsi_pipe(ul_prod,
+			probe_id, &ul_out_params);
+		if (ret) {
+			IPA_MPM_ERR("failed connecting MPM client %d\n",
+					ul_prod);
+			goto fail_gsi_setup;
+		}
+	}
+	ret = ipa_mpm_connect_mhip_gsi_pipe(dl_cons, probe_id, &dl_out_params);
+	if (ret) {
+		IPA_MPM_ERR("connecting MPM client = %d failed\n",
+			dl_cons);
+		goto fail_gsi_setup;
+	}
+	if (probe_id != IPA_MPM_MHIP_CH_ID_2)  {
+		if (ul_prod != IPA_CLIENT_MAX) {
+			ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
+			ch->evt_props.ev_ctx.update_rp_addr =
+				ipa_mpm_smmu_map_doorbell(
+					MHIP_SMMU_DOMAIN_PCIE,
+					ul_out_params.db_reg_phs_addr_lsb);
+
+			if (ch->evt_props.ev_ctx.update_rp_addr == 0)
+				ipa_assert();
+
+			ret = __ipa_mpm_configure_mhi_device(
+					ch, probe_id, DMA_TO_HIPA);
+			if (ret) {
+				IPA_MPM_ERR("configure_mhi_dev fail %d\n",
+						ret);
+				goto fail_smmu;
+			}
+		}
+	}
+
+	if (dl_cons != IPA_CLIENT_MAX) {
+		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
+		ch->evt_props.ev_ctx.update_rp_addr =
+			ipa_mpm_smmu_map_doorbell(
+					MHIP_SMMU_DOMAIN_PCIE,
+					dl_out_params.db_reg_phs_addr_lsb);
+
+		if (ch->evt_props.ev_ctx.update_rp_addr == 0)
+			ipa_assert();
+
+		ret = __ipa_mpm_configure_mhi_device(ch, probe_id,
+					DMA_FROM_HIPA);
+		if (ret) {
+			IPA_MPM_ERR("mpm_config_mhi_dev failed %d\n", ret);
+			goto fail_smmu;
+		}
+	}
+
+	ret = mhi_prepare_for_transfer(ipa_mpm_ctx->md[probe_id].mhi_dev);
+	if (ret) {
+		IPA_MPM_ERR("mhi_prepare_for_transfer failed %d\n", ret);
+		goto fail_smmu;
+	}
+
+	/*
+	 * Ring initial channel db - Host Side UL and Device side DL channel.
+	 * To ring doorbell, write "WP" into doorbell register.
+	 * This WP should be set to 1 element less than ring max.
+	 */
+
+	/* Ring UL PRODUCER TRANSFER RING (HOST IPA -> DEVICE IPA) Doorbell */
+	if (ul_prod != IPA_CLIENT_MAX) {
+		IPA_MPM_DBG("Host UL TR PA DB = 0X%0x\n",
+			ul_out_params.db_reg_phs_addr_lsb);
+
+		db_addr = ioremap(
+			(phys_addr_t)(ul_out_params.db_reg_phs_addr_lsb), 4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);
+
+		iowrite32(wp_addr, db_addr);
+
+		IPA_MPM_DBG("Host UL TR  DB = 0X%0x, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+
+		iounmap(db_addr);
+		ipa_mpm_read_channel(ul_prod);
+	}
+
+	/* Ring UL PRODUCER EVENT RING (HOST IPA -> DEVICE IPA) Doorbell
+	 * Ring the event DB to a value outside the
+	 * ring range such that rp and wp never meet.
+	 */
+	if (ul_prod != IPA_CLIENT_MAX) {
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+			IPA_MPM_ERR("fail to alloc EP.\n");
+			goto fail_start_channel;
+		}
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+		IPA_MPM_DBG("for ep_idx %d , gsi_evt_ring_hdl = %d\n",
+			ipa_ep_idx, ep->gsi_evt_ring_hdl);
+		gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
+			&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+
+		IPA_MPM_DBG("Host UL ER PA DB = 0X%0x\n",
+			evt_ring_db_addr_low);
+
+		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.er_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+		IPA_MPM_DBG("Host UL ER  DB = 0X%0x, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+
+		iowrite32(wp_addr, db_addr);
+		iounmap(db_addr);
+	}
+
+	/* Ring DEVICE IPA DL CONSUMER Event Doorbell */
+	if (ul_prod != IPA_CLIENT_MAX) {
+		db_addr = ioremap((phys_addr_t)
+			(ipa_mpm_ctx->md[probe_id].ul_prod.evt_props.device_db),
+			4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+
+		iowrite32(wp_addr, db_addr);
+		iounmap(db_addr);
+	}
+
+	/* Ring DL PRODUCER (DEVICE IPA -> HOST IPA) Doorbell */
+	if (dl_cons != IPA_CLIENT_MAX) {
+		db_addr = ioremap((phys_addr_t)
+		(ipa_mpm_ctx->md[probe_id].dl_cons.chan_props.device_db),
+		4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);
+
+		IPA_MPM_DBG("Device DL TR  DB = 0X%0X, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+
+		iowrite32(wp_addr, db_addr);
+
+		iounmap(db_addr);
+	}
+
+	/*
+	 * Ring event ring DB on Device side.
+	 * ipa_mpm should ring the event DB to a value outside the
+	 * ring range such that rp and wp never meet.
+	 */
+	if (dl_cons != IPA_CLIENT_MAX) {
+		db_addr =
+		ioremap(
+		(phys_addr_t)
+		(ipa_mpm_ctx->md[probe_id].dl_cons.evt_props.device_db),
+		4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.er_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+
+		iowrite32(wp_addr, db_addr);
+		IPA_MPM_DBG("Device  UL ER  DB = 0X%0X,wp_addr = 0X%0x",
+			db_addr, wp_addr);
+		iounmap(db_addr);
+	}
+
+	/* Ring DL EVENT RING CONSUMER (DEVICE IPA CONSUMER) Doorbell */
+	if (dl_cons != IPA_CLIENT_MAX) {
+		ipa_ep_idx = ipa3_get_ep_mapping(dl_cons);
+		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+			IPA_MPM_ERR("fail to alloc EP.\n");
+			goto fail_start_channel;
+		}
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+		gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
+			&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+		IPA_MPM_DBG("Host DL ER PA DB = 0X%0x\n",
+				evt_ring_db_addr_low);
+		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+		iowrite32(wp_addr, db_addr);
+		IPA_MPM_DBG("Host  DL ER  DB = 0X%0X, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+		iounmap(db_addr);
+	}
+
+	/* Check if a TETH connection is already in progress;
+	 * if not, stop the UL channel.
+	 */
+	switch (ipa_mpm_ctx->md[probe_id].teth_state) {
+	case IPA_MPM_TETH_INIT:
+		/* No teth started yet, disable UL channel */
+		ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+						probe_id, STOP);
+
+		/* Disable data path */
+		if (ipa_mpm_start_stop_mhip_data_path(probe_id, STOP)) {
+			IPA_MPM_ERR("MHIP Enable data path failed\n");
+			goto fail_start_channel;
+		}
+		break;
+	case IPA_MPM_TETH_INPROGRESS:
+	case IPA_MPM_TETH_CONNECTED:
+		IPA_MPM_DBG("UL channel is already started, continue\n");
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
+
+		/* Enable data path */
+		if (ipa_mpm_start_stop_mhip_data_path(probe_id, START)) {
+			IPA_MPM_ERR("MHIP Enable data path failed\n");
+			goto fail_start_channel;
+		}
+
+		/* Lift the delay for rmnet USB prod pipe */
+		ipa3_set_reset_client_prod_pipe_delay(false,
+			IPA_CLIENT_USB_PROD);
+		break;
+	default:
+		IPA_MPM_DBG("No op for UL channel, in teth state = %d");
+		break;
+	}
+
+	IPA_MPM_FUNC_EXIT();
+	return 0;
+
+fail_gsi_setup:
+fail_start_channel:
+fail_smmu:
+	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled)
+		IPA_MPM_DBG("SMMU failed\n");
+	ipa_assert();
+	return ret;
+}
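+
+/*
+ * Doorbell arithmetic example (illustrative, assuming IPA_MPM_RING_LEN is
+ * 64 and 16B ring elements): the transfer-ring DB above is written with
+ * tr_pa + (64 - 1) * 16, one element short of the ring end, while the
+ * event-ring DB is written with er_pa + (64 + 1) * 16, just outside the
+ * ring, so the event rp and wp can never meet.
+ */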
+
+static void ipa_mpm_init_mhip_channel_info(void)
+{
+	/* IPA_MPM_MHIP_CH_ID_0 => MHIP TETH PIPES  */
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].dl_cons.ipa_client =
+		IPA_CLIENT_MHI_PRIME_TETH_PROD;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].dl_cons.ep_cfg =
+		mhip_dl_teth_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ipa_client =
+		IPA_CLIENT_MHI_PRIME_TETH_CONS;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ep_cfg =
+		mhip_ul_teth_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].mhip_client =
+		IPA_MPM_MHIP_TETH;
+
+	/* IPA_MPM_MHIP_CH_ID_1 => MHIP RMNET PIPES */
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ipa_client =
+		IPA_CLIENT_MHI_PRIME_RMNET_PROD;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ep_cfg =
+		mhip_dl_rmnet_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].ul_prod.ipa_client =
+		IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].ul_prod.ep_cfg =
+		mhip_ul_rmnet_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].mhip_client =
+		IPA_MPM_MHIP_USB_RMNET;
+
+	/* IPA_MPM_MHIP_CH_ID_2 => MHIP ADPL PIPE */
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].dl_cons.ipa_client =
+		IPA_CLIENT_MHI_PRIME_DPL_PROD;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].dl_cons.ep_cfg =
+		mhip_dl_dpl_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].ul_prod.ipa_client =
+		IPA_CLIENT_MAX;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].mhip_client =
+		IPA_MPM_MHIP_USB_DPL;
+}
+
+static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
+{
+	IPA_MPM_FUNC_ENTRY();
+	ipa_mpm_mhip_shutdown();
+	IPA_MPM_FUNC_EXIT();
+}
+
+static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
+				enum MHI_CB mhi_cb)
+{
+	int mhip_idx;
+	enum mhip_status_type status;
+
+	IPA_MPM_DBG("%d\n", mhi_cb);
+
+	for (mhip_idx = 0; mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
+		if (mhi_dev == ipa_mpm_ctx->md[mhip_idx].mhi_dev)
+			break;
+	}
+	if (mhip_idx >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_DBG("ignoring secondary callbacks\n");
+		return;
+	}
+	switch (mhi_cb) {
+	case MHI_CB_IDLE:
+		break;
+	case MHI_CB_LPM_ENTER:
+		status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_DL,
+							mhip_idx, STOP);
+		IPA_MPM_DBG("status = %d\n", status);
+		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF);
+		break;
+	case MHI_CB_LPM_EXIT:
+		status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_DL,
+							mhip_idx, START);
+		ipa_mpm_vote_unvote_ipa_clk(CLK_ON);
+		break;
+	case MHI_CB_EE_RDDM:
+	case MHI_CB_PENDING_DATA:
+	case MHI_CB_SYS_ERROR:
+	case MHI_CB_FATAL_ERROR:
+		IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
+		break;
+	}
+}
+
+int ipa_mpm_set_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe)
+{
+	int result = 0;
+	struct ipa_ep_cfg ep_cfg = { { 0 } };
+
+	IPA_MPM_FUNC_ENTRY();
+	IPA_MPM_DBG("DMA from %d to %d\n", src_pipe, dst_pipe);
+
+	/* Set USB PROD PIPE DMA to MHIP PROD PIPE */
+	ep_cfg.mode.mode = IPA_DMA;
+	ep_cfg.mode.dst = dst_pipe;
+	ep_cfg.seq.set_dynamic = true;
+
+	result = ipa_cfg_ep(ipa_get_ep_mapping(src_pipe), &ep_cfg);
+	IPA_MPM_FUNC_EXIT();
+
+	return result;
+}
+
+int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe)
+{
+	int result = 0;
+	struct ipa_ep_cfg ep_cfg = { { 0 } };
+
+	IPA_MPM_FUNC_ENTRY();
+	IPA_MPM_DBG("DMA from %d to %d\n", src_pipe, dst_pipe);
+
+	/* Reset USB PROD PIPE from DMA mode back to basic mode */
+	ep_cfg.mode.mode = IPA_BASIC;
+	ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
+	ep_cfg.seq.set_dynamic = true;
+
+	result = ipa_cfg_ep(ipa_get_ep_mapping(src_pipe), &ep_cfg);
+	IPA_MPM_FUNC_EXIT();
+
+	return result;
+}
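+
+/*
+ * Pairing sketch (illustrative, not a real call site):
+ * ipa_mpm_set_dma_mode() and ipa_mpm_reset_dma_mode() are intended to
+ * bracket an offload session on the same source pipe:
+ *
+ *	ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
+ *		IPA_CLIENT_MHI_PRIME_RMNET_CONS);	- DMA bridge on
+ *	...						- session runs
+ *	ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
+ *		IPA_CLIENT_MHI_PRIME_RMNET_CONS);	- back to IPA_BASIC
+ */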
+
+void ipa_mpm_mhip_map_prot(enum ipa_usb_teth_prot prot,
+	enum ipa_mpm_mhip_client_type *mhip_client)
+{
+	switch (prot) {
+	case IPA_USB_RNDIS:
+		*mhip_client = IPA_MPM_MHIP_TETH;
+		break;
+	case IPA_USB_RMNET:
+		*mhip_client = IPA_MPM_MHIP_USB_RMNET;
+		break;
+	case IPA_USB_DIAG:
+		*mhip_client = IPA_MPM_MHIP_USB_DPL;
+		break;
+	default:
+		*mhip_client = IPA_MPM_MHIP_NONE;
+		break;
+	}
+	IPA_MPM_DBG("Mapped xdci prot %d -> MHIP prot %d\n", prot,
+		*mhip_client);
+}
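+
+/*
+ * Usage sketch (illustrative): callers pass an xdci protocol and read the
+ * mapped MHIP client back, with unknown protocols falling back to
+ * IPA_MPM_MHIP_NONE:
+ *
+ *	enum ipa_mpm_mhip_client_type client;
+ *
+ *	ipa_mpm_mhip_map_prot(IPA_USB_RNDIS, &client);
+ *	client is now IPA_MPM_MHIP_TETH
+ */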
+
+int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int i;
+	enum ipa_mpm_mhip_client_type mhip_client;
+	enum mhip_status_type status;
+	int ret = 0;
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("MPM not platform probed yet, returning ..\n");
+		return 0;
+	}
+
+	ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
+			probe_id = i;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return 0;
+	}
+
+	IPA_MPM_DBG("Connect xdci prot %d -> mhip_client = %d probe_id = %d\n",
+		xdci_teth_prot, mhip_client, probe_id);
+
+	ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
+
+	switch (mhip_client) {
+	case IPA_MPM_MHIP_USB_RMNET:
+		ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
+			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
+		break;
+	case IPA_MPM_MHIP_TETH:
+	case IPA_MPM_MHIP_USB_DPL:
+		IPA_MPM_DBG("Teth connecting for prot %d\n", mhip_client);
+		return 0;
+	default:
+		IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
+		ret = 0;
+		break;
+	}
+
+	/* Start UL MHIP channel for offloading the tethering connection */
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
+
+	if (ret) {
+		IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
+		return ret;
+	}
+
+	status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+						probe_id, START);
+
+	switch (status) {
+	case MHIP_STATUS_SUCCESS:
+	case MHIP_STATUS_NO_OP:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
+		ipa_mpm_start_stop_mhip_data_path(probe_id, START);
+		/* Lift the delay for rmnet USB prod pipe */
+		ipa3_set_reset_client_prod_pipe_delay(false,
+			IPA_CLIENT_USB_PROD);
+		if (status == MHIP_STATUS_NO_OP) {
+			/* Channels have already been started,
+			 * so we can unvote the PCIe clock
+			 */
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		}
+		break;
+	case MHIP_STATUS_EP_NOT_READY:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INPROGRESS);
+		break;
+	case MHIP_STATUS_FAIL:
+	case MHIP_STATUS_BAD_STATE:
+	case MHIP_STATUS_EP_NOT_FOUND:
+		IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
+		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		ret = -EFAULT;
+		break;
+	default:
+		IPA_MPM_ERR("Err not found\n");
+		break;
+	}
+	return ret;
+}
+
+int ipa_mpm_mhip_ul_data_stop(enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int i;
+	enum ipa_mpm_mhip_client_type mhip_client;
+	int ret = 0;
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("MPM not platform probed, returning ..\n");
+		return 0;
+	}
+
+	ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
+			probe_id = i;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Invalid probe_id\n");
+		return 0;
+	}
+
+	IPA_MPM_DBG("Map xdci prot %d to mhip_client = %d probe_id = %d\n",
+		xdci_teth_prot, mhip_client, probe_id);
+
+	ret = ipa_mpm_start_stop_mhip_data_path(probe_id, STOP);
+
+	if (ret)
+		IPA_MPM_ERR("Error stopping UL path, err = %d\n", ret);
+
+	return ret;
+}
+
+int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int i;
+	enum ipa_mpm_mhip_client_type mhip_client;
+	enum mhip_status_type status;
+	int ret = 0;
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("MPM not platform probed, returning ..\n");
+		return 0;
+	}
+
+	ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
+			probe_id = i;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Invalid probe_id\n");
+		return 0;
+	}
+
+	IPA_MPM_DBG("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
+			xdci_teth_prot, mhip_client, probe_id);
+
+	switch (mhip_client) {
+	case IPA_MPM_MHIP_USB_RMNET:
+		ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
+			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
+		break;
+	case IPA_MPM_MHIP_TETH:
+	case IPA_MPM_MHIP_USB_DPL:
+		IPA_MPM_DBG("Teth Disconnecting for prot %d\n", mhip_client);
+		return 0;
+	default:
+		IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
+		return 0;
+	}
+
+	status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+		probe_id, STOP);
+
+	switch (status) {
+	case MHIP_STATUS_SUCCESS:
+	case MHIP_STATUS_NO_OP:
+	case MHIP_STATUS_EP_NOT_READY:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
+		ipa_mpm_start_stop_mhip_data_path(probe_id, STOP);
+		break;
+	case MHIP_STATUS_FAIL:
+	case MHIP_STATUS_BAD_STATE:
+	case MHIP_STATUS_EP_NOT_FOUND:
+		IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
+		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		ret = -EFAULT;
+		break;
+	default:
+		IPA_MPM_ERR("Err not found\n");
+		break;
+	}
+
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+
+	if (ret) {
+		IPA_MPM_ERR("Error cloking off PCIe clk, err = %d\n", ret);
+		return ret;
+	}
+
+	ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
+
+	return ret;
+}
+
+static int ipa_mpm_populate_smmu_info(struct platform_device *pdev)
+{
+	struct ipa_smmu_in_params smmu_in;
+	struct ipa_smmu_out_params smmu_out;
+	u32 carved_iova_ap_mapping[2];
+	struct ipa_smmu_cb_ctx *cb;
+	struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	int ret = 0;
+
+	if (ipa_mpm_ctx->carved_smmu_cb.valid) {
+		IPA_MPM_DBG("SMMU Context allocated, returning ..\n");
+		return ret;
+	}
+
+	cb = &ipa_mpm_ctx->carved_smmu_cb;
+
+	/* get IPA SMMU enabled status */
+	smmu_in.smmu_client = IPA_SMMU_AP_CLIENT;
+	if (ipa_get_smmu_params(&smmu_in, &smmu_out))
+		ipa_mpm_ctx->dev_info.ipa_smmu_enabled = false;
+	else
+		ipa_mpm_ctx->dev_info.ipa_smmu_enabled =
+		smmu_out.smmu_enable;
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iova-mapping",
+		carved_iova_ap_mapping, 2)) {
+		IPA_MPM_ERR("failed to read of_node %s\n",
+			"qcom,mpm-iova-mapping");
+		return -EINVAL;
+	}
+	ipa_mpm_ctx->dev_info.pcie_smmu_enabled = true;
+
+	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled !=
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) {
+		IPA_MPM_DBG("PCIE/IPA SMMU config mismatch\n");
+		return -EINVAL;
+	}
+
+	cb->va_start = carved_iova_ap_mapping[0];
+	cb->va_size = carved_iova_ap_mapping[1];
+	cb->va_end = cb->va_start + cb->va_size;
+
+	if (cb->va_start >= ap_cb->va_start && cb->va_start < ap_cb->va_end) {
+		IPA_MPM_ERR("MPM iommu and AP overlap addr 0x%lx\n",
+				cb->va_start);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	cb->dev = ipa_mpm_ctx->dev_info.dev;
+	cb->valid = true;
+	cb->next_addr = cb->va_start;
+
+	if (dma_set_mask_and_coherent(ipa_mpm_ctx->dev_info.dev,
+		DMA_BIT_MASK(64))) {
+		IPA_MPM_ERR("setting DMA mask to 64 failed.\n");
+		return -EINVAL;
+	}
+
+	return ret;
+}
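+
+/*
+ * Carveout example (illustrative DT values): with
+ * qcom,iova-mapping = <0x10000000 0x100000>, the carved context becomes
+ * va_start = 0x10000000, va_size = 0x100000, va_end = 0x10100000, and
+ * probing fails if that window overlaps the AP context bank's VA range.
+ */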
+
+static int ipa_mpm_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int i = 0;
+	int idx = 0;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (ipa_mpm_ctx) {
+		IPA_MPM_DBG("MPM is already probed, returning\n");
+		return 0;
+	}
+
+	ret = ipa_register_ipa_ready_cb(ipa_mpm_ipa3_ready_cb, (void *)pdev);
+	/*
+	 * If the callback was registered (ret == 0), IPA is not ready yet
+	 * and probe resumes from the ready callback. A return of -EEXIST
+	 * means IPA has already initialized, so continue probing here.
+	 */
+	if (!ret) {
+		IPA_MPM_DBG("IPA not ready yet, registering callback\n");
+		return ret;
+	}
+	IPA_MPM_DBG("IPA is ready, continue with probe\n");
+
+	ipa_mpm_ctx = kzalloc(sizeof(*ipa_mpm_ctx), GFP_KERNEL);
+
+	if (!ipa_mpm_ctx)
+		return -ENOMEM;
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++)
+		mutex_init(&ipa_mpm_ctx->md[i].mutex);
+	ipa_mpm_ctx->dev_info.pdev = pdev;
+	ipa_mpm_ctx->dev_info.dev = &pdev->dev;
+
+	ipa_mpm_init_mhip_channel_info();
+
+	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-chdb-base",
+		&ipa_mpm_ctx->dev_info.chdb_base)) {
+		IPA_MPM_ERR("failed to read qcom,mhi-chdb-base\n");
+		goto fail_probe;
+	}
+	IPA_MPM_DBG("chdb-base=0x%x\n", ipa_mpm_ctx->dev_info.chdb_base);
+
+	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-erdb-base",
+		&ipa_mpm_ctx->dev_info.erdb_base)) {
+		IPA_MPM_ERR("failed to read qcom,mhi-erdb-base\n");
+		goto fail_probe;
+	}
+	IPA_MPM_DBG("erdb-base=0x%x\n", ipa_mpm_ctx->dev_info.erdb_base);
+
+	ret = ipa_mpm_populate_smmu_info(pdev);
+
+	if (ret) {
+		IPA_MPM_DBG("SMMU Config failed\n");
+		goto fail_probe;
+	}
+
+	atomic_set(&ipa_mpm_ctx->ipa_clk_ref_cnt, 0);
+	atomic_set(&ipa_mpm_ctx->pcie_clk_ref_cnt, 0);
+
+	for (idx = 0; idx < IPA_MPM_MHIP_CH_ID_MAX; idx++)
+		ipa_mpm_ctx->md[idx].gsi_state = GSI_INIT;
+
+	ret = mhi_driver_register(&mhi_driver);
+	if (ret) {
+		IPA_MPM_ERR("mhi_driver_register failed %d\n", ret);
+		goto fail_probe;
+	}
+	IPA_MPM_FUNC_EXIT();
+	return 0;
+
+fail_probe:
+	kfree(ipa_mpm_ctx);
+	ipa_mpm_ctx = NULL;
+	return -EFAULT;
+}
+
+static int ipa_mpm_remove(struct platform_device *pdev)
+{
+	IPA_MPM_FUNC_ENTRY();
+
+	mhi_driver_unregister(&mhi_driver);
+	IPA_MPM_FUNC_EXIT();
+	return 0;
+}
+
+static const struct of_device_id ipa_mpm_dt_match[] = {
+	{ .compatible = "qcom,ipa-mpm" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ipa_mpm_dt_match);
+
+static struct platform_driver ipa_ipa_mpm_driver = {
+	.driver = {
+		.name = "ipa_mpm",
+		.of_match_table = ipa_mpm_dt_match,
+	},
+	.probe = ipa_mpm_probe,
+	.remove = ipa_mpm_remove,
+};
+
+/**
+ * ipa_mpm_init() - Registers ipa_mpm as a platform driver for an APQ target
+ *
+ * This function is called after bootup on APQ devices.
+ * ipa_mpm will register itself as a platform driver, and the probe
+ * function will get called.
+ *
+ * Return: result of platform_driver_register()
+ */
+static int __init ipa_mpm_init(void)
+{
+	IPA_MPM_DBG("register ipa_mpm platform device\n");
+	return platform_driver_register(&ipa_ipa_mpm_driver);
+}
+
+/**
+ * ipa3_is_mhip_offload_enabled() - check if the IPA MPM module initialized
+ * successfully. If it did, MHIP offload is enabled for tethering.
+ *
+ * Return value: 1 for yes; 0 for no
+ */
+int ipa3_is_mhip_offload_enabled(void)
+{
+	if (ipa_mpm_ctx == NULL)
+		return 0;
+	else
+		return 1;
+}
+
+late_initcall(ipa_mpm_init);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Proxy Manager Driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index f808f69..6d14b83 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -792,7 +792,7 @@
 {
 	struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp;
 	struct ipa_msg_desc req_desc, resp_desc;
-	int rc;
+	int rc, i;
 
 	IPAWANDBG("IPACM pass %u rules to Q6\n",
 		req->firewall_rules_list_len);
@@ -812,6 +812,37 @@
 	}
 	mutex_unlock(&ipa3_qmi_lock);
 
+	/* check if modem is up */
+	if (!ipa3_qmi_indication_fin ||
+		!ipa3_qmi_modem_init_fin ||
+		!ipa_q6_clnt) {
+		IPAWANDBG("modem QMI service is not up yet\n");
+		return -EINVAL;
+	}
+
+	/* Passing 0 rules means that firewall is disabled */
+	if (req->firewall_rules_list_len == 0)
+		IPAWANDBG("IPACM passed 0 rules to Q6\n");
+
+	if (req->firewall_rules_list_len >= QMI_IPA_MAX_UL_FIREWALL_RULES_V01) {
+		IPAWANERR(
+		"Number of rules passed by IPACM, %d, exceed limit %d\n",
+			req->firewall_rules_list_len,
+			QMI_IPA_MAX_UL_FIREWALL_RULES_V01);
+		return -EINVAL;
+	}
+
+	/* Check for valid IP type */
+	for (i = 0; i < req->firewall_rules_list_len; i++) {
+		if (req->firewall_rules_list[i].ip_type !=
+				QMI_IPA_IP_TYPE_V4_V01 &&
+			req->firewall_rules_list[i].ip_type !=
+				QMI_IPA_IP_TYPE_V6_V01) {
+			IPAWANERR("Invalid IP type %d\n",
+					req->firewall_rules_list[i].ip_type);
+			return -EINVAL;
+		}
+	}
+
 	req_desc.max_msg_len =
 		QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01;
 	req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01;
@@ -825,7 +856,6 @@
 	resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01;
 	resp_desc.ei_array =
 		ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei;
-
 	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
 		&req_desc, req,
 		&resp_desc, &resp,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
index 6043fce..14ca0566 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -1,6 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
  */
 
 #include <linux/ipa_qmi_service_v01.h>
@@ -1394,6 +1404,329 @@
 	},
 };
 
+static struct qmi_elem_info ipa_filter_rule_req2_type_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   rule_eq_bitmap),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   pure_ack_eq_present),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   pure_ack_eq),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   protocol_eq_present),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   protocol_eq),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   num_ihl_offset_range_16),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01,
+		.elem_size      = sizeof(
+			struct ipa_ipfltr_range_eq_16_type_v01),
+		.array_type       = STATIC_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_range_16),
+		.ei_array      = ipa3_ipfltr_range_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   num_offset_meq_32),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01,
+		.elem_size      = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.array_type       = STATIC_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   offset_meq_32),
+		.ei_array      = ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   tc_eq_present),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   tc_eq),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   flow_eq_present),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   flow_eq),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_eq_16_present),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct ipa_ipfltr_eq_16_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_eq_16),
+		.ei_array      = ipa3_ipfltr_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_eq_32_present),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct ipa_ipfltr_eq_32_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_eq_32),
+		.ei_array      = ipa3_ipfltr_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   num_ihl_offset_meq_32),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01,
+		.elem_size      = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.array_type       = STATIC_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_meq_32),
+		.ei_array      = ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   num_offset_meq_128),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01,
+		.elem_size      = sizeof(
+			struct ipa_ipfltr_mask_eq_128_type_v01),
+		.array_type       = STATIC_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   offset_meq_128),
+		.ei_array      = ipa3_ipfltr_mask_eq_128_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   metadata_meq32_present),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   metadata_meq32),
+		.ei_array      = ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ipv4_frag_eq_present),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
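+
+/*
+ * Encoding note (illustrative): each qmi_elem_info entry above describes
+ * one wire element of the aggregate; e.g. rule_eq_bitmap is encoded as a
+ * 2-byte unsigned scalar, while STATIC_ARRAY members such as
+ * ihl_offset_range_16 are emitted as fixed-length arrays whose per-element
+ * layout comes from the nested .ei_array table.
+ */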
+
+static struct qmi_elem_info ipa_filter_spec_ex2_type_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ip_type_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   ip_type),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct ipa_filter_rule_req2_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   filter_rule),
+		.ei_array      = ipa_filter_rule_req2_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_filter_action_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   filter_action),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   is_routing_table_index_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   route_table_index),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   is_mux_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   mux_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   rule_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   is_rule_hashable),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
 struct qmi_elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[] = {
 	{
 		.data_type	= QMI_OPT_FLAG,
@@ -1548,6 +1881,37 @@
 		.ei_array	= ipa_filter_spec_ex_type_data_v01_ei,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex2_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex2_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(struct ipa_filter_spec_ex2_type_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex2_list),
+		.ei_array      = ipa_filter_spec_ex2_type_v01_ei,
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.array_type	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
@@ -2993,6 +3357,37 @@
 			struct ipa_install_fltr_rule_req_ex_msg_v01,
 			xlat_filter_indices_list),
 	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			filter_spec_ex2_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			filter_spec_ex2_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(struct ipa_filter_spec_ex2_type_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			filter_spec_ex2_list),
+		.ei_array      = ipa_filter_spec_ex2_type_v01_ei,
+	},
 	{
 		.data_type	= QMI_EOTI,
 		.array_type	= NO_ARRAY,
@@ -4042,3 +4437,474 @@
 		.tlv_type = QMI_COMMON_TLV_TYPE,
 	},
 };
+
+static struct qmi_elem_info ipa_ep_id_type_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ic_type_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_ep_id_type_v01,
+					   ic_type),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ep_desc_type_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_ep_id_type_v01,
+					   ep_type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_ep_id_type_v01,
+					   ep_id),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ep_status_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_ep_id_type_v01,
+					   ep_status),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_endp_desc_indication_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_endp_desc_indication_msg_v01,
+			ep_info_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_endp_desc_indication_msg_v01,
+			ep_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_ENDP_DESC_NUM_MAX_V01,
+		.elem_size      = sizeof(struct ipa_ep_id_type_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_endp_desc_indication_msg_v01,
+			ep_info),
+		.ei_array      = ipa_ep_id_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_endp_desc_indication_msg_v01,
+			num_eps_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_endp_desc_indication_msg_v01,
+			num_eps),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa_mhi_prime_aggr_info_type_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ic_type_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_type_v01,
+			ic_type),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ep_desc_type_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_type_v01,
+			ep_type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_type_v01,
+			bytes_count),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_type_v01,
+			pkt_count),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_aggr_enum_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_type_v01,
+			aggr_type),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_prime_aggr_info_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_req_msg_v01,
+			aggr_info_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_req_msg_v01,
+			aggr_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_ENDP_DESC_NUM_MAX_V01,
+		.elem_size      = sizeof(
+			struct ipa_mhi_prime_aggr_info_type_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_req_msg_v01,
+			aggr_info),
+		.ei_array      = ipa_mhi_prime_aggr_info_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_req_msg_v01,
+			num_eps_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_req_msg_v01,
+			num_eps),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_prime_aggr_info_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_resp_msg_v01,
+			resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_add_offload_connection_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			xlat_filter_indices_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			xlat_filter_indices_list_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(u32),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			xlat_filter_indices_list),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			filter_spec_ex2_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			filter_spec_ex2_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(struct ipa_filter_spec_ex2_type_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			filter_spec_ex2_list),
+		.ei_array      = ipa_filter_spec_ex2_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_add_offload_connection_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_resp_msg_v01,
+			resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_resp_msg_v01,
+			filter_handle_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_resp_msg_v01,
+			filter_handle_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_resp_msg_v01,
+			filter_handle_list),
+		.ei_array      =
+			ipa3_filter_rule_identifier_to_handle_map_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_remove_offload_connection_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_req_msg_v01,
+			filter_handle_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_req_msg_v01,
+			filter_handle_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_req_msg_v01,
+			filter_handle_list),
+		.ei_array      =
+			ipa3_filter_rule_identifier_to_handle_map_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_remove_offload_connection_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_resp_msg_v01,
+			resp_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_resp_msg_v01,
+			resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
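
The descriptor tables added above follow the kernel QMI encode/decode convention: every optional TLV is a pair of entries sharing one .tlv_type (a QMI_OPT_FLAG for the *_valid member, then the payload), variable-length arrays add a QMI_DATA_LEN entry, nested structs point .ei_array at a child table, and QMI_EOTI terminates the array. A minimal sketch, using a hypothetical message type that is not part of this patch:

#include <linux/soc/qcom/qmi.h>

/* Hypothetical request with one optional u32 TLV (illustration only) */
struct example_req_v01 {
	u8 timeout_valid;		/* valid flag precedes the member */
	u32 timeout;
};

static struct qmi_elem_info example_req_v01_ei[] = {
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct example_req_v01,
					   timeout_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,	/* same TLV id as its valid flag */
		.offset		= offsetof(struct example_req_v01, timeout),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
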
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index a5391a9..82cf654 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -497,6 +497,38 @@
 	return 0;
 }
 
+static int ipa_create_ap_smmu_mapping_pa(phys_addr_t pa, size_t len,
+		bool device, unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE),
+			PAGE_SIZE);
+	int ret;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	if (len > PAGE_SIZE)
+		va = roundup(cb->next_addr, len);
+
+	ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+			true_len,
+			device ? (prot | IOMMU_MMIO) : prot);
+	if (ret) {
+		IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len);
+		return -EINVAL;
+	}
+
+	ipa3_ctx->wdi_map_cnt++;
+	cb->next_addr = va + true_len;
+	*iova = va + pa - rounddown(pa, PAGE_SIZE);
+	return 0;
+}
+
 static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
 		bool device, unsigned long *iova)
 {
@@ -526,6 +558,67 @@
 	return 0;
 }
 
+static int ipa_create_ap_smmu_mapping_sgt(struct sg_table *sgt,
+		unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	int ret, i;
+	struct scatterlist *sg;
+	unsigned long start_iova = va;
+	phys_addr_t phys;
+	size_t len = 0;
+	int count = 0;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return -EINVAL;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan-driver */
+		len += PAGE_ALIGN(sg->offset + sg->length);
+	}
+
+	if (len > PAGE_SIZE) {
+		va = roundup(cb->next_addr,
+				roundup_pow_of_two(len));
+		start_iova = va;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan-driver */
+		phys = sg->dma_address;
+		len = PAGE_ALIGN(sg->offset + sg->length);
+
+		ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
+		if (ret) {
+			IPAERR("iommu map failed for pa=%pa len=%zu\n",
+					&phys, len);
+			goto bad_mapping;
+		}
+		va += len;
+		ipa3_ctx->wdi_map_cnt++;
+		count++;
+	}
+	cb->next_addr = va;
+	*iova = start_iova;
+
+	return 0;
+
+bad_mapping:
+	for_each_sg(sgt->sgl, sg, count, i)
+		iommu_unmap(cb->mapping->domain, sg_dma_address(sg),
+				sg_dma_len(sg));
+	return -EINVAL;
+}
+
 static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
 		unsigned long *iova)
 {
@@ -576,6 +669,43 @@
 	return -EINVAL;
 }
 
+static void ipa_release_ap_smmu_mappings(enum ipa_client_type client)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	int i, j, start, end;
+
+	if (IPA_CLIENT_IS_CONS(client)) {
+		start = IPA_WDI_TX_RING_RES;
+		if (ipa3_ctx->ipa_wdi3_over_gsi)
+			end = IPA_WDI_TX_DB_RES;
+		else
+			end = IPA_WDI_CE_DB_RES;
+	} else {
+		start = IPA_WDI_RX_RING_RES;
+		if (ipa3_ctx->ipa_wdi2 ||
+			ipa3_ctx->ipa_wdi3_over_gsi)
+			end = IPA_WDI_RX_COMP_RING_WP_RES;
+		else
+			end = IPA_WDI_RX_RING_RP_RES;
+	}
+
+	for (i = start; i <= end; i++) {
+		if (wdi_res[i].valid) {
+			for (j = 0; j < wdi_res[i].nents; j++) {
+				iommu_unmap(cb->mapping->domain,
+					wdi_res[i].res[j].iova,
+					wdi_res[i].res[j].size);
+				ipa3_ctx->wdi_map_cnt--;
+			}
+			kfree(wdi_res[i].res);
+			wdi_res[i].valid = false;
+		}
+	}
+
+	if (ipa3_ctx->wdi_map_cnt == 0)
+		cb->next_addr = cb->va_end;
+}
+
 static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
 {
 	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
@@ -751,9 +881,11 @@
 
 	/* no SMMU on WLAN but SMMU on IPA */
 	if (!wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
-		if (ipa3_smmu_map_peer_buff(*iova, pa, len,
-						sgt, IPA_SMMU_CB_WLAN)) {
-			IPAERR("Fail to create mapping res %d\n", res_idx);
+		if (ipa_create_ap_smmu_mapping_pa(pa, len,
+				res_idx == IPA_WDI_CE_DB_RES,
+				iova)) {
+			IPAERR("Fail to create mapping res %d\n",
+					res_idx);
 			return -EFAULT;
 		}
 		ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
@@ -765,10 +897,12 @@
 		case IPA_WDI_RX_RING_RP_RES:
 		case IPA_WDI_RX_COMP_RING_WP_RES:
 		case IPA_WDI_CE_DB_RES:
-			if (ipa3_smmu_map_peer_buff(*iova, pa, len, sgt,
-							IPA_SMMU_CB_WLAN)) {
+		case IPA_WDI_TX_DB_RES:
+			if (ipa_create_ap_smmu_mapping_pa(pa, len,
+				(res_idx == IPA_WDI_CE_DB_RES) ? true : false,
+						iova)) {
 				IPAERR("Fail to create mapping res %d\n",
-					res_idx);
+						res_idx);
 				return -EFAULT;
 			}
 			ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
@@ -777,10 +911,9 @@
 		case IPA_WDI_RX_COMP_RING_RES:
 		case IPA_WDI_TX_RING_RES:
 		case IPA_WDI_CE_RING_RES:
-			if (ipa3_smmu_map_peer_reg(pa, true,
-							IPA_SMMU_CB_WLAN)) {
+			if (ipa_create_ap_smmu_mapping_sgt(sgt, iova)) {
 				IPAERR("Fail to create mapping res %d\n",
-					res_idx);
+						res_idx);
 				return -EFAULT;
 			}
 			ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova);
@@ -1304,7 +1437,7 @@
 ipa_cfg_ep_fail:
 	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
 gsi_timeout:
-	ipa_release_uc_smmu_mappings(in->sys.client);
+	ipa_release_ap_smmu_mappings(in->sys.client);
 	IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
 fail:
 	return result;
@@ -1864,7 +1997,7 @@
 				result);
 		goto fail_dealloc_channel;
 	}
-	ipa_release_uc_smmu_mappings(clnt_hdl);
+	ipa_release_ap_smmu_mappings(clnt_hdl);
 
 	/* for AP+STA stats update */
 	if (ipa3_ctx->uc_wdi_ctx.stats_notify)
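
ipa_create_ap_smmu_mapping_pa() above has to push physically unaligned buffers through the IOMMU, which only maps whole pages: it rounds the PA down to a page boundary, pads the length by the sub-page offset before rounding it up, and hands back an IOVA that re-adds the offset so the caller still addresses the original byte. A minimal sketch of just that arithmetic, with illustrative values:

/* Sketch only: the alignment math used by ipa_create_ap_smmu_mapping_pa() */
static unsigned long example_map_math(unsigned long va_base, phys_addr_t pa,
				      size_t len, size_t *mapped_len)
{
	phys_addr_t pa_page = rounddown(pa, PAGE_SIZE);

	/* e.g. pa = 0x86551234, len = 0x100: map one 4K page at 0x86551000 */
	*mapped_len = roundup(len + pa - pa_page, PAGE_SIZE);

	/* returned IOVA keeps the sub-page offset (0x234 in the example) */
	return va_base + pa - pa_page;
}
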
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index a3331e2..c53c26a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -167,8 +167,7 @@
 #define IPA_v4_2_DST_GROUP_MAX		(1)
 
 #define IPA_v4_5_MHI_GROUP_PCIE		(0)
-#define IPA_v4_5_GROUP_UL_DL_DST	(0)
-#define IPA_v4_5_GROUP_UL_DL_SRC	(1)
+#define IPA_v4_5_GROUP_UL_DL		(1)
 #define IPA_v4_5_MHI_GROUP_DDR		(1)
 #define IPA_v4_5_MHI_GROUP_DMA		(2)
 #define IPA_v4_5_MHI_GROUP_QDSS		(3)
@@ -245,6 +244,7 @@
 	IPA_4_0,
 	IPA_4_0_MHI,
 	IPA_4_1,
+	IPA_4_1_APQ,
 	IPA_4_2,
 	IPA_4_5,
 	IPA_4_5_MHI,
@@ -366,9 +366,9 @@
 		{5, 5}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
 	},
 	[IPA_4_5] = {
-		/* unused  UL_DL_SRC  unused  unused  UC_RX_Q N/A */
+		/* unused  UL_DL  unused  unused  UC_RX_Q N/A */
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
-		{0, 0}, {1, 63}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
+		{0, 0}, {1, 11}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
 		{0, 0}, {14, 14}, {0, 0}, {0, 0}, {3, 3}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
@@ -379,7 +379,7 @@
 		{0, 0}, {24, 24}, {0, 0}, {0, 0}, {8, 8}, {0, 0} },
 	},
 	[IPA_4_5_MHI] = {
-		/* PCIE  DDR  DMA  QDSS  unused  N/A  N/A */
+		/* PCIE  DDR  DMA  QDSS  unused  N/A */
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
 		{3, 8}, {4, 11}, {1, 1}, {1, 1}, {0, 0}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
@@ -392,9 +392,9 @@
 		{22, 22}, {16, 16}, {6, 6}, {2, 2}, {0, 0}, {0, 0} },
 	},
 	[IPA_4_5_APQ] = {
-		/* unused  UL_DL_SRC  unused  unused  UC_RX_Q N/A */
+		/* unused  UL_DL  unused  unused  UC_RX_Q N/A */
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
-		{0, 0}, {1, 63}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
+		{0, 0}, {1, 11}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
 		{0, 0}, {14, 14}, {0, 0}, {0, 0}, {3, 3}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
@@ -467,11 +467,11 @@
 		{1, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
 	},
 	[IPA_4_5] = {
-		/* UL/DL/DPL_DST unused unused unused uC N/A */
+		/* unused  UL/DL/DPL unused  unused  uC  N/A */
 		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
-		{16, 16}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
+		{0, 0}, {16, 16}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
-		{2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
+		{0, 0}, {2, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
 	},
 	[IPA_4_5_MHI] = {
 		/* PCIE/DPL  DDR  DMA/CV2X  QDSS  uC  N/A */
@@ -481,11 +481,11 @@
 		{2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
 	},
 	[IPA_4_5_APQ] = {
-		/* UL/DL/DPL_DST unused unused unused uC N/A */
+		/* unused  UL/DL/DPL unused  unused  uC  N/A */
 		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
-		{16, 16}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
+		{0, 0}, {16, 16}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
-		{2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
+		{0, 0}, {2, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
 	},
 };
 
@@ -1900,6 +1900,39 @@
 			QMB_MASTER_SELECT_DDR,
 			{ 31, 31, 8, 8, IPA_EE_AP } },
 
+	/* MHI PRIME PIPES - Client producer / IPA Consumer pipes */
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_DPL_PROD] = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_TETH_PROD] = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_RMNET_PROD] = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	/* MHI PRIME PIPES - Client Consumer / IPA Producer pipes */
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_TETH_CONS] = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 13, 9, 9, IPA_EE_AP } },
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_RMNET_CONS] = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 14, 9, 9, IPA_EE_AP } },
+
 	/* IPA_4_2 */
 	[IPA_4_2][IPA_CLIENT_WLAN1_PROD]          = {
 			true, IPA_v4_2_GROUP_UL_DL,
@@ -2078,177 +2111,177 @@
 
 	/* IPA_4_5 */
 	[IPA_4_5][IPA_CLIENT_WLAN2_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 9, 12, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+			{ 9, 12, 8, 16, IPA_EE_AP, GSI_FREE_PRE_FETCH, 2 } },
 	[IPA_4_5][IPA_CLIENT_USB_PROD]            = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_APPS_LAN_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 14, 10, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } },
 	[IPA_4_5][IPA_CLIENT_APPS_WAN_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 7, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } },
 	[IPA_4_5][IPA_CLIENT_APPS_CMD_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 9, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_ODU_PROD]            = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 13, 8, 19, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_ETHERNET_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 12, 0, 8, 16, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_Q6_WAN_PROD]         = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 8 } },
+			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
 	[IPA_4_5][IPA_CLIENT_Q6_CMD_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 6, 1, 20, 24, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_Q6_DL_NLO_DATA_PROD] = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } },
 	/* Only for test purpose */
 	[IPA_4_5][IPA_CLIENT_TEST_PROD]           = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST1_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST2_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 5, 8, 16, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST3_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 12, 8, 16, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST4_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 14, 8, 16, IPA_EE_AP } },
 
 	[IPA_4_5][IPA_CLIENT_WLAN2_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 24, 3, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_USB_CONS]            = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 26, 17, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_USB_DPL_CONS]        = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 15, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_ODL_DPL_CONS]        = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 22, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_APPS_LAN_CONS]       = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_APPS_WAN_COAL_CONS]       = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 13, 4, 8, 11, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_APPS_WAN_CONS]       = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 1, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_ODU_EMB_CONS]        = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 23, 8, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_ETHERNET_CONS]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 28, 1, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_Q6_LAN_CONS]         = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 17, 3, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_Q6_WAN_CONS]         = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 21, 7, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_Q6_UL_NLO_DATA_CONS] = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 19, 5, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
 	[IPA_4_5][IPA_CLIENT_Q6_UL_NLO_ACK_CONS]  = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 20, 6, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
 	[IPA_4_5][IPA_CLIENT_Q6_QBAP_STATUS_CONS] = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
@@ -2256,38 +2289,38 @@
 	/* Only for test purpose */
 	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_4_5][IPA_CLIENT_TEST_CONS]           = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 1, 9, 9, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST1_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 1, 9, 9, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST2_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 24, 3, 8, 14, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST3_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 26, 17, 9, 9, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST4_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 27, 18, 9, 9, IPA_EE_AP } },
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_5][IPA_CLIENT_DUMMY_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
@@ -2305,7 +2338,7 @@
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 8 } },
+			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
 	[IPA_4_5_MHI][IPA_CLIENT_Q6_CMD_PROD]		= {
 			true, IPA_v4_5_MHI_GROUP_PCIE,
 			false,
@@ -2344,7 +2377,7 @@
 			{ 10, 13, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	/* Only for test purpose */
 	[IPA_4_5_MHI][IPA_CLIENT_TEST_PROD]           = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_MHI_GROUP_DDR,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
@@ -2425,125 +2458,125 @@
 
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_5_MHI][IPA_CLIENT_DUMMY_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_MHI_GROUP_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 31, 31, 8, 8, IPA_EE_AP } },
 
 	/* IPA_4_5 APQ */
-	[IPA_4_5_APQ][IPA_CLIENT_WLAN1_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+	[IPA_4_5_APQ][IPA_CLIENT_WLAN2_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 9, 3, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+			{ 9, 3, 8, 16, IPA_EE_AP, GSI_FREE_PRE_FETCH, 2 } },
 	[IPA_4_5_APQ][IPA_CLIENT_WIGIG_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 1, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_USB_PROD]            = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5_APQ][IPA_CLIENT_APPS_LAN_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 4, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5_APQ][IPA_CLIENT_APPS_CMD_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 12, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	/* Only for test purpose */
 	[IPA_4_5_APQ][IPA_CLIENT_TEST_PROD]           = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST1_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST2_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 1, 8, 16, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST3_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 3, 8, 16, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST4_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 10, 8, 16, IPA_EE_AP } },
 
-	[IPA_4_5_APQ][IPA_CLIENT_WLAN1_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+	[IPA_4_5_APQ][IPA_CLIENT_WLAN2_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 23, 8, 8, 13, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+			{ 23, 8, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_WIGIG1_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 14, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_WIGIG2_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 20, 18, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_WIGIG3_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 22, 5, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_WIGIG4_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 29, 10, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_USB_CONS]            = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 24, 9, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5_APQ][IPA_CLIENT_USB_DPL_CONS]        = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 16, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5_APQ][IPA_CLIENT_APPS_LAN_CONS]       = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 13, 13, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5_APQ][IPA_CLIENT_ODL_DPL_CONS]       = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
@@ -2551,42 +2584,74 @@
 	/* Only for test purpose */
 	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_4_5_APQ][IPA_CLIENT_TEST_CONS]           = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 16, 5, 5, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST1_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 16, 5, 5, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST2_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 22, 5, 9, 9, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST3_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 24, 9, 9, 9, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST4_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 23, 8, 8, 13, IPA_EE_AP } },
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_5_APQ][IPA_CLIENT_DUMMY_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 31, 31, 8, 8, IPA_EE_AP } },
+	/* MHI PRIME PIPES - Client producer / IPA Consumer pipes */
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_DPL_PROD] = {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{3, 2, 8, 16, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_TETH_PROD] = {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 7, 8, 16, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_RMNET_PROD] = {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 11, 16, 32, IPA_EE_AP } },
+	/* MHI PRIME PIPES - Client Consumer / IPA Producer pipes */
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_TETH_CONS] = {
+			true, IPA_v4_5_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 28, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_RMNET_CONS] = {
+			true, IPA_v4_5_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 17, 9, 9, IPA_EE_AP } },
 };
 
 static struct ipa3_mem_partition ipa_4_1_mem_part = {
@@ -6627,7 +6692,7 @@
 		if (src) {
 			switch (group_index) {
 			case IPA_v4_5_MHI_GROUP_PCIE:
-			case IPA_v4_5_GROUP_UL_DL_SRC:
+			case IPA_v4_5_GROUP_UL_DL:
 				ipahal_write_reg_n_fields(
 					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
 					n, val);
@@ -6651,8 +6716,8 @@
 			}
 		} else {
 			switch (group_index) {
-			case IPA_v4_5_GROUP_UL_DL_DST:
-			case IPA_v4_5_MHI_GROUP_DDR:
+			case IPA_v4_5_MHI_GROUP_PCIE:
+			case IPA_v4_5_GROUP_UL_DL:
 				ipahal_write_reg_n_fields(
 					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
 					n, val);
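
The per-client rows rewritten in this file pack a whole endpoint configuration into one initializer; read positionally, a row such as { 1, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } is the GSI endpoint info that follows the valid/group/filter/sequencer/QMB fields. A hypothetical mirror of that layout, based on this driver's struct ipa_ep_configuration and offered as an assumption for illustration:

/* Assumed row layout (illustration; see struct ipa_ep_configuration) */
struct ep_row_example {
	bool valid;		/* pipe exists on this HW version */
	int group_num;		/* resource group, e.g. IPA_v4_5_GROUP_UL_DL */
	bool support_flt;	/* endpoint supports filtering */
	int sequencer_type;	/* HPS/DPS sequencer; INVALID for consumers */
	int qmb_master_sel;	/* QMB master port (DDR vs PCIE) */
	struct {		/* the trailing brace-enclosed tuple */
		int ep_num;		/* IPA endpoint (pipe) number */
		int gsi_ch;		/* GSI channel number */
		int if_tlv;		/* TLV FIFO size */
		int if_aos;		/* AOS FIFO size */
		int ee;			/* owning execution environment */
		int prefetch_mode;	/* e.g. GSI_SMART_PRE_FETCH */
		int prefetch_threshold;
	} gsi_ep_info;
};
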
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
index b9b8519..1e98d20 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
@@ -74,6 +74,7 @@
 	const struct ipa_gsi_ep_config *gsi_ep_info;
 	int result, len;
 	unsigned long va;
+	u32 addr_low, addr_high;
 
 	if (!info || !info_smmu || !ep) {
 		IPAERR("invalid input\n");
@@ -154,6 +155,10 @@
 	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
 	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
 	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_channel_props.prefetch_mode =
+		gsi_ep_info->prefetch_mode;
+	gsi_channel_props.empty_lvl_threshold =
+		gsi_ep_info->prefetch_threshold;
 	gsi_channel_props.low_weight = 1;
 	gsi_channel_props.err_cb = ipa3_wdi3_gsi_chan_err_cb;
 
@@ -207,17 +212,105 @@
 		IPAERR("failed to write evt ring scratch\n");
 		goto fail_write_scratch;
 	}
-	/* write event ring db address */
+
+	if (!is_smmu_enabled) {
+		IPADBG("smmu disabled\n");
+		if (info->is_evt_rn_db_pcie_addr)
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info->event_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info->event_ring_doorbell_pa >> 32));
+	} else {
+		IPADBG("smmu enabled\n");
+		if (info_smmu->is_evt_rn_db_pcie_addr)
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info_smmu->event_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info_smmu->event_ring_doorbell_pa >> 32));
+	}
+
+	if (!is_smmu_enabled) {
+		addr_low = (u32)info->event_ring_doorbell_pa;
+		addr_high = (u32)((u64)info->event_ring_doorbell_pa >> 32);
+	} else {
+		if (dir == IPA_WDI3_TX_DIR) {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_DB_RES,
+				true, info_smmu->event_ring_doorbell_pa,
+				NULL, 4, true, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_write_scratch;
+			}
+		} else {
+			if (ipa_create_gsi_smmu_mapping(
+				IPA_WDI_RX_COMP_RING_WP_RES,
+				true, info_smmu->event_ring_doorbell_pa,
+				NULL, 4, true, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_write_scratch;
+			}
+		}
+		addr_low = (u32)va;
+		addr_high = (u32)((u64)va >> 32);
+	}
+
+	/*
+	 * Arch specific:
+	 * PCIe addresses that are not behind the SMMU use the PA directly.
+	 * PCIe and DDR go through two different ports, so assert bit 40
+	 * to indicate a PCIe address.
+	 * WDI-3.0, MSM --> PCIe via SMMU
+	 * WDI-3.0, MDM --> PCIe not via SMMU + dual port;
+	 * assert bit 40 in that case
+	 */
+	if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+		is_smmu_enabled) {
+		/*
+		 * Irrespective of SMMU being enabled, don't use the IOVA
+		 * address, since PCIe is not behind the SMMU on MDMs.
+		 */
+		if (info_smmu->is_evt_rn_db_pcie_addr) {
+			addr_low = (u32)info_smmu->event_ring_doorbell_pa;
+			addr_high =
+				(u32)((u64)info_smmu->event_ring_doorbell_pa
+				>> 32);
+		}
+	}
+
+	/*
+	 * GSI recommendation from the WDI-3.0 interface document:
+	 * set bit 40 for (MDM targets && PCIe addr).
+	 */
+	if (!is_smmu_enabled) {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info->is_evt_rn_db_pcie_addr)
+			addr_high |= (1 << 8);
+	} else {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info_smmu->is_evt_rn_db_pcie_addr)
+			addr_high |= (1 << 8);
+	}
+
 	gsi_wdi3_write_evt_ring_db(ep->gsi_evt_ring_hdl,
-		(u32)info->event_ring_doorbell_pa,
-		(u32)((u64)info->event_ring_doorbell_pa >> 32));
+			addr_low,
+			addr_high);
 
 	/* write channel scratch */
 	memset(&ch_scratch, 0, sizeof(ch_scratch));
 	ch_scratch.wdi3.update_rp_moderation_threshold =
 		UPDATE_RP_MODERATION_THRESHOLD;
 	if (dir == IPA_WDI3_RX_DIR) {
-		ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
+		if (!is_smmu_enabled)
+			ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
+		else
+			ch_scratch.wdi3.rx_pkt_offset = info_smmu->pkt_offset;
 		/* this metadata reg offset need to be in words */
 		ch_scratch.wdi3.endp_metadata_reg_offset =
 			ipahal_get_reg_mn_ofst(IPA_ENDP_INIT_HDR_METADATA_n, 0,
@@ -225,6 +318,28 @@
 	}
 
 	if (!is_smmu_enabled) {
+		IPADBG_LOW("smmu disabled\n");
+		if (info->is_txr_rn_db_pcie_addr)
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info->transfer_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info->transfer_ring_doorbell_pa >> 32));
+	} else {
+		IPADBG_LOW("smmu enabled\n");
+		if (info_smmu->is_txr_rn_db_pcie_addr)
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info_smmu->transfer_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info_smmu->transfer_ring_doorbell_pa >> 32));
+	}
+
+	if (!is_smmu_enabled) {
 		ch_scratch.wdi3.wifi_rp_address_low =
 			(u32)info->transfer_ring_doorbell_pa;
 		ch_scratch.wdi3.wifi_rp_address_high =
@@ -258,6 +373,49 @@
 				(u32)((u64)va >> 32);
 		}
 	}
+
+	/*
+	 * Arch specific:
+	 * PCIe addresses that are not behind the SMMU use the PA directly.
+	 * PCIe and DDR go through two different ports, so assert bit 40
+	 * to indicate a PCIe address.
+	 * WDI-3.0, MSM --> PCIe via SMMU
+	 * WDI-3.0, MDM --> PCIe not via SMMU + dual port;
+	 * assert bit 40 in that case
+	 */
+	if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+		is_smmu_enabled) {
+		/*
+		 * Irrespective of SMMU being enabled, don't use the IOVA
+		 * address, since PCIe is not behind the SMMU on MDMs.
+		 */
+		if (info_smmu->is_txr_rn_db_pcie_addr) {
+			ch_scratch.wdi3.wifi_rp_address_low =
+				(u32)info_smmu->transfer_ring_doorbell_pa;
+			ch_scratch.wdi3.wifi_rp_address_high =
+				(u32)((u64)info_smmu->transfer_ring_doorbell_pa
+				>> 32);
+		}
+	}
+
+	/*
+	 * GSI recommendation from the WDI-3.0 interface document:
+	 * set bit 40 for (MDM targets && PCIe addr).
+	 */
+	if (!is_smmu_enabled) {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info->is_txr_rn_db_pcie_addr)
+			ch_scratch.wdi3.wifi_rp_address_high =
+			(u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
+			(1 << 8));
+	} else {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info_smmu->is_txr_rn_db_pcie_addr)
+			ch_scratch.wdi3.wifi_rp_address_high =
+			(u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
+			(1 << 8));
+	}
+
 	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("failed to write channel scratch\n");
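
Both doorbell-address paths above repeat one pattern: choose the raw PA or the SMMU IOVA depending on whether the address is behind the SMMU, then, on MDM targets with a PCIe doorbell, assert address bit 40, which lands in bit 8 of the high 32-bit word. A sketch with that logic folded into a single helper (names hypothetical, not part of this patch):

/* Sketch: PA/IOVA selection plus the bit-40 PCIe tag described above */
static void example_fixup_db_addr(bool mdm_target, bool pcie_db, u64 addr,
				  u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(addr);
	*hi = upper_32_bits(addr);

	/* WDI-3.0 note: tag PCIe doorbells on MDM with address bit 40 */
	if (mdm_target && pcie_db)
		*hi |= BIT(8);	/* bit 8 of the MSBs == bit 40 overall */
}
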
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
index b8bc0b6..2f02db7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
@@ -31,7 +31,7 @@
 static int ipa3_wigig_uc_loaded_handler(struct notifier_block *self,
 	unsigned long val, void *data)
 {
-	IPADBG("val %d\n", val);
+	IPADBG("val %ld\n", val);
 
 	if (!ipa3_ctx) {
 		IPAERR("IPA ctx is null\n");
@@ -469,6 +469,7 @@
 	channel_props.use_db_eng = GSI_CHAN_DB_MODE;
 	channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
 	channel_props.prefetch_mode = ep_gsi->prefetch_mode;
+	channel_props.empty_lvl_threshold = ep_gsi->prefetch_threshold;
 	channel_props.low_weight = 1;
 	channel_props.err_cb = ipa_gsi_chan_err_cb;
 
@@ -828,7 +829,7 @@
 		if (
 		IPA_WIGIG_MSB(input->dbuff.data_buffer_base_pa) & 0xFFFFFF00) {
 			IPAERR(
-				"data_buffers_base_address_msb is over the 8 bit limit (0xpa)\n"
+				"data_buffers_base_address_msb is over the 8 bit limit (%pa)\n"
 				, &input->dbuff.data_buffer_base_pa);
 			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 			return -EFAULT;
@@ -970,7 +971,7 @@
 			!= IPA_WIGIG_8_MSB(
 				input_smmu->pipe_smmu.status_ring_HWTAIL_pa)) {
 			IPAERR(
-				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%X tail 0x%X\n"
+				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
 			, input_smmu->pipe_smmu.status_ring_HWHEAD_pa,
 			input_smmu->pipe_smmu.status_ring_HWTAIL_pa);
 			return -EFAULT;
@@ -1009,7 +1010,7 @@
 			!= IPA_WIGIG_8_MSB(
 				input->pipe.status_ring_HWTAIL_pa)) {
 			IPAERR(
-				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%X tail 0x%X\n"
+				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
 				, input->pipe.status_ring_HWHEAD_pa,
 				input->pipe.status_ring_HWTAIL_pa);
 			return -EFAULT;
@@ -1471,7 +1472,7 @@
 			ep->gsi_mem_info.chan_ring_len -
 			IPA_WIGIG_DESC_RING_EL_SIZE;
 
-		IPADBG("ring ch doorbell (0x%llX) TX %d\n", val,
+		IPADBG("ring ch doorbell (0x%llX) TX %ld\n", val,
 			ep->gsi_chan_hdl);
 		res = gsi_ring_ch_ring_db(ep->gsi_chan_hdl, val);
 		if (res) {
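
The ipa_wigig_i.c hunks are printk format-string fixes: unsigned long wants %ld/%lu rather than %d, 64-bit values need the ll length modifier, and phys_addr_t should go through %pa, which takes a pointer to the value and already emits a 0x prefix. Illustrative usage with hypothetical values:

unsigned long hdl = 5;
u64 head = 0x123456789ULL;
phys_addr_t pa = 0x86551000;

IPADBG("chan hdl %ld\n", hdl);	/* %d mismatches long on 64-bit builds */
IPADBG("head 0x%llX\n", head);	/* u64 needs the ll modifier */
IPADBG("pa %pa\n", &pa);	/* %pa dereferences a phys_addr_t pointer */
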
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 525aebf..3f38a3a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -883,13 +883,15 @@
 	status->flt_hash = hw_status->flt_hash;
 	status->flt_global = hw_status->flt_hash;
 	status->flt_ret_hdr = hw_status->flt_ret_hdr;
-	status->flt_miss = ~(hw_status->flt_rule_id) ? false : true;
+	status->flt_miss = (hw_status->flt_rule_id ==
+		IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);
 	status->flt_rule_id = hw_status->flt_rule_id;
 	status->rt_local = hw_status->rt_local;
 	status->rt_hash = hw_status->rt_hash;
 	status->ucp = hw_status->ucp;
 	status->rt_tbl_idx = hw_status->rt_tbl_idx;
-	status->rt_miss = ~(hw_status->rt_rule_id) ? false : true;
+	status->rt_miss = (hw_status->rt_rule_id ==
+		IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);
 	status->rt_rule_id = hw_status->rt_rule_id;
 	status->nat_hit = hw_status->nat_hit;
 	status->nat_entry_idx = hw_status->nat_entry_idx;
@@ -1180,6 +1182,13 @@
 	memcpy(base + offset, hdr, hdr_len);
 }
 
+/* Split a 64-bit header address into 32-bit LSB/MSB proc-ctx fields. */
+#define IPAHAL_CP_PROC_CTX_HEADER_UPDATE(hdr_lsb, hdr_msb, addr) \
+	do { \
+		hdr_lsb = lower_32_bits(addr); \
+		hdr_msb = upper_32_bits(addr); \
+	} while (0)
+
 /*
  * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to
  * base address and offset given.
@@ -1193,26 +1202,31 @@
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
+ * @is_64: Indicates whether header base address/dma base address is 64 bit.
  */
 static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset,
 		u32 hdr_len, bool is_hdr_proc_ctx,
-		dma_addr_t phys_base, u32 hdr_base_addr,
+		dma_addr_t phys_base, u64 hdr_base_addr,
 		struct ipa_hdr_offset_entry *offset_entry,
-		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params)
+		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64)
 {
+	u64 hdr_addr;
+
 	if (type == IPA_HDR_PROC_NONE) {
 		struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
 
 		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *)
 			(base + offset);
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
-		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
 		ctx->hdr_add.tlv.value = hdr_len;
-		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
 			hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx\n",
-			ctx->hdr_add.hdr_addr);
+			hdr_addr);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
 		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
 		ctx->end.length = 0;
 		ctx->end.value = 0;
@@ -1222,12 +1236,14 @@
 		ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *)
 			(base + offset);
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
-		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
 		ctx->hdr_add.tlv.value = hdr_len;
-		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
 			hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx\n",
-			ctx->hdr_add.hdr_addr);
+			hdr_addr);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
 		ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
 		ctx->l2tp_params.tlv.length = 1;
 		ctx->l2tp_params.tlv.value =
@@ -1249,12 +1265,14 @@
 		ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *)
 			(base + offset);
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
-		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
 		ctx->hdr_add.tlv.value = hdr_len;
-		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
 			hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx length %d\n",
-			ctx->hdr_add.hdr_addr, ctx->hdr_add.tlv.value);
+			hdr_addr, ctx->hdr_add.tlv.value);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
 		ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
 		ctx->l2tp_params.tlv.length = 1;
 		ctx->l2tp_params.tlv.value =
@@ -1285,12 +1303,14 @@
 		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *)
 			(base + offset);
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
-		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
 		ctx->hdr_add.tlv.value = hdr_len;
-		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
 			hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx\n",
-			ctx->hdr_add.hdr_addr);
+			hdr_addr);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
 		ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
 		ctx->cmd.length = 0;
 		switch (type) {
@@ -1346,9 +1366,10 @@
 	int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type,
 			void *const base, u32 offset, u32 hdr_len,
 			bool is_hdr_proc_ctx, dma_addr_t phys_base,
-			u32 hdr_base_addr,
+			u64 hdr_base_addr,
 			struct ipa_hdr_offset_entry *offset_entry,
-			struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
+			struct ipa_l2tp_hdr_proc_ctx_params l2tp_params,
+			bool is_64);
 
 	int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
 };
@@ -1414,17 +1435,18 @@
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
+ * @is_64: Indicates whether the header/DMA base address is 64-bit.
  */
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset, u32 hdr_len,
 		bool is_hdr_proc_ctx, dma_addr_t phys_base,
-		u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
-		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params)
+		u64 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
+		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64)
 {
 	IPAHAL_DBG(
-		"type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %pK\n"
+		"type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK, is_64 %d\n"
 			, type, base, offset, hdr_len, is_hdr_proc_ctx,
-			hdr_base_addr, offset_entry);
+			hdr_base_addr, offset_entry, is_64);
 
 	if (!base ||
 		!hdr_len ||
@@ -1432,7 +1454,7 @@
 		(!is_hdr_proc_ctx && !offset_entry) ||
 		(!is_hdr_proc_ctx && !hdr_base_addr)) {
 		IPAHAL_ERR(
-			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
+			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%llu is_hdr_proc_ctx:%d offset_entry:%pK\n"
 			, hdr_len, &phys_base, hdr_base_addr
 			, is_hdr_proc_ctx, offset_entry);
 		return -EINVAL;
@@ -1440,7 +1462,7 @@
 
 	return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
 			hdr_len, is_hdr_proc_ctx, phys_base,
-			hdr_base_addr, offset_entry, l2tp_params);
+			hdr_base_addr, offset_entry, l2tp_params, is_64);
 }
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 8a710da..942fa52 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPAHAL_H_
@@ -630,13 +630,14 @@
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
+ * @is_64: Indicates whether the header/DMA base address is 64-bit.
  */
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *base, u32 offset, u32 hdr_len,
 		bool is_hdr_proc_ctx, dma_addr_t phys_base,
-		u32 hdr_base_addr,
+		u64 hdr_base_addr,
 		struct ipa_hdr_offset_entry *offset_entry,
-		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
+		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64);
 
 /*
  * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 0b2b24f..717b54d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ipa.h>
@@ -1853,7 +1853,8 @@
 	if (attrib->fl_eq_present)
 		rest = ipa_write_32(attrib->fl_eq & 0xFFFFF, rest);
 
-	extra = ipa_pad_to_64(extra);
+	if (extra)
+		extra = ipa_pad_to_64(extra);
 	rest = ipa_pad_to_64(rest);
 	*buf = rest;
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 0688eef..6811244 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPAHAL_I_H_
@@ -64,6 +64,8 @@
 
 #define IPAHAL_IPC_LOG_PAGES 50
 
+#define IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID 0x3ff
+
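The new 0x3ff sentinel is the all-ones value of a 10-bit rule-id field; from its use in the packet-status hunk at the top of this patch, the HW appears to report it when no filter/route rule matched. A sketch of the consuming side (hypothetical, field names per that hunk):

	/* Flag a rule miss when HW reports the all-ones rule id. */
	status->flt_miss = (hw_status->flt_rule_id ==
			    IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);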
 /*
  * struct ipahal_context - HAL global context data
  * @hw_type: IPA H/W type/version.
@@ -646,7 +648,8 @@
  */
 struct ipa_hw_hdr_proc_ctx_hdr_add {
 	struct ipa_hw_hdr_proc_ctx_tlv tlv;
-	u64 hdr_addr;
+	u32 hdr_addr;
+	u32 hdr_addr_hi;
 };
 
 /**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c
index 5d33e0d..0ad0fd5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/debugfs.h>
@@ -10,7 +10,6 @@
 
 #define IPA_64_LOW_32_MASK (0xFFFFFFFF)
 #define IPA_64_HIGH_32_MASK (0xFFFFFFFF00000000ULL)
-#define IPAHAL_NAT_INVALID_PROTOCOL (0xFF)
 
 static const char *ipahal_nat_type_to_str[IPA_NAT_MAX] = {
 	__stringify(IPAHAL_NAT_IPV4),
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 4c692a6..b9043d5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -157,6 +157,7 @@
 	u32 outstanding_high_ctl;
 	u32 outstanding_low;
 	struct rmnet_ipa_debugfs dbgfs;
+	bool dl_csum_offload_enabled;
 };
 
 static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -190,21 +191,22 @@
 
 	strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
 				IPA_RESOURCE_NAME_MAX);
-	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		rmnet_ipa3_ctx->dl_csum_offload_enabled) {
 		hdr_entry->hdr_len = IPA_DL_CHECKSUM_LENGTH; /* 8 bytes */
 		/* new DL QMAP header format */
-		hdr->hdr[0].hdr[0] = 0x40;
-		hdr->hdr[0].hdr[1] = 0;
-		hdr->hdr[0].hdr[2] = 0;
-		hdr->hdr[0].hdr[3] = 0;
-		hdr->hdr[0].hdr[4] = 0x4;
+		hdr_entry->hdr[0] = 0x40;
+		hdr_entry->hdr[1] = 0;
+		hdr_entry->hdr[2] = 0;
+		hdr_entry->hdr[3] = 0;
+		hdr_entry->hdr[4] = 0x4;
 		/*
 		 * Need to set csum required/valid bit on which will be replaced
 		 * by HW if checksum is incorrect after validation
 		 */
-		hdr->hdr[0].hdr[5] = 0x80;
-		hdr->hdr[0].hdr[6] = 0;
-		hdr->hdr[0].hdr[7] = 0;
+		hdr_entry->hdr[5] = 0x80;
+		hdr_entry->hdr[6] = 0;
+		hdr_entry->hdr[7] = 0;
 	} else
 		hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
 
@@ -326,8 +328,27 @@
 	 strlcpy(hdr_entry->name, hdr_name,
 				IPA_RESOURCE_NAME_MAX);
 
-	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
-	hdr_entry->hdr[1] = (uint8_t) mux_id;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		rmnet_ipa3_ctx->dl_csum_offload_enabled) {
+		hdr_entry->hdr_len = IPA_DL_CHECKSUM_LENGTH; /* 8 bytes */
+		/* new DL QMAP header format */
+		hdr_entry->hdr[0] = 0x40;
+		hdr_entry->hdr[1] = (uint8_t) mux_id;
+		hdr_entry->hdr[2] = 0;
+		hdr_entry->hdr[3] = 0;
+		hdr_entry->hdr[4] = 0x4;
+		/*
+		 * Set the csum required/valid bit, which HW will overwrite
+		 * if the checksum is found incorrect after validation
+		 */
+		hdr_entry->hdr[5] = 0x80;
+		hdr_entry->hdr[6] = 0;
+		hdr_entry->hdr[7] = 0;
+	} else {
+		hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+		hdr_entry->hdr[1] = (uint8_t) mux_id;
+	}
+
 	IPAWANDBG("header (%s) with mux-id: (%d)\n",
 		hdr_name,
 		hdr_entry->hdr[1]);
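For reference, both this hunk and the A7 default-header hunk above now assemble the same 8-byte DL QMAP header when checksum offload is enabled; a consolidated sketch, with byte meanings inferred only from the surrounding code and comments:

	/* Sketch: the 8-byte DL QMAP header assembled above. */
	static void example_fill_dl_qmap_hdr(u8 hdr[8], u8 mux_id)
	{
		hdr[0] = 0x40;		/* new DL QMAP format marker */
		hdr[1] = mux_id;	/* 0 for the default A7 header */
		hdr[2] = 0;
		hdr[3] = 0;
		hdr[4] = 0x4;		/* checksum extension content */
		hdr[5] = 0x80;		/* csum required/valid; HW overwrites it */
		hdr[6] = 0;
		hdr[7] = 0;
	}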
@@ -446,12 +467,19 @@
 
 static void ipa3_copy_qmi_flt_rule_ex(
 	struct ipa_ioc_ext_intf_prop *q6_ul_flt_rule_ptr,
-	struct ipa_filter_spec_ex_type_v01 *flt_spec_ptr)
+	void *flt_spec_ptr_void)
 {
 	int j;
+	struct ipa_filter_spec_ex_type_v01 *flt_spec_ptr;
 	struct ipa_ipfltr_range_eq_16 *q6_ul_filter_nat_ptr;
 	struct ipa_ipfltr_range_eq_16_type_v01 *filter_spec_nat_ptr;
 
+	/*
+	 * pure_ack and tos have the same size and type, so the tos field is
+	 * treated as pure_ack on the IPA v4.5 version
+	 */
+	flt_spec_ptr = (struct ipa_filter_spec_ex_type_v01 *) flt_spec_ptr_void;
+
 	q6_ul_flt_rule_ptr->ip = flt_spec_ptr->ip_type;
 	q6_ul_flt_rule_ptr->action = flt_spec_ptr->filter_action;
 	if (flt_spec_ptr->is_routing_table_index_valid == true)
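The switch to a void-pointer parameter relies on the ex and ex2 QMI filter specs being layout-compatible, per the in-code comment (tos and pure_ack occupy the same bytes). A generic sketch of the pattern, with hypothetical types:

	/* Two layout-identical specs differing in one same-sized field. */
	struct spec_ex { u8 tos; u32 rule_id; };
	struct spec_ex2 { u8 pure_ack; u32 rule_id; };

	static u32 example_read_rule(void *spec_void)
	{
		/* Valid for either variant while the layouts stay in sync. */
		struct spec_ex *spec = (struct spec_ex *)spec_void;

		return spec->rule_id;
	}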
@@ -563,7 +591,6 @@
 		flt_spec_ptr->filter_rule.ipv4_frag_eq_present;
 }
 
-
 int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
 		*rule_req)
 {
@@ -571,14 +598,25 @@
 
 	/* prevent multi-threads accessing rmnet_ipa3_ctx->num_q6_rules */
 	mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
-	if (rule_req->filter_spec_ex_list_valid == true) {
+	if (rule_req->filter_spec_ex_list_valid == true &&
+		rule_req->filter_spec_ex2_list_valid == false) {
 		rmnet_ipa3_ctx->num_q6_rules =
 			rule_req->filter_spec_ex_list_len;
-		IPAWANDBG("Received (%d) install_flt_req\n",
+		IPAWANDBG("Received (%d) install_flt_req_ex_list\n",
+			rmnet_ipa3_ctx->num_q6_rules);
+	} else if (rule_req->filter_spec_ex2_list_valid == true &&
+		rule_req->filter_spec_ex_list_valid == false) {
+		rmnet_ipa3_ctx->num_q6_rules =
+			rule_req->filter_spec_ex2_list_len;
+		IPAWANDBG("Received (%d) install_flt_req_ex2_list\n",
 			rmnet_ipa3_ctx->num_q6_rules);
 	} else {
 		rmnet_ipa3_ctx->num_q6_rules = 0;
-		IPAWANERR("got no UL rules from modem\n");
+		if (rule_req->filter_spec_ex2_list_valid == true)
+			IPAWANERR(
+			"both ex and ex2 flt rules are set to valid\n");
+		else
+			IPAWANERR("got no UL rules from modem\n");
 		mutex_unlock(
 			&rmnet_ipa3_ctx->add_mux_channel_lock);
 		return -EINVAL;
@@ -594,8 +632,14 @@
 				rmnet_ipa3_ctx->num_q6_rules);
 			goto failure;
 		}
-		ipa3_copy_qmi_flt_rule_ex(&ipa3_qmi_ctx->q6_ul_filter_rule[i],
-			&rule_req->filter_spec_ex_list[i]);
+		if (rule_req->filter_spec_ex_list_valid == true)
+			ipa3_copy_qmi_flt_rule_ex(
+				&ipa3_qmi_ctx->q6_ul_filter_rule[i],
+				&rule_req->filter_spec_ex_list[i]);
+		else if (rule_req->filter_spec_ex2_list_valid == true)
+			ipa3_copy_qmi_flt_rule_ex(
+				&ipa3_qmi_ctx->q6_ul_filter_rule[i],
+				&rule_req->filter_spec_ex2_list[i]);
 	}
 
 	if (rule_req->xlat_filter_indices_list_valid) {
@@ -1348,10 +1392,14 @@
 	}
 
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
-		(in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+		(in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM) {
 		ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 8;
-	else
+		rmnet_ipa3_ctx->dl_csum_offload_enabled = true;
+	} else {
 		ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4;
+		rmnet_ipa3_ctx->dl_csum_offload_enabled = false;
+	}
+
 	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
 	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
 	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
@@ -1389,7 +1437,19 @@
 	   &rmnet_ipa3_ctx->ipa3_to_apps_hdl);
 
 	mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
+	if (ret)
+		goto end;
 
+	/* construct default WAN RT tbl for IPACM */
+	ret = ipa3_setup_a7_qmap_hdr();
+	if (ret)
+		goto end;
+
+	ret = ipa3_setup_dflt_wan_rt_tables();
+	if (ret)
+		ipa3_del_a7_qmap_hdr();
+
+end:
 	if (ret)
 		IPAWANERR("failed to configure ingress\n");
 
@@ -2478,16 +2538,6 @@
 		/* LE platform not loads uC */
 		ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
 
-	/* construct default WAN RT tbl for IPACM */
-	if (wan_cons_ep != IPA_EP_NOT_ALLOCATED) {
-		ret = ipa3_setup_a7_qmap_hdr();
-		if (ret)
-			goto setup_a7_qmap_hdr_err;
-		ret = ipa3_setup_dflt_wan_rt_tables();
-		if (ret)
-			goto setup_dflt_wan_rt_tables_err;
-	}
-
 	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
 		/* Start transport-driver fd ioctl for ipacm for first init */
 		ret = ipa3_wan_ioctl_init();
@@ -2602,12 +2652,6 @@
 alloc_netdev_err:
 	ipa3_wan_ioctl_deinit();
 wan_ioctl_init_err:
-	if (wan_cons_ep != IPA_EP_NOT_ALLOCATED)
-		ipa3_del_dflt_wan_rt_tables();
-setup_dflt_wan_rt_tables_err:
-	if (wan_cons_ep != IPA_EP_NOT_ALLOCATED)
-		ipa3_del_a7_qmap_hdr();
-setup_a7_qmap_hdr_err:
 	ipa3_qmi_service_exit();
 	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
 	return ret;
@@ -2654,6 +2698,8 @@
 	if (!ipa3_qmi_ctx->modem_cfg_emb_pipe_flt)
 		ipa3_wwan_del_ul_flt_rule_to_ipa();
 	ipa3_cleanup_deregister_intf();
+	/* reset dl_csum_offload_enabled */
+	rmnet_ipa3_ctx->dl_csum_offload_enabled = false;
 	atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
 	IPAWANINFO("rmnet_ipa completed deinitialization\n");
 	return 0;
@@ -2797,6 +2843,11 @@
 	if (!ipa3_rmnet_ctx.ipa_rmnet_ssr)
 		return NOTIFY_DONE;
 
+	if (!ipa3_ctx) {
+		IPAWANERR_RL("ipa3_ctx was not initialized\n");
+		return NOTIFY_DONE;
+	}
+
 	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
 		IPAWANERR("Local modem SSR event=%lu on APQ platform\n",
 			code);
@@ -4044,6 +4095,15 @@
 		IPAWANERR("Can't allocate memory for tether_info\n");
 		return -ENOMEM;
 	}
+
+	if (data->client_event != IPA_PER_CLIENT_STATS_CONNECT_EVENT &&
+		data->client_event != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) {
+		IPAWANERR("Wrong event given. Event:- %d\n",
+			data->client_event);
+		kfree(lan_client);
+		return -EINVAL;
+	}
+	data->lan_client.lanIface[IPA_RESOURCE_NAME_MAX-1] = '\0';
 	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
 	memcpy(lan_client, &data->lan_client,
 		sizeof(struct ipa_lan_client_msg));
@@ -4375,6 +4435,10 @@
 	void *ssr_hdl;
 	int rc = 0;
 
+	if (!ipa3_ctx) {
+		IPAWANERR_RL("ipa3_ctx was not initialized\n");
+		return -EINVAL;
+	}
 	rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
 
 	if (!rmnet_ipa3_ctx)
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 8077cd3..f68280a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/init.h>
@@ -12,6 +12,7 @@
 #include <linux/uaccess.h>
 #include <linux/rmnet_ipa_fd_ioctl.h>
 #include "ipa_qmi_service.h"
+#include "ipa_i.h"
 
 #define DRIVER_NAME "wwan_ioctl"
 
@@ -335,6 +336,10 @@
 			break;
 		}
 
+		if (ipa_mpm_notify_wan_state()) {
+			IPAWANERR("WAN_IOC_NOTIFY_WAN_STATE failed\n");
+			retval = -EPERM;
+		}
 		break;
 	case WAN_IOC_ENABLE_PER_CLIENT_STATS:
 		IPAWANDBG_LOW("got WAN_IOC_ENABLE_PER_CLIENT_STATS :>>>\n");
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 1296d9c..54c7fc7 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -26,9 +26,6 @@
 #include "wil_platform.h"
 #include "msm_11ad.h"
 
-#define SMMU_BASE	0x20000000 /* Device address range base */
-#define SMMU_SIZE	((SZ_1G * 4ULL) - SMMU_BASE)
-
 #define WIGIG_ENABLE_DELAY	50
 
 #define WIGIG_SUBSYS_NAME	"WIGIG"
@@ -39,9 +36,12 @@
 #define VDD_MIN_UV	1028000
 #define VDD_MAX_UV	1028000
 #define VDD_MAX_UA	575000
-#define VDDIO_MIN_UV	1950000
+#define VDDIO_MIN_UV	1824000
 #define VDDIO_MAX_UV	2040000
 #define VDDIO_MAX_UA	70300
+#define VDD_LDO_MIN_UV	1800000
+#define VDD_LDO_MAX_UV	1800000
+#define VDD_LDO_MAX_UA	100000
 
 #define WIGIG_MIN_CPU_BOOST_KBPS	150000
 
@@ -89,15 +89,8 @@
 	u32 rc_index; /* PCIE root complex index */
 	struct pci_dev *pcidev;
 	struct pci_saved_state *pristine_state;
-
-	/* SMMU */
-	bool use_smmu; /* have SMMU enabled? */
-	int smmu_s1_en;
-	int smmu_fast_map;
-	int smmu_coherent;
-	struct dma_iommu_mapping *mapping;
-	u32 smmu_base;
-	u32 smmu_size;
+	struct pci_saved_state *golden_state;
+	struct msm_pcie_register_event pci_event;
 
 	/* bus frequency scaling */
 	struct msm_bus_scale_pdata *bus_scale;
@@ -120,8 +113,9 @@
 	/* external vregs and clocks */
 	struct msm11ad_vreg vdd;
 	struct msm11ad_vreg vddio;
-	struct msm11ad_clk rf_clk3;
-	struct msm11ad_clk rf_clk3_pin;
+	struct msm11ad_vreg vdd_ldo;
+	struct msm11ad_clk rf_clk;
+	struct msm11ad_clk rf_clk_pin;
 
 	/* cpu boost support */
 	bool use_cpu_boost;
@@ -254,8 +248,18 @@
 	ctx->vddio.min_uV = VDDIO_MIN_UV;
 	ctx->vddio.max_uA = VDDIO_MAX_UA;
 
+	rc = msm_11ad_init_vreg(dev, &ctx->vdd_ldo, "vdd-ldo");
+	if (rc)
+		goto vdd_ldo_fail;
+
+	ctx->vdd_ldo.max_uV = VDD_LDO_MAX_UV;
+	ctx->vdd_ldo.min_uV = VDD_LDO_MIN_UV;
+	ctx->vdd_ldo.max_uA = VDD_LDO_MAX_UA;
+
 	return rc;
 
+vdd_ldo_fail:
+	msm_11ad_release_vreg(dev, &ctx->vddio);
 vddio_fail:
 	msm_11ad_release_vreg(dev, &ctx->vdd);
 out:
@@ -264,6 +268,7 @@
 
 static void msm_11ad_release_vregs(struct msm11ad_ctx *ctx)
 {
+	msm_11ad_release_vreg(ctx->dev, &ctx->vdd_ldo);
 	msm_11ad_release_vreg(ctx->dev, &ctx->vdd);
 	msm_11ad_release_vreg(ctx->dev, &ctx->vddio);
 }
@@ -379,8 +384,14 @@
 	if (rc)
 		goto vddio_fail;
 
+	rc = msm_11ad_enable_vreg(ctx, &ctx->vdd_ldo);
+	if (rc)
+		goto vdd_ldo_fail;
+
 	return rc;
 
+vdd_ldo_fail:
+	msm_11ad_disable_vreg(ctx, &ctx->vddio);
 vddio_fail:
 	msm_11ad_disable_vreg(ctx, &ctx->vdd);
 out:
@@ -389,10 +400,11 @@
 
 static int msm_11ad_disable_vregs(struct msm11ad_ctx *ctx)
 {
-	if (!ctx->vdd.reg && !ctx->vddio.reg)
+	if (!ctx->vdd.reg && !ctx->vddio.reg && !ctx->vdd_ldo.reg)
 		goto out;
 
 	/* ignore errors on disable vreg */
+	msm_11ad_disable_vreg(ctx, &ctx->vdd_ldo);
 	msm_11ad_disable_vreg(ctx, &ctx->vdd);
 	msm_11ad_disable_vreg(ctx, &ctx->vddio);
 
@@ -444,13 +456,13 @@
 {
 	int rc;
 
-	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3);
+	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk);
 	if (rc)
 		return rc;
 
-	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3_pin);
+	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk_pin);
 	if (rc)
-		msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+		msm_11ad_disable_clk(ctx, &ctx->rf_clk);
 
 	return rc;
 }
@@ -459,22 +471,22 @@
 {
 	int rc;
 	struct device *dev = ctx->dev;
-	int rf_clk3_pin_idx;
+	int rf_clk_pin_idx;
 
 	if (!of_property_read_bool(dev->of_node, "qcom,use-ext-clocks"))
 		return 0;
 
-	rc = msm_11ad_init_clk(dev, &ctx->rf_clk3, "rf_clk3_clk");
+	rc = msm_11ad_init_clk(dev, &ctx->rf_clk, "rf_clk_clk");
 	if (rc)
 		return rc;
 
-	rf_clk3_pin_idx = of_property_match_string(dev->of_node, "clock-names",
-						   "rf_clk3_pin_clk");
-	if (rf_clk3_pin_idx >= 0) {
-		rc = msm_11ad_init_clk(dev, &ctx->rf_clk3_pin,
-				       "rf_clk3_pin_clk");
+	rf_clk_pin_idx = of_property_match_string(dev->of_node, "clock-names",
+						   "rf_clk_pin_clk");
+	if (rf_clk_pin_idx >= 0) {
+		rc = msm_11ad_init_clk(dev, &ctx->rf_clk_pin,
+				       "rf_clk_pin_clk");
 		if (rc)
-			msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3);
+			msm_11ad_release_clk(ctx->dev, &ctx->rf_clk);
 	}
 
 	return rc;
@@ -482,14 +494,14 @@
 
 static void msm_11ad_release_clocks(struct msm11ad_ctx *ctx)
 {
-	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3_pin);
-	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3);
+	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk_pin);
+	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk);
 }
 
 static void msm_11ad_disable_clocks(struct msm11ad_ctx *ctx)
 {
-	msm_11ad_disable_clk(ctx, &ctx->rf_clk3_pin);
-	msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+	msm_11ad_disable_clk(ctx, &ctx->rf_clk_pin);
+	msm_11ad_disable_clk(ctx, &ctx->rf_clk);
 }
 
 static int msm_11ad_turn_device_power_off(struct msm11ad_ctx *ctx)
@@ -562,11 +574,13 @@
 
 	pcidev = ctx->pcidev;
 
+	/* free the old saved state and save the latest state */
 	rc = pci_save_state(pcidev);
 	if (rc) {
 		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
 		goto out;
 	}
+	kfree(ctx->pristine_state);
 	ctx->pristine_state = pci_store_saved_state(pcidev);
 
 	rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
@@ -583,6 +597,72 @@
 	return rc;
 }
 
+static int ops_pci_linkdown_recovery(void *handle)
+{
+	struct msm11ad_ctx *ctx = handle;
+	struct pci_dev *pcidev;
+	int rc;
+
+	if (!ctx) {
+		pr_err("11ad pci_linkdown_recovery: No context\n");
+		return -ENODEV;
+	}
+
+	pcidev = ctx->pcidev;
+
+	/* suspend */
+	dev_dbg(ctx->dev, "11ad pci_linkdown_recovery, suspend the device\n");
+	pci_disable_device(pcidev);
+	rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+				 pcidev, NULL, 0);
+	if (rc) {
+		dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed: %d\n",
+			rc);
+		goto out;
+	}
+
+	rc = msm_11ad_turn_device_power_off(ctx);
+	if (rc) {
+		dev_err(ctx->dev, "failed to turn off device: %d\n",
+			rc);
+		goto out;
+	}
+
+	/* resume */
+	rc = msm_11ad_turn_device_power_on(ctx);
+	if (rc)
+		goto out;
+
+	rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
+				 pcidev, NULL, 0);
+	if (rc) {
+		dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed: %d\n",
+			rc);
+		goto err_disable_power;
+	}
+
+	pci_set_power_state(pcidev, PCI_D0);
+
+	if (ctx->golden_state)
+		pci_load_saved_state(pcidev, ctx->golden_state);
+	pci_restore_state(pcidev);
+
+	rc = pci_enable_device(pcidev);
+	if (rc) {
+		dev_err(ctx->dev, "pci_enable_device failed (%d)\n", rc);
+		goto err_disable_power;
+	}
+
+	pci_set_master(pcidev);
+
+out:
+	return rc;
+
+err_disable_power:
+	msm_11ad_turn_device_power_off(ctx);
+	return rc;
+}
+
 static int ops_suspend(void *handle, bool keep_device_power)
 {
 	struct msm11ad_ctx *ctx = handle;
@@ -603,6 +683,7 @@
 	dev_dbg(ctx->dev, "disable device and save config\n");
 	pci_disable_device(pcidev);
 	pci_save_state(pcidev);
+	kfree(ctx->pristine_state);
 	ctx->pristine_state = pci_store_saved_state(pcidev);
 	dev_dbg(ctx->dev, "moving to D3\n");
 	pci_set_power_state(pcidev, PCI_D3hot);
@@ -698,86 +779,6 @@
 	return rc;
 }
 
-static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
-{
-	int atomic_ctx = 1;
-	int rc;
-	int force_pt_coherent = 1;
-	int smmu_bypass = !ctx->smmu_s1_en;
-
-	if (!ctx->use_smmu)
-		return 0;
-
-	dev_info(ctx->dev, "Initialize SMMU, bypass=%d, fastmap=%d, coherent=%d\n",
-		 smmu_bypass, ctx->smmu_fast_map, ctx->smmu_coherent);
-
-	ctx->mapping = __depr_arm_iommu_create_mapping(&platform_bus_type,
-						ctx->smmu_base, ctx->smmu_size);
-	if (IS_ERR_OR_NULL(ctx->mapping)) {
-		rc = PTR_ERR(ctx->mapping) ?: -ENODEV;
-		dev_err(ctx->dev, "Failed to create IOMMU mapping (%d)\n", rc);
-		return rc;
-	}
-
-	rc = iommu_domain_set_attr(ctx->mapping->domain,
-				   DOMAIN_ATTR_ATOMIC,
-				   &atomic_ctx);
-	if (rc) {
-		dev_err(ctx->dev, "Set atomic attribute to SMMU failed (%d)\n",
-			rc);
-		goto release_mapping;
-	}
-
-	if (smmu_bypass) {
-		rc = iommu_domain_set_attr(ctx->mapping->domain,
-					   DOMAIN_ATTR_S1_BYPASS,
-					   &smmu_bypass);
-		if (rc) {
-			dev_err(ctx->dev, "Set bypass attribute to SMMU failed (%d)\n",
-				rc);
-			goto release_mapping;
-		}
-	} else {
-		/* Set dma-coherent and page table coherency */
-		if (ctx->smmu_coherent) {
-			arch_setup_dma_ops(&ctx->pcidev->dev, 0, 0, NULL, true);
-			rc = iommu_domain_set_attr(ctx->mapping->domain,
-				   DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
-				   &force_pt_coherent);
-			if (rc) {
-				dev_err(ctx->dev,
-					"Set SMMU PAGE_TABLE_FORCE_COHERENT attr failed (%d)\n",
-					rc);
-				goto release_mapping;
-			}
-		}
-
-		if (ctx->smmu_fast_map) {
-			rc = iommu_domain_set_attr(ctx->mapping->domain,
-						   DOMAIN_ATTR_FAST,
-						   &ctx->smmu_fast_map);
-			if (rc) {
-				dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
-					rc);
-				goto release_mapping;
-			}
-		}
-	}
-
-	rc = __depr_arm_iommu_attach_device(&ctx->pcidev->dev, ctx->mapping);
-	if (rc) {
-		dev_err(ctx->dev, "arm_iommu_attach_device failed (%d)\n", rc);
-		goto release_mapping;
-	}
-	dev_dbg(ctx->dev, "attached to IOMMU\n");
-
-	return 0;
-release_mapping:
-	__depr_arm_iommu_release_mapping(ctx->mapping);
-	ctx->mapping = NULL;
-	return rc;
-}
-
 static int msm_11ad_ssr_shutdown(const struct subsys_desc *subsys,
 				 bool force_stop)
 {
@@ -987,6 +988,32 @@
 	}
 }
 
+static void msm_11ad_pci_event_cb(struct msm_pcie_notify *notify)
+{
+	struct pci_dev *pcidev = notify->user;
+	struct msm11ad_ctx *ctx = pcidev2ctx(pcidev);
+
+	if (!ctx)
+		return;
+
+	if (!ctx->rops.notify || !ctx->wil_handle) {
+		dev_info(ctx->dev,
+			 "no registered notif CB, cannot handle PCI notifications\n");
+		return;
+	}
+
+	switch (notify->event) {
+	case MSM_PCIE_EVENT_LINKDOWN:
+		dev_err(ctx->dev, "PCIe linkdown\n");
+		ctx->rops.notify(ctx->wil_handle,
+				 WIL_PLATFORM_NOTIF_PCI_LINKDOWN);
+		break;
+	default:
+		break;
+	}
+}
+
 static int msm_11ad_probe(struct platform_device *pdev)
 {
 	struct msm11ad_ctx *ctx;
@@ -994,9 +1021,9 @@
 	struct device_node *of_node = dev->of_node;
 	struct device_node *rc_node;
 	struct pci_dev *pcidev = NULL;
-	u32 smmu_mapping[2];
 	int rc, i;
 	bool pcidev_found = false;
+	struct msm_pcie_register_event *pci_event;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -1020,7 +1047,6 @@
 	 *	qcom,msm-bus,vectors-KBps =
 	 *		<100 512 0 0>,
 	 *		<100 512 600000 800000>;
-	 *	qcom,smmu-support;
 	 *};
 	 * rc_node stands for "qcom,pcie", selected entries:
 	 * cell-index = <1>; (ctx->rc_index)
@@ -1051,7 +1077,6 @@
 		dev_err(ctx->dev, "Parent PCIE device index not found\n");
 		return -EINVAL;
 	}
-	ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
 	ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
 		"qcom,keep-radio-on-during-sleep");
 	ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
@@ -1060,28 +1085,6 @@
 		return -EINVAL;
 	}
 
-	ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
-	if (ctx->smmu_s1_en) {
-		ctx->smmu_fast_map = of_property_read_bool(
-						of_node, "qcom,smmu-fast-map");
-		ctx->smmu_coherent = of_property_read_bool(
-						of_node, "qcom,smmu-coherent");
-	}
-	rc = of_property_read_u32_array(dev->of_node, "qcom,smmu-mapping",
-			smmu_mapping, 2);
-	if (rc) {
-		dev_err(ctx->dev,
-			"Failed to read base/size smmu addresses %d, fallback to default\n",
-			rc);
-		ctx->smmu_base = SMMU_BASE;
-		ctx->smmu_size = SMMU_SIZE;
-	} else {
-		ctx->smmu_base = smmu_mapping[0];
-		ctx->smmu_size = smmu_mapping[1];
-	}
-	dev_dbg(ctx->dev, "smmu_base=0x%x smmu_sise=0x%x\n",
-		ctx->smmu_base, ctx->smmu_size);
-
 	/*== execute ==*/
 	/* turn device on */
 	rc = msm_11ad_init_vregs(ctx);
@@ -1212,14 +1215,36 @@
 		 "  gpio_dc = %d\n"
 		 "  sleep_clk_en = %d\n"
 		 "  rc_index = %d\n"
-		 "  use_smmu = %d\n"
 		 "  pcidev = %pK\n"
 		 "}\n", ctx, ctx->gpio_en, ctx->gpio_dc, ctx->sleep_clk_en,
-		 ctx->rc_index, ctx->use_smmu, ctx->pcidev);
+		 ctx->rc_index, ctx->pcidev);
 
 	platform_set_drvdata(pdev, ctx);
 	device_disable_async_suspend(&pcidev->dev);
 
+	/* Save golden config space for pci linkdown recovery */
+	rc = pci_save_state(pcidev);
+	if (rc) {
+		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
+		goto out_suspend;
+	}
+	ctx->golden_state = pci_store_saved_state(pcidev);
+
+	pci_event = &ctx->pci_event;
+	pci_event->events = MSM_PCIE_EVENT_LINKDOWN;
+	pci_event->user = ctx->pcidev;
+	pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
+	pci_event->options = MSM_PCIE_CONFIG_NO_RECOVERY;
+	pci_event->callback = msm_11ad_pci_event_cb;
+
+	rc = msm_pcie_register_event(pci_event);
+	if (rc) {
+		dev_err(ctx->dev, "failed to register msm pcie event: %d\n",
+			rc);
+		kfree(ctx->golden_state);
+		goto out_suspend;
+	}
+
 	list_add_tail(&ctx->list, &dev_list);
 	msm_11ad_suspend_power_off(ctx);
 
@@ -1255,11 +1280,13 @@
 {
 	struct msm11ad_ctx *ctx = platform_get_drvdata(pdev);
 
+	msm_pcie_deregister_event(&ctx->pci_event);
 	msm_11ad_ssr_deinit(ctx);
 	list_del(&ctx->list);
 	dev_info(ctx->dev, "%s: pdev %pK pcidev %pK\n", __func__, pdev,
 		 ctx->pcidev);
 	kfree(ctx->pristine_state);
+	kfree(ctx->golden_state);
 
 	pci_dev_put(ctx->pcidev);
 	if (ctx->gpio_en >= 0) {
@@ -1420,12 +1447,6 @@
 		ctx->msm_bus_handle = 0;
 	}
 
-	if (ctx->use_smmu) {
-		__depr_arm_iommu_detach_device(&ctx->pcidev->dev);
-		__depr_arm_iommu_release_mapping(ctx->mapping);
-		ctx->mapping = NULL;
-	}
-
 	memset(&ctx->rops, 0, sizeof(ctx->rops));
 	ctx->wil_handle = NULL;
 
@@ -1456,6 +1477,7 @@
 {
 	struct msm11ad_ctx *ctx = (struct msm11ad_ctx *)handle;
 	int rc = 0;
+	struct pci_dev *pcidev = ctx->pcidev;
 
 	switch (evt) {
 	case WIL_PLATFORM_EVT_FW_CRASH:
@@ -1463,12 +1485,12 @@
 		break;
 	case WIL_PLATFORM_EVT_PRE_RESET:
 		/*
-		 * Enable rf_clk3 clock before resetting the device to ensure
+		 * Enable rf_clk clock before resetting the device to ensure
 		 * stable ref clock during the device reset
 		 */
 		if (ctx->features &
 		    BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL)) {
-			rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3);
+			rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk);
 			if (rc) {
 				dev_err(ctx->dev,
 					"failed to enable clk, rc %d\n", rc);
@@ -1478,12 +1500,25 @@
 		break;
 	case WIL_PLATFORM_EVT_FW_RDY:
 		/*
-		 * Disable rf_clk3 clock after the device is up to allow
+		 * Disable rf_clk clock after the device is up to allow
 		 * the device to control it via its GPIO for power saving
 		 */
 		if (ctx->features &
 		    BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL))
-			msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+			msm_11ad_disable_clk(ctx, &ctx->rf_clk);
+
+		/*
+		 * Save golden config space for pci linkdown recovery.
+		 * golden_state is also saved after enumeration, free the old
+		 * saved state before reallocating
+		 */
+		rc = pci_save_state(pcidev);
+		if (rc) {
+			dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
+			return rc;
+		}
+		kfree(ctx->golden_state);
+		ctx->golden_state = pci_store_saved_state(pcidev);
 		break;
 	default:
 		pr_debug("%s: Unhandled event %d\n", __func__, evt);
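A note on the saved-state pattern used throughout this file: pci_save_state() snapshots config space into the pci_dev, pci_store_saved_state() returns a kmalloc'd copy that the caller owns (hence the kfree() now added before each re-store, plugging a leak across repeated cycles), and pci_load_saved_state() stages a copy for the next pci_restore_state(). A minimal sketch of the pairing:

	/* Snapshot config space into a caller-owned slot. */
	static int example_snapshot(struct pci_dev *pdev,
				    struct pci_saved_state **slot)
	{
		int rc = pci_save_state(pdev);	/* snapshot into pdev */

		if (rc)
			return rc;
		kfree(*slot);			/* drop the previous snapshot */
		*slot = pci_store_saved_state(pdev);	/* caller-owned copy */
		return 0;
	}

	/* Stage the snapshot and write it back to the device. */
	static void example_restore(struct pci_dev *pdev,
				    struct pci_saved_state *snap)
	{
		if (snap)
			pci_load_saved_state(pdev, snap);
		pci_restore_state(pdev);
	}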
@@ -1522,6 +1557,10 @@
 {
 	struct pci_dev *pcidev = to_pci_dev(dev);
 	struct msm11ad_ctx *ctx = pcidev2ctx(pcidev);
+	struct iommu_domain *domain;
+	int bypass = 0;
+	int fastmap = 0;
+	int coherent = 0;
 
 	if (!ctx) {
 		pr_err("Context not found for pcidev %pK\n", pcidev);
@@ -1536,11 +1575,19 @@
 		return NULL;
 	}
 	dev_info(ctx->dev, "msm_bus handle 0x%x\n", ctx->msm_bus_handle);
-	/* smmu */
-	if (msm_11ad_smmu_init(ctx)) {
-		msm_bus_scale_unregister_client(ctx->msm_bus_handle);
-		ctx->msm_bus_handle = 0;
-		return NULL;
+
+	domain = iommu_get_domain_for_dev(&pcidev->dev);
+	if (domain) {
+		iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+		iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap);
+		iommu_domain_get_attr(domain,
+				      DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
+				      &coherent);
+
+		dev_info(ctx->dev, "SMMU initialized, bypass=%d, fastmap=%d, coherent=%d\n",
+			 bypass, fastmap, coherent);
+	} else {
+		dev_warn(ctx->dev, "Unable to get iommu domain\n");
 	}
 
 	/* subsystem restart */
@@ -1558,6 +1605,7 @@
 	ops->notify = ops_notify;
 	ops->get_capa = ops_get_capa;
 	ops->set_features = ops_set_features;
+	ops->pci_linkdown_recovery = ops_pci_linkdown_recovery;
 
 	return ctx;
 }
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index d0b5cbd..414ebd4 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"%s: " fmt, __func__
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/of_platform.h>
 #include <linux/msm_ext_display.h>
+#include <linux/extcon-provider.h>
 
 struct msm_ext_disp_list {
 	struct msm_ext_disp_init_data *data;
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index e5e0dc4..c9c574e 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -23,9 +23,23 @@
 #define GENI_SE_IOMMU_VA_START	(0x40000000)
 #define GENI_SE_IOMMU_VA_SIZE	(0xC0000000)
 
+#ifdef CONFIG_ARM64
+#define GENI_SE_DMA_PTR_L(ptr) ((u32)ptr)
+#define GENI_SE_DMA_PTR_H(ptr) ((u32)(ptr >> 32))
+#else
+#define GENI_SE_DMA_PTR_L(ptr) ((u32)ptr)
+#define GENI_SE_DMA_PTR_H(ptr) 0
+#endif
+
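The #ifdef above exists because dma_addr_t is only 32 bits wide on typical 32-bit builds, where a literal (ptr >> 32) is a shift by the full type width (undefined behavior and a compiler warning); the macros make the high word an explicit 0 there. A small illustration with a hypothetical address:

	dma_addr_t tx_dma = 0x123456789ULL;	/* hypothetical bus address */
	u32 lo = GENI_SE_DMA_PTR_L(tx_dma);	/* 0x23456789 */
	u32 hi = GENI_SE_DMA_PTR_H(tx_dma);	/* 0x1 on ARM64, 0 elsewhere */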
 #define NUM_LOG_PAGES 2
 #define MAX_CLK_PERF_LEVEL 32
-static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000, 100000000};
+static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000,
+				100000000, 150000000, 200000000, 236000000};
+
+struct bus_vectors {
+	int src;
+	int dst;
+};
 
 /**
  * @struct geni_se_device - Data structure to represent the QUPv3 Core
@@ -36,18 +50,32 @@
  * @iommu_s1_bypass:	Bypass IOMMU stage 1 translation.
  * @base:		Base address of this instance of QUPv3 core.
  * @bus_bw:		Client handle to the bus bandwidth request.
+ * @bus_bw_noc:		Client handle to the QUP clock and DDR path bus
+ *			bandwidth request.
  * @bus_mas_id:		Master Endpoint ID for bus BW request.
  * @bus_slv_id:		Slave Endpoint ID for bus BW request.
  * @geni_dev_lock:		Lock to protect the bus ab & ib values, list.
  * @ab_list_head:	Sorted resource list based on average bus BW.
  * @ib_list_head:	Sorted resource list based on instantaneous bus BW.
+ * @ab_list_head_noc:	Sorted resource list based on average DDR path bus BW.
+ * @ib_list_head_noc:	Sorted resource list based on instantaneous DDR path
+ *			bus BW.
  * @cur_ab:		Current Bus Average BW request value.
  * @cur_ib:		Current Bus Instantaneous BW request value.
+ * @cur_ab_noc:		Current DDR Bus Average BW request value.
+ * @cur_ib_noc:		Current DDR Bus Instantaneous BW request value.
  * @bus_bw_set:		Clock plan for the bus driver.
+ * @bus_bw_set_noc:	Clock plan for DDR path.
  * @cur_bus_bw_idx:	Current index within the bus clock plan.
+ * @cur_bus_bw_idx_noc:	Current index within the DDR path clock plan.
  * @num_clk_levels:	Number of valid clock levels in clk_perf_tbl.
  * @clk_perf_tbl:	Table of clock frequency input to Serial Engine clock.
- * @log_ctx:		Logging context to hold the debug information
+ * @log_ctx:		Logging context to hold the debug information.
+ * @vectors:		Structure to store Master End and Slave End IDs for
+ *			QUPv3 clock and DDR path bus BW request.
+ * @num_paths:		Two paths. QUPv3 clock and DDR paths.
+ * @num_usecases:	One usecase to vote for both QUPv3 clock and DDR paths.
+ * @pdata:		To register our client handle with the ICB driver.
  */
 struct geni_se_device {
 	struct device *dev;
@@ -57,19 +85,31 @@
 	bool iommu_s1_bypass;
 	void __iomem *base;
 	struct msm_bus_client_handle *bus_bw;
+	uint32_t bus_bw_noc;
 	u32 bus_mas_id;
 	u32 bus_slv_id;
 	struct mutex geni_dev_lock;
 	struct list_head ab_list_head;
 	struct list_head ib_list_head;
+	struct list_head ab_list_head_noc;
+	struct list_head ib_list_head_noc;
 	unsigned long cur_ab;
 	unsigned long cur_ib;
+	unsigned long cur_ab_noc;
+	unsigned long cur_ib_noc;
 	int bus_bw_set_size;
+	int bus_bw_set_size_noc;
 	unsigned long *bus_bw_set;
+	unsigned long *bus_bw_set_noc;
 	int cur_bus_bw_idx;
+	int cur_bus_bw_idx_noc;
 	unsigned int num_clk_levels;
 	unsigned long *clk_perf_tbl;
 	void *log_ctx;
+	struct bus_vectors *vectors;
+	int num_paths;
+	int num_usecases;
+	struct msm_bus_scale_pdata *pdata;
 };
 
 /* Offset of QUPV3 Hardware Version Register */
@@ -282,6 +322,10 @@
 	geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR);
 	geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN);
 
+	/* Clearing registers before reading */
+	geni_write_reg(0x00000000, base, SE_GENI_M_IRQ_EN);
+	geni_write_reg(0x00000000, base, SE_GENI_S_IRQ_EN);
+
 	common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
 	common_geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
 	geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
@@ -301,9 +345,7 @@
 
 static int geni_se_select_dma_mode(void __iomem *base)
 {
-	int proto = get_se_proto(base);
 	unsigned int geni_dma_mode = 0;
-	unsigned int common_geni_m_irq_en;
 
 	geni_write_reg(0, base, SE_GSI_EVENT_EN);
 	geni_write_reg(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR);
@@ -311,13 +353,9 @@
 	geni_write_reg(0xFFFFFFFF, base, SE_DMA_TX_IRQ_CLR);
 	geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR);
 	geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN);
+	geni_write_reg(0x00000000, base, SE_GENI_M_IRQ_EN);
+	geni_write_reg(0x00000000, base, SE_GENI_S_IRQ_EN);
 
-	common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
-	if (proto != UART)
-		common_geni_m_irq_en &=
-			~(M_TX_FIFO_WATERMARK_EN | M_RX_FIFO_WATERMARK_EN);
-
-	geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
 	geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
 	geni_dma_mode |= GENI_DMA_MODE_EN;
 	geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
@@ -614,8 +652,11 @@
 	int new_bus_bw_idx = geni_se_dev->bus_bw_set_size - 1;
 	unsigned long new_bus_bw;
 	bool bus_bw_update = false;
+	/* Convert agg ab into bytes per second */
+	unsigned long new_ab_in_hz = DEFAULT_BUS_WIDTH *
+					((2*geni_se_dev->cur_ab)*10000);
 
-	new_bus_bw = max(geni_se_dev->cur_ib, geni_se_dev->cur_ab) /
+	new_bus_bw = max(geni_se_dev->cur_ib, new_ab_in_hz) /
 							DEFAULT_BUS_WIDTH;
 	for (i = 0; i < geni_se_dev->bus_bw_set_size; i++) {
 		if (geni_se_dev->bus_bw_set[i] >= new_bus_bw) {
@@ -631,11 +672,37 @@
 	return bus_bw_update;
 }
 
+static bool geni_se_check_bus_bw_noc(struct geni_se_device *geni_se_dev)
+{
+	int i;
+	int new_bus_bw_idx = geni_se_dev->bus_bw_set_size_noc - 1;
+	unsigned long new_bus_bw;
+	bool bus_bw_update = false;
+
+	new_bus_bw = max(geni_se_dev->cur_ib_noc, geni_se_dev->cur_ab_noc) /
+							DEFAULT_BUS_WIDTH;
+
+	for (i = 0; i < geni_se_dev->bus_bw_set_size_noc; i++) {
+		if (geni_se_dev->bus_bw_set_noc[i] >= new_bus_bw) {
+			new_bus_bw_idx = i;
+			break;
+		}
+	}
+
+	if (geni_se_dev->cur_bus_bw_idx_noc != new_bus_bw_idx) {
+		geni_se_dev->cur_bus_bw_idx_noc = new_bus_bw_idx;
+		bus_bw_update = true;
+	}
+
+	return bus_bw_update;
+}
+
 static int geni_se_rmv_ab_ib(struct geni_se_device *geni_se_dev,
 			     struct se_geni_rsc *rsc)
 {
 	struct se_geni_rsc *tmp;
 	bool bus_bw_update = false;
+	bool bus_bw_update_noc = false;
 	int ret = 0;
 
 	if (unlikely(list_empty(&rsc->ab_list) || list_empty(&rsc->ib_list)))
@@ -654,14 +721,51 @@
 		geni_se_dev->cur_ib = 0;
 
 	bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
-	if (bus_bw_update)
+
+	if (geni_se_dev->num_paths == 2) {
+		geni_se_dev->pdata->usecase[1].vectors[0].ab =
+			geni_se_dev->cur_ab;
+		geni_se_dev->pdata->usecase[1].vectors[0].ib =
+			geni_se_dev->cur_ib;
+	}
+
+	if (bus_bw_update && geni_se_dev->num_paths != 2) {
 		ret = msm_bus_scale_update_bw(geni_se_dev->bus_bw,
 						geni_se_dev->cur_ab,
 						geni_se_dev->cur_ib);
-	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
-		    "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
-		    geni_se_dev->cur_ab, geni_se_dev->cur_ib,
-		    rsc->ab, rsc->ib, bus_bw_update);
+		GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+			"%s: %s: cur_ab_ib(%lu:%lu) req_ab_ib(%lu:%lu) %d\n",
+			__func__, dev_name(rsc->ctrl_dev), geni_se_dev->cur_ab,
+			geni_se_dev->cur_ib, rsc->ab, rsc->ib, bus_bw_update);
+	}
+
+	if (geni_se_dev->num_paths == 2) {
+		if (unlikely(list_empty(&rsc->ab_list_noc) ||
+					list_empty(&rsc->ib_list_noc)))
+			return -EINVAL;
+
+		list_del_init(&rsc->ab_list_noc);
+		geni_se_dev->cur_ab_noc -= rsc->ab_noc;
+
+		list_del_init(&rsc->ib_list_noc);
+		tmp = list_first_entry_or_null(&geni_se_dev->ib_list_head_noc,
+					struct se_geni_rsc, ib_list_noc);
+		if (tmp && tmp->ib_noc != geni_se_dev->cur_ib_noc)
+			geni_se_dev->cur_ib_noc = tmp->ib_noc;
+		else if (!tmp && geni_se_dev->cur_ib_noc)
+			geni_se_dev->cur_ib_noc = 0;
+
+		bus_bw_update_noc = geni_se_check_bus_bw_noc(geni_se_dev);
+
+		geni_se_dev->pdata->usecase[1].vectors[1].ab =
+			geni_se_dev->cur_ab_noc;
+		geni_se_dev->pdata->usecase[1].vectors[1].ib =
+			geni_se_dev->cur_ib_noc;
+
+		if (bus_bw_update_noc || bus_bw_update)
+			ret = msm_bus_scale_client_update_request
+						(geni_se_dev->bus_bw_noc, 1);
+	}
 	mutex_unlock(&geni_se_dev->geni_dev_lock);
 	return ret;
 }
@@ -682,7 +786,8 @@
 		return -EINVAL;
 
 	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
-	if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+	if (unlikely(!geni_se_dev || !(geni_se_dev->bus_bw ||
+					geni_se_dev->bus_bw_noc)))
 		return -ENODEV;
 
 	clk_disable_unprepare(rsc->se_clk);
@@ -693,6 +798,7 @@
 	if (ret)
 		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
 			"%s: Error %d during bus_bw_update\n", __func__, ret);
+
 	return ret;
 }
 EXPORT_SYMBOL(se_geni_clks_off);
@@ -713,7 +819,9 @@
 		return -EINVAL;
 
 	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
-	if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+	if (unlikely(!geni_se_dev ||
+			!(geni_se_dev->bus_bw ||
+					geni_se_dev->bus_bw_noc)))
 		return -ENODEV;
 
 	ret = se_geni_clks_off(rsc);
@@ -733,10 +841,13 @@
 {
 	struct se_geni_rsc *tmp = NULL;
 	struct list_head *ins_list_head;
+	struct list_head *ins_list_head_noc;
 	bool bus_bw_update = false;
+	bool bus_bw_update_noc = false;
 	int ret = 0;
 
 	mutex_lock(&geni_se_dev->geni_dev_lock);
+
 	list_add(&rsc->ab_list, &geni_se_dev->ab_list_head);
 	geni_se_dev->cur_ab += rsc->ab;
 
@@ -752,14 +863,51 @@
 		geni_se_dev->cur_ib = rsc->ib;
 
 	bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
-	if (bus_bw_update)
+
+	if (geni_se_dev->num_paths == 2) {
+		geni_se_dev->pdata->usecase[1].vectors[0].ab =
+			geni_se_dev->cur_ab;
+		geni_se_dev->pdata->usecase[1].vectors[0].ib =
+			geni_se_dev->cur_ib;
+	}
+
+	if (bus_bw_update && geni_se_dev->num_paths != 2) {
 		ret = msm_bus_scale_update_bw(geni_se_dev->bus_bw,
 						geni_se_dev->cur_ab,
 						geni_se_dev->cur_ib);
-	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
-		    "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
-		    geni_se_dev->cur_ab, geni_se_dev->cur_ib,
-		    rsc->ab, rsc->ib, bus_bw_update);
+		GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+			"%s: %lu:%lu (%lu:%lu) %d\n", __func__,
+			geni_se_dev->cur_ab, geni_se_dev->cur_ib,
+			rsc->ab, rsc->ib, bus_bw_update);
+	}
+
+	if (geni_se_dev->num_paths == 2) {
+
+		list_add(&rsc->ab_list_noc, &geni_se_dev->ab_list_head_noc);
+		geni_se_dev->cur_ab_noc += rsc->ab_noc;
+		ins_list_head_noc = &geni_se_dev->ib_list_head_noc;
+
+		list_for_each_entry(tmp, &geni_se_dev->ib_list_head_noc,
+					ib_list_noc) {
+			if (tmp->ib < rsc->ib)
+				break;
+			ins_list_head_noc = &tmp->ib_list_noc;
+		}
+		list_add(&rsc->ib_list_noc, ins_list_head_noc);
+
+		if (ins_list_head_noc == &geni_se_dev->ib_list_head_noc)
+			geni_se_dev->cur_ib_noc = rsc->ib_noc;
+
+		bus_bw_update_noc = geni_se_check_bus_bw_noc(geni_se_dev);
+
+		geni_se_dev->pdata->usecase[1].vectors[1].ab =
+			geni_se_dev->cur_ab_noc;
+		geni_se_dev->pdata->usecase[1].vectors[1].ib =
+			geni_se_dev->cur_ib_noc;
+		if (bus_bw_update_noc || bus_bw_update)
+			ret = msm_bus_scale_client_update_request
+						(geni_se_dev->bus_bw_noc, 1);
+	}
 	mutex_unlock(&geni_se_dev->geni_dev_lock);
 	return ret;
 }
@@ -872,21 +1020,42 @@
 	if (unlikely(!geni_se_dev))
 		return -EPROBE_DEFER;
 
-	if (unlikely(IS_ERR_OR_NULL(geni_se_dev->bus_bw))) {
-		geni_se_dev->bus_bw = msm_bus_scale_register(
-					geni_se_dev->bus_mas_id,
-					geni_se_dev->bus_slv_id,
-					(char *)dev_name(geni_se_dev->dev),
-					false);
-		if (IS_ERR_OR_NULL(geni_se_dev->bus_bw)) {
-			GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
-				"%s: Error creating bus client\n", __func__);
-			return (int)PTR_ERR(geni_se_dev->bus_bw);
+	if (geni_se_dev->num_paths == 2) {
+		if (unlikely(!(geni_se_dev->bus_bw_noc))) {
+			geni_se_dev->bus_bw_noc =
+			msm_bus_scale_register_client(geni_se_dev->pdata);
+			if (!(geni_se_dev->bus_bw_noc)) {
+				GENI_SE_ERR(geni_se_dev->log_ctx,
+					false, NULL,
+				"%s: Error creating bus client\n",  __func__);
+				return -EFAULT;
+			}
 		}
+
+		rsc->ab = ab;
+		rsc->ib = ab;
+		rsc->ab_noc = 0;
+		rsc->ib_noc = ib;
+		INIT_LIST_HEAD(&rsc->ab_list_noc);
+		INIT_LIST_HEAD(&rsc->ib_list_noc);
+	} else {
+		if (unlikely(IS_ERR_OR_NULL(geni_se_dev->bus_bw))) {
+			geni_se_dev->bus_bw = msm_bus_scale_register(
+						geni_se_dev->bus_mas_id,
+						geni_se_dev->bus_slv_id,
+					(char *)dev_name(geni_se_dev->dev),
+						false);
+			if (IS_ERR_OR_NULL(geni_se_dev->bus_bw)) {
+				GENI_SE_ERR(geni_se_dev->log_ctx,
+					false, NULL,
+				"%s: Error creating bus client\n", __func__);
+				return (int)PTR_ERR(geni_se_dev->bus_bw);
+			}
+		}
+		rsc->ab = ab;
+		rsc->ib = ib;
 	}
 
-	rsc->ab = ab;
-	rsc->ib = ib;
 	INIT_LIST_HEAD(&rsc->ab_list);
 	INIT_LIST_HEAD(&rsc->ib_list);
 
@@ -983,6 +1152,9 @@
 	unsigned long *tbl;
 	int num_clk_levels;
 	int i;
+	unsigned long best_delta = 0;
+	unsigned long new_delta;
+	unsigned int divider;
 
 	num_clk_levels = geni_se_clk_tbl_get(rsc, &tbl);
 	if (num_clk_levels < 0)
@@ -992,17 +1164,21 @@
 		return -EFAULT;
 
 	*res_freq = 0;
-	for (i = 0; i < num_clk_levels; i++) {
-		if (!(tbl[i] % req_freq)) {
-			*index = i;
-			*res_freq = tbl[i];
-			return 0;
-		}
 
-		if (!(*res_freq) || ((tbl[i] > *res_freq) &&
-				     (tbl[i] < req_freq))) {
+	for (i = 0; i < num_clk_levels; i++) {
+		divider = DIV_ROUND_UP(tbl[i], req_freq);
+		new_delta = req_freq - (tbl[i] / divider);
+
+		if (!best_delta || new_delta < best_delta) {
+			/* We have a new best! */
 			*index = i;
 			*res_freq = tbl[i];
+
+			/* If the new best is exact, we're done */
+			if (new_delta == 0)
+				return 0;
+
+			best_delta = new_delta;
 		}
 	}
 
@@ -1039,8 +1215,8 @@
 		return ret;
 
 	geni_write_reg(7, base, SE_DMA_TX_IRQ_EN_SET);
-	geni_write_reg((u32)(*tx_dma), base, SE_DMA_TX_PTR_L);
-	geni_write_reg((u32)((*tx_dma) >> 32), base, SE_DMA_TX_PTR_H);
+	geni_write_reg(GENI_SE_DMA_PTR_L(*tx_dma), base, SE_DMA_TX_PTR_L);
+	geni_write_reg(GENI_SE_DMA_PTR_H(*tx_dma), base, SE_DMA_TX_PTR_H);
 	geni_write_reg(1, base, SE_DMA_TX_ATTR);
 	geni_write_reg(tx_len, base, SE_DMA_TX_LEN);
 	return 0;
@@ -1073,8 +1249,8 @@
 		return ret;
 
 	geni_write_reg(7, base, SE_DMA_RX_IRQ_EN_SET);
-	geni_write_reg((u32)(*rx_dma), base, SE_DMA_RX_PTR_L);
-	geni_write_reg((u32)((*rx_dma) >> 32), base, SE_DMA_RX_PTR_H);
+	geni_write_reg(GENI_SE_DMA_PTR_L(*rx_dma), base, SE_DMA_RX_PTR_L);
+	geni_write_reg(GENI_SE_DMA_PTR_H(*rx_dma), base, SE_DMA_RX_PTR_H);
 	/* RX does not have EOT bit */
 	geni_write_reg(0, base, SE_DMA_RX_ATTR);
 	geni_write_reg(rx_len, base, SE_DMA_RX_LEN);
@@ -1412,6 +1588,87 @@
 	{}
 };
 
+static struct msm_bus_scale_pdata *ab_ib_register(struct platform_device *pdev,
+				struct geni_se_device *host)
+{
+	int rc = 0;
+	struct device *dev = &pdev->dev;
+	int i = 0, j, len;
+	bool mem_err = false;
+	const uint32_t *vec_arr = NULL;
+	struct msm_bus_scale_pdata *pdata = NULL;
+	struct msm_bus_paths *usecase = NULL;
+
+	vec_arr = of_get_property(dev->of_node,
+			"qcom,msm-bus,vectors-bus-ids", &len);
+	if (vec_arr == NULL) {
+		pr_err("Error: Vector array not found\n");
+		rc = 1;
+		goto out;
+	}
+
+	if (len != host->num_paths * sizeof(uint32_t) * 2) {
+		pr_err("Error: Length-error on getting vectors\n");
+		rc = 1;
+		goto out;
+	}
+
+	pdata = devm_kzalloc(dev, sizeof(struct msm_bus_scale_pdata),
+							GFP_KERNEL);
+	if (!pdata) {
+		mem_err = true;
+		goto out;
+	}
+
+	pdata->name = (char *)dev_name(host->dev);
+
+	pdata->num_usecases = 2;
+
+	pdata->active_only = 0;
+
+	usecase = devm_kzalloc(dev, (sizeof(struct msm_bus_paths) *
+		pdata->num_usecases), GFP_KERNEL);
+	if (!usecase) {
+		mem_err = true;
+		goto out;
+	}
+
+	for (i = 0; i < pdata->num_usecases; i++) {
+		usecase[i].num_paths = host->num_paths;
+		usecase[i].vectors = devm_kzalloc(dev, host->num_paths *
+			sizeof(struct msm_bus_vectors), GFP_KERNEL);
+		if (!usecase[i].vectors) {
+			mem_err = true;
+			pr_err("Error: Mem alloc failure in vectors\n");
+			goto out;
+		}
+
+		for (j = 0; j < host->num_paths; j++) {
+			int index = (j * 2);
+
+			usecase[i].vectors[j].src =
+					be32_to_cpu(vec_arr[index]);
+			usecase[i].vectors[j].dst =
+					be32_to_cpu(vec_arr[index + 1]);
+			usecase[i].vectors[j].ab = 0;
+			usecase[i].vectors[j].ib = 0;
+		}
+	}
+
+	pdata->usecase = usecase;
+
+	return pdata;
+out:
+	if (mem_err) {
+		for ( ; i > 0; i--)
+			devm_kfree(dev, usecase[i-1].vectors);
+		devm_kfree(dev, usecase);
+		devm_kfree(dev, pdata);
+	}
+	return NULL;
+}
+
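For reference, ab_ib_register() expects one <src dst> u32 pair per path in "qcom,msm-bus,vectors-bus-ids". A hypothetical devicetree fragment for the num_paths = 2 case (endpoint IDs invented for illustration):

	/*
	 *	qcom,msm-bus,num-paths = <2>;
	 *	qcom,msm-bus,vectors-bus-ids = <100 512>, <100 10062>;
	 *
	 * The length check passes (2 paths * 2 cells * sizeof(u32) = 16
	 * bytes) and the loop unpacks vectors[0] = {100, 512} for the QUP
	 * clock path and vectors[1] = {100, 10062} for the DDR path.
	 */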
 static int geni_se_iommu_probe(struct device *dev)
 {
 	struct geni_se_device *geni_se_dev;
@@ -1472,29 +1729,57 @@
 	}
 
 	geni_se_dev->dev = dev;
-	geni_se_dev->cb_dev = dev;
-	ret = of_property_read_u32(dev->of_node, "qcom,bus-mas-id",
-				   &geni_se_dev->bus_mas_id);
-	if (ret) {
-		dev_err(dev, "%s: Error missing bus master id\n", __func__);
-		devm_iounmap(dev, geni_se_dev->base);
-		devm_kfree(dev, geni_se_dev);
+
+	ret = of_property_read_u32(dev->of_node, "qcom,msm-bus,num-paths",
+					&geni_se_dev->num_paths);
+	if (!ret) {
+		geni_se_dev->pdata = ab_ib_register(pdev, geni_se_dev);
+		if (geni_se_dev->pdata == NULL) {
+			dev_err(dev,
+			"%s: Error missing bus master and slave id\n",
+								__func__);
+			devm_iounmap(dev, geni_se_dev->base);
+			devm_kfree(dev, geni_se_dev);
+		}
 	}
-	ret = of_property_read_u32(dev->of_node, "qcom,bus-slv-id",
+	else {
+		geni_se_dev->num_paths = 1;
+		ret = of_property_read_u32(dev->of_node, "qcom,bus-mas-id",
+				   &geni_se_dev->bus_mas_id);
+		if (ret) {
+			dev_err(dev, "%s: Error missing bus master id\n",
+								__func__);
+			devm_iounmap(dev, geni_se_dev->base);
+			devm_kfree(dev, geni_se_dev);
+		}
+		ret = of_property_read_u32(dev->of_node, "qcom,bus-slv-id",
 				   &geni_se_dev->bus_slv_id);
-	if (ret) {
-		dev_err(dev, "%s: Error missing bus slave id\n", __func__);
-		devm_iounmap(dev, geni_se_dev->base);
-		devm_kfree(dev, geni_se_dev);
+		if (ret) {
+			dev_err(dev, "%s: Error missing bus slave id\n",
+								 __func__);
+			devm_iounmap(dev, geni_se_dev->base);
+			devm_kfree(dev, geni_se_dev);
+		}
 	}
 
 	geni_se_dev->iommu_s1_bypass = of_property_read_bool(dev->of_node,
 							"qcom,iommu-s1-bypass");
 	geni_se_dev->bus_bw_set = default_bus_bw_set;
-	geni_se_dev->bus_bw_set_size = ARRAY_SIZE(default_bus_bw_set);
+	geni_se_dev->bus_bw_set_size =
+				ARRAY_SIZE(default_bus_bw_set);
+	if (geni_se_dev->num_paths == 2) {
+		geni_se_dev->bus_bw_set_noc = default_bus_bw_set;
+		geni_se_dev->bus_bw_set_size_noc =
+				ARRAY_SIZE(default_bus_bw_set);
+	}
 	mutex_init(&geni_se_dev->iommu_lock);
 	INIT_LIST_HEAD(&geni_se_dev->ab_list_head);
 	INIT_LIST_HEAD(&geni_se_dev->ib_list_head);
+	if (geni_se_dev->num_paths == 2) {
+		INIT_LIST_HEAD(&geni_se_dev->ab_list_head_noc);
+		INIT_LIST_HEAD(&geni_se_dev->ib_list_head_noc);
+	}
 	mutex_init(&geni_se_dev->geni_dev_lock);
 	geni_se_dev->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
 						dev_name(geni_se_dev->dev), 0);
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 81c2ec5..b325676 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -2328,8 +2328,11 @@
 	mutex_lock(&bam->lock);
 	sps_bam_device_de_init(bam);
 	mutex_unlock(&bam->lock);
+	ipc_log_context_destroy(bam->ipc_log0);
 	ipc_log_context_destroy(bam->ipc_log1);
 	ipc_log_context_destroy(bam->ipc_log2);
+	ipc_log_context_destroy(bam->ipc_log3);
+	ipc_log_context_destroy(bam->ipc_log4);
 	if (bam->props.virt_size)
 		(void)iounmap(bam->props.virt_addr);
 
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0c1aa6c..7563c07 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -856,6 +856,7 @@
 config ACPI_CMPC
 	tristate "CMPC Laptop Extras"
 	depends on ACPI && INPUT
+	depends on BACKLIGHT_LCD_SUPPORT
 	depends on RFKILL || RFKILL=n
 	select BACKLIGHT_CLASS_DEVICE
 	help
@@ -1077,6 +1078,7 @@
 config SAMSUNG_Q10
 	tristate "Samsung Q10 Extras"
 	depends on ACPI
+	depends on BACKLIGHT_LCD_SUPPORT
 	select BACKLIGHT_CLASS_DEVICE
 	---help---
 	  This driver provides support for backlight control on Samsung Q10
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index db2af09..b6f2ff9 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -442,8 +442,7 @@
 	{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
 	{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
 	{ KE_KEY, 0x32, { KEY_MUTE } },
-	{ KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
-	{ KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
+	{ KE_KEY, 0x35, { KEY_SCREENLOCK } },
 	{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
 	{ KE_KEY, 0x41, { KEY_NEXTSONG } },
 	{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 2d6e272..db3556d 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2231,7 +2231,8 @@
 		err = asus_wmi_backlight_init(asus);
 		if (err && err != -ENODEV)
 			goto fail_backlight;
-	}
+	} else
+		err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
 
 	status = wmi_install_notify_handler(asus->driver->event_guid,
 					    asus_wmi_notify, asus);
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index d89936c..78b4aa4 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -83,12 +83,12 @@
 #define MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET	0xe7
 #define MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET	0xe8
 #define MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET	0xe9
-#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET	0xea
-#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET	0xeb
-#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET	0xec
-#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET	0xed
-#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET	0xee
-#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET	0xef
+#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET	0xeb
+#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET	0xec
+#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET	0xed
+#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET	0xee
+#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET	0xef
+#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET	0xf0
 #define MLXPLAT_CPLD_LPC_IO_RANGE		0x100
 #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF		0xdb
 #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF		0xda
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 6da79ae..5a97e42 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -428,14 +428,14 @@
 		if (ret)
 			return ret;
 
-		val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
+		val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
 		break;
 	case POWER_SUPPLY_PROP_TEMP_AMBIENT:
 		ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
 		if (ret)
 			return ret;
 
-		val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
+		val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
 		ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
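The scaling fix follows from the units involved: the EC appears to report a signed 8.8 fixed-point Celsius value (1/256 degC per LSB), while the power-supply class expects tenths of a degree, so the factor is 10/256; the old 100/256 produced hundredths. A worked example, assuming that fixed-point encoding:

	s16 raw = 0x1A80;		/* 26.5 degC in 8.8 fixed point */
	int tenths = raw * 10 / 256;	/* = 265, reported as 26.5 degC */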
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index d16e4b7..301006d 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -450,6 +450,7 @@
 	POWER_SUPPLY_ATTR(qc_opti_disable),
 	POWER_SUPPLY_ATTR(cc_soc),
 	POWER_SUPPLY_ATTR(batt_age_level),
+	POWER_SUPPLY_ATTR(scale_mode_en),
 	/* Charge pump properties */
 	POWER_SUPPLY_ATTR(cp_status1),
 	POWER_SUPPLY_ATTR(cp_status2),
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index d9efe65..705462d 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__
@@ -40,6 +40,7 @@
 #define PL_FCC_LOW_VOTER		"PL_FCC_LOW_VOTER"
 #define ICL_LIMIT_VOTER			"ICL_LIMIT_VOTER"
 #define FCC_STEPPER_VOTER		"FCC_STEPPER_VOTER"
+#define FCC_VOTER			"FCC_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -57,6 +58,7 @@
 	struct votable		*hvdcp_hw_inov_dis_votable;
 	struct votable		*usb_icl_votable;
 	struct votable		*pl_enable_votable_indirect;
+	struct votable		*cp_ilim_votable;
 	struct delayed_work	status_change_work;
 	struct work_struct	pl_disable_forever_work;
 	struct work_struct	pl_taper_work;
@@ -68,6 +70,7 @@
 	struct power_supply	*batt_psy;
 	struct power_supply	*usb_psy;
 	struct power_supply	*dc_psy;
+	struct power_supply	*cp_master_psy;
 	int			charge_type;
 	int			total_settled_ua;
 	int			pl_settled_ua;
@@ -85,6 +88,7 @@
 	struct wakeup_source	*pl_ws;
 	struct notifier_block	nb;
 	bool			pl_disable;
+	bool			cp_disabled;
 	int			taper_entry_fv;
 	int			main_fcc_max;
 	/* debugfs directory */
@@ -476,6 +480,46 @@
 	}
 }
 
+static void get_main_fcc_config(struct pl_data *chip, int *total_fcc)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (!chip->cp_master_psy)
+		chip->cp_master_psy =
+			power_supply_get_by_name("charge_pump_master");
+	if (!chip->cp_master_psy)
+		goto out;
+
+	rc = power_supply_get_property(chip->cp_master_psy,
+			POWER_SUPPLY_PROP_CP_SWITCHER_EN, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get switcher enable status, rc=%d\n", rc);
+		goto out;
+	}
+
+	if (!pval.intval) {
+		/*
+		 * To honor the main charger's upper FCC limit when the CP
+		 * switcher is disabled, skip FCC slewing, as slewing would
+		 * delay limiting the charge current flowing through the main
+		 * charger.
+		 */
+		if (!chip->cp_disabled) {
+			chip->fcc_stepper_enable = false;
+			pl_dbg(chip, PR_PARALLEL,
+				"Disabling FCC slewing on CP Switcher disable\n");
+		}
+		chip->cp_disabled = true;
+	} else {
+		chip->cp_disabled = false;
+		pl_dbg(chip, PR_PARALLEL,
+			"CP Switcher is enabled, don't limit main fcc\n");
+		return;
+	}
+out:
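+	/* CP off or unknown: clamp total FCC to the main charger limit */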
+	*total_fcc = min(*total_fcc, chip->main_fcc_max);
+}
+
 static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
 			int parallel_fcc_ua)
 {
@@ -552,6 +596,25 @@
 			goto done;
 		}
 
+		/*
+		 * Due to reduction of float voltage in a JEITA condition,
+		 * taper charging can be initiated at a lower FV. On removal
+		 * of the JEITA condition, FV readjusts itself. However, once
+		 * taper charging is initiated, it doesn't exit until parallel
+		 * charging is disabled, due to which FCC doesn't scale back
+		 * to its original value, leading to slow charging thereafter.
+		 * Check if FV increases in comparison to FV at which taper
+		 * charging was initiated, and if yes, exit taper charging.
+		 */
+		if (get_effective_result(chip->fv_votable) >
+						chip->taper_entry_fv) {
+			pl_dbg(chip, PR_PARALLEL, "Float voltage increased. Exiting taper\n");
+			goto done;
+		} else {
+			chip->taper_entry_fv =
+					get_effective_result(chip->fv_votable);
+		}
+
 		rc = power_supply_get_property(chip->batt_psy,
 				       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
 		if (rc < 0) {
@@ -577,26 +640,9 @@
 			vote(chip->fcc_votable, TAPER_STEPPER_VOTER,
 					true, eff_fcc_ua);
 		} else {
-			/*
-			 * Due to reduction of float voltage in JEITA condition
-			 * taper charging can be initiated at a lower FV. On
-			 * removal of JEITA condition, FV readjusts itself.
-			 * However, once taper charging is initiated, it doesn't
-			 * exits until parallel chaging is disabled due to which
-			 * FCC doesn't scale back to its original value, leading
-			 * to slow charging thereafter.
-			 * Check if FV increases in comparison to FV at which
-			 * taper charging was initiated, and if yes, exit taper
-			 * charging.
-			 */
-			if (get_effective_result(chip->fv_votable) >
-						chip->taper_entry_fv) {
-				pl_dbg(chip, PR_PARALLEL, "Float voltage increased. Exiting taper\n");
-				goto done;
-			} else {
-				pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
-			}
+			pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
 		}
+
 		/* wait for the charger state to deglitch after FCC change */
 		msleep(PL_TAPER_WORK_DELAY_MS);
 	}
@@ -618,6 +664,9 @@
 	if (!chip->main_psy)
 		return 0;
 
+	if (!chip->cp_ilim_votable)
+		chip->cp_ilim_votable = find_votable("CP_ILIM");
+
 	if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
 		get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
 				&slave_fcc_ua);
@@ -814,6 +863,10 @@
 	chip->main_fcc_ua = main_fcc;
 	chip->slave_fcc_ua = parallel_fcc;
 
+	if (chip->cp_ilim_votable)
+		vote(chip->cp_ilim_votable, FCC_VOTER, true,
+					chip->main_fcc_ua / 2);
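+	/* a 2:1 CP draws roughly half its output current at its input */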
+
 	if (reschedule_ms) {
 		schedule_delayed_work(&chip->fcc_stepper_work,
 				msecs_to_jiffies(reschedule_ms));
@@ -929,6 +982,9 @@
 
 	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
 
+	if (chip->cp_ilim_votable)
+		vote(chip->cp_ilim_votable, ICL_CHANGE_VOTER, true, icl_ua);
+
 	return 0;
 }
 
@@ -1161,8 +1217,7 @@
 			(slave_fcc_ua * 100) / total_fcc_ua);
 	} else {
 		if (chip->main_fcc_max)
-			total_fcc_ua = min(total_fcc_ua,
-						chip->main_fcc_max);
+			get_main_fcc_config(chip, &total_fcc_ua);
 
 		if (!chip->fcc_stepper_enable) {
 			if (IS_USBIN(chip->pl_mode))
@@ -1188,6 +1243,10 @@
 				return rc;
 			}
 
+			if (chip->cp_ilim_votable)
+				vote(chip->cp_ilim_votable, FCC_VOTER, true,
+						total_fcc_ua / 2);
+
 			/* reset parallel FCC */
 			chip->slave_fcc_ua = 0;
 			chip->total_settled_ua = 0;
@@ -1700,6 +1759,7 @@
 	}
 
 	chip->pl_disable = true;
+	chip->cp_disabled = true;
 	chip->qcom_batt_class.name = "qcom-battery",
 	chip->qcom_batt_class.owner = THIS_MODULE,
 	chip->qcom_batt_class.class_groups = batt_class_groups;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index f7ed5819..4b84efd 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -6,6 +6,7 @@
 #define pr_fmt(fmt)	"FG: %s: " fmt, __func__
 
 #include <linux/alarmtimer.h>
+#include <linux/irq.h>
 #include <linux/ktime.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -188,8 +189,10 @@
 	bool	five_pin_battery;
 	bool	multi_profile_load;
 	bool	esr_calib_dischg;
+	bool	soc_hi_res;
 	int	cutoff_volt_mv;
 	int	empty_volt_mv;
+	int	sys_min_volt_mv;
 	int	cutoff_curr_ma;
 	int	sys_term_curr_ma;
 	int	delta_soc_thr;
@@ -236,7 +239,7 @@
 	struct work_struct	esr_calib_work;
 	struct alarm		esr_fast_cal_timer;
 	struct delayed_work	pl_enable_work;
-	struct delayed_work	pl_current_en_work;
+	struct work_struct	pl_current_en_work;
 	struct completion	mem_attn;
 	char			batt_profile[PROFILE_LEN];
 	enum slope_limit_status	slope_limit_sts;
@@ -261,6 +264,7 @@
 	bool			rslow_low;
 	bool			rapid_soc_dec_en;
 	bool			vbatt_low;
+	bool			chg_term_good;
 };
 
 struct bias_config {
@@ -851,6 +855,35 @@
 	return 0;
 }
 
+static int fg_gen4_get_prop_capacity_raw(struct fg_gen4_chip *chip, int *val)
+{
+	struct fg_dev *fg = &chip->fg;
+	int rc;
+
+	if (!chip->dt.soc_hi_res) {
+		rc = fg_get_msoc_raw(fg, val);
+		return rc;
+	}
+
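+	/* No charger present: report rounded SOC scaled to centi-percent */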
+	if (!is_input_present(fg)) {
+		rc = fg_gen4_get_prop_capacity(fg, val);
+		if (!rc)
+			*val = *val * 100;
+		return rc;
+	}
+
+	rc = fg_get_sram_prop(&chip->fg, FG_SRAM_MONOTONIC_SOC, val);
+	if (rc < 0) {
+		pr_err("Error in getting MONOTONIC_SOC, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Show it in centi-percentage */
+	*val = (*val * 10000) / 0xFFFF;
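+	/* e.g. a raw reading of 0x8000 maps to 5000, i.e. 50.00% */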
+
+	return 0;
+}
+
 static inline void get_esr_meas_current(int curr_ma, u8 *val)
 {
 	switch (curr_ma) {
@@ -874,6 +907,43 @@
 	*val <<= ESR_PULL_DOWN_IVAL_SHIFT;
 }
 
+static int fg_gen4_get_power(struct fg_gen4_chip *chip, int *val, bool average)
+{
+	struct fg_dev *fg = &chip->fg;
+	int rc, v_min, v_pred, esr_uohms, rslow_uohms;
+	s64 power;
+
+	rc = fg_get_sram_prop(fg, FG_SRAM_VOLTAGE_PRED, &v_pred);
+	if (rc < 0)
+		return rc;
+
+	v_min = chip->dt.sys_min_volt_mv * 1000;
+	power = (s64)v_min * (v_pred - v_min);
+
+	rc = fg_get_sram_prop(fg, FG_SRAM_ESR_ACT, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR_ACT, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_get_sram_prop(fg, FG_SRAM_RSLOW, &rslow_uohms);
+	if (rc < 0) {
+		pr_err("failed to get Rslow, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (average)
+		power = div_s64(power, esr_uohms + rslow_uohms);
+	else
+		power = div_s64(power, esr_uohms);
+
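+	/* (uV * uV) / uOhm works out to uW, assuming v_pred is in uV */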
+	pr_debug("V_min: %d V_pred: %d ESR: %d Rslow: %d power: %lld\n", v_min,
+		v_pred, esr_uohms, rslow_uohms, power);
+
+	*val = power;
+	return 0;
+}
+
 /* ALG callback functions below */
 
 static int fg_gen4_get_ttf_param(void *data, enum ttf_param param, int *val)
@@ -2279,8 +2349,16 @@
 				new_recharge_soc = msoc - (FULL_CAPACITY -
 								recharge_soc);
 				fg->recharge_soc_adjusted = true;
+				if (fg->health == POWER_SUPPLY_HEALTH_GOOD)
+					chip->chg_term_good = true;
 			} else {
-				/* adjusted already, do nothing */
+				/*
+				 * If charge termination happened properly then
+				 * do nothing.
+				 */
+				if (chip->chg_term_good)
+					return 0;
+
 				if (fg->health != POWER_SUPPLY_HEALTH_GOOD)
 					return 0;
 
@@ -2291,7 +2369,7 @@
 
 				new_recharge_soc = recharge_soc;
 				fg->recharge_soc_adjusted = false;
-				return 0;
+				chip->chg_term_good = false;
 			}
 		} else {
 			if (!fg->recharge_soc_adjusted)
@@ -2310,11 +2388,13 @@
 			/* Restore the default value */
 			new_recharge_soc = recharge_soc;
 			fg->recharge_soc_adjusted = false;
+			chip->chg_term_good = false;
 		}
 	} else {
 		/* Restore the default value */
 		new_recharge_soc = recharge_soc;
 		fg->recharge_soc_adjusted = false;
+		chip->chg_term_good = false;
 	}
 
 	if (recharge_soc_status == fg->recharge_soc_adjusted)
@@ -3182,9 +3262,15 @@
 	fg_dbg(fg, FG_STATUS, "esr_raw: 0x%x esr_char_raw: 0x%x esr_meas_diff: 0x%x esr_delta: 0x%x\n",
 		esr_raw, esr_char_raw, esr_meas_diff, esr_delta);
 
-	fg_esr_meas_diff = esr_delta - esr_meas_diff;
-	esr_filtered = fg_esr_meas_diff >> chip->dt.esr_filter_factor;
-	esr_delta = esr_delta - esr_filtered;
+	fg_esr_meas_diff = esr_meas_diff - (esr_delta / 32);
+
+	/* Don't filter for the first attempt so that ESR can converge faster */
+	if (!chip->delta_esr_count)
+		esr_filtered = fg_esr_meas_diff;
+	else
+		esr_filtered = fg_esr_meas_diff >> chip->dt.esr_filter_factor;
+
+	esr_delta = esr_delta + (esr_filtered * 32);
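+	/*
+	 * The /32 and *32 scaling suggests esr_delta is maintained at 32x
+	 * the resolution of the measured ESR difference.
+	 */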
 
 	/* Bound the limits */
 	if (esr_delta > SHRT_MAX)
@@ -3221,31 +3307,16 @@
 {
 	struct fg_gen4_chip *chip = container_of(work,
 				struct fg_gen4_chip,
-				pl_current_en_work.work);
+				pl_current_en_work);
 	struct fg_dev *fg = &chip->fg;
 	bool input_present = is_input_present(fg), en;
 
 	en = fg->charge_done ? false : input_present;
 
-	/*
-	 * If mem_attn_irq is disabled and parallel summing current
-	 * configuration needs to be modified, then enable mem_attn_irq and
-	 * wait for 1 second before doing it.
-	 */
-	if (get_effective_result(chip->parallel_current_en_votable) != en &&
-		!get_effective_result(chip->mem_attn_irq_en_votable)) {
-		vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER,
-			true, 0);
-		schedule_delayed_work(&chip->pl_current_en_work,
-			msecs_to_jiffies(1000));
-		return;
-	}
-
-	if (!get_effective_result(chip->mem_attn_irq_en_votable))
+	if (get_effective_result(chip->parallel_current_en_votable) == en)
 		return;
 
 	vote(chip->parallel_current_en_votable, FG_PARALLEL_EN_VOTER, en, 0);
-	vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER, false, 0);
 }
 
 static void pl_enable_work(struct work_struct *work)
@@ -3339,9 +3410,10 @@
 	if (rc < 0)
 		pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
 
-	if (is_parallel_charger_available(fg) &&
-		!delayed_work_pending(&chip->pl_current_en_work))
-		schedule_delayed_work(&chip->pl_current_en_work, 0);
+	if (is_parallel_charger_available(fg)) {
+		cancel_work_sync(&chip->pl_current_en_work);
+		schedule_work(&chip->pl_current_en_work);
+	}
 
 	ttf_update(chip->ttf, input_present);
 	fg->prev_charge_status = fg->charge_status;
@@ -3548,7 +3620,7 @@
 {
 	struct fg_gen4_chip *chip = power_supply_get_drvdata(psy);
 	struct fg_dev *fg = &chip->fg;
-	int rc = 0;
+	int rc = 0, val;
 	int64_t temp;
 
 	switch (psp) {
@@ -3556,7 +3628,16 @@
 		rc = fg_gen4_get_prop_capacity(fg, &pval->intval);
 		break;
 	case POWER_SUPPLY_PROP_CAPACITY_RAW:
-		rc = fg_get_msoc_raw(fg, &pval->intval);
+		rc = fg_gen4_get_prop_capacity_raw(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CC_SOC:
+		rc = fg_get_sram_prop(&chip->fg, FG_SRAM_CC_SOC, &val);
+		if (rc < 0) {
+			pr_err("Error in getting CC_SOC, rc=%d\n", rc);
+			return rc;
+		}
+		/* Show it in centi-percentage */
+		pval->intval = div_s64((int64_t)val * 10000, CC_SOC_30BIT);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 		if (fg->battery_missing)
@@ -3594,6 +3675,9 @@
 	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
 		rc = fg_gen4_get_charge_raw(chip, &pval->intval);
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		pval->intval = chip->cl->init_cap_uah;
+		break;
 	case POWER_SUPPLY_PROP_CHARGE_FULL:
 		rc = fg_gen4_get_learned_capacity(chip, &temp);
 		if (!rc)
@@ -3656,6 +3740,12 @@
 	case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
 		pval->intval = chip->batt_age_level;
 		break;
+	case POWER_SUPPLY_PROP_POWER_NOW:
+		rc = fg_gen4_get_power(chip, &pval->intval, false);
+		break;
+	case POWER_SUPPLY_PROP_POWER_AVG:
+		rc = fg_gen4_get_power(chip, &pval->intval, true);
+		break;
 	default:
 		pr_err("unsupported property %d\n", psp);
 		rc = -EINVAL;
@@ -3775,6 +3865,7 @@
 static enum power_supply_property fg_psy_props[] = {
 	POWER_SUPPLY_PROP_CAPACITY,
 	POWER_SUPPLY_PROP_CAPACITY_RAW,
+	POWER_SUPPLY_PROP_CC_SOC,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_VOLTAGE_OCV,
@@ -3787,6 +3878,7 @@
 	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
 	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
 	POWER_SUPPLY_PROP_CHARGE_FULL,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
@@ -3801,6 +3893,8 @@
 	POWER_SUPPLY_PROP_CC_STEP,
 	POWER_SUPPLY_PROP_CC_STEP_SEL,
 	POWER_SUPPLY_PROP_BATT_AGE_LEVEL,
+	POWER_SUPPLY_PROP_POWER_NOW,
+	POWER_SUPPLY_PROP_POWER_AVG,
 };
 
 static const struct power_supply_desc fg_psy_desc = {
@@ -3909,6 +4003,8 @@
 	int rc;
 	u8 val, mask;
 
+	vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER, true, 0);
+
 	/* Wait for MEM_ATTN interrupt */
 	rc = fg_wait_for_mem_attn(chip);
 	if (rc < 0)
@@ -3921,6 +4017,7 @@
 		pr_err("Error in writing to 0x%04x, rc=%d\n",
 			BATT_INFO_FG_CNV_CHAR_CFG(fg), rc);
 
+	vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER, false, 0);
 	fg_dbg(fg, FG_STATUS, "Parallel current summing: %d\n", enable);
 
 	return rc;
@@ -4674,6 +4771,7 @@
 
 #define DEFAULT_CUTOFF_VOLT_MV		3100
 #define DEFAULT_EMPTY_VOLT_MV		2812
+#define DEFAULT_SYS_MIN_VOLT_MV		2800
 #define DEFAULT_SYS_TERM_CURR_MA	-125
 #define DEFAULT_CUTOFF_CURR_MA		200
 #define DEFAULT_DELTA_SOC_THR		5	/* 0.5 % */
@@ -4942,6 +5040,11 @@
 					"qcom,five-pin-battery");
 	chip->dt.multi_profile_load = of_property_read_bool(node,
 					"qcom,multi-profile-load");
+	chip->dt.soc_hi_res = of_property_read_bool(node, "qcom,soc-hi-res");
+
+	chip->dt.sys_min_volt_mv = DEFAULT_SYS_MIN_VOLT_MV;
+	of_property_read_u32(node, "qcom,fg-sys-min-voltage",
+				&chip->dt.sys_min_volt_mv);
 	return 0;
 }
 
@@ -4954,7 +5057,7 @@
 	cancel_work_sync(&fg->status_change_work);
 	cancel_delayed_work_sync(&fg->profile_load_work);
 	cancel_delayed_work_sync(&fg->sram_dump_work);
-	cancel_delayed_work_sync(&chip->pl_current_en_work);
+	cancel_work_sync(&chip->pl_current_en_work);
 
 	power_supply_unreg_notifier(&fg->nb);
 	debugfs_remove_recursive(fg->dfs_root);
@@ -5017,7 +5120,7 @@
 	INIT_DELAYED_WORK(&fg->profile_load_work, profile_load_work);
 	INIT_DELAYED_WORK(&fg->sram_dump_work, sram_dump_work);
 	INIT_DELAYED_WORK(&chip->pl_enable_work, pl_enable_work);
-	INIT_DELAYED_WORK(&chip->pl_current_en_work, pl_current_en_work);
+	INIT_WORK(&chip->pl_current_en_work, pl_current_en_work);
 
 	fg->awake_votable = create_votable("FG_WS", VOTE_SET_ANY,
 					fg_awake_cb, fg);
@@ -5131,6 +5234,10 @@
 		goto exit;
 	}
 
+	if (fg->irqs[MEM_ATTN_IRQ].irq)
+		irq_set_status_flags(fg->irqs[MEM_ATTN_IRQ].irq,
+					IRQ_DISABLE_UNLAZY);
+
 	/* Keep SOC_UPDATE irq disabled until we require it */
 	if (fg->irqs[SOC_UPDATE_IRQ].irq)
 		disable_irq_nosync(fg->irqs[SOC_UPDATE_IRQ].irq);
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 86926db..da7e614 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"QG-K: %s: " fmt, __func__
@@ -1807,6 +1807,9 @@
 	case POWER_SUPPLY_PROP_SOH:
 		pval->intval = chip->soh;
 		break;
+	case POWER_SUPPLY_PROP_CC_SOC:
+		rc = qg_get_cc_soc(chip, &pval->intval);
+		break;
 	default:
 		pr_debug("Unsupported property %d\n", psp);
 		break;
@@ -1857,6 +1860,7 @@
 	POWER_SUPPLY_PROP_ESR_ACTUAL,
 	POWER_SUPPLY_PROP_ESR_NOMINAL,
 	POWER_SUPPLY_PROP_SOH,
+	POWER_SUPPLY_PROP_CC_SOC,
 };
 
 static const struct power_supply_desc qg_psy_desc = {
diff --git a/drivers/power/supply/qcom/qpnp-qnovo5.c b/drivers/power/supply/qcom/qpnp-qnovo5.c
index a319936..6ec3f3a 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo5.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo5.c
@@ -1,8 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
  */
 
+#define pr_fmt(fmt)	"Qnovo: %s: " fmt, __func__
+
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -13,6 +15,7 @@
 #include <linux/of_irq.h>
 #include <linux/pmic-voter.h>
 #include <linux/delay.h>
+#include <linux/pinctrl/consumer.h>
 
 #define QNOVO_PE_CTRL			0x45
 #define QNOVO_PTRAIN_EN_BIT		BIT(7)
@@ -72,6 +75,7 @@
 #define USER_VOTER		"user_voter"
 #define SHUTDOWN_VOTER		"user_voter"
 #define OK_TO_QNOVO_VOTER	"ok_to_qnovo_voter"
+#define HW_OK_TO_QNOVO_VOTER	"HW_OK_TO_QNOVO_VOTER"
 
 #define QNOVO_VOTER		"qnovo_voter"
 #define QNOVO_OVERALL_VOTER	"QNOVO_OVERALL_VOTER"
@@ -108,6 +112,9 @@
 	struct class		qnovo_class;
 	struct power_supply	*batt_psy;
 	struct power_supply	*usb_psy;
+	struct pinctrl		*pinctrl;
+	struct pinctrl_state	*pinctrl_state1;
+	struct pinctrl_state	*pinctrl_state2;
 	struct notifier_block	nb;
 	struct votable		*disable_votable;
 	struct votable		*pt_dis_votable;
@@ -297,6 +304,30 @@
 		return rc;
 	}
 
+	chip->pinctrl = devm_pinctrl_get(chip->dev);
+	if (IS_ERR(chip->pinctrl)) {
+		pr_err("Couldn't get pinctrl rc=%ld\n", PTR_ERR(chip->pinctrl));
+		chip->pinctrl = NULL;
+	}
+
+	if (chip->pinctrl) {
+		chip->pinctrl_state1 = pinctrl_lookup_state(chip->pinctrl,
+						"q_state1");
+		if (IS_ERR(chip->pinctrl_state1)) {
+			rc = PTR_ERR(chip->pinctrl_state1);
+			pr_err("Couldn't get pinctrl state1 rc=%d\n", rc);
+			return rc;
+		}
+
+		chip->pinctrl_state2 = pinctrl_lookup_state(chip->pinctrl,
+						"q_state2");
+		if (IS_ERR(chip->pinctrl_state2)) {
+			rc = PTR_ERR(chip->pinctrl_state2);
+			pr_err("Couldn't get pinctrl state2 rc=%d\n", rc);
+			return rc;
+		}
+	}
+
 	return 0;
 }
 
@@ -1098,8 +1129,8 @@
 	struct qnovo *chip = container_of(work,
 			struct qnovo, status_change_work);
 	union power_supply_propval pval;
-	bool usb_present = false;
-	int rc;
+	bool usb_present = false, hw_ok_to_qnovo = false;
+	int rc, battery_health, charge_status;
 
 	if (is_usb_available(chip)) {
 		rc = power_supply_get_property(chip->usb_psy,
@@ -1113,6 +1144,17 @@
 		cancel_delayed_work_sync(&chip->usb_debounce_work);
 		vote(chip->awake_votable, USB_READY_VOTER, false, 0);
 		vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0);
+		if (chip->pinctrl) {
+			rc = pinctrl_select_state(chip->pinctrl,
+					chip->pinctrl_state1);
+			if (rc < 0)
+				pr_err("Couldn't select state 1 rc=%d\n", rc);
+
+			rc = pinctrl_select_state(chip->pinctrl,
+					chip->pinctrl_state2);
+			if (rc < 0)
+				pr_err("Couldn't select state 2 rc=%d\n", rc);
+		}
 	} else if (!chip->usb_present && usb_present) {
 		/* insertion */
 		chip->usb_present = 1;
@@ -1120,6 +1162,36 @@
 		schedule_delayed_work(&chip->usb_debounce_work,
 				msecs_to_jiffies(DEBOUNCE_MS));
 	}
+
+	if (!is_batt_available(chip))
+		return;
+
+	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+					&pval);
+	if (rc < 0) {
+		pr_err("Error in getting battery health, rc=%d\n", rc);
+		return;
+	}
+	battery_health = pval.intval;
+
+	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+					&pval);
+	if (rc < 0) {
+		pr_err("Error in getting charging status, rc=%d\n", rc);
+		return;
+	}
+	charge_status = pval.intval;
+
+	pr_debug("USB present: %d health:%d charge_status: %d\n",
+		chip->usb_present, battery_health, charge_status);
+
+	if (chip->usb_present) {
+		hw_ok_to_qnovo =
+			(battery_health == POWER_SUPPLY_HEALTH_GOOD) &&
+			(charge_status == POWER_SUPPLY_STATUS_CHARGING);
+		vote(chip->not_ok_to_qnovo_votable, HW_OK_TO_QNOVO_VOTER,
+					!hw_ok_to_qnovo, 0);
+	}
 }
 
 static int qnovo_notifier_call(struct notifier_block *nb,
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index e49989f..b11818e 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -54,7 +54,7 @@
 	},
 	.icl_stat		= {
 		.name   = "input current limit status",
-		.reg    = AICL_ICL_STATUS_REG,
+		.reg    = ICL_STATUS_REG,
 		.min_u  = 0,
 		.max_u  = 3000000,
 		.step_u = 50000,
@@ -66,13 +66,6 @@
 		.max_u	= 1000000,
 		.step_u	= 250000,
 	},
-	.dc_icl		= {
-		.name   = "DC input current limit",
-		.reg    = DCDC_CFG_REF_MAX_PSNS_REG,
-		.min_u  = 0,
-		.max_u  = 1500000,
-		.step_u = 50000,
-	},
 	.jeita_cc_comp_hot	= {
 		.name	= "jeita fcc reduction",
 		.reg	= JEITA_CCCOMP_CFG_HOT_REG,
@@ -95,6 +88,22 @@
 		.step_u	= 400,
 		.set_proc = smblib_set_chg_freq,
 	},
+	.aicl_5v_threshold		= {
+		.name   = "AICL 5V threshold",
+		.reg    = USBIN_5V_AICL_THRESHOLD_REG,
+		.min_u  = 4000,
+		.max_u  = 4700,
+		.step_u = 100,
+	},
+	.aicl_cont_threshold		= {
+		.name   = "AICL CONT threshold",
+		.reg    = USBIN_CONT_AICL_THRESHOLD_REG,
+		.min_u  = 4000,
+		.max_u  = 8800,
+		.step_u = 100,
+		.get_proc = smblib_get_aicl_cont_threshold,
+		.set_proc = smblib_set_aicl_cont_threshold,
+	},
 };
 
 static struct smb_params smb5_pm8150b_params = {
@@ -171,6 +180,22 @@
 		.step_u	= 400,
 		.set_proc = smblib_set_chg_freq,
 	},
+	.aicl_5v_threshold		= {
+		.name   = "AICL 5V threshold",
+		.reg    = USBIN_5V_AICL_THRESHOLD_REG,
+		.min_u  = 4000,
+		.max_u  = 4700,
+		.step_u = 100,
+	},
+	.aicl_cont_threshold		= {
+		.name   = "AICL CONT threshold",
+		.reg    = USBIN_CONT_AICL_THRESHOLD_REG,
+		.min_u  = 4000,
+		.max_u  = 11800,
+		.step_u = 100,
+		.get_proc = smblib_get_aicl_cont_threshold,
+		.set_proc = smblib_set_aicl_cont_threshold,
+	},
 };
 
 struct smb_dt_props {
@@ -180,6 +205,7 @@
 	int			chg_inhibit_thr_mv;
 	bool			no_battery;
 	bool			hvdcp_disable;
+	bool			hvdcp_autonomous;
 	int			sec_charger_config;
 	int			auto_recharge_soc;
 	int			auto_recharge_vbat_mv;
@@ -189,6 +215,7 @@
 	int			term_current_src;
 	int			term_current_thresh_hi_ma;
 	int			term_current_thresh_lo_ma;
+	int			disable_suspend_on_collapse;
 };
 
 struct smb5 {
@@ -256,6 +283,13 @@
 };
 ATTRIBUTE_GROUPS(smb5);
 
+enum {
+	BAT_THERM = 0,
+	MISC_THERM,
+	CONN_THERM,
+	SMB_THERM,
+};
+
 #define PMI632_MAX_ICL_UA	3000000
 #define PM6150_MAX_FCC_UA	3000000
 static int smb5_chg_config_init(struct smb5 *chip)
@@ -288,21 +322,29 @@
 		chip->chg.smb_version = PM8150B_SUBTYPE;
 		chg->param = smb5_pm8150b_params;
 		chg->name = "pm8150b_charger";
+		chg->wa_flags |= CHG_TERMINATION_WA;
 		break;
 	case PM6150_SUBTYPE:
 		chip->chg.smb_version = PM6150_SUBTYPE;
 		chg->param = smb5_pm8150b_params;
 		chg->name = "pm6150_charger";
-		chg->wa_flags |= SW_THERM_REGULATION_WA;
+		chg->wa_flags |= SW_THERM_REGULATION_WA | CHG_TERMINATION_WA;
+		if (pmic_rev_id->rev4 >= 2)
+			chg->uusb_moisture_protection_capable = true;
 		chg->main_fcc_max = PM6150_MAX_FCC_UA;
 		break;
 	case PMI632_SUBTYPE:
 		chip->chg.smb_version = PMI632_SUBTYPE;
+		chg->wa_flags |= WEAK_ADAPTER_WA | USBIN_OV_WA
+				| CHG_TERMINATION_WA;
 		chg->param = smb5_pmi632_params;
 		chg->use_extcon = true;
 		chg->name = "pmi632_charger";
 		/* PMI632 does not support PD */
 		chg->pd_not_supported = true;
+		chg->lpd_disabled = true;
+		if (pmic_rev_id->rev4 >= 2)
+			chg->uusb_moisture_protection_enabled = true;
 		chg->hw_max_icl_ua =
 			(chip->dt.usb_icl_ua > 0) ? chip->dt.usb_icl_ua
 						: PMI632_MAX_ICL_UA;
@@ -327,22 +369,55 @@
 	return rc;
 }
 
+#define PULL_NO_PULL	0
+#define PULL_30K	30
+#define PULL_100K	100
+#define PULL_400K	400
+static int get_valid_pullup(int pull_up)
+{
+	/* pull up can only be 0/30K/100K/400K; default to 100K otherwise */
+	switch (pull_up) {
+	case PULL_NO_PULL:
+		return INTERNAL_PULL_NO_PULL;
+	case PULL_30K:
+		return INTERNAL_PULL_30K_PULL;
+	case PULL_100K:
+		return INTERNAL_PULL_100K_PULL;
+	case PULL_400K:
+		return INTERNAL_PULL_400K_PULL;
+	default:
+		return INTERNAL_PULL_100K_PULL;
+	}
+}
+
+#define INTERNAL_PULL_UP_MASK	0x3
+static int smb5_configure_internal_pull(struct smb_charger *chg, int type,
+					int pull)
+{
+	int rc;
+	int shift = type * 2;
+	u8 mask = INTERNAL_PULL_UP_MASK << shift;
+	u8 val = pull << shift;
+
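+	/* each source owns a 2-bit field, e.g. CONN_THERM (2) -> bits 5:4 */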
+	rc = smblib_masked_write(chg, BATIF_ADC_INTERNAL_PULL_UP_REG,
+				mask, val);
+	if (rc < 0)
+		dev_err(chg->dev,
+			"Couldn't configure ADC pull-up reg rc=%d\n", rc);
+
+	return rc;
+}
+
 #define MICRO_1P5A			1500000
 #define MICRO_P1A			100000
 #define MICRO_1PA			1000000
 #define MICRO_3PA			3000000
 #define OTG_DEFAULT_DEGLITCH_TIME_MS	50
 #define DEFAULT_WD_BARK_TIME		64
-static int smb5_parse_dt(struct smb5 *chip)
+static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node)
 {
+	int rc = 0, byte_len;
 	struct smb_charger *chg = &chip->chg;
-	struct device_node *node = chg->dev->of_node;
-	int rc, byte_len;
-
-	if (!node) {
-		pr_err("device tree node missing\n");
-		return -EINVAL;
-	}
 
 	of_property_read_u32(node, "qcom,sec-charger-config",
 					&chip->dt.sec_charger_config);
@@ -357,9 +432,17 @@
 	chg->step_chg_enabled = of_property_read_bool(node,
 				"qcom,step-charging-enable");
 
+	chg->typec_legacy_use_rp_icl = of_property_read_bool(node,
+				"qcom,typec-legacy-rp-icl");
+
 	chg->sw_jeita_enabled = of_property_read_bool(node,
 				"qcom,sw-jeita-enable");
 
+	chg->pd_not_supported = chg->pd_not_supported ||
+			of_property_read_bool(node, "qcom,usb-pd-disable");
+
+	chg->lpd_disabled = of_property_read_bool(node, "qcom,lpd-disable");
+
 	rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
 					&chip->dt.wd_bark_time);
 	if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
@@ -368,39 +451,6 @@
 	chip->dt.no_battery = of_property_read_bool(node,
 						"qcom,batteryless-platform");
 
-	rc = of_property_read_u32(node,
-			"qcom,fcc-max-ua", &chip->dt.batt_profile_fcc_ua);
-	if (rc < 0)
-		chip->dt.batt_profile_fcc_ua = -EINVAL;
-
-	rc = of_property_read_u32(node,
-				"qcom,fv-max-uv", &chip->dt.batt_profile_fv_uv);
-	if (rc < 0)
-		chip->dt.batt_profile_fv_uv = -EINVAL;
-
-	rc = of_property_read_u32(node,
-				"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
-	if (rc < 0)
-		chip->dt.usb_icl_ua = -EINVAL;
-
-	rc = of_property_read_u32(node,
-				"qcom,otg-cl-ua", &chg->otg_cl_ua);
-	if (rc < 0)
-		chg->otg_cl_ua = (chip->chg.smb_version == PMI632_SUBTYPE) ?
-							MICRO_1PA : MICRO_3PA;
-
-	rc = of_property_read_u32(node, "qcom,chg-term-src",
-			&chip->dt.term_current_src);
-	if (rc < 0)
-		chip->dt.term_current_src = ITERM_SRC_UNSPECIFIED;
-
-	rc = of_property_read_u32(node, "qcom,chg-term-current-ma",
-			&chip->dt.term_current_thresh_hi_ma);
-
-	if (chip->dt.term_current_src == ITERM_SRC_ADC)
-		rc = of_property_read_u32(node, "qcom,chg-term-base-current-ma",
-				&chip->dt.term_current_thresh_lo_ma);
-
 	if (of_find_property(node, "qcom,thermal-mitigation", &byte_len)) {
 		chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len,
 			GFP_KERNEL);
@@ -441,13 +491,8 @@
 						"qcom,hvdcp-disable");
 	chg->hvdcp_disable = chip->dt.hvdcp_disable;
 
-	rc = of_property_read_u32(node, "qcom,chg-inhibit-threshold-mv",
-				&chip->dt.chg_inhibit_thr_mv);
-	if (!rc && (chip->dt.chg_inhibit_thr_mv < 0 ||
-				chip->dt.chg_inhibit_thr_mv > 300)) {
-		pr_err("qcom,chg-inhibit-threshold-mv is incorrect\n");
-		return -EINVAL;
-	}
+	chip->dt.hvdcp_autonomous = of_property_read_bool(node,
+						"qcom,hvdcp-autonomous-enable");
 
 	chip->dt.auto_recharge_soc = -EINVAL;
 	rc = of_property_read_u32(node, "qcom,auto-recharge-soc",
@@ -459,16 +504,6 @@
 	}
 	chg->auto_recharge_soc = chip->dt.auto_recharge_soc;
 
-	chip->dt.auto_recharge_vbat_mv = -EINVAL;
-	rc = of_property_read_u32(node, "qcom,auto-recharge-vbat-mv",
-				&chip->dt.auto_recharge_vbat_mv);
-	if (!rc && (chip->dt.auto_recharge_vbat_mv < 0)) {
-		pr_err("qcom,auto-recharge-vbat-mv is incorrect\n");
-		return -EINVAL;
-	}
-
-	chg->dcp_icl_ua = chip->dt.usb_icl_ua;
-
 	chg->suspend_input_on_debug_batt = of_property_read_bool(node,
 					"qcom,suspend-input-on-debug-batt");
 
@@ -480,7 +515,34 @@
 	chg->fcc_stepper_enable = of_property_read_bool(node,
 					"qcom,fcc-stepping-enable");
 
-	/* Extract ADC channels */
+	if (chg->uusb_moisture_protection_capable)
+		chg->uusb_moisture_protection_enabled =
+			of_property_read_bool(node,
+					"qcom,uusb-moisture-protection-enable");
+
+	chg->hw_die_temp_mitigation = of_property_read_bool(node,
+					"qcom,hw-die-temp-mitigation");
+
+	chg->hw_connector_mitigation = of_property_read_bool(node,
+					"qcom,hw-connector-mitigation");
+
+	chg->hw_skin_temp_mitigation = of_property_read_bool(node,
+					"qcom,hw-skin-temp-mitigation");
+
+	chg->connector_pull_up = -EINVAL;
+	of_property_read_u32(node, "qcom,connector-internal-pull-kohm",
+					&chg->connector_pull_up);
+
+	chip->dt.disable_suspend_on_collapse = of_property_read_bool(node,
+					"qcom,disable-suspend-on-collapse");
+
+	return 0;
+}
+
+static int smb5_parse_dt_adc_channels(struct smb_charger *chg)
+{
+	int rc = 0;
+
 	rc = smblib_get_iio_channel(chg, "mid_voltage", &chg->iio.mid_chan);
 	if (rc < 0)
 		return rc;
@@ -534,6 +596,101 @@
 	return 0;
 }
 
+static int smb5_parse_dt_currents(struct smb5 *chip, struct device_node *node)
+{
+	int rc = 0;
+	struct smb_charger *chg = &chip->chg;
+
+	rc = of_property_read_u32(node,
+			"qcom,fcc-max-ua", &chip->dt.batt_profile_fcc_ua);
+	if (rc < 0)
+		chip->dt.batt_profile_fcc_ua = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
+	if (rc < 0)
+		chip->dt.usb_icl_ua = -EINVAL;
+	chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+
+	rc = of_property_read_u32(node,
+				"qcom,otg-cl-ua", &chg->otg_cl_ua);
+	if (rc < 0)
+		chg->otg_cl_ua = (chip->chg.smb_version == PMI632_SUBTYPE) ?
+							MICRO_1PA : MICRO_3PA;
+
+	rc = of_property_read_u32(node, "qcom,chg-term-src",
+			&chip->dt.term_current_src);
+	if (rc < 0)
+		chip->dt.term_current_src = ITERM_SRC_UNSPECIFIED;
+
+	if (chip->dt.term_current_src == ITERM_SRC_ADC)
+		rc = of_property_read_u32(node, "qcom,chg-term-base-current-ma",
+				&chip->dt.term_current_thresh_lo_ma);
+
+	rc = of_property_read_u32(node, "qcom,chg-term-current-ma",
+			&chip->dt.term_current_thresh_hi_ma);
+
+	return 0;
+}
+
+static int smb5_parse_dt_voltages(struct smb5 *chip, struct device_node *node)
+{
+	int rc = 0;
+
+	rc = of_property_read_u32(node,
+				"qcom,fv-max-uv", &chip->dt.batt_profile_fv_uv);
+	if (rc < 0)
+		chip->dt.batt_profile_fv_uv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,chg-inhibit-threshold-mv",
+				&chip->dt.chg_inhibit_thr_mv);
+	if (!rc && (chip->dt.chg_inhibit_thr_mv < 0 ||
+				chip->dt.chg_inhibit_thr_mv > 300)) {
+		pr_err("qcom,chg-inhibit-threshold-mv is incorrect\n");
+		return -EINVAL;
+	}
+
+	chip->dt.auto_recharge_vbat_mv = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,auto-recharge-vbat-mv",
+				&chip->dt.auto_recharge_vbat_mv);
+	if (!rc && (chip->dt.auto_recharge_vbat_mv < 0)) {
+		pr_err("qcom,auto-recharge-vbat-mv is incorrect\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int smb5_parse_dt(struct smb5 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	int rc = 0;
+
+	if (!node) {
+		pr_err("device tree node missing\n");
+		return -EINVAL;
+	}
+
+	rc = smb5_parse_dt_voltages(chip, node);
+	if (rc < 0)
+		return rc;
+
+	rc = smb5_parse_dt_currents(chip, node);
+	if (rc < 0)
+		return rc;
+
+	rc = smb5_parse_dt_adc_channels(chg);
+	if (rc < 0)
+		return rc;
+
+	rc = smb5_parse_dt_misc(chip, node);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
 /************************
  * USB PSY REGISTRATION *
  ************************/
@@ -547,7 +704,6 @@
 	POWER_SUPPLY_PROP_TYPEC_MODE,
 	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
 	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
-	POWER_SUPPLY_PROP_TYPEC_SRC_RP,
 	POWER_SUPPLY_PROP_LOW_POWER,
 	POWER_SUPPLY_PROP_PD_ACTIVE,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
@@ -557,16 +713,17 @@
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
 	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
 	POWER_SUPPLY_PROP_REAL_TYPE,
-	POWER_SUPPLY_PROP_PR_SWAP,
 	POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
-	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CONNECTOR_TYPE,
 	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_SMB_EN_MODE,
 	POWER_SUPPLY_PROP_SMB_EN_REASON,
 	POWER_SUPPLY_PROP_SCOPE,
+	POWER_SUPPLY_PROP_MOISTURE_DETECTED,
+	POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
+	POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
 };
 
 static int smb5_usb_get_prop(struct power_supply *psy,
@@ -575,27 +732,15 @@
 {
 	struct smb5 *chip = power_supply_get_drvdata(psy);
 	struct smb_charger *chg = &chip->chg;
-	union power_supply_propval pval;
 	int rc = 0;
+	val->intval = 0;
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_PRESENT:
 		rc = smblib_get_prop_usb_present(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_ONLINE:
-		rc = smblib_get_prop_usb_online(chg, val);
-		if (!val->intval)
-			break;
-
-		if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) ||
-		   (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
-			&& (chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
-			val->intval = 0;
-		else
-			val->intval = 1;
-
-		if (chg->real_charger_type == POWER_SUPPLY_TYPE_UNKNOWN)
-			val->intval = 0;
+		rc = smblib_get_usb_online(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_get_prop_usb_voltage_max(chg, val);
@@ -616,31 +761,19 @@
 		val->intval = chg->real_charger_type;
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_MODE:
-		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
-			val->intval = POWER_SUPPLY_TYPEC_NONE;
-		else
-			val->intval = chg->typec_mode;
+		rc = smblib_get_usb_prop_typec_mode(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
-		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
-			val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
-		else
-			rc = smblib_get_prop_typec_power_role(chg, val);
+		rc = smblib_get_prop_typec_power_role(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
-		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
-			val->intval = 0;
-		else
-			rc = smblib_get_prop_typec_cc_orientation(chg, val);
+		rc = smblib_get_prop_typec_cc_orientation(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_SRC_RP:
 		rc = smblib_get_prop_typec_select_rp(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_LOW_POWER:
-		if (chg->sink_src_mode == SRC_MODE)
-			rc = smblib_get_prop_low_power(chg, val);
-		else
-			rc = -ENODATA;
+		rc = smblib_get_prop_low_power(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_PD_ACTIVE:
 		val->intval = chg->pd_active;
@@ -686,19 +819,10 @@
 		val->intval = chg->connector_type;
 		break;
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
-		if (chg->connector_health == -EINVAL)
-			val->intval = smblib_get_prop_connector_health(chg);
-		else
-			val->intval = chg->connector_health;
+		val->intval = smblib_get_prop_connector_health(chg);
 		break;
 	case POWER_SUPPLY_PROP_SCOPE:
-		val->intval = POWER_SUPPLY_SCOPE_UNKNOWN;
-		rc = smblib_get_prop_usb_present(chg, &pval);
-		if (rc < 0)
-			break;
-		val->intval = pval.intval ? POWER_SUPPLY_SCOPE_DEVICE
-				: chg->otg_present ? POWER_SUPPLY_SCOPE_SYSTEM
-						: POWER_SUPPLY_SCOPE_UNKNOWN;
+		rc = smblib_get_prop_scope(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_SMB_EN_MODE:
 		mutex_lock(&chg->smb_lock);
@@ -708,6 +832,19 @@
 	case POWER_SUPPLY_PROP_SMB_EN_REASON:
 		val->intval = chg->cp_reason;
 		break;
+	case POWER_SUPPLY_PROP_MOISTURE_DETECTED:
+		val->intval = chg->moisture_present;
+		break;
+	case POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED:
+		val->intval = !chg->flash_active;
+		break;
+	case POWER_SUPPLY_PROP_QC_OPTI_DISABLE:
+		if (chg->hw_die_temp_mitigation)
+			val->intval = POWER_SUPPLY_QC_THERMAL_BALANCE_DISABLE
+					| POWER_SUPPLY_QC_INOV_THERMAL_DISABLE;
+		if (chg->hw_connector_mitigation)
+			val->intval |= POWER_SUPPLY_QC_CTM_DISABLE;
+		break;
 	default:
 		pr_err("get prop %d is not supported in usb\n", psp);
 		rc = -EINVAL;
@@ -1001,6 +1138,7 @@
 {
 	struct smb5 *chip = power_supply_get_drvdata(psy);
 	struct smb_charger *chg = &chip->chg;
+	union power_supply_propval pval = {0, };
 	int rc = 0;
 
 	switch (psp) {
@@ -1014,7 +1152,35 @@
 		rc = smblib_set_icl_current(chg, val->intval);
 		break;
 	case POWER_SUPPLY_PROP_FLASH_ACTIVE:
-		chg->flash_active = val->intval;
+		if ((chg->smb_version == PMI632_SUBTYPE)
+				&& (chg->flash_active != val->intval)) {
+			chg->flash_active = val->intval;
+
+			rc = smblib_get_prop_usb_present(chg, &pval);
+			if (rc < 0)
+				pr_err("Failed to get USB present status rc=%d\n",
+						rc);
+			if (pval.intval) {
+				rc = smblib_force_vbus_voltage(chg,
+					chg->flash_active ? FORCE_5V_BIT
+								: IDLE_BIT);
+				if (rc < 0)
+					pr_err("Failed to force 5V\n");
+				else
+					chg->pulse_cnt = 0;
+			} else {
+				/* USB absent & flash not-active - vote 100mA */
+				vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER,
+							true, SDP_100_MA);
+			}
+
+			pr_debug("flash active VBUS 5V restriction %s\n",
+				chg->flash_active ? "applied" : "removed");
+
+			/* Update userspace */
+			if (chg->batt_psy)
+				power_supply_changed(chg->batt_psy);
+		}
 		break;
 	case POWER_SUPPLY_PROP_TOGGLE_STAT:
 		rc = smblib_toggle_smb_en(chg, val->intval);
@@ -1301,8 +1467,7 @@
 				QNOVO_VOTER);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
-		rc = smblib_get_prop_from_bms(chg,
-				POWER_SUPPLY_PROP_CURRENT_NOW, val);
+		rc = smblib_get_batt_current_now(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
 		val->intval = get_client_vote_locked(chg->fcc_votable,
@@ -1333,10 +1498,7 @@
 		val->intval = 0;
 		break;
 	case POWER_SUPPLY_PROP_DIE_HEALTH:
-		if (chg->die_health == -EINVAL)
-			val->intval = smblib_get_prop_die_health(chg);
-		else
-			val->intval = chg->die_health;
+		rc = smblib_get_die_health(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_DP_DM:
 		val->intval = chg->pulse_cnt;
@@ -1415,13 +1577,6 @@
 	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
 		chg->step_chg_enabled = !!val->intval;
 		break;
-	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
-		if (chg->sw_jeita_enabled != (!!val->intval)) {
-			rc = smblib_disable_hw_jeita(chg, !!val->intval);
-			if (rc == 0)
-				chg->sw_jeita_enabled = !!val->intval;
-		}
-		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		chg->batt_profile_fcc_ua = val->intval;
 		vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
@@ -1448,10 +1603,11 @@
 		rc = smblib_set_prop_ship_mode(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_RERUN_AICL:
-		rc = smblib_rerun_aicl(chg);
+		rc = smblib_run_aicl(chg, RERUN_AICL);
 		break;
 	case POWER_SUPPLY_PROP_DP_DM:
-		rc = smblib_dp_dm(chg, val->intval);
+		if (!chg->flash_active)
+			rc = smblib_dp_dm(chg, val->intval);
 		break;
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
 		rc = smblib_set_prop_input_current_limited(chg, val);
@@ -1472,6 +1628,9 @@
 			vote(chg->chg_disable_votable, FORCE_RECHARGE_VOTER,
 					false, 0);
 		break;
+	case POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE:
+		chg->fcc_stepper_enable = val->intval;
+		break;
 	default:
 		rc = -EINVAL;
 	}
@@ -1492,7 +1651,6 @@
 	case POWER_SUPPLY_PROP_RERUN_AICL:
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
 	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
-	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
 	case POWER_SUPPLY_PROP_DIE_HEALTH:
 		return 1;
 	default:
@@ -1623,7 +1781,42 @@
  ***************************/
 static int smb5_configure_typec(struct smb_charger *chg)
 {
+	union power_supply_propval pval = {0, };
 	int rc;
+	u8 val = 0;
+
+	rc = smblib_read(chg, LEGACY_CABLE_STATUS_REG, &val);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read Legacy status rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * Across reboot, standard typeC cables get detected as legacy cables
+	 * due to VBUS attachment prior to CC attach/detach. To handle this,
+	 * the "early_usb_attach" flag is used, which assumes that across
+	 * reboot the connected cable can be standard typeC. However, its
+	 * jurisdiction is limited to PD capable designs only. Hence, for
+	 * non-PD designs, reset legacy cable detection by disabling and
+	 * re-enabling typeC mode.
+	 */
+	if (chg->pd_not_supported && (val & TYPEC_LEGACY_CABLE_STATUS_BIT)) {
+		pval.intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		rc = smblib_set_prop_typec_power_role(chg, &pval);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't disable TYPEC rc=%d\n", rc);
+			return rc;
+		}
+
+		/* delay before enabling typeC */
+		msleep(50);
+
+		pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+		rc = smblib_set_prop_typec_power_role(chg, &pval);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't enable TYPEC rc=%d\n", rc);
+			return rc;
+		}
+	}
 
 	smblib_apsd_enable(chg, true);
 	smblib_hvdcp_detect_enable(chg, false);
@@ -1645,32 +1838,49 @@
 		return rc;
 	}
 
+	val = chg->lpd_disabled ? 0 : TYPEC_WATER_DETECTION_INT_EN_BIT;
 	/* Use simple write to enable only required interrupts */
 	rc = smblib_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG,
-				TYPEC_SRC_BATT_HPWR_INT_EN_BIT |
-				TYPEC_WATER_DETECTION_INT_EN_BIT);
+				TYPEC_SRC_BATT_HPWR_INT_EN_BIT | val);
 	if (rc < 0) {
 		dev_err(chg->dev,
 			"Couldn't configure Type-C interrupts rc=%d\n", rc);
 		return rc;
 	}
 
+	/* enable try.snk and clear force sink for DRP mode */
 	rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
-				EN_TRY_SNK_BIT, EN_TRY_SNK_BIT);
+				EN_TRY_SNK_BIT | EN_SNK_ONLY_BIT,
+				EN_TRY_SNK_BIT);
 	if (rc < 0) {
 		dev_err(chg->dev,
-			"Couldn't enable try.snk rc=%d\n", rc);
+			"Couldn't configure TYPE_C_MODE_CFG_REG rc=%d\n",
+				rc);
 		return rc;
 	}
 	chg->typec_try_mode |= EN_TRY_SNK_BIT;
 
-	/* configure VCONN for software control */
-	rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
+	/* For PD capable targets configure VCONN for software control */
+	if (!chg->pd_not_supported) {
+		rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
 				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
 				 VCONN_EN_SRC_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't configure VCONN for SW control rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	/* Enable detection of unoriented debug accessory in source mode */
+	rc = smblib_masked_write(chg, DEBUG_ACCESS_SRC_CFG_REG,
+				 EN_UNORIENTED_DEBUG_ACCESS_SRC_BIT,
+				 EN_UNORIENTED_DEBUG_ACCESS_SRC_BIT);
 	if (rc < 0) {
 		dev_err(chg->dev,
-			"Couldn't configure VCONN for SW control rc=%d\n", rc);
+			"Couldn't configure TYPE_C_DEBUG_ACCESS_SRC_CFG_REG rc=%d\n",
+				rc);
 		return rc;
 	}
 
@@ -1701,33 +1911,72 @@
 		return rc;
 	}
 
+	if (chg->uusb_moisture_protection_enabled) {
+		/* Enable moisture detection interrupt */
+		rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG,
+				TYPEC_WATER_DETECTION_INT_EN_BIT,
+				TYPEC_WATER_DETECTION_INT_EN_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't enable moisture detection interrupt rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Enable uUSB factory mode */
+		rc = smblib_masked_write(chg, TYPEC_U_USB_CFG_REG,
+					EN_MICRO_USB_FACTORY_MODE_BIT,
+					EN_MICRO_USB_FACTORY_MODE_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't enable uUSB factory mode rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Disable periodic monitoring of CC_ID pin */
+		rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
+			PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+			TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't disable periodic monitoring of CC_ID rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	return rc;
 }
 
+#define RAW_ITERM(iterm_ma, max_range)				\
+		div_s64((int64_t)(iterm_ma) * ADC_CHG_ITERM_MASK, (max_range))
 static int smb5_configure_iterm_thresholds_adc(struct smb5 *chip)
 {
 	u8 *buf;
 	int rc = 0;
-	s16 raw_hi_thresh, raw_lo_thresh;
+	s16 raw_hi_thresh, raw_lo_thresh, max_limit_ma;
 	struct smb_charger *chg = &chip->chg;
 
-	if (chip->dt.term_current_thresh_hi_ma < -10000 ||
-			chip->dt.term_current_thresh_hi_ma > 10000 ||
-			chip->dt.term_current_thresh_lo_ma < -10000 ||
-			chip->dt.term_current_thresh_lo_ma > 10000) {
+	if (chip->chg.smb_version == PMI632_SUBTYPE)
+		max_limit_ma = ITERM_LIMITS_PMI632_MA;
+	else
+		max_limit_ma = ITERM_LIMITS_PM8150B_MA;
+
+	if (chip->dt.term_current_thresh_hi_ma < (-1 * max_limit_ma)
+		|| chip->dt.term_current_thresh_hi_ma > max_limit_ma
+		|| chip->dt.term_current_thresh_lo_ma < (-1 * max_limit_ma)
+		|| chip->dt.term_current_thresh_lo_ma > max_limit_ma) {
 		dev_err(chg->dev, "ITERM threshold out of range rc=%d\n", rc);
 		return -EINVAL;
 	}
 
 	/*
 	 * Conversion:
-	 *	raw (A) = (scaled_mA * ADC_CHG_TERM_MASK) / (10 * 1000)
+	 *	raw (A) = (term_current * ADC_CHG_ITERM_MASK) / max_limit_ma
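+	 *	e.g. a 500 mA threshold with a 10000 mA limit gives
+	 *	raw = 500 * ADC_CHG_ITERM_MASK / 10000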
 	 * Note: raw needs to be converted to big-endian format.
 	 */
 
 	if (chip->dt.term_current_thresh_hi_ma) {
-		raw_hi_thresh = ((chip->dt.term_current_thresh_hi_ma *
-						ADC_CHG_TERM_MASK) / 10000);
+		raw_hi_thresh = RAW_ITERM(chip->dt.term_current_thresh_hi_ma,
+					max_limit_ma);
 		raw_hi_thresh = sign_extend32(raw_hi_thresh, 15);
 		buf = (u8 *)&raw_hi_thresh;
 		raw_hi_thresh = buf[1] | (buf[0] << 8);
@@ -1742,8 +1991,8 @@
 	}
 
 	if (chip->dt.term_current_thresh_lo_ma) {
-		raw_lo_thresh = ((chip->dt.term_current_thresh_lo_ma *
-					ADC_CHG_TERM_MASK) / 10000);
+		raw_lo_thresh = RAW_ITERM(chip->dt.term_current_thresh_lo_ma,
+					max_limit_ma);
 		raw_lo_thresh = sign_extend32(raw_lo_thresh, 15);
 		buf = (u8 *)&raw_lo_thresh;
 		raw_lo_thresh = buf[1] | (buf[0] << 8);
@@ -1775,321 +2024,87 @@
 	return rc;
 }
 
-static int smb5_init_hw(struct smb5 *chip)
+static int smb5_configure_mitigation(struct smb_charger *chg)
 {
-	struct smb_charger *chg = &chip->chg;
-	int rc, type = 0;
-	u8 val = 0;
-	union power_supply_propval pval;
+	int rc;
+	u8 chan = 0, src_cfg = 0;
 
-	if (chip->dt.no_battery)
-		chg->fake_capacity = 50;
-
-	if (chip->dt.batt_profile_fcc_ua < 0)
-		smblib_get_charge_param(chg, &chg->param.fcc,
-				&chg->batt_profile_fcc_ua);
-
-	if (chip->dt.batt_profile_fv_uv < 0)
-		smblib_get_charge_param(chg, &chg->param.fv,
-				&chg->batt_profile_fv_uv);
-
-	smblib_get_charge_param(chg, &chg->param.usb_icl,
-				&chg->default_icl_ua);
-
-	if (chg->charger_temp_max == -EINVAL) {
-		rc = smblib_get_thermal_threshold(chg,
-					DIE_REG_H_THRESHOLD_MSB_REG,
-					&chg->charger_temp_max);
-		if (rc < 0) {
-			dev_err(chg->dev, "Couldn't get charger_temp_max rc=%d\n",
-					rc);
-			return rc;
-		}
-	}
-
-	/* Disable SMB Temperature ADC INT */
-	rc = smblib_masked_write(chg, MISC_THERMREG_SRC_CFG_REG,
-					 THERMREG_SMB_ADC_SRC_EN_BIT, 0);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't configure SMB thermal regulation  rc=%d\n",
-				rc);
-		return rc;
-	}
-
-	/*
-	 * If SW thermal regulation WA is active then all the HW temperature
-	 * comparators need to be disabled to prevent HW thermal regulation,
-	 * apart from DIE_TEMP analog comparator for SHDN regulation.
-	 */
-	if (chg->wa_flags & SW_THERM_REGULATION_WA) {
-		rc = smblib_write(chg, MISC_THERMREG_SRC_CFG_REG,
-					THERMREG_DIE_CMP_SRC_EN_BIT);
-		if (rc < 0) {
-			dev_err(chg->dev, "Couldn't disable HW thermal regulation rc=%d\n",
-				rc);
-			return rc;
-		}
-	}
-
-	/*
-	 * Disable HVDCP autonomous mode operation by default. Additionally, if
-	 * specified in DT: disable HVDCP and HVDCP authentication algorithm.
-	 */
-	val = (chg->hvdcp_disable) ? 0 :
-		(HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT);
-	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
-			(HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT |
-			 HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT),
-			val);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't configure HVDCP rc=%d\n", rc);
-		return rc;
-	}
-
-	/*
-	 * PMI632 can have the connector type defined by a dedicated register
-	 * TYPEC_MICRO_USB_MODE_REG or by a common TYPEC_U_USB_CFG_REG.
-	 */
-	if (chg->smb_version == PMI632_SUBTYPE) {
-		rc = smblib_read(chg, TYPEC_MICRO_USB_MODE_REG, &val);
-		if (rc < 0) {
-			dev_err(chg->dev, "Couldn't read USB mode rc=%d\n", rc);
-			return rc;
-		}
-		type = !!(val & MICRO_USB_MODE_ONLY_BIT);
-	}
-
-	/*
-	 * If TYPEC_MICRO_USB_MODE_REG is not set and for all non-PMI632
-	 * check the connector type using TYPEC_U_USB_CFG_REG.
-	 */
-	if (!type) {
-		rc = smblib_read(chg, TYPEC_U_USB_CFG_REG, &val);
-		if (rc < 0) {
-			dev_err(chg->dev, "Couldn't read U_USB config rc=%d\n",
-					rc);
-			return rc;
-		}
-
-		type = !!(val & EN_MICRO_USB_MODE_BIT);
-	}
-
-	pr_debug("Connector type=%s\n", type ? "Micro USB" : "TypeC");
-
-	if (type) {
-		chg->connector_type = POWER_SUPPLY_CONNECTOR_MICRO_USB;
-		rc = smb5_configure_micro_usb(chg);
+	if (!chg->hw_die_temp_mitigation && !chg->hw_connector_mitigation &&
+			!chg->hw_skin_temp_mitigation) {
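+		/* no HW mitigation requested: fall back to SW ICL adjustment */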
+		src_cfg = THERMREG_SW_ICL_ADJUST_BIT;
 	} else {
-		chg->connector_type = POWER_SUPPLY_CONNECTOR_TYPEC;
-		rc = smb5_configure_typec(chg);
+		if (chg->hw_die_temp_mitigation) {
+			chan = DIE_TEMP_CHANNEL_EN_BIT;
+			src_cfg = THERMREG_DIE_ADC_SRC_EN_BIT
+				| THERMREG_DIE_CMP_SRC_EN_BIT;
+		}
+
+		if (chg->hw_connector_mitigation) {
+			chan |= CONN_THM_CHANNEL_EN_BIT;
+			src_cfg |= THERMREG_CONNECTOR_ADC_SRC_EN_BIT;
+		}
+
+		if (chg->hw_skin_temp_mitigation) {
+			chan |= MISC_THM_CHANNEL_EN_BIT;
+			src_cfg |= THERMREG_SKIN_ADC_SRC_EN_BIT;
+		}
+
+		rc = smblib_masked_write(chg, BATIF_ADC_CHANNEL_EN_REG,
+			CONN_THM_CHANNEL_EN_BIT | DIE_TEMP_CHANNEL_EN_BIT |
+			MISC_THM_CHANNEL_EN_BIT, chan);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't enable ADC channel rc=%d\n",
+				rc);
+			return rc;
+		}
 	}
+
+	rc = smblib_masked_write(chg, MISC_THERMREG_SRC_CFG_REG,
+		THERMREG_SW_ICL_ADJUST_BIT | THERMREG_DIE_ADC_SRC_EN_BIT |
+		THERMREG_DIE_CMP_SRC_EN_BIT | THERMREG_SKIN_ADC_SRC_EN_BIT |
+		SKIN_ADC_CFG_BIT | THERMREG_CONNECTOR_ADC_SRC_EN_BIT, src_cfg);
 	if (rc < 0) {
 		dev_err(chg->dev,
-			"Couldn't configure TypeC/micro-USB mode rc=%d\n", rc);
+				"Couldn't configure THERM_SRC reg rc=%d\n", rc);
 		return rc;
 	}
 
-	/*
-	 * PMI632 based hw init:
-	 * - Rerun APSD to ensure proper charger detection if device
-	 *   boots with charger connected.
-	 * - Initialize flash module for PMI632
-	 */
-	if (chg->smb_version == PMI632_SUBTYPE) {
-		schgm_flash_init(chg);
-		smblib_rerun_apsd_if_required(chg);
-	}
+	return 0;
+}
 
-	/* clear the ICL override if it is set */
-	rc = smblib_icl_override(chg, false);
-	if (rc < 0) {
-		pr_err("Couldn't disable ICL override rc=%d\n", rc);
-		return rc;
-	}
+static int smb5_init_dc_peripheral(struct smb_charger *chg)
+{
+	int rc = 0;
 
-	/* set OTG current limit */
-	rc = smblib_set_charge_param(chg, &chg->param.otg_cl, chg->otg_cl_ua);
-	if (rc < 0) {
-		pr_err("Couldn't set otg current limit rc=%d\n", rc);
-		return rc;
-	}
-
-	/* vote 0mA on usb_icl for non battery platforms */
-	vote(chg->usb_icl_votable,
-		DEFAULT_VOTER, chip->dt.no_battery, 0);
-	vote(chg->dc_suspend_votable,
-		DEFAULT_VOTER, chip->dt.no_battery, 0);
-	vote(chg->fcc_votable, HW_LIMIT_VOTER,
-		chip->dt.batt_profile_fcc_ua > 0, chip->dt.batt_profile_fcc_ua);
-	vote(chg->fv_votable, HW_LIMIT_VOTER,
-		chip->dt.batt_profile_fv_uv > 0, chip->dt.batt_profile_fv_uv);
-	vote(chg->fcc_votable,
-		BATT_PROFILE_VOTER, chg->batt_profile_fcc_ua > 0,
-		chg->batt_profile_fcc_ua);
-	vote(chg->fv_votable,
-		BATT_PROFILE_VOTER, chg->batt_profile_fv_uv > 0,
-		chg->batt_profile_fv_uv);
-
-	/* Some h/w limit maximum supported ICL */
-	vote(chg->usb_icl_votable, HW_LIMIT_VOTER,
-			chg->hw_max_icl_ua > 0, chg->hw_max_icl_ua);
+	/* PMI632 does not have DC peripheral */
+	if (chg->smb_version == PMI632_SUBTYPE)
+		return 0;
 
 	/* set DC icl_max 1A */
 	rc = smblib_set_charge_param(chg, &chg->param.dc_icl, 1000000);
 	if (rc < 0) {
-		dev_err(chg->dev,
-			"Couldn't set dc_icl rc=%d\n", rc);
+		dev_err(chg->dev, "Couldn't set dc_icl rc=%d\n", rc);
 		return rc;
 	}
 
-	/*
-	 * AICL configuration:
-	 * start from min and AICL ADC disable, and enable aicl rerun
-	 */
-	if (chg->smb_version != PMI632_SUBTYPE) {
-		rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
-				USBIN_AICL_PERIODIC_RERUN_EN_BIT
-				| USBIN_AICL_ADC_EN_BIT | USBIN_AICL_EN_BIT,
-				USBIN_AICL_PERIODIC_RERUN_EN_BIT
-				| USBIN_AICL_EN_BIT);
-		if (rc < 0) {
-			dev_err(chg->dev, "Couldn't config AICL rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	rc = smblib_write(chg, AICL_RERUN_TIME_CFG_REG,
-				AICL_RERUN_TIME_12S_VAL);
+	/* Disable DC Input missing poller function */
+	rc = smblib_masked_write(chg, DCIN_LOAD_CFG_REG,
+					INPUT_MISS_POLL_EN_BIT, 0);
 	if (rc < 0) {
 		dev_err(chg->dev,
-			"Couldn't configure AICL rerun interval rc=%d\n", rc);
+			"Couldn't disable DC Input missing poller rc=%d\n", rc);
 		return rc;
 	}
 
-	/* enable the charging path */
-	rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't enable charging rc=%d\n", rc);
-		return rc;
-	}
+	return rc;
+}
 
-	/* configure VBUS for software control */
-	rc = smblib_masked_write(chg, DCDC_OTG_CFG_REG, OTG_EN_SRC_CFG_BIT, 0);
-	if (rc < 0) {
-		dev_err(chg->dev,
-			"Couldn't configure VBUS for SW control rc=%d\n", rc);
-		return rc;
-	}
-
-	val = (ilog2(chip->dt.wd_bark_time / 16) << BARK_WDOG_TIMEOUT_SHIFT)
-			& BARK_WDOG_TIMEOUT_MASK;
-	val |= BITE_WDOG_TIMEOUT_8S;
-	rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
-			BITE_WDOG_DISABLE_CHARGING_CFG_BIT |
-			BARK_WDOG_TIMEOUT_MASK | BITE_WDOG_TIMEOUT_MASK,
-			val);
-	if (rc < 0) {
-		pr_err("Couldn't configue WD config rc=%d\n", rc);
-		return rc;
-	}
-
-	/* enable WD BARK and enable it on plugin */
-	rc = smblib_masked_write(chg, WD_CFG_REG,
-			WATCHDOG_TRIGGER_AFP_EN_BIT |
-			WDOG_TIMER_EN_ON_PLUGIN_BIT |
-			BARK_WDOG_INT_EN_BIT,
-			WDOG_TIMER_EN_ON_PLUGIN_BIT |
-			BARK_WDOG_INT_EN_BIT);
-	if (rc < 0) {
-		pr_err("Couldn't configue WD config rc=%d\n", rc);
-		return rc;
-	}
-
-	/* set termination current threshold values */
-	rc = smb5_configure_iterm_thresholds(chip);
-	if (rc < 0) {
-		pr_err("Couldn't configure ITERM thresholds rc=%d\n",
-				rc);
-		return rc;
-	}
-
-	/* configure float charger options */
-	switch (chip->dt.float_option) {
-	case FLOAT_DCP:
-		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
-				FLOAT_OPTIONS_MASK, 0);
-		break;
-	case FLOAT_SDP:
-		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
-				FLOAT_OPTIONS_MASK, FORCE_FLOAT_SDP_CFG_BIT);
-		break;
-	case DISABLE_CHARGING:
-		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
-				FLOAT_OPTIONS_MASK, FLOAT_DIS_CHGING_CFG_BIT);
-		break;
-	case SUSPEND_INPUT:
-		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
-				FLOAT_OPTIONS_MASK, SUSPEND_FLOAT_CFG_BIT);
-		break;
-	default:
-		rc = 0;
-		break;
-	}
-
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't configure float charger options rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	switch (chip->dt.chg_inhibit_thr_mv) {
-	case 50:
-		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
-				CHARGE_INHIBIT_THRESHOLD_MASK,
-				INHIBIT_ANALOG_VFLT_MINUS_50MV);
-		break;
-	case 100:
-		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
-				CHARGE_INHIBIT_THRESHOLD_MASK,
-				INHIBIT_ANALOG_VFLT_MINUS_100MV);
-		break;
-	case 200:
-		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
-				CHARGE_INHIBIT_THRESHOLD_MASK,
-				INHIBIT_ANALOG_VFLT_MINUS_200MV);
-		break;
-	case 300:
-		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
-				CHARGE_INHIBIT_THRESHOLD_MASK,
-				INHIBIT_ANALOG_VFLT_MINUS_300MV);
-		break;
-	case 0:
-		rc = smblib_masked_write(chg, CHGR_CFG2_REG,
-				CHARGER_INHIBIT_BIT, 0);
-	default:
-		break;
-	}
-
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't configure charge inhibit threshold rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	rc = smblib_write(chg, CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG,
-					FAST_CHARGE_SAFETY_TIMER_768_MIN);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't set CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG rc=%d\n",
-			rc);
-		return rc;
-	}
+static int smb5_configure_recharging(struct smb5 *chip)
+{
+	int rc = 0;
+	struct smb_charger *chg = &chip->chg;
+	union power_supply_propval pval;
+	/* Configure VBATT-based or automatic recharging */
 
 	rc = smblib_masked_write(chg, CHGR_CFG2_REG, RECHG_MASK,
 				(chip->dt.auto_recharge_vbat_mv != -EINVAL) ?
@@ -2152,12 +2167,369 @@
 		}
 	}
 
-	if (chg->sw_jeita_enabled) {
-		rc = smblib_disable_hw_jeita(chg, true);
+	return 0;
+}
+
+static int smb5_configure_float_charger(struct smb5 *chip)
+{
+	int rc = 0;
+	struct smb_charger *chg = &chip->chg;
+
+	/* configure float charger options */
+	switch (chip->dt.float_option) {
+	case FLOAT_DCP:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, 0);
+		break;
+	case FLOAT_SDP:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, FORCE_FLOAT_SDP_CFG_BIT);
+		break;
+	case DISABLE_CHARGING:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, FLOAT_DIS_CHGING_CFG_BIT);
+		break;
+	case SUSPEND_INPUT:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, SUSPEND_FLOAT_CFG_BIT);
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure float charger options rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smb5_init_connector_type(struct smb_charger *chg)
+{
+	int rc, type = 0;
+	u8 val = 0;
+
+	/*
+	 * PMI632 can have the connector type defined by a dedicated register
+	 * PMI632_TYPEC_MICRO_USB_MODE_REG or by a common TYPEC_U_USB_CFG_REG.
+	 */
+	if (chg->smb_version == PMI632_SUBTYPE) {
+		rc = smblib_read(chg, PMI632_TYPEC_MICRO_USB_MODE_REG, &val);
 		if (rc < 0) {
-			dev_err(chg->dev, "Couldn't set hw jeita rc=%d\n", rc);
+			dev_err(chg->dev, "Couldn't read USB mode rc=%d\n", rc);
 			return rc;
 		}
+		type = !!(val & MICRO_USB_MODE_ONLY_BIT);
+	}
+
+	/*
+	 * If PMI632_TYPEC_MICRO_USB_MODE_REG is not set, and on all
+	 * non-PMI632 parts, check the connector type using
+	 * TYPEC_U_USB_CFG_REG.
+	 */
+	if (!type) {
+		rc = smblib_read(chg, TYPEC_U_USB_CFG_REG, &val);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't read U_USB config rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		type = !!(val & EN_MICRO_USB_MODE_BIT);
+	}
+
+	pr_debug("Connector type=%s\n", type ? "Micro USB" : "TypeC");
+
+	if (type) {
+		chg->connector_type = POWER_SUPPLY_CONNECTOR_MICRO_USB;
+		rc = smb5_configure_micro_usb(chg);
+	} else {
+		chg->connector_type = POWER_SUPPLY_CONNECTOR_TYPEC;
+		rc = smb5_configure_typec(chg);
+	}
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure TypeC/micro-USB mode rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * PMI632 based hw init:
+	 * - Rerun APSD to ensure proper charger detection if device
+	 *   boots with charger connected.
+	 * - Initialize flash module for PMI632
+	 */
+	if (chg->smb_version == PMI632_SUBTYPE) {
+		schgm_flash_init(chg);
+		smblib_rerun_apsd_if_required(chg);
+	}
+
+	return 0;
+}
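
The detection order above reduces to a two-stage check; a condensed sketch, with the register values read into hypothetical locals pmi632_mode and u_usb_cfg:

bool micro_usb = false;

if (chg->smb_version == PMI632_SUBTYPE)
	micro_usb = !!(pmi632_mode & MICRO_USB_MODE_ONLY_BIT);

/* The common register decides for non-PMI632, or when PMI632 says no */
if (!micro_usb)
	micro_usb = !!(u_usb_cfg & EN_MICRO_USB_MODE_BIT);
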
+
+static int smb5_init_hw(struct smb5 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc;
+	u8 val = 0, mask = 0;
+
+	if (chip->dt.no_battery)
+		chg->fake_capacity = 50;
+
+	if (chip->dt.batt_profile_fcc_ua < 0)
+		smblib_get_charge_param(chg, &chg->param.fcc,
+				&chg->batt_profile_fcc_ua);
+
+	if (chip->dt.batt_profile_fv_uv < 0)
+		smblib_get_charge_param(chg, &chg->param.fv,
+				&chg->batt_profile_fv_uv);
+
+	smblib_get_charge_param(chg, &chg->param.usb_icl,
+				&chg->default_icl_ua);
+	smblib_get_charge_param(chg, &chg->param.aicl_5v_threshold,
+				&chg->default_aicl_5v_threshold_mv);
+	chg->aicl_5v_threshold_mv = chg->default_aicl_5v_threshold_mv;
+	smblib_get_charge_param(chg, &chg->param.aicl_cont_threshold,
+				&chg->default_aicl_cont_threshold_mv);
+	chg->aicl_cont_threshold_mv = chg->default_aicl_cont_threshold_mv;
+
+	if (chg->charger_temp_max == -EINVAL) {
+		rc = smblib_get_thermal_threshold(chg,
+					DIE_REG_H_THRESHOLD_MSB_REG,
+					&chg->charger_temp_max);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't get charger_temp_max rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	/*
+	 * If SW thermal regulation WA is active then all the HW temperature
+	 * comparators need to be disabled to prevent HW thermal regulation,
+	 * apart from DIE_TEMP analog comparator for SHDN regulation.
+	 */
+	if (chg->wa_flags & SW_THERM_REGULATION_WA) {
+		rc = smblib_write(chg, MISC_THERMREG_SRC_CFG_REG,
+					THERMREG_DIE_CMP_SRC_EN_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't disable HW thermal regulation rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		/* configure temperature mitigation */
+		rc = smb5_configure_mitigation(chg);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't configure mitigation rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	/*
+	 * Disable HVDCP autonomous mode operation by default, providing a DT
+	 * knob to turn it on if required. Additionally, if specified in DT,
+	 * disable HVDCP and HVDCP authentication algorithm.
+	 */
+	val = (chg->hvdcp_disable) ? 0 :
+		(HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT);
+	if (chip->dt.hvdcp_autonomous)
+		val |= HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT;
+
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+			(HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT |
+			 HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT),
+			val);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure HVDCP rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb5_init_connector_type(chg);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure connector type rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	/* Use ICL results from HW */
+	rc = smblib_icl_override(chg, HW_AUTO_MODE);
+	if (rc < 0) {
+		pr_err("Couldn't disable ICL override rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set OTG current limit */
+	rc = smblib_set_charge_param(chg, &chg->param.otg_cl, chg->otg_cl_ua);
+	if (rc < 0) {
+		pr_err("Couldn't set otg current limit rc=%d\n", rc);
+		return rc;
+	}
+
+	/* vote 0mA on usb_icl for non battery platforms */
+	vote(chg->usb_icl_votable,
+		DEFAULT_VOTER, chip->dt.no_battery, 0);
+	vote(chg->dc_suspend_votable,
+		DEFAULT_VOTER, chip->dt.no_battery, 0);
+	vote(chg->fcc_votable, HW_LIMIT_VOTER,
+		chip->dt.batt_profile_fcc_ua > 0, chip->dt.batt_profile_fcc_ua);
+	vote(chg->fv_votable, HW_LIMIT_VOTER,
+		chip->dt.batt_profile_fv_uv > 0, chip->dt.batt_profile_fv_uv);
+	vote(chg->fcc_votable,
+		BATT_PROFILE_VOTER, chg->batt_profile_fcc_ua > 0,
+		chg->batt_profile_fcc_ua);
+	vote(chg->fv_votable,
+		BATT_PROFILE_VOTER, chg->batt_profile_fv_uv > 0,
+		chg->batt_profile_fv_uv);
+
+	/* Some h/w limits the maximum supported ICL */
+	vote(chg->usb_icl_votable, HW_LIMIT_VOTER,
+			chg->hw_max_icl_ua > 0, chg->hw_max_icl_ua);
+
+	/* Initialize DC peripheral configurations */
+	rc = smb5_init_dc_peripheral(chg);
+	if (rc < 0)
+		return rc;
+
+	/*
+	 * AICL configuration:
+	 * start from min and AICL ADC disable, and enable aicl rerun
+	 */
+	if (chg->smb_version != PMI632_SUBTYPE) {
+		mask = USBIN_AICL_PERIODIC_RERUN_EN_BIT | USBIN_AICL_ADC_EN_BIT
+			| USBIN_AICL_EN_BIT | SUSPEND_ON_COLLAPSE_USBIN_BIT;
+		val = USBIN_AICL_PERIODIC_RERUN_EN_BIT | USBIN_AICL_EN_BIT;
+		if (!chip->dt.disable_suspend_on_collapse)
+			val |= SUSPEND_ON_COLLAPSE_USBIN_BIT;
+
+		rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
+				mask, val);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't config AICL rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	rc = smblib_write(chg, AICL_RERUN_TIME_CFG_REG,
+				AICL_RERUN_TIME_12S_VAL);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure AICL rerun interval rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable the charging path */
+	rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VBUS for software control */
+	rc = smblib_masked_write(chg, DCDC_OTG_CFG_REG, OTG_EN_SRC_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure VBUS for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	val = (ilog2(chip->dt.wd_bark_time / 16) << BARK_WDOG_TIMEOUT_SHIFT)
+			& BARK_WDOG_TIMEOUT_MASK;
+	val |= (BITE_WDOG_TIMEOUT_8S | BITE_WDOG_DISABLE_CHARGING_CFG_BIT);
+	rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+			BITE_WDOG_DISABLE_CHARGING_CFG_BIT |
+			BARK_WDOG_TIMEOUT_MASK | BITE_WDOG_TIMEOUT_MASK,
+			val);
+	if (rc < 0) {
+		pr_err("Couldn't configue WD config rc=%d\n", rc);
+		return rc;
+	}
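
The bark-timeout field is a log2 encoding of the DT value; a worked example, assuming wd_bark_time is in seconds and restricted to the supported 16/32/64/128 values:

/* chip->dt.wd_bark_time = 64:
 *   64 / 16 = 4, ilog2(4) = 2
 *   val = (2 << BARK_WDOG_TIMEOUT_SHIFT) & BARK_WDOG_TIMEOUT_MASK
 * i.e. the field selects the 16s base scaled by 2^2 = 64s, before the
 * BITE timeout and disable-charging bits are OR'd in.
 */
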
+
+	/* enable WD BARK and enable it on plugin */
+	rc = smblib_masked_write(chg, WD_CFG_REG,
+			WATCHDOG_TRIGGER_AFP_EN_BIT |
+			WDOG_TIMER_EN_ON_PLUGIN_BIT |
+			BARK_WDOG_INT_EN_BIT,
+			WDOG_TIMER_EN_ON_PLUGIN_BIT |
+			BARK_WDOG_INT_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configue WD config rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set termination current threshold values */
+	rc = smb5_configure_iterm_thresholds(chip);
+	if (rc < 0) {
+		pr_err("Couldn't configure ITERM thresholds rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rc = smb5_configure_float_charger(chip);
+	if (rc < 0)
+		return rc;
+
+	switch (chip->dt.chg_inhibit_thr_mv) {
+	case 50:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				INHIBIT_ANALOG_VFLT_MINUS_50MV);
+		break;
+	case 100:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				INHIBIT_ANALOG_VFLT_MINUS_100MV);
+		break;
+	case 200:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				INHIBIT_ANALOG_VFLT_MINUS_200MV);
+		break;
+	case 300:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				INHIBIT_ANALOG_VFLT_MINUS_300MV);
+		break;
+	case 0:
+		rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				CHARGER_INHIBIT_BIT, 0);
+		break;
+	default:
+		break;
+	}
+
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure charge inhibit threshold rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smblib_write(chg, CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG,
+					FAST_CHARGE_SAFETY_TIMER_768_MIN);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't set CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smb5_configure_recharging(chip);
+	if (rc < 0)
+		return rc;
+
+	rc = smblib_disable_hw_jeita(chg, true);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't set hw jeita rc=%d\n", rc);
+		return rc;
 	}
 
 	rc = smblib_masked_write(chg, DCDC_ENG_SDCDC_CFG5_REG,
@@ -2168,6 +2540,17 @@
 		return rc;
 	}
 
+	if (chg->connector_pull_up != -EINVAL) {
+		rc = smb5_configure_internal_pull(chg, CONN_THERM,
+				get_valid_pullup(chg->connector_pull_up));
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't configure CONN_THERM pull-up rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	return rc;
 }
 
@@ -2339,10 +2722,12 @@
 	[USBIN_UV_IRQ] = {
 		.name		= "usbin-uv",
 		.handler	= usbin_uv_irq_handler,
+		.wake		= true,
+		.storm_data	= {true, 3000, 5},
 	},
 	[USBIN_OV_IRQ] = {
 		.name		= "usbin-ov",
-		.handler	= default_irq_handler,
+		.handler	= usbin_ov_irq_handler,
 	},
 	[USBIN_PLUGIN_IRQ] = {
 		.name		= "usbin-plugin",
@@ -2447,8 +2832,14 @@
 	[IMP_TRIGGER_IRQ] = {
 		.name		= "imp-trigger",
 	},
+	/*
+	 * triggered when the DIE, SKIN, or CONNECTOR temperature crosses
+	 * any of the _REG_L, _REG_H, _RST, or _SHDN thresholds
+	 */
 	[TEMP_CHANGE_IRQ] = {
 		.name		= "temp-change",
+		.handler	= temp_change_irq_handler,
+		.wake		= true,
 	},
 	[TEMP_CHANGE_SMB_IRQ] = {
 		.name		= "temp-change-smb",
@@ -2562,11 +2953,16 @@
 		chg->usb_icl_change_irq_enabled = true;
 
 	/*
-	 * Disable WDOG SNARL IRQ by default to prevent IRQ storm. If required
-	 * for any application, enable it through votable.
+	 * WDOG_SNARL_IRQ is required for SW Thermal Regulation WA only. In
+	 * case the WA is not required, disable the WDOG_SNARL_IRQ to prevent
+	 * interrupt storm.
 	 */
-	if (chg->irq_info[WDOG_SNARL_IRQ].irq)
-		vote(chg->wdog_snarl_irq_en_votable, DEFAULT_VOTER, false, 0);
+	if (chg->irq_info[WDOG_SNARL_IRQ].irq && !(chg->wa_flags &
+						SW_THERM_REGULATION_WA)) {
+		disable_irq_wake(chg->irq_info[WDOG_SNARL_IRQ].irq);
+		disable_irq_nosync(chg->irq_info[WDOG_SNARL_IRQ].irq);
+	}
 
 	return rc;
 }
@@ -2783,6 +3179,21 @@
 		goto cleanup;
 	}
 
+	/* Support reporting polarity and speed via properties */
+	rc = extcon_set_property_capability(chg->extcon,
+			EXTCON_USB, EXTCON_PROP_USB_TYPEC_POLARITY);
+	rc |= extcon_set_property_capability(chg->extcon,
+			EXTCON_USB, EXTCON_PROP_USB_SS);
+	rc |= extcon_set_property_capability(chg->extcon,
+			EXTCON_USB_HOST, EXTCON_PROP_USB_TYPEC_POLARITY);
+	rc |= extcon_set_property_capability(chg->extcon,
+			EXTCON_USB_HOST, EXTCON_PROP_USB_SS);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"failed to configure extcon capabilities\n");
+		goto cleanup;
+	}
+
 	rc = smb5_init_hw(chip);
 	if (rc < 0) {
 		pr_err("Couldn't initialize hardware rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/schgm-flash.c b/drivers/power/supply/qcom/schgm-flash.c
index 7e8c70b..b92ace1 100644
--- a/drivers/power/supply/qcom/schgm-flash.c
+++ b/drivers/power/supply/qcom/schgm-flash.c
@@ -94,6 +94,11 @@
 	}
 }
 
+bool is_flash_active(struct smb_charger *chg)
+{
+	return chg->flash_active;
+}
+
 int schgm_flash_get_vreg_ok(struct smb_charger *chg, int *val)
 {
 	int rc, vreg_state;
@@ -140,6 +145,31 @@
 	return 0;
 }
 
+void schgm_flash_torch_priority(struct smb_charger *chg, enum torch_mode mode)
+{
+	int rc;
+	u8 reg;
+
+	/*
+	 * If torch is configured in default BOOST mode, skip any update in the
+	 * mode configuration.
+	 */
+	if (chg->headroom_mode == FIXED_MODE)
+		return;
+
+	if ((mode != TORCH_BOOST_MODE) && (mode != TORCH_BUCK_MODE))
+		return;
+
+	reg = mode;
+	rc = smblib_masked_write(chg, SCHGM_TORCH_PRIORITY_CONTROL_REG,
+					TORCH_PRIORITY_CONTROL_BIT, reg);
+	if (rc < 0) {
+		pr_err("Couldn't configure Torch priority control rc=%d\n",
+				rc);
+		return;
+	}
+
+	pr_debug("Torch priority changed to: %d\n", mode);
+}
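
A hypothetical caller sketch for the new export, e.g. from a flash/LED driver that wants the charger to keep buck priority while torch is lit; the mode values come from the torch_mode enum added in schgm-flash.h:

/* Request buck-mode torch priority; the helper is a no-op when torch is
 * fixed in boost mode (FIXED_MODE) or for an invalid mode value.
 */
schgm_flash_torch_priority(chg, TORCH_BUCK_MODE);
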
+
 int schgm_flash_init(struct smb_charger *chg)
 {
 	int rc;
@@ -183,7 +213,7 @@
 
 		reg = (chg->headroom_mode == FIXED_MODE)
 					? TORCH_PRIORITY_CONTROL_BIT : 0;
-		rc = smblib_write(chg, SCHGM_TORCH_PRIORITY_CONTROL, reg);
+		rc = smblib_write(chg, SCHGM_TORCH_PRIORITY_CONTROL_REG, reg);
 		if (rc < 0) {
 			pr_err("Couldn't force 5V boost in torch mode rc=%d\n",
 					rc);
diff --git a/drivers/power/supply/qcom/schgm-flash.h b/drivers/power/supply/qcom/schgm-flash.h
index 546e63a..1294467 100644
--- a/drivers/power/supply/qcom/schgm-flash.h
+++ b/drivers/power/supply/qcom/schgm-flash.h
@@ -30,7 +30,7 @@
 #define SCHGM_FLASH_CONTROL_REG			(SCHGM_FLASH_BASE + 0x60)
 #define SOC_LOW_FOR_FLASH_EN_BIT		BIT(7)
 
-#define SCHGM_TORCH_PRIORITY_CONTROL		(SCHGM_FLASH_BASE + 0x63)
+#define SCHGM_TORCH_PRIORITY_CONTROL_REG	(SCHGM_FLASH_BASE + 0x63)
 #define TORCH_PRIORITY_CONTROL_BIT		BIT(0)
 
 #define SCHGM_SOC_BASED_FLASH_DERATE_TH_CFG_REG	(SCHGM_FLASH_BASE + 0x67)
@@ -38,8 +38,15 @@
 #define SCHGM_SOC_BASED_FLASH_DISABLE_TH_CFG_REG \
 						(SCHGM_FLASH_BASE + 0x68)
 
+enum torch_mode {
+	TORCH_BUCK_MODE = 0,
+	TORCH_BOOST_MODE,
+};
+
 int schgm_flash_get_vreg_ok(struct smb_charger *chg, int *val);
+void schgm_flash_torch_priority(struct smb_charger *chg, enum torch_mode mode);
 int schgm_flash_init(struct smb_charger *chg);
+bool is_flash_active(struct smb_charger *chg);
 
 irqreturn_t schgm_flash_default_irq_handler(int irq, void *data);
 irqreturn_t schgm_flash_ilim2_irq_handler(int irq, void *data);
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index 916b160..469a276 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -47,6 +47,9 @@
 #define BATT_GT_PRE_TO_FAST_BIT			BIT(4)
 #define ENABLE_CHARGING_BIT			BIT(3)
 
+#define CHGR_CHARGING_ENABLE_CMD_REG		(CHGR_BASE + 0x42)
+#define CHARGING_ENABLE_CMD_BIT			BIT(0)
+
 #define CHGR_CFG2_REG				(CHGR_BASE + 0x51)
 #define CHG_EN_SRC_BIT				BIT(7)
 #define CHG_EN_POLARITY_BIT			BIT(6)
@@ -1032,7 +1035,17 @@
 		return rc;
 	}
 
-	/* disable parallel charging path */
+	/*
+	 * Switch SMB1355 enablement to command-based control with the
+	 * charging-enable command bit cleared, then disable the parallel
+	 * charging path.
+	 */
+	rc = smb1355_masked_write(chip, CHGR_CHARGING_ENABLE_CMD_REG,
+				CHARGING_ENABLE_CMD_BIT, 0);
+	if (rc < 0) {
+		pr_err("Coudln't configure command bit, rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = smb1355_set_parallel_charging(chip, true);
 	if (rc < 0) {
 		pr_err("Couldn't disable parallel path rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index 279a3e2..b00ac7c 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -5,6 +5,7 @@
 
 #define pr_fmt(fmt) "SMB1390: %s: " fmt, __func__
 
+#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -77,12 +78,23 @@
 #define CP_VOTER		"CP_VOTER"
 #define USER_VOTER		"USER_VOTER"
 #define ILIM_VOTER		"ILIM_VOTER"
+#define TAPER_END_VOTER		"TAPER_END_VOTER"
 #define FCC_VOTER		"FCC_VOTER"
 #define ICL_VOTER		"ICL_VOTER"
 #define WIRELESS_VOTER		"WIRELESS_VOTER"
 #define SRC_VOTER		"SRC_VOTER"
 #define SWITCHER_TOGGLE_VOTER	"SWITCHER_TOGGLE_VOTER"
 
+#define smb1390_dbg(chip, reason, fmt, ...)				\
+	do {								\
+		if (chip->debug_mask & (reason))			\
+			pr_info("SMB1390: %s: " fmt, __func__,		\
+				##__VA_ARGS__);				\
+		else							\
+			pr_debug("SMB1390: %s: " fmt, __func__,		\
+				##__VA_ARGS__);				\
+	} while (0)
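
The macro promotes a message to pr_info() only when the matching bit of chip->debug_mask is set, and falls back to pr_debug() otherwise. For example:

/* With chip->debug_mask = PR_INTERRUPT | PR_INFO (0x5), this logs at
 * info level; PR_REGISTER traffic stays at pr_debug unless enabled.
 */
smb1390_dbg(chip, PR_INTERRUPT, "%s IRQ triggered\n", smb_irqs[i].name);
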
+
 enum {
 	SWITCHER_OFF_WINDOW_IRQ = 0,
 	SWITCHER_OFF_FAULT_IRQ,
@@ -100,6 +112,14 @@
 	SMB_PIN_EN,
 };
 
+enum print_reason {
+	PR_INTERRUPT		= BIT(0),
+	PR_REGISTER		= BIT(1),
+	PR_INFO			= BIT(2),
+	PR_EXT_DEPENDENCY	= BIT(3),
+	PR_MISC			= BIT(4),
+};
+
 struct smb1390_iio {
 	struct iio_channel	*die_temp_chan;
 };
@@ -109,6 +129,7 @@
 	struct regmap		*regmap;
 	struct notifier_block	nb;
 	struct wakeup_source	*cp_ws;
+	struct dentry		*dfs_root;
 
 	/* work structs */
 	struct work_struct	status_change_work;
@@ -122,7 +143,7 @@
 	struct votable		*disable_votable;
 	struct votable		*ilim_votable;
 	struct votable		*fcc_votable;
-	struct votable		*cp_awake_votable;
+	struct votable		*fv_votable;
 
 	/* power supplies */
 	struct power_supply	*usb_psy;
@@ -135,6 +156,11 @@
 	bool			taper_work_running;
 	struct smb1390_iio	iio;
 	int			irq_status;
+	int			taper_entry_fv;
+	bool			switcher_enabled;
+	int			die_temp;
+	bool			suspended;
+	u32			debug_mask;
 };
 
 struct smb_irq {
@@ -161,7 +187,8 @@
 {
 	int rc;
 
-	pr_debug("Writing 0x%02x to 0x%04x with mask 0x%02x\n", val, reg, mask);
+	smb1390_dbg(chip, PR_REGISTER, "Writing 0x%02x to 0x%04x with mask 0x%02x\n",
+			val, reg, mask);
 	rc = regmap_update_bits(chip->regmap, reg, mask, val);
 	if (rc < 0)
 		pr_err("Couldn't write 0x%02x to 0x%04x with mask 0x%02x\n",
@@ -175,7 +202,7 @@
 	if (!chip->batt_psy) {
 		chip->batt_psy = power_supply_get_by_name("battery");
 		if (!chip->batt_psy) {
-			pr_debug("Couldn't find battery psy\n");
+			smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find battery psy\n");
 			return false;
 		}
 	}
@@ -183,7 +210,7 @@
 	if (!chip->usb_psy) {
 		chip->usb_psy = power_supply_get_by_name("usb");
 		if (!chip->usb_psy) {
-			pr_debug("Couldn't find usb psy\n");
+			smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find usb psy\n");
 			return false;
 		}
 	}
@@ -191,7 +218,7 @@
 	if (!chip->dc_psy) {
 		chip->dc_psy = power_supply_get_by_name("dc");
 		if (!chip->dc_psy) {
-			pr_debug("Couldn't find dc psy\n");
+			smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find dc psy\n");
 			return false;
 		}
 	}
@@ -199,11 +226,24 @@
 	if (!chip->fcc_votable) {
 		chip->fcc_votable = find_votable("FCC");
 		if (!chip->fcc_votable) {
-			pr_debug("Couldn't find FCC votable\n");
+			smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find FCC votable\n");
 			return false;
 		}
 	}
 
+	if (!chip->fv_votable) {
+		chip->fv_votable = find_votable("FV");
+		if (!chip->fv_votable) {
+			smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find FV votable\n");
+			return false;
+		}
+	}
+
+	if (!chip->disable_votable) {
+		smb1390_dbg(chip, PR_MISC, "Couldn't find CP DISABLE votable\n");
+		return false;
+	}
+
 	return true;
 }
 
@@ -236,7 +276,8 @@
 		*enable = !!(status & EN_PIN_OUT2_BIT);
 		break;
 	default:
-		pr_debug("cp_en status %d is not supported\n", id);
+		smb1390_dbg(chip, PR_MISC, "cp_en status %d is not supported\n",
+				id);
 		rc = -EINVAL;
 		break;
 	}
@@ -247,17 +288,29 @@
 static irqreturn_t default_irq_handler(int irq, void *data)
 {
 	struct smb1390 *chip = data;
-	int i;
+	int i, rc;
+	bool enable;
 
 	for (i = 0; i < NUM_IRQS; ++i) {
 		if (irq == chip->irqs[i]) {
-			pr_debug("%s IRQ triggered\n", smb_irqs[i].name);
+			smb1390_dbg(chip, PR_INTERRUPT, "%s IRQ triggered\n",
+				smb_irqs[i].name);
 			chip->irq_status |= 1 << i;
 		}
 	}
 
+	rc = smb1390_get_cp_en_status(chip, SWITCHER_EN, &enable);
+	if (!rc) {
+		if (chip->switcher_enabled != enable) {
+			chip->switcher_enabled = enable;
+			if (chip->fcc_votable)
+				rerun_election(chip->fcc_votable);
+		}
+	}
+
 	if (chip->cp_master_psy)
 		power_supply_changed(chip->cp_master_psy);
+
 	return IRQ_HANDLED;
 }
 
@@ -396,7 +449,7 @@
 	struct smb1390 *chip = data;
 	int rc = 0;
 
-	if (!is_psy_voter_available(chip))
+	if (!is_psy_voter_available(chip) || chip->suspended)
 		return -EAGAIN;
 
 	if (disable) {
@@ -404,10 +457,7 @@
 				   CMD_EN_SWITCHER_BIT, 0);
 		if (rc < 0)
 			return rc;
-
-		vote(chip->cp_awake_votable, CP_VOTER, false, 0);
 	} else {
-		vote(chip->cp_awake_votable, CP_VOTER, true, 0);
 		rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
 				   CMD_EN_SWITCHER_BIT, CMD_EN_SWITCHER_BIT);
 		if (rc < 0)
@@ -426,7 +476,7 @@
 	struct smb1390 *chip = data;
 	int rc = 0;
 
-	if (!is_psy_voter_available(chip))
+	if (!is_psy_voter_available(chip) || chip->suspended)
 		return -EAGAIN;
 
 	/* ILIM should always have at least one active vote */
@@ -435,38 +485,27 @@
 		return -EINVAL;
 	}
 
+	rc = smb1390_masked_write(chip, CORE_FTRIM_ILIM_REG,
+		CFG_ILIM_MASK,
+		DIV_ROUND_CLOSEST(max(ilim_uA, 500000) - 500000, 100000));
+	if (rc < 0) {
+		pr_err("Failed to write ILIM Register, rc=%d\n", rc);
+		return rc;
+	}
+
 	/* ILIM less than 1A is not accurate; disable charging */
 	if (ilim_uA < 1000000) {
-		pr_debug("ILIM %duA is too low to allow charging\n", ilim_uA);
+		smb1390_dbg(chip, PR_INFO, "ILIM %duA is too low to allow charging\n",
+			ilim_uA);
 		vote(chip->disable_votable, ILIM_VOTER, true, 0);
 	} else {
-		pr_debug("setting ILIM to %duA\n", ilim_uA);
-		rc = smb1390_masked_write(chip, CORE_FTRIM_ILIM_REG,
-				CFG_ILIM_MASK,
-				DIV_ROUND_CLOSEST(ilim_uA - 500000, 100000));
-		if (rc < 0)
-			pr_err("Failed to write ILIM Register, rc=%d\n", rc);
-		if (rc >= 0)
-			vote(chip->disable_votable, ILIM_VOTER, false, 0);
+		smb1390_dbg(chip, PR_INFO, "ILIM set to %duA\n", ilim_uA);
+		vote(chip->disable_votable, ILIM_VOTER, false, 0);
 	}
 
 	return rc;
 }
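
The now-unconditional FTRIM write encodes ILIM with a 500 mA offset and 100 mA steps, clamped at the bottom; two worked values, assuming that encoding holds across the range:

/* ilim_uA = 1500000: (1500000 - 500000) / 100000 = 10  -> code 10
 * ilim_uA =  400000: max() clamps to 500000            -> code 0
 * (the separate < 1 A check still disables charging in the second case)
 */
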
 
-static int smb1390_awake_vote_cb(struct votable *votable, void *data,
-				int awake, const char *client)
-{
-	struct smb1390 *chip = data;
-
-	if (awake)
-		__pm_stay_awake(chip->cp_ws);
-	else
-		__pm_relax(chip->cp_ws);
-
-	pr_debug("client: %s awake: %d\n", client, awake);
-	return 0;
-}
-
 static int smb1390_notifier_cb(struct notifier_block *nb,
 			       unsigned long event, void *data)
 {
@@ -544,9 +583,13 @@
 								pval.intval);
 		}
 
-		/* input current is always half the charge current */
-		vote(chip->ilim_votable, FCC_VOTER, true,
-				get_effective_result(chip->fcc_votable) / 2);
+		/*
+		 * Remove the SMB1390 taper-end disable vote if the float
+		 * voltage has increased since taper was entered.
+		 */
+		if (chip->taper_entry_fv <
+				get_effective_result(chip->fv_votable))
+			vote(chip->disable_votable, TAPER_END_VOTER, false, 0);
 
 		/*
 		 * all votes that would result in disabling the charge pump have
@@ -574,6 +617,7 @@
 		}
 	} else {
 		vote(chip->disable_votable, SRC_VOTER, true, 0);
+		vote(chip->disable_votable, TAPER_END_VOTER, false, 0);
 		vote(chip->fcc_votable, CP_VOTER, false, 0);
 	}
 
@@ -591,11 +635,8 @@
 	if (!is_psy_voter_available(chip))
 		goto out;
 
-	do {
-		fcc_uA = get_effective_result(chip->fcc_votable) - 100000;
-		pr_debug("taper work reducing FCC to %duA\n", fcc_uA);
-		vote(chip->fcc_votable, CP_VOTER, true, fcc_uA);
-
+	chip->taper_entry_fv = get_effective_result(chip->fv_votable);
+	while (true) {
 		rc = power_supply_get_property(chip->batt_psy,
 					POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
 		if (rc < 0) {
@@ -603,12 +644,36 @@
 			goto out;
 		}
 
-		msleep(500);
-	} while (fcc_uA >= 2000000
-		 && pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER);
+		if (get_effective_result(chip->fv_votable) >
+						chip->taper_entry_fv) {
+			smb1390_dbg(chip, PR_INFO, "Float voltage increased. Exiting taper\n");
+			goto out;
+		} else {
+			chip->taper_entry_fv =
+					get_effective_result(chip->fv_votable);
+		}
 
+		if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+			fcc_uA = get_effective_result(chip->fcc_votable)
+								- 100000;
+			smb1390_dbg(chip, PR_INFO, "taper work reducing FCC to %duA\n",
+				fcc_uA);
+			vote(chip->fcc_votable, CP_VOTER, true, fcc_uA);
+
+			if (fcc_uA < 2000000) {
+				vote(chip->disable_votable, TAPER_END_VOTER,
+								true, 0);
+				goto out;
+			}
+		} else {
+			smb1390_dbg(chip, PR_INFO, "In fast charging. Wait for next taper\n");
+		}
+
+		msleep(500);
+	}
 out:
-	pr_debug("taper work exit\n");
+	smb1390_dbg(chip, PR_INFO, "taper work exit\n");
+	vote(chip->fcc_votable, CP_VOTER, false, 0);
 	chip->taper_work_running = false;
 }
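
Putting the loop's numbers together, assuming taper is entered with an effective FCC of 3 A:

/* 3.0 A -> 2.9 A -> ... one 100 mA step per 500 ms while the charge
 * type stays TAPER; after eleven steps (~5.5 s) the vote reaches
 * 1.9 A (< 2 A), the TAPER_END_VOTER disables the charge pump, and the
 * CP_VOTER FCC vote is dropped on exit. A float-voltage increase
 * aborts the loop early.
 */
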
 
@@ -650,12 +715,26 @@
 				!get_effective_result(chip->disable_votable);
 		break;
 	case POWER_SUPPLY_PROP_CP_SWITCHER_EN:
-		rc = smb1390_get_cp_en_status(chip, SWITCHER_EN, &enable);
-		if (!rc)
-			val->intval = enable;
+		if (chip->suspended) {
+			val->intval = chip->switcher_enabled;
+		} else {
+			rc = smb1390_get_cp_en_status(chip, SWITCHER_EN,
+					&enable);
+			if (!rc)
+				val->intval = enable;
+		}
 		break;
 	case POWER_SUPPLY_PROP_CP_DIE_TEMP:
-		rc = smb1390_get_die_temp(chip, val);
+		if (chip->suspended) {
+			if (chip->die_temp != -ENODATA)
+				val->intval = chip->die_temp;
+			else
+				rc = -ENODATA;
+		} else {
+			rc = smb1390_get_die_temp(chip, val);
+			if (rc >= 0)
+				chip->die_temp = val->intval;
+		}
 		break;
 	case POWER_SUPPLY_PROP_CP_ISNS:
 		rc = smb1390_get_isns(chip, val);
@@ -680,7 +759,7 @@
 					+ 500000;
 		break;
 	default:
-		pr_debug("charge pump power supply get prop %d not supported\n",
+		smb1390_dbg(chip, PR_MISC, "charge pump power supply get prop %d not supported\n",
 			prop);
 		return -EINVAL;
 	}
@@ -707,7 +786,7 @@
 		chip->irq_status = val->intval;
 		break;
 	default:
-		pr_debug("charge pump power supply set prop %d not supported\n",
+		smb1390_dbg(chip, PR_MISC, "charge pump power supply set prop %d not supported\n",
 			prop);
 		return -EINVAL;
 	}
@@ -789,11 +868,6 @@
 
 static int smb1390_create_votables(struct smb1390 *chip)
 {
-	chip->cp_awake_votable = create_votable("CP_AWAKE", VOTE_SET_ANY,
-			smb1390_awake_vote_cb, chip);
-	if (IS_ERR(chip->cp_awake_votable))
-		return PTR_ERR(chip->cp_awake_votable);
-
 	chip->disable_votable = create_votable("CP_DISABLE",
 			VOTE_SET_ANY, smb1390_disable_vote_cb, chip);
 	if (IS_ERR(chip->disable_votable))
@@ -810,6 +884,14 @@
 	 */
 	vote(chip->disable_votable, USER_VOTER, true, 0);
 
+	/*
+	 * In case SMB1390 probe happens after FCC value has been configured,
+	 * update ilim vote to reflect FCC / 2 value.
+	 */
+	if (chip->fcc_votable)
+		vote(chip->ilim_votable, FCC_VOTER, true,
+			get_effective_result(chip->fcc_votable) / 2);
+
 	return 0;
 }
 
@@ -817,7 +899,6 @@
 {
 	destroy_votable(chip->disable_votable);
 	destroy_votable(chip->ilim_votable);
-	destroy_votable(chip->cp_awake_votable);
 }
 
 static int smb1390_init_hw(struct smb1390 *chip)
@@ -914,6 +995,31 @@
 	return rc;
 }
 
+#ifdef CONFIG_DEBUG_FS
+static void smb1390_create_debugfs(struct smb1390 *chip)
+{
+	struct dentry *entry;
+
+	chip->dfs_root = debugfs_create_dir("smb1390_charger_psy", NULL);
+	if (IS_ERR_OR_NULL(chip->dfs_root)) {
+		pr_err("Failed to create debugfs directory, rc=%ld\n",
+					(long)chip->dfs_root);
+		return;
+	}
+
+	entry = debugfs_create_u32("debug_mask", 0600, chip->dfs_root,
+			&chip->debug_mask);
+	if (IS_ERR_OR_NULL(entry)) {
+		pr_err("Failed to create debug_mask, rc=%ld\n", (long)entry);
+		debugfs_remove_recursive(chip->dfs_root);
+	}
+}
+#else
+static void smb1390_create_debugfs(struct smb1390 *chip)
+{
+}
+#endif
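
With debugfs mounted in the usual place, the mask can then be tuned at runtime, e.g. by writing 0x5 (PR_INTERRUPT | PR_INFO) to /sys/kernel/debug/smb1390_charger_psy/debug_mask; the path follows from the directory and file names created above, assuming the default debugfs mount point.
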
+
 static int smb1390_probe(struct platform_device *pdev)
 {
 	struct smb1390 *chip;
@@ -926,6 +1032,8 @@
 	chip->dev = &pdev->dev;
 	spin_lock_init(&chip->status_change_lock);
 	mutex_init(&chip->die_chan_lock);
+	chip->die_temp = -ENODATA;
+	platform_set_drvdata(pdev, chip);
 
 	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
 	if (!chip->regmap) {
@@ -977,7 +1085,10 @@
 		goto out_notifier;
 	}
 
+	smb1390_create_debugfs(chip);
+
 	pr_debug("smb1390 probed successfully\n");
+
 	return 0;
 
 out_notifier:
@@ -1007,6 +1118,30 @@
 	return 0;
 }
 
+static int smb1390_suspend(struct device *dev)
+{
+	struct smb1390 *chip = dev_get_drvdata(dev);
+
+	chip->suspended = true;
+	return 0;
+}
+
+static int smb1390_resume(struct device *dev)
+{
+	struct smb1390 *chip = dev_get_drvdata(dev);
+
+	chip->suspended = false;
+	rerun_election(chip->ilim_votable);
+	rerun_election(chip->disable_votable);
+
+	return 0;
+}
+
+static const struct dev_pm_ops smb1390_pm_ops = {
+	.suspend	= smb1390_suspend,
+	.resume		= smb1390_resume,
+};
+
 static const struct of_device_id match_table[] = {
 	{ .compatible = "qcom,smb1390-charger-psy", },
 	{ },
@@ -1015,6 +1150,7 @@
 static struct platform_driver smb1390_driver = {
 	.driver	= {
 		.name		= "qcom,smb1390-charger-psy",
+		.pm		= &smb1390_pm_ops,
 		.of_match_table	= match_table,
 	},
 	.probe	= smb1390_probe,
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 2aae8bd..7c4a9df 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -16,8 +16,10 @@
 #include "smb5-lib.h"
 #include "smb5-reg.h"
 #include "battery.h"
+#include "schgm-flash.h"
 #include "step-chg-jeita.h"
 #include "storm-watch.h"
+#include "schgm-flash.h"
 
 #define smblib_err(chg, fmt, ...)		\
 	pr_err("%s: %s: " fmt, chg->name,	\
@@ -36,7 +38,9 @@
 #define typec_rp_med_high(chg, typec_mode)			\
 	((typec_mode == POWER_SUPPLY_TYPEC_SOURCE_MEDIUM	\
 	|| typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH)	\
-	&& !chg->typec_legacy)
+	&& (!chg->typec_legacy || chg->typec_legacy_use_rp_icl))
+
+static void update_sw_icl_max(struct smb_charger *chg, int pst);
 
 int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
 {
@@ -156,15 +160,50 @@
 	return 0;
 }
 
-int smblib_icl_override(struct smb_charger *chg, bool override)
+int smblib_icl_override(struct smb_charger *chg, enum icl_override_mode mode)
 {
 	int rc;
+	u8 usb51_mode, icl_override, apsd_override;
+
+	switch (mode) {
+	case SW_OVERRIDE_USB51_MODE:
+		usb51_mode = 0;
+		icl_override = ICL_OVERRIDE_BIT;
+		apsd_override = 0;
+		break;
+	case SW_OVERRIDE_HC_MODE:
+		usb51_mode = USBIN_MODE_CHG_BIT;
+		icl_override = 0;
+		apsd_override = ICL_OVERRIDE_AFTER_APSD_BIT;
+		break;
+	case HW_AUTO_MODE:
+	default:
+		usb51_mode = USBIN_MODE_CHG_BIT;
+		icl_override = 0;
+		apsd_override = 0;
+		break;
+	}
+
+	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+				USBIN_MODE_CHG_BIT, usb51_mode);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set USBIN_ICL_OPTIONS rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smblib_masked_write(chg, CMD_ICL_OVERRIDE_REG,
+				ICL_OVERRIDE_BIT, icl_override);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+		return rc;
+	}
 
 	rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
-				ICL_OVERRIDE_AFTER_APSD_BIT,
-				override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+				ICL_OVERRIDE_AFTER_APSD_BIT, apsd_override);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't override ICL_AFTER_APSD rc=%d\n", rc);
+		return rc;
+	}
 
 	return rc;
 }
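
The three override modes decompose into three register knobs; the mapping, read directly from the switch above:

/* mode                     USBIN_MODE_CHG  ICL_OVERRIDE  OVERRIDE_AFTER_APSD
 * SW_OVERRIDE_USB51_MODE         0               1                0
 * SW_OVERRIDE_HC_MODE            1               0                1
 * HW_AUTO_MODE (default)         1               0                0
 */
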
@@ -176,7 +215,7 @@
 static int smblib_select_sec_charger_locked(struct smb_charger *chg,
 					int sec_chg)
 {
-	int rc;
+	int rc = 0;
 
 	switch (sec_chg) {
 	case POWER_SUPPLY_CHARGER_SEC_CP:
@@ -191,12 +230,14 @@
 			return rc;
 		}
 		/* Enable Charge Pump, under HW control */
-		rc = smblib_write(chg, MISC_SMB_EN_CMD_REG,  EN_CP_CMD_BIT);
+		rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
+					EN_CP_CMD_BIT, EN_CP_CMD_BIT);
 		if (rc < 0) {
 			dev_err(chg->dev, "Couldn't enable SMB charger rc=%d\n",
 						rc);
 			return rc;
 		}
+		vote(chg->smb_override_votable, PL_SMB_EN_VOTER, false, 0);
 		break;
 	case POWER_SUPPLY_CHARGER_SEC_PL:
 		/* select slave charger instead of Charge Pump */
@@ -208,12 +249,14 @@
 			return rc;
 		}
 		/* Enable slave charger, under HW control */
-		rc = smblib_write(chg, MISC_SMB_EN_CMD_REG,  EN_STAT_CMD_BIT);
+		rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
+					EN_STAT_CMD_BIT, EN_STAT_CMD_BIT);
 		if (rc < 0) {
 			dev_err(chg->dev, "Couldn't enable SMB charger rc=%d\n",
 						rc);
 			return rc;
 		}
+		vote(chg->smb_override_votable, PL_SMB_EN_VOTER, false, 0);
 
 		vote(chg->pl_disable_votable, PL_SMB_EN_VOTER, false, 0);
 
@@ -223,13 +266,7 @@
 		vote(chg->pl_disable_votable, PL_SMB_EN_VOTER, true, 0);
 
 		/* SW override, disabling secondary charger(s) */
-		rc = smblib_write(chg, MISC_SMB_EN_CMD_REG,
-						SMB_EN_OVERRIDE_BIT);
-		if (rc < 0) {
-			dev_err(chg->dev, "Couldn't disable charging rc=%d\n",
-						rc);
-			return rc;
-		}
+		vote(chg->smb_override_votable, PL_SMB_EN_VOTER, true, 0);
 		break;
 	}
 
@@ -552,6 +589,25 @@
 	return 0;
 }
 
+#define AICL_RANGE2_MIN_MV		5600
+#define AICL_RANGE2_STEP_DELTA_MV	200
+#define AICL_RANGE2_OFFSET		16
+int smblib_get_aicl_cont_threshold(struct smb_chg_param *param, u8 val_raw)
+{
+	int base = param->min_u;
+	u8 reg = val_raw;
+	int step = param->step_u;
+
+	if (val_raw >= AICL_RANGE2_OFFSET) {
+		reg = val_raw - AICL_RANGE2_OFFSET;
+		base = AICL_RANGE2_MIN_MV;
+		step = AICL_RANGE2_STEP_DELTA_MV;
+	}
+
+	return base + (reg * step);
+}
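
The decode switches from the parameter's native range to a fixed second range at raw value 16; a worked pair, with range-1 min_u/step_u values that are purely illustrative:

/* Assuming param->min_u = 4000 and param->step_u = 100 (hypothetical):
 *   val_raw = 10: 4000 + 10 * 100        = 5000 mV  (range 1)
 *   val_raw = 20: 5600 + (20 - 16) * 200 = 6400 mV  (range 2)
 */
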
+
 /********************
  * REGISTER SETTERS *
  ********************/
@@ -704,6 +760,23 @@
 {
 	int rc = 0;
 
+	/* PMI632 supports a maximum of 9V */
+	if (chg->smb_version == PMI632_SUBTYPE) {
+		switch (allowed_voltage) {
+		case USBIN_ADAPTER_ALLOW_12V:
+		case USBIN_ADAPTER_ALLOW_9V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+			break;
+		case USBIN_ADAPTER_ALLOW_5V_OR_12V:
+		case USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_5V_OR_9V;
+			break;
+		case USBIN_ADAPTER_ALLOW_5V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+			break;
+		}
+	}
+
 	rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n",
@@ -773,6 +846,29 @@
 	return rc;
 }
 
+int smblib_set_aicl_cont_threshold(struct smb_chg_param *param,
+				int val_u, u8 *val_raw)
+{
+	int base = param->min_u;
+	int offset = 0;
+	int step = param->step_u;
+
+	if (val_u > param->max_u)
+		val_u = param->max_u;
+	if (val_u < param->min_u)
+		val_u = param->min_u;
+
+	if (val_u >= AICL_RANGE2_MIN_MV) {
+		base = AICL_RANGE2_MIN_MV;
+		step = AICL_RANGE2_STEP_DELTA_MV;
+		offset = AICL_RANGE2_OFFSET;
+	}
+
+	*val_raw = ((val_u - base) / step) + offset;
+
+	return 0;
+}
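
The setter is the inverse of the decode above; continuing the same example:

/* val_u = 6400 >= 5600, so range 2 applies:
 *   (6400 - 5600) / 200 + 16 = 20
 * which round-trips to the raw value decoded earlier.
 */
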
+
 /********************
  * HELPER FUNCTIONS *
  ********************/
@@ -808,7 +904,7 @@
 	int rc;
 	u8 mask;
 
-	if (chg->hvdcp_disable)
+	if (chg->hvdcp_disable || chg->pd_not_supported)
 		return;
 
 	mask = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT;
@@ -908,7 +1004,7 @@
 			schedule_work(&chg->bms_update_work);
 	}
 
-	if (!chg->jeita_configured)
+	if (chg->jeita_configured == JEITA_CFG_NONE)
 		schedule_work(&chg->jeita_update_work);
 
 	if (chg->sec_pl_present && !chg->pl.psy
@@ -969,7 +1065,6 @@
 	return 0;
 }
 
-#define SDP_100_MA			100000
 static void smblib_uusb_removal(struct smb_charger *chg)
 {
 	int rc;
@@ -984,6 +1079,9 @@
 
 	cancel_delayed_work_sync(&chg->pl_enable_work);
 
+	if (chg->wa_flags & CHG_TERMINATION_WA)
+		alarm_cancel(&chg->chg_termination_alarm);
+
 	if (chg->wa_flags & BOOST_BACK_WA) {
 		data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
 		if (data) {
@@ -1000,8 +1098,11 @@
 	/* reset both usbin current and voltage votes */
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
-	vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA);
+	vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+			is_flash_active(chg) ? SDP_CURRENT_UA : SDP_100_MA);
 	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
+	vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, false, 0);
+	vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
 
 	/* Remove SW thermal regulation WA votes */
 	vote(chg->usb_icl_votable, SW_THERM_REGULATION_VOTER, false, 0);
@@ -1018,6 +1119,11 @@
 		smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
 			rc);
 
+	/* reset USBOV votes and cancel work */
+	cancel_delayed_work_sync(&chg->usbov_dbc_work);
+	vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+	chg->dbc_usbov = false;
+
 	chg->voltage_min_uv = MICRO_5V;
 	chg->voltage_max_uv = MICRO_5V;
 	chg->usb_icl_delta_ua = 0;
@@ -1166,42 +1272,25 @@
 	}
 
 	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
-		CFG_USB3P0_SEL_BIT | USB51_MODE_BIT | USBIN_MODE_CHG_BIT,
-		icl_options);
+			CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't set ICL options rc=%d\n", rc);
 		return rc;
 	}
 
-	return rc;
-}
-
-static int get_sdp_current(struct smb_charger *chg, int *icl_ua)
-{
-	int rc;
-	u8 icl_options;
-	bool usb3 = false;
-
-	rc = smblib_read(chg, USBIN_ICL_OPTIONS_REG, &icl_options);
+	rc = smblib_icl_override(chg, SW_OVERRIDE_USB51_MODE);
 	if (rc < 0) {
-		smblib_err(chg, "Couldn't get ICL options rc=%d\n", rc);
+		smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
 		return rc;
 	}
 
-	usb3 = (icl_options & CFG_USB3P0_SEL_BIT);
-
-	if (icl_options & USB51_MODE_BIT)
-		*icl_ua = usb3 ? USBIN_900MA : USBIN_500MA;
-	else
-		*icl_ua = usb3 ? USBIN_150MA : USBIN_100MA;
-
 	return rc;
 }
 
 int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 {
 	int rc = 0;
-	bool hc_mode = false, override = false;
+	enum icl_override_mode icl_override = HW_AUTO_MODE;
 	/* suspend if 25mA or less is requested */
 	bool suspend = (icl_ua <= USBIN_25MA);
 
@@ -1241,7 +1330,7 @@
 		if (icl_ua <= USBIN_500MA) {
 			rc = set_sdp_current(chg, icl_ua);
 			if (rc >= 0)
-				goto out;
+				goto unsuspend;
 		}
 
 		rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
@@ -1249,30 +1338,17 @@
 			smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
 			goto out;
 		}
-		hc_mode = true;
-
-		/*
-		 * Micro USB mode follows ICL register independent of override
-		 * bit, configure override only for typeC mode.
-		 */
-		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC)
-			override = true;
+		icl_override = SW_OVERRIDE_HC_MODE;
 	}
 
 set_mode:
-	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
-		USBIN_MODE_CHG_BIT, hc_mode ? USBIN_MODE_CHG_BIT : 0);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't set USBIN_ICL_OPTIONS rc=%d\n", rc);
-		goto out;
-	}
-
-	rc = smblib_icl_override(chg, override);
+	rc = smblib_icl_override(chg, icl_override);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
 		goto out;
 	}
 
+unsuspend:
 	/* unsuspend after configuring current and override */
 	rc = smblib_set_usb_suspend(chg, false);
 	if (rc < 0) {
@@ -1281,46 +1357,21 @@
 	}
 
 	/* Re-run AICL */
-	if (chg->real_charger_type != POWER_SUPPLY_TYPE_USB)
-		rc = smblib_rerun_aicl(chg);
+	if (icl_override != SW_OVERRIDE_HC_MODE)
+		rc = smblib_run_aicl(chg, RERUN_AICL);
 out:
 	return rc;
 }
 
 int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua)
 {
-	int rc = 0;
-	u8 load_cfg;
-	bool override;
+	int rc;
 
-	if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
-		|| chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
-		&& (chg->usb_psy->desc->type == POWER_SUPPLY_TYPE_USB)) {
-		rc = get_sdp_current(chg, icl_ua);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't get SDP ICL rc=%d\n", rc);
-			return rc;
-		}
-	} else {
-		rc = smblib_read(chg, USBIN_LOAD_CFG_REG, &load_cfg);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't get load cfg rc=%d\n", rc);
-			return rc;
-		}
-		override = load_cfg & ICL_OVERRIDE_AFTER_APSD_BIT;
-		if (!override)
-			return INT_MAX;
+	rc = smblib_get_charge_param(chg, &chg->param.icl_max_stat, icl_ua);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't get HC ICL rc=%d\n", rc);
 
-		/* override is set */
-		rc = smblib_get_charge_param(chg, &chg->param.icl_max_stat,
-					icl_ua);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't get HC ICL rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	return 0;
+	return rc;
 }
 
 int smblib_toggle_smb_en(struct smb_charger *chg, int toggle)
@@ -1336,9 +1387,122 @@
 	return rc;
 }
 
+/****************************
+ * uUSB Moisture Protection *
+ ****************************/
+#define MICRO_USB_DETECTION_ON_TIME_20_MS 0x08
+#define MICRO_USB_DETECTION_PERIOD_X_100 0x03
+#define U_USB_STATUS_WATER_PRESENT 0x00
+static int smblib_set_moisture_protection(struct smb_charger *chg,
+				bool enable)
+{
+	int rc = 0;
+
+	if (chg->moisture_present == enable) {
+		smblib_dbg(chg, PR_MISC, "No change in moisture protection status\n");
+		return rc;
+	}
+
+	if (enable) {
+		chg->moisture_present = true;
+
+		/* Disable uUSB factory mode detection */
+		rc = smblib_masked_write(chg, TYPEC_U_USB_CFG_REG,
+					EN_MICRO_USB_FACTORY_MODE_BIT, 0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable uUSB factory mode detection rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Disable moisture detection and uUSB state change interrupt */
+		rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG,
+					TYPEC_WATER_DETECTION_INT_EN_BIT |
+					MICRO_USB_STATE_CHANGE_INT_EN_BIT, 0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable moisture detection interrupt rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Set 1% duty cycle on ID detection */
+		rc = smblib_masked_write(chg,
+				((chg->smb_version == PMI632_SUBTYPE) ?
+				PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+				TYPEC_U_USB_WATER_PROTECTION_CFG_REG),
+				EN_MICRO_USB_WATER_PROTECTION_BIT |
+				MICRO_USB_DETECTION_ON_TIME_CFG_MASK |
+				MICRO_USB_DETECTION_PERIOD_CFG_MASK,
+				EN_MICRO_USB_WATER_PROTECTION_BIT |
+				MICRO_USB_DETECTION_ON_TIME_20_MS |
+				MICRO_USB_DETECTION_PERIOD_X_100);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set 1 percent CC_ID duty cycle rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		vote(chg->usb_icl_votable, MOISTURE_VOTER, true, 0);
+	} else {
+		chg->moisture_present = false;
+		vote(chg->usb_icl_votable, MOISTURE_VOTER, false, 0);
+
+		/* Enable moisture detection and uUSB state change interrupt */
+		rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG,
+					TYPEC_WATER_DETECTION_INT_EN_BIT |
+					MICRO_USB_STATE_CHANGE_INT_EN_BIT,
+					TYPEC_WATER_DETECTION_INT_EN_BIT |
+					MICRO_USB_STATE_CHANGE_INT_EN_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable moisture detection and uUSB state change interrupt rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Disable periodic monitoring of CC_ID pin */
+		rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
+				PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+				TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable 1 percent CC_ID duty cycle rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Enable uUSB factory mode detection */
+		rc = smblib_masked_write(chg, TYPEC_U_USB_CFG_REG,
+					EN_MICRO_USB_FACTORY_MODE_BIT,
+					EN_MICRO_USB_FACTORY_MODE_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable uUSB factory mode detection rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	smblib_dbg(chg, PR_MISC, "Moisture protection %s\n",
+			chg->moisture_present ? "enabled" : "disabled");
+	return rc;
+}
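
The "1 percent duty cycle" wording follows from the two detection constants, assuming the _PERIOD_X_100 setting makes the period 100x the on-time, as the names suggest:

/* on-time = 20 ms, period = 100 * 20 ms = 2 s
 * => CC_ID is sampled 20 / 2000 = 1% of the time while moisture
 *    protection is active.
 */
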
+
 /*********************
  * VOTABLE CALLBACKS *
  *********************/
+static int smblib_smb_disable_override_vote_callback(struct votable *votable,
+			void *data, int disable_smb, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc = 0;
+
+	/* Enable/disable SMB_EN pin */
+	rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
+			SMB_EN_OVERRIDE_BIT | SMB_EN_OVERRIDE_VALUE_BIT,
+			disable_smb ? SMB_EN_OVERRIDE_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't configure SMB_EN, rc=%d\n", rc);
+
+	return rc;
+}
 
 static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
 			int suspend, const char *client)
@@ -1407,25 +1571,6 @@
 	return 0;
 }
 
-static int smblib_wdog_snarl_irq_en_vote_callback(struct votable *votable,
-				void *data, int enable, const char *client)
-{
-	struct smb_charger *chg = data;
-
-	if (!chg->irq_info[WDOG_SNARL_IRQ].irq)
-		return 0;
-
-	if (enable) {
-		enable_irq(chg->irq_info[WDOG_SNARL_IRQ].irq);
-		enable_irq_wake(chg->irq_info[WDOG_SNARL_IRQ].irq);
-	} else {
-		disable_irq_wake(chg->irq_info[WDOG_SNARL_IRQ].irq);
-		disable_irq_nosync(chg->irq_info[WDOG_SNARL_IRQ].irq);
-	}
-
-	return 0;
-}
-
 /*******************
  * VCONN REGULATOR *
  * *****************/
@@ -1606,7 +1751,32 @@
 	union power_supply_propval pval = {0, };
 	bool usb_online, dc_online;
 	u8 stat;
-	int rc;
+	int rc, suspend = 0;
+
+	if (chg->dbc_usbov) {
+		rc = smblib_get_prop_usb_present(chg, &pval);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't get usb present prop rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = smblib_get_usb_suspend(chg, &suspend);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't get usb suspend rc=%d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * Report charging as long as USBOV is not debounced and
+		 * charging path is un-suspended.
+		 */
+		if (pval.intval && !suspend) {
+			val->intval = POWER_SUPPLY_STATUS_CHARGING;
+			return 0;
+		}
+	}
 
 	rc = smblib_get_prop_usb_online(chg, &pval);
 	if (rc < 0) {
@@ -1670,6 +1840,16 @@
 		return 0;
 	}
 
+	/*
+	 * If charge termination WA is active and has suspended charging, then
+	 * continue reporting charging status as FULL.
+	 */
+	if (is_client_vote_enabled(chg->usb_icl_votable,
+						CHG_TERMINATION_VOTER)) {
+		val->intval = POWER_SUPPLY_STATUS_FULL;
+		return 0;
+	}
+
 	if (val->intval != POWER_SUPPLY_STATUS_CHARGING)
 		return 0;
 
@@ -1849,7 +2029,14 @@
 
 	temp = buf[1] | (buf[0] << 8);
 	temp = sign_extend32(temp, 15);
-	temp = DIV_ROUND_CLOSEST(temp * 10000, ADC_CHG_TERM_MASK);
+
+	if (chg->smb_version == PMI632_SUBTYPE)
+		temp = DIV_ROUND_CLOSEST(temp * ITERM_LIMITS_PMI632_MA,
+					ADC_CHG_ITERM_MASK);
+	else
+		temp = DIV_ROUND_CLOSEST(temp * ITERM_LIMITS_PM8150B_MA,
+					ADC_CHG_ITERM_MASK);
+
 	val->intval = temp;
 
 	return rc;
@@ -1873,6 +2060,19 @@
 	return 0;
 }
 
+int smblib_get_batt_current_now(struct smb_charger *chg,
+					union power_supply_propval *val)
+{
+	int rc;
+
+	rc = smblib_get_prop_from_bms(chg,
+			POWER_SUPPLY_PROP_CURRENT_NOW, val);
+	if (!rc)
+		val->intval *= (-1);
+
+	return rc;
+}
+
 /***********************
  * BATTERY PSY SETTERS *
  ***********************/
@@ -1978,7 +2178,7 @@
 	return rc;
 }
 
-int smblib_rerun_aicl(struct smb_charger *chg)
+int smblib_run_aicl(struct smb_charger *chg, int type)
 {
 	int rc;
 	u8 stat;
@@ -1996,8 +2196,8 @@
 
 	smblib_dbg(chg, PR_MISC, "re-running AICL\n");
 
-	rc = smblib_masked_write(chg, AICL_CMD_REG, RERUN_AICL_BIT,
-				RERUN_AICL_BIT);
+	stat = (type == RERUN_AICL) ? RERUN_AICL_BIT : RESTART_AICL_BIT;
+	rc = smblib_masked_write(chg, AICL_CMD_REG, stat, stat);
 	if (rc < 0)
 		smblib_err(chg, "Couldn't write to AICL_CMD_REG rc=%d\n",
 				rc);
@@ -2032,7 +2232,7 @@
 	return rc;
 }
 
-static int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val)
+int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val)
 {
 	int rc;
 
@@ -2109,6 +2309,7 @@
 		}
 
 		smblib_hvdcp_set_fsw(chg, stat & QC_2P0_STATUS_MASK);
+		vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, false, 0);
 	}
 
 	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
@@ -2208,8 +2409,12 @@
 			break;
 		}
 
-		if (stat & QC_5V_BIT)
+		if (stat & QC_5V_BIT) {
+			/* Force 1A ICL before requesting higher voltage */
+			vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER,
+					true, 1000000);
 			smblib_hvdcp_set_fsw(chg, QC_9V_BIT);
+		}
 
 		rc = smblib_force_vbus_voltage(chg, FORCE_9V_BIT);
 		if (rc < 0)
@@ -2229,8 +2434,12 @@
 			break;
 		}
 
-		if ((stat & QC_9V_BIT) || (stat & QC_5V_BIT))
+		if ((stat & QC_9V_BIT) || (stat & QC_5V_BIT)) {
+			/* Force 1A ICL before requesting higher voltage */
+			vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER,
+					true, 1000000);
 			smblib_hvdcp_set_fsw(chg, QC_12V_BIT);
+		}
 
 		rc = smblib_force_vbus_voltage(chg, FORCE_12V_BIT);
 		if (rc < 0)
@@ -2263,6 +2472,7 @@
 				rc);
 		return rc;
 	}
+
 	return 0;
 }
 
@@ -2287,8 +2497,6 @@
 			return rc;
 		}
 
-		vote(chg->wdog_snarl_irq_en_votable, SW_THERM_REGULATION_VOTER,
-							true, 0);
 		/*
 		 * Schedule SW_THERM_REGULATION_WORK directly if USB input
 		 * is suspended due to SW thermal regulation WA since WDOG
@@ -2301,8 +2509,6 @@
 			schedule_delayed_work(&chg->thermal_regulation_work, 0);
 		}
 	} else {
-		vote(chg->wdog_snarl_irq_en_votable, SW_THERM_REGULATION_VOTER,
-							false, 0);
 		cancel_delayed_work_sync(&chg->thermal_regulation_work);
 		vote(chg->awake_votable, SW_THERM_REGULATION_VOTER, false, 0);
 	}
@@ -2343,19 +2549,28 @@
 	}
 
 	if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) {
-		rc = smblib_read_iio_channel(chg, chg->iio.smb_temp_chan,
-					DIV_FACTOR_DECIDEGC, &chg->smb_temp);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't read SMB TEMP channel, rc=%d\n",
+		if (!chg->cp_psy)
+			chg->cp_psy =
+				power_supply_get_by_name("charge_pump_master");
+		if (chg->cp_psy) {
+			rc = power_supply_get_property(chg->cp_psy,
+				POWER_SUPPLY_PROP_CP_DIE_TEMP, &pval);
+			if (rc < 0) {
+				smblib_err(chg, "Couldn't get smb1390 charger temp, rc=%d\n",
 					rc);
-			return rc;
+				return rc;
+			}
+			chg->smb_temp = pval.intval;
+		} else {
+			smblib_dbg(chg, PR_MISC, "Coudln't find cp_psy\n");
+			chg->smb_temp = -ENODATA;
 		}
 	} else if (chg->pl.psy && chg->sec_chg_selected ==
 					POWER_SUPPLY_CHARGER_SEC_PL) {
 		rc = power_supply_get_property(chg->pl.psy,
 				POWER_SUPPLY_PROP_CHARGER_TEMP, &pval);
 		if (rc < 0) {
-			smblib_err(chg, "Couldn't get smb charger temp, rc=%d\n",
+			smblib_err(chg, "Couldn't get smb1355 charger temp, rc=%d\n",
 					rc);
 			return rc;
 		}
@@ -2467,13 +2682,8 @@
 		if (chg->thermal_status == TEMP_ALERT_LEVEL)
 			goto exit;
 
-		/* Enable/disable SMB_EN pin */
-		rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
-			SMB_EN_OVERRIDE_BIT | SMB_EN_OVERRIDE_VALUE_BIT,
-			(disable_smb ? SMB_EN_OVERRIDE_BIT :
-			(SMB_EN_OVERRIDE_BIT | SMB_EN_OVERRIDE_VALUE_BIT)));
-		if (rc < 0)
-			smblib_err(chg, "Couldn't set SMB_EN, rc=%d\n", rc);
+		vote(chg->smb_override_votable, SW_THERM_REGULATION_VOTER,
+				disable_smb, 0);
 
 		/*
 		 * Enable/disable secondary charger through votables to ensure
@@ -2538,6 +2748,11 @@
 	int rc;
 	u8 stat;
 
+	if (chg->smb_version == PMI632_SUBTYPE) {
+		val->intval = 0;
+		return 0;
+	}
+
 	rc = smblib_read(chg, DCIN_BASE + INT_RT_STS_OFFSET, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read DCIN_RT_STS rc=%d\n", rc);
@@ -2554,11 +2769,22 @@
 	int rc = 0;
 	u8 stat;
 
+	if (chg->smb_version == PMI632_SUBTYPE) {
+		val->intval = 0;
+		return 0;
+	}
+
 	if (get_client_vote(chg->dc_suspend_votable, USER_VOTER)) {
 		val->intval = false;
 		return rc;
 	}
 
+	if (is_client_vote_enabled(chg->dc_suspend_votable,
+						CHG_TERMINATION_VOTER)) {
+		rc = smblib_get_prop_dc_present(chg, val);
+		return rc;
+	}
+
 	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
@@ -2671,6 +2897,12 @@
 		return rc;
 	}
 
+	if (is_client_vote_enabled(chg->usb_icl_votable,
+					CHG_TERMINATION_VOTER)) {
+		rc = smblib_get_prop_usb_present(chg, val);
+		return rc;
+	}
+
 	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
@@ -2685,6 +2917,29 @@
 	return rc;
 }
 
+int smblib_get_usb_online(struct smb_charger *chg,
+			union power_supply_propval *val)
+{
+	int rc;
+
+	rc = smblib_get_prop_usb_online(chg, val);
+	if (!val->intval)
+		goto exit;
+
+	if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) ||
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
+		&& (chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
+		val->intval = 0;
+	else
+		val->intval = 1;
+
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_UNKNOWN)
+		val->intval = 0;
+
+exit:
+	return rc;
+}
+
 int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
 				    union power_supply_propval *val)
 {
@@ -2714,36 +2969,14 @@
 	return 0;
 }
 
-static int smblib_estimate_hvdcp_voltage(struct smb_charger *chg,
-					 union power_supply_propval *val)
-{
-	int rc;
-	u8 stat;
-
-	rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read QC_CHANGE_STATUS_REG rc=%d\n",
-				rc);
-		return rc;
-	}
-
-	if (stat & QC_5V_BIT)
-		val->intval = MICRO_5V;
-	else if (stat & QC_9V_BIT)
-		val->intval = MICRO_9V;
-	else if (stat & QC_12V_BIT)
-		val->intval = MICRO_12V;
-
-	return 0;
-}
-
 #define HVDCP3_STEP_UV	200000
 static int smblib_estimate_adaptor_voltage(struct smb_charger *chg,
 					  union power_supply_propval *val)
 {
 	switch (chg->real_charger_type) {
 	case POWER_SUPPLY_TYPE_USB_HVDCP:
-		return smblib_estimate_hvdcp_voltage(chg, val);
+		val->intval = MICRO_12V;
+		break;
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
 		val->intval = MICRO_5V + (HVDCP3_STEP_UV * chg->pulse_cnt);
 		break;
@@ -2931,6 +3164,11 @@
 	int rc = 0;
 	u8 stat;
 
+	val->intval = 0;
+
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+		return 0;
+
 	rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
@@ -2940,8 +3178,6 @@
 
 	if (stat & CC_ATTACHED_BIT)
 		val->intval = (bool)(stat & CC_ORIENTATION_BIT) + 1;
-	else
-		val->intval = 0;
 
 	return rc;
 }
@@ -3038,12 +3274,28 @@
 		return smblib_get_prop_ufp_mode(chg);
 }
 
+inline int smblib_get_usb_prop_typec_mode(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+		val->intval = POWER_SUPPLY_TYPEC_NONE;
+	else
+		val->intval = chg->typec_mode;
+
+	return 0;
+}
+
 int smblib_get_prop_typec_power_role(struct smb_charger *chg,
 				     union power_supply_propval *val)
 {
 	int rc = 0;
 	u8 ctrl;
 
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) {
+		val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		return 0;
+	}
+
 	rc = smblib_read(chg, TYPE_C_MODE_CFG_REG, &ctrl);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read TYPE_C_MODE_CFG_REG rc=%d\n",
@@ -3123,30 +3375,59 @@
 int smblib_get_prop_usb_current_now(struct smb_charger *chg,
 				    union power_supply_propval *val)
 {
-	int rc = 0;
+	union power_supply_propval pval = {0, };
+	int rc = 0, buck_scale = 1, boost_scale = 1;
 
 	if (chg->iio.usbin_i_chan) {
 		rc = iio_read_channel_processed(chg->iio.usbin_i_chan,
 				&val->intval);
+		if (rc < 0) {
+			pr_err("Error in reading USBIN_I channel, rc=%d\n", rc);
+			return rc;
+		}
 
 		/*
 		 * For PM8150B, scaling factor = reciprocal of
 		 * 0.2V/A in Buck mode, 0.4V/A in Boost mode.
+		 * For PMI632, scaling factor = reciprocal of
+		 * 0.4V/A in Buck mode, 0.8V/A in Boost mode.
 		 */
-		if (smblib_get_prop_ufp_mode(chg) != POWER_SUPPLY_TYPEC_NONE) {
-			val->intval *= 5;
+		switch (chg->smb_version) {
+		case PMI632_SUBTYPE:
+			buck_scale = 40;
+			boost_scale = 80;
+			break;
+		default:
+			buck_scale = 20;
+			boost_scale = 40;
+			break;
+		}
+
+		if (chg->otg_present || smblib_get_prop_dfp_mode(chg) !=
+				POWER_SUPPLY_TYPEC_NONE) {
+			val->intval = DIV_ROUND_CLOSEST(val->intval * 100,
+								boost_scale);
 			return rc;
 		}
 
-		if (smblib_get_prop_dfp_mode(chg) != POWER_SUPPLY_TYPEC_NONE) {
-			val->intval = DIV_ROUND_CLOSEST(val->intval * 100, 40);
-			return rc;
+		rc = smblib_get_prop_usb_present(chg, &pval);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get usb present status,rc=%d\n",
+				rc);
+			return -ENODATA;
 		}
+
+		/* If USB is not present, return 0 */
+		if (!pval.intval)
+			val->intval = 0;
+		else
+			val->intval = DIV_ROUND_CLOSEST(val->intval * 100,
+								buck_scale);
 	} else {
+		val->intval = 0;
 		rc = -ENODATA;
 	}
 
-	val->intval = 0;
 	return rc;
 }
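A quick sanity check of the scaling above (reading is illustrative): the
USBIN_I channel reports a voltage proportional to input current, so dividing
by the V/A slope recovers amperes. With the PM8150B buck slope of 0.2 V/A,
buck_scale = 20 and val * 100 / 20 equals val * 5, the reciprocal of 0.2.
A minimal sketch:

	/* Hypothetical reading: 400000 uV on USBIN_I, PM8150B in buck mode */
	int reading_uv = 400000;
	int buck_scale = 20;	/* 0.2 V/A */
	int current_ua = DIV_ROUND_CLOSEST(reading_uv * 100, buck_scale);
	/* current_ua == 2000000, i.e. 2 A of input current */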
 
@@ -3156,6 +3437,9 @@
 	int rc;
 	u8 stat;
 
+	if (chg->sink_src_mode != SRC_MODE)
+		return -ENODATA;
+
 	rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n",
@@ -3262,11 +3546,43 @@
 	return POWER_SUPPLY_HEALTH_COOL;
 }
 
+int smblib_get_die_health(struct smb_charger *chg,
+			union power_supply_propval *val)
+{
+	if (chg->die_health == -EINVAL)
+		val->intval = smblib_get_prop_die_health(chg);
+	else
+		val->intval = chg->die_health;
+
+	return 0;
+}
+
+int smblib_get_prop_scope(struct smb_charger *chg,
+			union power_supply_propval *val)
+{
+	int rc;
+	union power_supply_propval pval;
+
+	val->intval = POWER_SUPPLY_SCOPE_UNKNOWN;
+	rc = smblib_get_prop_usb_present(chg, &pval);
+	if (rc < 0)
+		return rc;
+
+	val->intval = pval.intval ? POWER_SUPPLY_SCOPE_DEVICE
+		: chg->otg_present ? POWER_SUPPLY_SCOPE_SYSTEM
+		: POWER_SUPPLY_SCOPE_UNKNOWN;
+
+	return 0;
+}
+
 int smblib_get_prop_connector_health(struct smb_charger *chg)
 {
 	int rc;
 	u8 stat;
 
+	if (chg->connector_health != -EINVAL)
+		return chg->connector_health;
+
 	if (chg->wa_flags & SW_THERM_REGULATION_WA) {
 		if (chg->connector_temp == -ENODATA)
 			return POWER_SUPPLY_HEALTH_UNKNOWN;
@@ -3302,13 +3618,6 @@
 	return POWER_SUPPLY_HEALTH_COOL;
 }
 
-#define SDP_CURRENT_UA			500000
-#define CDP_CURRENT_UA			1500000
-#define DCP_CURRENT_UA			1500000
-#define HVDCP_CURRENT_UA		3000000
-#define TYPEC_DEFAULT_CURRENT_UA	900000
-#define TYPEC_MEDIUM_CURRENT_UA		1500000
-#define TYPEC_HIGH_CURRENT_UA		3000000
 static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
 {
 	int rp_ua;
@@ -3348,6 +3657,7 @@
 					int usb_current)
 {
 	int rc = 0, rp_ua, typec_mode;
+	union power_supply_propval val = {0, };
 
 	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
 		if (usb_current == -ETIMEDOUT) {
@@ -3402,8 +3712,16 @@
 				return rc;
 		}
 	} else {
-		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
-					true, usb_current);
+		rc = smblib_get_prop_usb_present(chg, &val);
+		if (!rc && !val.intval)
+			return 0;
+
+		/* if flash is active force 500mA */
+		if ((usb_current < SDP_CURRENT_UA) && is_flash_active(chg))
+			usb_current = SDP_CURRENT_UA;
+
+		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, true,
+							usb_current);
 		if (rc < 0) {
 			pr_err("Couldn't vote ICL USB_PSY_VOTER rc=%d\n", rc);
 			return rc;
@@ -3579,6 +3897,8 @@
 int smblib_set_prop_pd_active(struct smb_charger *chg,
 				const union power_supply_propval *val)
 {
+	const struct apsd_result *apsd = smblib_get_apsd_result(chg);
+
 	int rc = 0;
 	int sec_charger;
 
@@ -3586,6 +3906,8 @@
 
 	smblib_apsd_enable(chg, !chg->pd_active);
 
+	update_sw_icl_max(chg, apsd->pst);
+
 	if (chg->pd_active) {
 		vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
 
@@ -3612,7 +3934,6 @@
 					rc);
 		}
 	} else {
-		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA);
 		vote(chg->usb_icl_votable, PD_VOTER, false, 0);
 		vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
 
@@ -3673,51 +3994,155 @@
 	return rc;
 }
 
-static int smblib_recover_from_soft_jeita(struct smb_charger *chg)
+#define JEITA_SOFT			0
+#define JEITA_HARD			1
+static int smblib_update_jeita(struct smb_charger *chg, u32 *thresholds,
+								int type)
 {
-	u8 stat1, stat7;
 	int rc;
+	u16 temp, base;
 
-	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat1);
+	base = CHGR_JEITA_THRESHOLD_BASE_REG(type);
+
+	temp = thresholds[1] & 0xFFFF;
+	temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
+	rc = smblib_batch_write(chg, base, (u8 *)&temp, 2);
 	if (rc < 0) {
-		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
-				rc);
+		smblib_err(chg,
+			"Couldn't configure Jeita %s hot threshold rc=%d\n",
+			(type == JEITA_SOFT) ? "Soft" : "Hard", rc);
 		return rc;
 	}
 
-	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_7_REG, &stat7);
+	temp = thresholds[0] & 0xFFFF;
+	temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
+	rc = smblib_batch_write(chg, base + 2, (u8 *)&temp, 2);
 	if (rc < 0) {
-		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
-				rc);
+		smblib_err(chg,
+			"Couldn't configure Jeita %s cold threshold rc=%d\n",
+			(type == JEITA_SOFT) ? "Soft" : "Hard", rc);
 		return rc;
 	}
 
-	if ((chg->jeita_status && !(stat7 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK) &&
-		((stat1 & BATTERY_CHARGER_STATUS_MASK) == TERMINATE_CHARGE))) {
-		/*
-		 * We are moving from JEITA soft -> Normal and charging
-		 * is terminated
-		 */
-		rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG, 0);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't disable charging rc=%d\n",
-						rc);
-			return rc;
-		}
-		rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG,
-						CHARGING_ENABLE_CMD_BIT);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't enable charging rc=%d\n",
-						rc);
-			return rc;
-		}
-	}
-
-	chg->jeita_status = stat7 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK;
+	smblib_dbg(chg, PR_MISC, "%s Jeita threshold configured\n",
+				(type == JEITA_SOFT) ? "Soft" : "Hard");
 
 	return 0;
 }
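The shift pair above byte-swaps each 16-bit threshold before the two-byte
batch write, so the most significant byte lands at the lower register address
(on a little-endian CPU this matches swab16()/cpu_to_be16()). A minimal
sketch with an illustrative value:

	u16 raw = 0x1234;	/* illustrative ADC threshold code */
	u16 swapped = ((raw & 0xFF00) >> 8) | ((raw & 0x00FF) << 8);
	/* swapped == 0x3412; storing it little-endian emits 0x12 then 0x34 */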
 
+static int smblib_charge_inhibit_en(struct smb_charger *chg, bool enable)
+{
+	int rc;
+
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+					CHARGER_INHIBIT_BIT,
+					enable ? CHARGER_INHIBIT_BIT : 0);
+	return rc;
+}
+
+static int smblib_soft_jeita_arb_wa(struct smb_charger *chg)
+{
+	union power_supply_propval pval;
+	int rc = 0;
+	bool soft_jeita;
+
+	rc = smblib_get_prop_batt_health(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get battery health rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Do nothing on entering hard JEITA condition */
+	if (pval.intval == POWER_SUPPLY_HEALTH_COLD ||
+		pval.intval == POWER_SUPPLY_HEALTH_HOT)
+		return 0;
+
+	if (chg->jeita_soft_fcc[0] < 0 || chg->jeita_soft_fcc[1] < 0 ||
+		chg->jeita_soft_fv[0] < 0 || chg->jeita_soft_fv[1] < 0)
+		return 0;
+
+	soft_jeita = (pval.intval == POWER_SUPPLY_HEALTH_COOL) ||
+			(pval.intval == POWER_SUPPLY_HEALTH_WARM);
+
+	/* Do nothing on entering soft JEITA from hard JEITA */
+	if (chg->jeita_arb_flag && soft_jeita)
+		return 0;
+
+	/* Do nothing if health is normal and the WA is not active */
+	if (!chg->jeita_arb_flag && !soft_jeita)
+		return 0;
+
+	if (!chg->cp_disable_votable)
+		chg->cp_disable_votable = find_votable("CP_DISABLE");
+
+	/* Entering soft JEITA from normal state */
+	if (!chg->jeita_arb_flag && soft_jeita) {
+		vote(chg->chg_disable_votable, JEITA_ARB_VOTER, true, 0);
+		/* Disable parallel charging */
+		if (chg->pl_disable_votable)
+			vote(chg->pl_disable_votable, JEITA_ARB_VOTER, true, 0);
+		if (chg->cp_disable_votable)
+			vote(chg->cp_disable_votable, JEITA_ARB_VOTER, true, 0);
+
+		rc = smblib_charge_inhibit_en(chg, true);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable charge inhibit rc=%d\n",
+					rc);
+
+		rc = smblib_update_jeita(chg, chg->jeita_soft_hys_thlds,
+					JEITA_SOFT);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't configure Jeita soft threshold rc=%d\n",
+				rc);
+
+		if (pval.intval == POWER_SUPPLY_HEALTH_COOL) {
+			vote(chg->fcc_votable, JEITA_ARB_VOTER, true,
+						chg->jeita_soft_fcc[0]);
+			vote(chg->fv_votable, JEITA_ARB_VOTER, true,
+						chg->jeita_soft_fv[0]);
+		} else {
+			vote(chg->fcc_votable, JEITA_ARB_VOTER, true,
+						chg->jeita_soft_fcc[1]);
+			vote(chg->fv_votable, JEITA_ARB_VOTER, true,
+						chg->jeita_soft_fv[1]);
+		}
+
+		vote(chg->chg_disable_votable, JEITA_ARB_VOTER, false, 0);
+		chg->jeita_arb_flag = true;
+	} else if (chg->jeita_arb_flag && !soft_jeita) {
+		/* Exiting soft JEITA back to the normal health state */
+
+		vote(chg->chg_disable_votable, JEITA_ARB_VOTER, true, 0);
+
+		rc = smblib_charge_inhibit_en(chg, false);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't disable charge inhibit rc=%d\n",
+					rc);
+
+		rc = smblib_update_jeita(chg, chg->jeita_soft_thlds,
+							JEITA_SOFT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't configure Jeita soft threshold rc=%d\n",
+				rc);
+
+		vote(chg->fcc_votable, JEITA_ARB_VOTER, false, 0);
+		vote(chg->fv_votable, JEITA_ARB_VOTER, false, 0);
+		if (chg->pl_disable_votable)
+			vote(chg->pl_disable_votable, JEITA_ARB_VOTER, false,
+				0);
+		if (chg->cp_disable_votable)
+			vote(chg->cp_disable_votable, JEITA_ARB_VOTER, false,
+				0);
+		vote(chg->chg_disable_votable, JEITA_ARB_VOTER, false, 0);
+		chg->jeita_arb_flag = false;
+	}
+
+	smblib_dbg(chg, PR_MISC, "JEITA ARB status %d, soft JEITA status %d\n",
+			chg->jeita_arb_flag, soft_jeita);
+	return rc;
+}
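Editorial summary of the flow above: the routine is effectively a two-state
machine keyed on (jeita_arb_flag, soft_jeita), with early returns for hard
JEITA and unconfigured soft limits. Derived from the branches above, not from
separate documentation:

	/*
	 * jeita_arb_flag  soft_jeita  action
	 * false           false       return (normal temperature, WA idle)
	 * false           true        enter WA: hysteresis thresholds, soft
	 *                             FCC/FV votes, brief charge inhibit
	 * true            true        return (WA already active)
	 * true            false       exit WA: restore thresholds, drop votes
	 */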
+
 /************************
  * USB MAIN PSY GETTERS *
  ************************/
@@ -3781,7 +4206,7 @@
 		return 0;
 	}
 
-	if (non_compliant) {
+	if (non_compliant && !chg->typec_legacy_use_rp_icl) {
 		switch (apsd_result->bit) {
 		case CDP_CHARGER_BIT:
 			current_ua = CDP_CURRENT_UA;
@@ -3846,6 +4271,38 @@
 	return IRQ_HANDLED;
 }
 
+#define CHG_TERM_WA_ENTRY_DELAY_MS		300000		/* 5 min */
+#define CHG_TERM_WA_EXIT_DELAY_MS		60000		/* 1 min */
+static void smblib_eval_chg_termination(struct smb_charger *chg, u8 batt_status)
+{
+	union power_supply_propval pval = {0, };
+	int rc = 0;
+
+	rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CAPACITY, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read SOC value, rc=%d\n", rc);
+		return;
+	}
+
+	/*
+	 * After charge termination, switching to BSM mode carries a risk of
+	 * overcharging: the BATFET may take some time to open once staying in
+	 * supplemental mode is no longer necessary, leading to unintended
+	 * charging of the battery. Trigger the charge termination WA once
+	 * charging completes to prevent overcharging.
+	 */
+	if ((batt_status == TERMINATE_CHARGE) && (pval.intval == 100)) {
+		alarm_start_relative(&chg->chg_termination_alarm,
+				ms_to_ktime(CHG_TERM_WA_ENTRY_DELAY_MS));
+	} else if (pval.intval < 100) {
+		/*
+		 * Reset CC_SOC reference value for charge termination WA once
+		 * we exit the TERMINATE_CHARGE state and soc drops below 100%
+		 */
+		chg->cc_soc_ref = 0;
+	}
+}
+
 irqreturn_t chg_state_change_irq_handler(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
@@ -3863,6 +4320,10 @@
 	}
 
 	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+
+	if (chg->wa_flags & CHG_TERMINATION_WA)
+		smblib_eval_chg_termination(chg, stat);
+
 	power_supply_changed(chg->batt_psy);
 	return IRQ_HANDLED;
 }
@@ -3873,15 +4334,18 @@
 	struct smb_charger *chg = irq_data->parent_data;
 	int rc;
 
-	rc = smblib_recover_from_soft_jeita(chg);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if (chg->jeita_configured != JEITA_CFG_COMPLETE)
+		return IRQ_HANDLED;
+
+	rc = smblib_soft_jeita_arb_wa(chg);
 	if (rc < 0) {
-		smblib_err(chg, "Couldn't recover chg from soft jeita rc=%d\n",
+		smblib_err(chg, "Couldn't fix soft jeita arb rc=%d\n",
 				rc);
 		return IRQ_HANDLED;
 	}
 
-	rerun_election(chg->fcc_votable);
-	power_supply_changed(chg->batt_psy);
 	return IRQ_HANDLED;
 }
 
@@ -3895,6 +4359,8 @@
 	return IRQ_HANDLED;
 }
 
+#define AICL_STEP_MV		200
+#define MAX_AICL_THRESHOLD_MV	4800
 irqreturn_t usbin_uv_irq_handler(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
@@ -3905,6 +4371,70 @@
 	u8 stat = 0, max_pulses = 0;
 
 	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if ((chg->wa_flags & WEAK_ADAPTER_WA)
+			&& is_storming(&irq_data->storm_data)) {
+
+		if (chg->aicl_max_reached) {
+			smblib_dbg(chg, PR_MISC,
+					"USBIN_UV storm at max AICL threshold\n");
+			return IRQ_HANDLED;
+		}
+
+		smblib_dbg(chg, PR_MISC, "USBIN_UV storm at threshold %d\n",
+				chg->aicl_5v_threshold_mv);
+
+		/* suspend USBIN before updating AICL threshold */
+		vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER, true, 0);
+
+		/* delay for VASHDN deglitch */
+		msleep(20);
+
+		if (chg->aicl_5v_threshold_mv > MAX_AICL_THRESHOLD_MV) {
+			/* reached max AICL threshold */
+			chg->aicl_max_reached = true;
+			goto unsuspend_input;
+		}
+
+		/* Increase AICL threshold by 200mV */
+		rc = smblib_set_charge_param(chg, &chg->param.aicl_5v_threshold,
+				chg->aicl_5v_threshold_mv + AICL_STEP_MV);
+		if (rc < 0)
+			dev_err(chg->dev,
+				"Error in setting AICL threshold rc=%d\n", rc);
+		else
+			chg->aicl_5v_threshold_mv += AICL_STEP_MV;
+
+		rc = smblib_set_charge_param(chg,
+				&chg->param.aicl_cont_threshold,
+				chg->aicl_cont_threshold_mv + AICL_STEP_MV);
+		if (rc < 0)
+			dev_err(chg->dev,
+				"Error in setting AICL threshold rc=%d\n", rc);
+		else
+			chg->aicl_cont_threshold_mv += AICL_STEP_MV;
+
+unsuspend_input:
+		/* Force torch in boost mode to ensure it works with low ICL */
+		if (chg->smb_version == PMI632_SUBTYPE)
+			schgm_flash_torch_priority(chg, TORCH_BOOST_MODE);
+
+		if (chg->aicl_max_reached) {
+			smblib_dbg(chg, PR_MISC,
+				"Reached max AICL threshold resctricting ICL to 100mA\n");
+			vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER,
+					true, USBIN_100MA);
+			smblib_run_aicl(chg, RESTART_AICL);
+		} else {
+			smblib_run_aicl(chg, RESTART_AICL);
+			vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER,
+					false, 0);
+		}
+
+		wdata = &chg->irq_info[USBIN_UV_IRQ].irq_data->storm_data;
+		reset_storm_count(wdata);
+	}
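Worked numbers for the back-off above (the default threshold is
platform-specific; 4000 mV is illustrative): each USBIN_UV storm raises both
AICL thresholds by AICL_STEP_MV until the 5 V threshold exceeds
MAX_AICL_THRESHOLD_MV, after which aicl_max_reached latches and ICL is pinned
at 100 mA. A minimal sketch:

	/* Sketch, assuming a default 5 V AICL threshold of 4000 mV */
	int thresh_mv = 4000;

	while (thresh_mv <= MAX_AICL_THRESHOLD_MV)	/* 4800 mV */
		thresh_mv += AICL_STEP_MV;		/* +200 mV per storm */
	/*
	 * thresh_mv == 5000 after five storms; on the sixth storm the
	 * handler sees 5000 > 4800, latches aicl_max_reached and votes
	 * USBIN_100MA on the AICL_THRESHOLD_VOTER.
	 */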
+
 	if (!chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data)
 		return IRQ_HANDLED;
 
@@ -3973,6 +4503,24 @@
 	struct smb_charger *chg = irq_data->parent_data;
 
 	if (chg->mode == PARALLEL_MASTER) {
+		/*
+		 * Ignore the ICL change if it is due to DIE temp mitigation,
+		 * to prevent any further ICL split.
+		 */
+		if (chg->hw_die_temp_mitigation) {
+			rc = smblib_read(chg, DIE_TEMP_STATUS_REG, &stat);
+			if (rc < 0) {
+				smblib_err(chg,
+					"Couldn't read DIE_TEMP rc=%d\n", rc);
+				return IRQ_HANDLED;
+			}
+			if (stat & (DIE_TEMP_UB_BIT | DIE_TEMP_LB_BIT)) {
+				smblib_dbg(chg, PR_PARALLEL,
+					"skip ICL change DIE_TEMP %x\n", stat);
+				return IRQ_HANDLED;
+			}
+		}
+
 		rc = smblib_read(chg, AICL_STATUS_REG, &stat);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n",
@@ -4116,6 +4664,33 @@
 			vote(chg->fcc_votable, FCC_STEPPER_VOTER,
 							true, 1500000);
 
+		if (chg->wa_flags & WEAK_ADAPTER_WA) {
+			chg->aicl_5v_threshold_mv =
+					chg->default_aicl_5v_threshold_mv;
+			chg->aicl_cont_threshold_mv =
+					chg->default_aicl_cont_threshold_mv;
+
+			smblib_set_charge_param(chg,
+					&chg->param.aicl_5v_threshold,
+					chg->aicl_5v_threshold_mv);
+			smblib_set_charge_param(chg,
+					&chg->param.aicl_cont_threshold,
+					chg->aicl_cont_threshold_mv);
+			chg->aicl_max_reached = false;
+
+			if (chg->smb_version == PMI632_SUBTYPE)
+				schgm_flash_torch_priority(chg,
+						TORCH_BUCK_MODE);
+
+			data = chg->irq_info[USBIN_UV_IRQ].irq_data;
+			if (data) {
+				wdata = &data->storm_data;
+				reset_storm_count(wdata);
+			}
+			vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER,
+					false, 0);
+		}
+
 		rc = smblib_request_dpdm(chg, false);
 		if (rc < 0)
 			smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
@@ -4218,10 +4793,8 @@
 
 static void update_sw_icl_max(struct smb_charger *chg, int pst)
 {
-	union power_supply_propval pval;
 	int typec_mode;
 	int rp_ua;
-	int rc;
 
 	/* while PD is active it should have complete ICL control */
 	if (chg->pd_active)
@@ -4250,9 +4823,12 @@
 		 * enumeration is done.
 		 */
 		if (!is_client_vote_enabled(chg->usb_icl_votable,
-								USB_PSY_VOTER))
+						USB_PSY_VOTER)) {
+			/* if flash is active force 500mA */
 			vote(chg->usb_icl_votable, USB_PSY_VOTER, true,
-					SDP_100_MA);
+					is_flash_active(chg) ?
+					SDP_CURRENT_UA : SDP_100_MA);
+		}
 		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, false, 0);
 		break;
 	case POWER_SUPPLY_TYPE_USB_CDP:
@@ -4273,15 +4849,8 @@
 		break;
 	case POWER_SUPPLY_TYPE_UNKNOWN:
 	default:
-		rc = smblib_get_prop_usb_present(chg, &pval);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't get usb present rc = %d\n",
-					rc);
-			return;
-		}
-
 		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
-				pval.intval ? SDP_CURRENT_UA : SDP_100_MA);
+					SDP_100_MA);
 		break;
 	}
 }
@@ -4331,7 +4900,7 @@
 		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
 		return IRQ_HANDLED;
 	}
-	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+	smblib_dbg(chg, PR_INTERRUPT, "APSD_STATUS = 0x%02x\n", stat);
 
 	if ((chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 		&& (stat & APSD_DTC_STATUS_DONE_BIT)
@@ -4373,7 +4942,7 @@
 		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
 		return IRQ_HANDLED;
 	}
-	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+	smblib_dbg(chg, PR_INTERRUPT, "APSD_STATUS = 0x%02x\n", stat);
 
 	return IRQ_HANDLED;
 }
@@ -4419,6 +4988,9 @@
 	u8 stat;
 	int rc;
 
+	if (chg->lpd_disabled)
+		return false;
+
 	rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n",
@@ -4458,7 +5030,13 @@
 
 static void typec_sink_insertion(struct smb_charger *chg)
 {
+	int rc;
+
 	vote(chg->usb_icl_votable, OTG_VOTER, true, 0);
+	rc = smblib_set_charge_param(chg, &chg->param.freq_switcher,
+					chg->chg_freq.freq_above_otg_threshold);
+	if (rc < 0)
+		dev_err(chg->dev, "Error in setting freq_boost rc=%d\n", rc);
 
 	if (chg->use_extcon) {
 		smblib_notify_usb_host(chg, true);
@@ -4496,7 +5074,13 @@
 
 static void typec_sink_removal(struct smb_charger *chg)
 {
+	int rc;
+
 	vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
+	rc = smblib_set_charge_param(chg, &chg->param.freq_switcher,
+					chg->chg_freq.freq_removal);
+	if (rc < 0)
+		dev_err(chg->dev, "Error in setting freq_removal rc=%d\n", rc);
 
 	if (chg->use_extcon) {
 		if (chg->otg_present)
@@ -4537,14 +5121,20 @@
 
 	cancel_delayed_work_sync(&chg->pl_enable_work);
 
+	if (chg->wa_flags & CHG_TERMINATION_WA)
+		alarm_cancel(&chg->chg_termination_alarm);
+
 	/* reset input current limit voters */
-	vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA);
+	vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+			is_flash_active(chg) ? SDP_CURRENT_UA : SDP_100_MA);
 	vote(chg->usb_icl_votable, PD_VOTER, false, 0);
 	vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
 	vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
 	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
 	vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
 	vote(chg->usb_icl_votable, CTM_VOTER, false, 0);
+	vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, false, 0);
+	vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
 
 	/* reset usb irq voters */
 	vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
@@ -4565,6 +5155,11 @@
 		vote(chg->cp_disable_votable, SW_THERM_REGULATION_VOTER,
 								false, 0);
 
+	/* reset USBOV votes and cancel work */
+	cancel_delayed_work_sync(&chg->usbov_dbc_work);
+	vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+	chg->dbc_usbov = false;
+
 	chg->pulse_cnt = 0;
 	chg->usb_icl_delta_ua = 0;
 	chg->voltage_min_uv = MICRO_5V;
@@ -4642,32 +5237,19 @@
 						chg->typec_mode, typec_mode);
 }
 
-irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data)
+static void smblib_lpd_launch_ra_open_work(struct smb_charger *chg)
 {
-	struct smb_irq_data *irq_data = data;
-	struct smb_charger *chg = irq_data->parent_data;
 	u8 stat;
 	int rc;
 
-	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
-	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) {
-		cancel_delayed_work_sync(&chg->uusb_otg_work);
-		vote(chg->awake_votable, OTG_DELAY_VOTER, true, 0);
-		smblib_dbg(chg, PR_INTERRUPT, "Scheduling OTG work\n");
-		schedule_delayed_work(&chg->uusb_otg_work,
-				msecs_to_jiffies(chg->otg_delay_ms));
-		goto out;
-	}
-
-	if (chg->pr_swap_in_progress || chg->pd_hard_reset)
-		goto out;
+	if (chg->lpd_disabled)
+		return;
 
 	rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n",
 			rc);
-		goto out;
+		return;
 	}
 
 	if (!(stat & TYPEC_TCCDEBOUNCE_DONE_STATUS_BIT)
@@ -4678,6 +5260,44 @@
 		schedule_delayed_work(&chg->lpd_ra_open_work,
 						msecs_to_jiffies(300));
 	}
+}
+
+irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) {
+		if (chg->uusb_moisture_protection_enabled) {
+			/*
+			 * Call pm_stay_awake here because pm_relax is called
+			 * on the exit path of the work routine.
+			 */
+			pm_stay_awake(chg->dev);
+			schedule_work(&chg->moisture_protection_work);
+		}
+
+		cancel_delayed_work_sync(&chg->uusb_otg_work);
+		/*
+		 * Skip OTG enablement if RID interrupt triggers with moisture
+		 * protection still enabled.
+		 */
+		if (!chg->moisture_present) {
+			vote(chg->awake_votable, OTG_DELAY_VOTER, true, 0);
+			smblib_dbg(chg, PR_INTERRUPT, "Scheduling OTG work\n");
+			schedule_delayed_work(&chg->uusb_otg_work,
+				msecs_to_jiffies(chg->otg_delay_ms));
+		}
+
+		goto out;
+	}
+
+	if (chg->pr_swap_in_progress || chg->pd_hard_reset)
+		goto out;
+
+	smblib_lpd_launch_ra_open_work(chg);
 
 	if (chg->usb_psy)
 		power_supply_changed(chg->usb_psy);
@@ -4712,6 +5332,16 @@
 	return IRQ_HANDLED;
 }
 
+static void smblib_lpd_clear_ra_open_work(struct smb_charger *chg)
+{
+	if (chg->lpd_disabled)
+		return;
+
+	cancel_delayed_work_sync(&chg->lpd_detach_work);
+	chg->lpd_stage = LPD_STAGE_FLOAT_CANCEL;
+	cancel_delayed_work_sync(&chg->lpd_ra_open_work);
+	vote(chg->awake_votable, LPD_VOTER, false, 0);
+}
 
 irqreturn_t typec_attach_detach_irq_handler(int irq, void *data)
 {
@@ -4730,9 +5360,8 @@
 	}
 
 	if (stat & TYPEC_ATTACH_DETACH_STATE_BIT) {
-		chg->lpd_stage = LPD_STAGE_FLOAT_CANCEL;
-		cancel_delayed_work_sync(&chg->lpd_ra_open_work);
-		vote(chg->awake_votable, LPD_VOTER, false, 0);
+
+		smblib_lpd_clear_ra_open_work(chg);
 
 		rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
 		if (rc < 0) {
@@ -4774,7 +5403,7 @@
 
 		if (chg->lpd_stage == LPD_STAGE_FLOAT_CANCEL)
 			schedule_delayed_work(&chg->lpd_detach_work,
-					msecs_to_jiffies(100));
+					msecs_to_jiffies(1000));
 	}
 
 	power_supply_changed(chg->usb_psy);
@@ -4959,12 +5588,94 @@
 	if (rc < 0)
 		smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
 
-	if (chg->step_chg_enabled || chg->sw_jeita_enabled)
+	if (chg->step_chg_enabled)
 		power_supply_changed(chg->batt_psy);
 
 	return IRQ_HANDLED;
 }
 
+static void smblib_die_rst_icl_regulate(struct smb_charger *chg)
+{
+	int rc;
+	u8 temp;
+
+	rc = smblib_read(chg, DIE_TEMP_STATUS_REG, &temp);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read DIE_TEMP_STATUS_REG rc=%d\n",
+				rc);
+		return;
+	}
+
+	/* Regulate ICL on die temp crossing DIE_RST threshold */
+	vote(chg->usb_icl_votable, DIE_TEMP_VOTER,
+				temp & DIE_TEMP_RST_BIT, 500000);
+}
+
+/*
+ * Triggered when the DIE, SKIN, or CONNECTOR temperature crosses any of
+ * the _REG_L, _REG_H, _RST, or _SHDN thresholds.
+ */
+irqreturn_t temp_change_irq_handler(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_die_rst_icl_regulate(chg);
+
+	return IRQ_HANDLED;
+}
+
+static void smblib_usbov_dbc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						usbov_dbc_work.work);
+
+	smblib_dbg(chg, PR_MISC, "Resetting USBOV debounce\n");
+	chg->dbc_usbov = false;
+	vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+}
+
+#define USB_OV_DBC_PERIOD_MS		1000
+irqreturn_t usbin_ov_irq_handler(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	u8 stat;
+	int rc;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if (!(chg->wa_flags & USBIN_OV_WA))
+		return IRQ_HANDLED;
+
+	rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * On specific PMICs, the OV IRQ can trigger for very short durations
+	 * in interim periods, corrupting the reported charging status. To
+	 * differentiate an OV IRQ glitch from a real OV condition, apply a
+	 * debounce period before evaluating.
+	 */
+	if (stat & USBIN_OV_RT_STS_BIT) {
+		chg->dbc_usbov = true;
+		vote(chg->awake_votable, USBOV_DBC_VOTER, true, 0);
+		schedule_delayed_work(&chg->usbov_dbc_work,
+				msecs_to_jiffies(USB_OV_DBC_PERIOD_MS));
+	} else {
+		cancel_delayed_work_sync(&chg->usbov_dbc_work);
+		chg->dbc_usbov = false;
+		vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+	}
+
+	smblib_dbg(chg, PR_MISC, "USBOV debounce status %d\n",
+				chg->dbc_usbov);
+	return IRQ_HANDLED;
+}
+
 /**************
  * Additional USB PSY getters/setters
  * that call interrupt functions
@@ -5173,40 +5884,190 @@
 					rc);
 }
 
-#define JEITA_SOFT			0
-#define JEITA_HARD			1
-static int smblib_update_jeita(struct smb_charger *chg, u32 *thresholds,
-								int type)
+#define MOISTURE_PROTECTION_CHECK_DELAY_MS 300000		/* 5 mins */
+static void smblib_moisture_protection_work(struct work_struct *work)
 {
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						moisture_protection_work);
 	int rc;
-	u16 temp, base;
+	bool usb_plugged_in;
+	u8 stat;
 
-	base = CHGR_JEITA_THRESHOLD_BASE_REG(type);
+	/*
+	 * Hold awake votable to prevent pm_relax being called prior to
+	 * completion of this work.
+	 */
+	vote(chg->awake_votable, MOISTURE_VOTER, true, 0);
 
-	temp = thresholds[1] & 0xFFFF;
-	temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
-	rc = smblib_batch_write(chg, base, (u8 *)&temp, 2);
+	/*
+	 * Disable 1% duty cycle on CC_ID pin and enable uUSB factory mode
+	 * detection to track any change on RID, as interrupts are disabled.
+	 */
+	rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
+			PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+			TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
 	if (rc < 0) {
-		smblib_err(chg,
-			"Couldn't configure Jeita %s hot threshold rc=%d\n",
-			(type == JEITA_SOFT) ? "Soft" : "Hard", rc);
-		return rc;
+		smblib_err(chg, "Couldn't disable periodic monitoring of CC_ID rc=%d\n",
+			rc);
+		goto out;
 	}
 
-	temp = thresholds[0] & 0xFFFF;
-	temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
-	rc = smblib_batch_write(chg, base + 2, (u8 *)&temp, 2);
+	rc = smblib_masked_write(chg, TYPEC_U_USB_CFG_REG,
+					EN_MICRO_USB_FACTORY_MODE_BIT,
+					EN_MICRO_USB_FACTORY_MODE_BIT);
 	if (rc < 0) {
-		smblib_err(chg,
-			"Couldn't configure Jeita %s cold threshold rc=%d\n",
-			(type == JEITA_SOFT) ? "Soft" : "Hard", rc);
-		return rc;
+		smblib_err(chg, "Couldn't enable uUSB factory mode detection rc=%d\n",
+			rc);
+		goto out;
 	}
 
-	smblib_dbg(chg, PR_MISC, "%s Jeita threshold configured\n",
-				(type == JEITA_SOFT) ? "Soft" : "Hard");
+	/*
+	 * Add a 100ms delay to allow the change in RID to be reflected in
+	 * the status registers.
+	 */
+	msleep(100);
 
-	return 0;
+	rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+		goto out;
+	}
+	usb_plugged_in = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+
+	/* Check uUSB status for moisture presence */
+	rc = smblib_read(chg, TYPEC_U_USB_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_U_USB_STATUS_REG rc=%d\n",
+				rc);
+		goto out;
+	}
+
+	/*
+	 * When USB is plugged in, factory mode detection uses a different
+	 * 2uA current source, which can hamper moisture detection. Since
+	 * factory mode is not supported in the kernel, detecting factory
+	 * mode can be treated as equivalent to the presence of moisture.
+	 */
+	if (stat == U_USB_STATUS_WATER_PRESENT || stat == U_USB_FMB1_BIT ||
+			stat == U_USB_FMB2_BIT || (usb_plugged_in &&
+			stat == U_USB_FLOAT1_BIT)) {
+		smblib_set_moisture_protection(chg, true);
+		alarm_start_relative(&chg->moisture_protection_alarm,
+			ms_to_ktime(MOISTURE_PROTECTION_CHECK_DELAY_MS));
+	} else {
+		smblib_set_moisture_protection(chg, false);
+		rc = alarm_cancel(&chg->moisture_protection_alarm);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't cancel moisture protection alarm\n");
+	}
+
+out:
+	vote(chg->awake_votable, MOISTURE_VOTER, false, 0);
+}
+
+static enum alarmtimer_restart moisture_protection_alarm_cb(struct alarm *alarm,
+							ktime_t now)
+{
+	struct smb_charger *chg = container_of(alarm, struct smb_charger,
+					moisture_protection_alarm);
+
+	smblib_dbg(chg, PR_MISC, "moisture Protection Alarm Triggered %lld\n",
+			ktime_to_ms(now));
+
+	/* Atomic context, cannot use voter */
+	pm_stay_awake(chg->dev);
+	schedule_work(&chg->moisture_protection_work);
+
+	return ALARMTIMER_NORESTART;
+}
+
+static void smblib_chg_termination_work(struct work_struct *work)
+{
+	union power_supply_propval pval;
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						chg_termination_work);
+	int rc, input_present, delay = CHG_TERM_WA_ENTRY_DELAY_MS;
+
+	/*
+	 * Hold awake votable to prevent pm_relax being called prior to
+	 * completion of this work.
+	 */
+	vote(chg->awake_votable, CHG_TERMINATION_VOTER, true, 0);
+
+	rc = smblib_is_input_present(chg, &input_present);
+	if ((rc < 0) || !input_present)
+		goto out;
+
+	rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CAPACITY, &pval);
+	if ((rc < 0) || (pval.intval < 100)) {
+		vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
+		goto out;
+	}
+
+	rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CHARGE_FULL,
+					&pval);
+	if (rc < 0)
+		goto out;
+
+	/*
+	 * When the learned capacity changes, re-initialize the reference
+	 * cc_soc value, since the cc_soc characteristic at full capacity
+	 * changes with it. Also re-initialize cc_soc_ref in case it has
+	 * been reset.
+	 */
+	if (pval.intval != chg->charge_full_cc || !chg->cc_soc_ref) {
+		chg->charge_full_cc = pval.intval;
+		rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CC_SOC,
+					&pval);
+		if (rc < 0)
+			goto out;
+
+		chg->cc_soc_ref = pval.intval;
+	} else {
+		rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CC_SOC,
+					&pval);
+		if (rc < 0)
+			goto out;
+	}
+
+	/*
+	 * Suspend/Unsuspend USB input to keep cc_soc within the 0.5% to 0.75%
+	 * overshoot range of the cc_soc value at termination, to prevent
+	 * overcharging.
+	 */
+	if (pval.intval < DIV_ROUND_CLOSEST(chg->cc_soc_ref * 10050, 10000)) {
+		vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
+		vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, false, 0);
+		delay = CHG_TERM_WA_ENTRY_DELAY_MS;
+	} else if (pval.intval > DIV_ROUND_CLOSEST(chg->cc_soc_ref * 10075,
+								10000)) {
+		vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, true, 0);
+		vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, true, 0);
+		delay = CHG_TERM_WA_EXIT_DELAY_MS;
+	}
+
+	smblib_dbg(chg, PR_MISC, "Chg Term WA readings: cc_soc: %d, cc_soc_ref: %d, delay: %d\n",
+			pval.intval, chg->cc_soc_ref, delay);
+	alarm_start_relative(&chg->chg_termination_alarm, ms_to_ktime(delay));
+out:
+	vote(chg->awake_votable, CHG_TERMINATION_VOTER, false, 0);
+}
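The suspend window above is pure integer arithmetic: the bounds are
cc_soc_ref scaled by 10050/10000 (+0.5%) and 10075/10000 (+0.75%). A worked
example with an illustrative reference value (cc_soc units are fuel-gauge
specific):

	int cc_soc_ref = 8000;	/* captured at termination, illustrative */
	int lower = DIV_ROUND_CLOSEST(cc_soc_ref * 10050, 10000);	/* 8040 */
	int upper = DIV_ROUND_CLOSEST(cc_soc_ref * 10075, 10000);	/* 8060 */
	/* cc_soc < 8040: unsuspend input; cc_soc > 8060: suspend input */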
+
+static enum alarmtimer_restart chg_termination_alarm_cb(struct alarm *alarm,
+								ktime_t now)
+{
+	struct smb_charger *chg = container_of(alarm, struct smb_charger,
+							chg_termination_alarm);
+
+	smblib_dbg(chg, PR_MISC, "Charge termination WA alarm triggered %lld\n",
+			ktime_to_ms(now));
+
+	/* Atomic context, cannot use voter */
+	pm_stay_awake(chg->dev);
+	schedule_work(&chg->chg_termination_work);
+
+	return ALARMTIMER_NORESTART;
 }
 
 static void jeita_update_work(struct work_struct *work)
@@ -5216,8 +6077,8 @@
 	struct device_node *node = chg->dev->of_node;
 	struct device_node *batt_node, *pnode;
 	union power_supply_propval val;
-	int rc;
-	u32 jeita_thresholds[2];
+	int rc, tmp[2], max_fcc_ma, max_fv_uv;
+	u32 jeita_hard_thresholds[2];
 
 	batt_node = of_find_node_by_name(node, "qcom,battery-data");
 	if (!batt_node) {
@@ -5250,9 +6111,10 @@
 	}
 
 	rc = of_property_read_u32_array(pnode, "qcom,jeita-hard-thresholds",
-				jeita_thresholds, 2);
+				jeita_hard_thresholds, 2);
 	if (!rc) {
-		rc = smblib_update_jeita(chg, jeita_thresholds, JEITA_HARD);
+		rc = smblib_update_jeita(chg, jeita_hard_thresholds,
+					JEITA_HARD);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't configure Hard Jeita rc=%d\n",
 					rc);
@@ -5261,18 +6123,83 @@
 	}
 
 	rc = of_property_read_u32_array(pnode, "qcom,jeita-soft-thresholds",
-				jeita_thresholds, 2);
+				chg->jeita_soft_thlds, 2);
 	if (!rc) {
-		rc = smblib_update_jeita(chg, jeita_thresholds, JEITA_SOFT);
+		rc = smblib_update_jeita(chg, chg->jeita_soft_thlds,
+					JEITA_SOFT);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't configure Soft Jeita rc=%d\n",
 					rc);
 			goto out;
 		}
+
+		rc = of_property_read_u32_array(pnode,
+					"qcom,jeita-soft-hys-thresholds",
+					chg->jeita_soft_hys_thlds, 2);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get Soft Jeita hysteresis thresholds rc=%d\n",
+					rc);
+			goto out;
+		}
 	}
 
+	chg->jeita_soft_fcc[0] = chg->jeita_soft_fcc[1] = -EINVAL;
+	chg->jeita_soft_fv[0] = chg->jeita_soft_fv[1] = -EINVAL;
+	max_fcc_ma = max_fv_uv = -EINVAL;
+
+	of_property_read_u32(pnode, "qcom,fastchg-current-ma", &max_fcc_ma);
+	of_property_read_u32(pnode, "qcom,max-voltage-uv", &max_fv_uv);
+
+	if (max_fcc_ma <= 0 || max_fv_uv <= 0) {
+		smblib_err(chg, "Incorrect fastchg-current-ma or max-voltage-uv\n");
+		goto out;
+	}
+
+	rc = of_property_read_u32_array(pnode, "qcom,jeita-soft-fcc-ua",
+					tmp, 2);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get fcc values for soft JEITA rc=%d\n",
+				rc);
+		goto out;
+	}
+
+	max_fcc_ma *= 1000;
+	if (tmp[0] > max_fcc_ma || tmp[1] > max_fcc_ma) {
+		smblib_err(chg, "Incorrect FCC value [%d %d] max: %d\n", tmp[0],
+			tmp[1], max_fcc_ma);
+		goto out;
+	}
+	chg->jeita_soft_fcc[0] = tmp[0];
+	chg->jeita_soft_fcc[1] = tmp[1];
+
+	rc = of_property_read_u32_array(pnode, "qcom,jeita-soft-fv-uv", tmp,
+					2);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get fv values for soft JEITA rc=%d\n",
+				rc);
+		goto out;
+	}
+
+	if (tmp[0] > max_fv_uv || tmp[1] > max_fv_uv) {
+		smblib_err(chg, "Incorrect FV value [%d %d] max: %d\n", tmp[0],
+			tmp[1], max_fv_uv);
+		goto out;
+	}
+	chg->jeita_soft_fv[0] = tmp[0];
+	chg->jeita_soft_fv[1] = tmp[1];
+
+	rc = smblib_soft_jeita_arb_wa(chg);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't fix soft jeita arb rc=%d\n",
+				rc);
+		goto out;
+	}
+
+	chg->jeita_configured = JEITA_CFG_COMPLETE;
+	return;
+
 out:
-	chg->jeita_configured = true;
+	chg->jeita_configured = JEITA_CFG_FAILURE;
 }
 
 static void smblib_lpd_ra_open_work(struct work_struct *work)
@@ -5316,16 +6243,9 @@
 		goto out;
 	}
 
-	/* Wait 1.5ms to read src status */
+	/* Wait 1.5ms to get SBUx ready */
 	usleep_range(1500, 1510);
 
-	rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n",
-				rc);
-		goto out;
-	}
-
 	if (smblib_rsbux_low(chg, RSBU_K_300K_UV)) {
 		/* Moisture detected, enable sink only mode */
 		pval.intval = POWER_SUPPLY_TYPEC_PR_SINK;
@@ -5418,6 +6338,15 @@
 
 	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
 
+	chg->smb_override_votable = create_votable("SMB_EN_OVERRIDE",
+				VOTE_SET_ANY,
+				smblib_smb_disable_override_vote_callback, chg);
+	if (IS_ERR(chg->smb_override_votable)) {
+		rc = PTR_ERR(chg->smb_override_votable);
+		chg->smb_override_votable = NULL;
+		return rc;
+	}
+
 	chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
 					smblib_dc_suspend_vote_callback,
 					chg);
@@ -5455,16 +6384,6 @@
 		return rc;
 	}
 
-	chg->wdog_snarl_irq_en_votable = create_votable("SNARL_WDOG_IRQ_ENABLE",
-					VOTE_SET_ANY,
-					smblib_wdog_snarl_irq_en_vote_callback,
-					chg);
-	if (IS_ERR(chg->wdog_snarl_irq_en_votable)) {
-		rc = PTR_ERR(chg->wdog_snarl_irq_en_votable);
-		chg->wdog_snarl_irq_en_votable = NULL;
-		return rc;
-	}
-
 	return rc;
 }
 
@@ -5492,6 +6411,14 @@
 		iio_channel_release(chg->iio.sbux_chan);
 	if (!IS_ERR_OR_NULL(chg->iio.vph_v_chan))
 		iio_channel_release(chg->iio.vph_v_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.die_temp_chan))
+		iio_channel_release(chg->iio.die_temp_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.connector_temp_chan))
+		iio_channel_release(chg->iio.connector_temp_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.skin_temp_chan))
+		iio_channel_release(chg->iio.skin_temp_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.smb_temp_chan))
+		iio_channel_release(chg->iio.smb_temp_chan);
 }
 
 int smblib_init(struct smb_charger *chg)
@@ -5512,6 +6439,34 @@
 	INIT_DELAYED_WORK(&chg->lpd_detach_work, smblib_lpd_detach_work);
 	INIT_DELAYED_WORK(&chg->thermal_regulation_work,
 					smblib_thermal_regulation_work);
+	INIT_DELAYED_WORK(&chg->usbov_dbc_work, smblib_usbov_dbc_work);
+
+	if (chg->wa_flags & CHG_TERMINATION_WA) {
+		INIT_WORK(&chg->chg_termination_work,
+					smblib_chg_termination_work);
+
+		if (alarmtimer_get_rtcdev()) {
+			alarm_init(&chg->chg_termination_alarm, ALARM_BOOTTIME,
+						chg_termination_alarm_cb);
+		} else {
+			smblib_err(chg, "Couldn't get rtc device\n");
+			return -ENODEV;
+		}
+	}
+
+	if (chg->uusb_moisture_protection_enabled) {
+		INIT_WORK(&chg->moisture_protection_work,
+					smblib_moisture_protection_work);
+
+		if (alarmtimer_get_rtcdev()) {
+			alarm_init(&chg->moisture_protection_alarm,
+				ALARM_BOOTTIME, moisture_protection_alarm_cb);
+		} else {
+			smblib_err(chg, "Failed to initialize moisture protection alarm\n");
+			return -ENODEV;
+		}
+	}
+
 	chg->fake_capacity = -EINVAL;
 	chg->fake_input_current_limited = -EINVAL;
 	chg->fake_batt_status = -EINVAL;
@@ -5519,6 +6474,7 @@
 	chg->jeita_configured = false;
 	chg->sec_chg_selected = POWER_SUPPLY_CHARGER_SEC_NONE;
 	chg->cp_reason = POWER_SUPPLY_CP_NONE;
+	chg->thermal_status = TEMP_BELOW_RANGE;
 
 	switch (chg->mode) {
 	case PARALLEL_MASTER:
@@ -5530,7 +6486,7 @@
 		}
 
 		rc = qcom_step_chg_init(chg->dev, chg->step_chg_enabled,
-						chg->sw_jeita_enabled);
+						chg->sw_jeita_enabled, false);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't init qcom_step_chg_init rc=%d\n",
 				rc);
@@ -5603,6 +6559,14 @@
 {
 	switch (chg->mode) {
 	case PARALLEL_MASTER:
+		if (chg->uusb_moisture_protection_enabled) {
+			alarm_cancel(&chg->moisture_protection_alarm);
+			cancel_work_sync(&chg->moisture_protection_work);
+		}
+		if (chg->wa_flags & CHG_TERMINATION_WA) {
+			alarm_cancel(&chg->chg_termination_alarm);
+			cancel_work_sync(&chg->chg_termination_work);
+		}
 		cancel_work_sync(&chg->bms_update_work);
 		cancel_work_sync(&chg->jeita_update_work);
 		cancel_work_sync(&chg->pl_update_work);
@@ -5614,6 +6578,7 @@
 		cancel_delayed_work_sync(&chg->lpd_ra_open_work);
 		cancel_delayed_work_sync(&chg->lpd_detach_work);
 		cancel_delayed_work_sync(&chg->thermal_regulation_work);
+		cancel_delayed_work_sync(&chg->usbov_dbc_work);
 		power_supply_unreg_notifier(&chg->nb);
 		smblib_destroy_votables(chg);
 		qcom_step_chg_deinit();
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 2c28443..154554a 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SMB5_CHARGER_H
@@ -35,6 +35,7 @@
 #define CHG_STATE_VOTER			"CHG_STATE_VOTER"
 #define TAPER_END_VOTER			"TAPER_END_VOTER"
 #define THERMAL_DAEMON_VOTER		"THERMAL_DAEMON_VOTER"
+#define DIE_TEMP_VOTER			"DIE_TEMP_VOTER"
 #define BOOST_BACK_VOTER		"BOOST_BACK_VOTER"
 #define MICRO_USB_VOTER			"MICRO_USB_VOTER"
 #define DEBUG_BOARD_VOTER		"DEBUG_BOARD_VOTER"
@@ -59,13 +60,30 @@
 #define LPD_VOTER			"LPD_VOTER"
 #define FCC_STEPPER_VOTER		"FCC_STEPPER_VOTER"
 #define SW_THERM_REGULATION_VOTER	"SW_THERM_REGULATION_VOTER"
+#define JEITA_ARB_VOTER			"JEITA_ARB_VOTER"
+#define MOISTURE_VOTER			"MOISTURE_VOTER"
+#define HVDCP2_ICL_VOTER		"HVDCP2_ICL_VOTER"
+#define AICL_THRESHOLD_VOTER		"AICL_THRESHOLD_VOTER"
+#define USBOV_DBC_VOTER			"USBOV_DBC_VOTER"
+#define CHG_TERMINATION_VOTER		"CHG_TERMINATION_VOTER"
 
 #define BOOST_BACK_STORM_COUNT	3
 #define WEAK_CHG_STORM_COUNT	8
 
 #define VBAT_TO_VRAW_ADC(v)		div_u64((u64)v * 1000000UL, 194637UL)
 
-#define ADC_CHG_TERM_MASK	32767
+#define ITERM_LIMITS_PMI632_MA		5000
+#define ITERM_LIMITS_PM8150B_MA		10000
+#define ADC_CHG_ITERM_MASK		32767
+
+#define SDP_100_MA			100000
+#define SDP_CURRENT_UA			500000
+#define CDP_CURRENT_UA			1500000
+#define DCP_CURRENT_UA			1500000
+#define HVDCP_CURRENT_UA		3000000
+#define TYPEC_DEFAULT_CURRENT_UA	900000
+#define TYPEC_MEDIUM_CURRENT_UA		1500000
+#define TYPEC_HIGH_CURRENT_UA		3000000
 
 enum smb_mode {
 	PARALLEL_MASTER = 0,
@@ -88,6 +106,20 @@
 enum {
 	BOOST_BACK_WA			= BIT(0),
 	SW_THERM_REGULATION_WA		= BIT(1),
+	WEAK_ADAPTER_WA			= BIT(2),
+	USBIN_OV_WA			= BIT(3),
+	CHG_TERMINATION_WA		= BIT(4),
+};
+
+enum jeita_cfg_stat {
+	JEITA_CFG_NONE = 0,
+	JEITA_CFG_FAILURE,
+	JEITA_CFG_COMPLETE,
+};
+
+enum {
+	RERUN_AICL = 0,
+	RESTART_AICL,
 };
 
 enum smb_irq_index {
@@ -221,6 +253,15 @@
 	TEMP_BELOW_RANGE,
 };
 
+enum icl_override_mode {
+	/* APSD/Type-C/QC auto */
+	HW_AUTO_MODE,
+	/* 100/150/500/900mA */
+	SW_OVERRIDE_USB51_MODE,
+	/* ICL other than USB51 */
+	SW_OVERRIDE_HC_MODE,
+};
+
 /* EXTCON_USB and EXTCON_USB_HOST are mutually exclusive */
 static const u32 smblib_extcon_exclusive[] = {0x3, 0};
 
@@ -274,6 +315,8 @@
 	struct smb_chg_param	jeita_cc_comp_hot;
 	struct smb_chg_param	jeita_cc_comp_cold;
 	struct smb_chg_param	freq_switcher;
+	struct smb_chg_param	aicl_5v_threshold;
+	struct smb_chg_param	aicl_cont_threshold;
 };
 
 struct parallel_params {
@@ -322,6 +365,7 @@
 	struct power_supply		*usb_main_psy;
 	struct power_supply		*usb_port_psy;
 	struct power_supply		*wls_psy;
+	struct power_supply		*cp_psy;
 	enum power_supply_type		real_charger_type;
 
 	/* notifiers */
@@ -346,12 +390,14 @@
 	struct votable		*pl_enable_votable_indirect;
 	struct votable		*usb_irq_enable_votable;
 	struct votable		*cp_disable_votable;
-	struct votable		*wdog_snarl_irq_en_votable;
+	struct votable		*smb_override_votable;
 
 	/* work */
 	struct work_struct	bms_update_work;
 	struct work_struct	pl_update_work;
 	struct work_struct	jeita_update_work;
+	struct work_struct	moisture_protection_work;
+	struct work_struct	chg_termination_work;
 	struct delayed_work	ps_change_timeout_work;
 	struct delayed_work	clear_hdc_work;
 	struct delayed_work	icl_change_work;
@@ -361,8 +407,11 @@
 	struct delayed_work	lpd_ra_open_work;
 	struct delayed_work	lpd_detach_work;
 	struct delayed_work	thermal_regulation_work;
+	struct delayed_work	usbov_dbc_work;
 
 	struct alarm		lpd_recheck_timer;
+	struct alarm		moisture_protection_alarm;
+	struct alarm		chg_termination_alarm;
 
 	/* secondary charger config */
 	bool			sec_pl_present;
@@ -391,6 +440,7 @@
 	int			fake_batt_status;
 	bool			step_chg_enabled;
 	bool			sw_jeita_enabled;
+	bool			typec_legacy_use_rp_icl;
 	bool			is_hdc;
 	bool			chg_done;
 	int			connector_type;
@@ -405,17 +455,19 @@
 	int			usb_icl_change_irq_enabled;
 	u32			jeita_status;
 	u8			float_cfg;
+	bool			jeita_arb_flag;
 	bool			use_extcon;
 	bool			otg_present;
 	bool			hvdcp_disable;
 	int			hw_max_icl_ua;
 	int			auto_recharge_soc;
 	enum sink_src_mode	sink_src_mode;
-	bool			jeita_configured;
+	enum jeita_cfg_stat	jeita_configured;
 	int			charger_temp_max;
 	int			smb_temp_max;
 	u8			typec_try_mode;
 	enum lpd_stage		lpd_stage;
+	bool			lpd_disabled;
 	enum lpd_reason		lpd_reason;
 	bool			fcc_stepper_enable;
 	int			die_temp;
@@ -424,12 +476,31 @@
 	int			connector_temp;
 	int			thermal_status;
 	int			main_fcc_max;
+	u32			jeita_soft_thlds[2];
+	u32			jeita_soft_hys_thlds[2];
+	int			jeita_soft_fcc[2];
+	int			jeita_soft_fv[2];
+	bool			moisture_present;
+	bool			uusb_moisture_protection_capable;
+	bool			uusb_moisture_protection_enabled;
+	bool			hw_die_temp_mitigation;
+	bool			hw_connector_mitigation;
+	bool			hw_skin_temp_mitigation;
+	int			connector_pull_up;
+	int			aicl_5v_threshold_mv;
+	int			default_aicl_5v_threshold_mv;
+	int			aicl_cont_threshold_mv;
+	int			default_aicl_cont_threshold_mv;
+	bool			aicl_max_reached;
+	int			charge_full_cc;
+	int			cc_soc_ref;
 
 	/* workaround flag */
 	u32			wa_flags;
 	int			boost_current_ua;
 	int                     qc2_max_pulses;
 	enum qc2_non_comp_voltage qc2_unsupported_voltage;
+	bool			dbc_usbov;
 
 	/* extcon for VBUS / ID notification to USB for uUSB */
 	struct extcon_dev	*extcon;
@@ -464,7 +535,7 @@
 int smblib_get_charge_param(struct smb_charger *chg,
 			    struct smb_chg_param *param, int *val_u);
 int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend);
-
+int smblib_get_aicl_cont_threshold(struct smb_chg_param *param, u8 val_raw);
 int smblib_enable_charging(struct smb_charger *chg, bool enable);
 int smblib_set_charge_param(struct smb_charger *chg,
 			    struct smb_chg_param *param, int val_u);
@@ -481,6 +552,8 @@
 				int val_u, u8 *val_raw);
 int smblib_set_prop_boost_current(struct smb_charger *chg,
 				const union power_supply_propval *val);
+int smblib_set_aicl_cont_threshold(struct smb_chg_param *param,
+				int val_u, u8 *val_raw);
 int smblib_vbus_regulator_enable(struct regulator_dev *rdev);
 int smblib_vbus_regulator_disable(struct regulator_dev *rdev);
 int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev);
@@ -505,6 +578,8 @@
 irqreturn_t wdog_snarl_irq_handler(int irq, void *data);
 irqreturn_t wdog_bark_irq_handler(int irq, void *data);
 irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data);
+irqreturn_t temp_change_irq_handler(int irq, void *data);
+irqreturn_t usbin_ov_irq_handler(int irq, void *data);
 
 int smblib_get_prop_input_suspend(struct smb_charger *chg,
 				union power_supply_propval *val);
@@ -518,6 +593,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_batt_current_now(struct smb_charger *chg,
+					union power_supply_propval *val);
 int smblib_get_prop_batt_health(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_system_temp_level(struct smb_charger *chg,
@@ -557,6 +634,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_usb_online(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_usb_online(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_usb_suspend(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
@@ -567,8 +646,12 @@
 				union power_supply_propval *val);
 int smblib_get_prop_usb_current_now(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_usb_prop_typec_mode(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_scope(struct smb_charger *chg,
+			union power_supply_propval *val);
 int smblib_get_prop_typec_select_rp(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_typec_power_role(struct smb_charger *chg,
@@ -584,6 +667,8 @@
 int smblib_get_prop_charger_temp(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_die_health(struct smb_charger *chg);
+int smblib_get_die_health(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_connector_health(struct smb_charger *chg);
 int smblib_set_prop_pd_current_max(struct smb_charger *chg,
 				const union power_supply_propval *val);
@@ -612,7 +697,7 @@
 int smblib_get_thermal_threshold(struct smb_charger *chg, u16 addr, int *val);
 int smblib_dp_dm(struct smb_charger *chg, int val);
 int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable);
-int smblib_rerun_aicl(struct smb_charger *chg);
+int smblib_run_aicl(struct smb_charger *chg, int type);
 int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
 int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua);
 int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
@@ -628,12 +713,13 @@
 int smblib_read_iio_channel(struct smb_charger *chg, struct iio_channel *chan,
 							int div, int *data);
 int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable);
-int smblib_icl_override(struct smb_charger *chg, bool override);
+int smblib_icl_override(struct smb_charger *chg, enum icl_override_mode mode);
 enum alarmtimer_restart smblib_lpd_recheck_timer(struct alarm *alarm,
 				ktime_t time);
 int smblib_toggle_smb_en(struct smb_charger *chg, int toggle);
 void smblib_hvdcp_detect_enable(struct smb_charger *chg, bool enable);
 void smblib_apsd_enable(struct smb_charger *chg, bool enable);
+int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h
index 1cd9c74..af361fb 100644
--- a/drivers/power/supply/qcom/smb5-reg.h
+++ b/drivers/power/supply/qcom/smb5-reg.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SMB5_CHARGER_REG_H
@@ -50,6 +50,7 @@
 #define BAT_TEMP_STATUS_SOFT_LIMIT_MASK		GENMASK(5, 4)
 #define BAT_TEMP_STATUS_HOT_SOFT_BIT		BIT(5)
 #define BAT_TEMP_STATUS_COLD_SOFT_BIT		BIT(4)
+#define BAT_TEMP_STATUS_HARD_LIMIT_MASK		GENMASK(3, 2)
 #define BAT_TEMP_STATUS_TOO_HOT_BIT		BIT(3)
 #define BAT_TEMP_STATUS_TOO_COLD_BIT		BIT(2)
 #define BAT_TEMP_STATUS_TOO_HOT_AFP_BIT		BIT(1)
@@ -103,6 +104,7 @@
 #define JEITA_CCCOMP_CFG_COLD_REG		(CHGR_BASE + 0x93)
 
 #define CHGR_JEITA_THRESHOLD_BASE_REG(i)	(CHGR_BASE + 0x94 + (i * 4))
+#define CHGR_JEITA_HOT_THRESHOLD_MSB_REG	CHGR_JEITA_THRESHOLD_BASE_REG(0)
 
 #define CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG	(CHGR_BASE + 0xA2)
 #define FAST_CHARGE_SAFETY_TIMER_192_MIN	0x0
@@ -117,7 +119,7 @@
  *  DCDC Peripheral Registers  *
  ********************************/
 #define ICL_MAX_STATUS_REG			(DCDC_BASE + 0x06)
-
+#define ICL_STATUS_REG				(DCDC_BASE + 0x07)
 #define AICL_ICL_STATUS_REG			(DCDC_BASE + 0x08)
 
 #define AICL_STATUS_REG				(DCDC_BASE + 0x0A)
@@ -128,6 +130,7 @@
 #define USBIN_SUSPEND_STS_BIT			BIT(6)
 #define USE_USBIN_BIT				BIT(4)
 #define USE_DCIN_BIT				BIT(3)
+#define POWER_PATH_MASK				GENMASK(2, 1)
 #define VALID_INPUT_POWER_SOURCE_STS_BIT	BIT(0)
 
 #define DCDC_CMD_OTG_REG			(DCDC_BASE + 0x40)
@@ -168,6 +171,19 @@
 #define SHIP_MODE_REG				(BATIF_BASE + 0x40)
 #define SHIP_MODE_EN_BIT			BIT(0)
 
+#define BATIF_ADC_CHANNEL_EN_REG		(BATIF_BASE + 0x82)
+#define CONN_THM_CHANNEL_EN_BIT			BIT(4)
+#define DIE_TEMP_CHANNEL_EN_BIT			BIT(2)
+#define MISC_THM_CHANNEL_EN_BIT			BIT(1)
+
+#define BATIF_ADC_INTERNAL_PULL_UP_REG		(BATIF_BASE + 0x86)
+#define INTERNAL_PULL_UP_CONN_THM_MASK		GENMASK(5, 4)
+#define CONN_THM_SHIFT				4
+#define INTERNAL_PULL_NO_PULL			0x00
+#define INTERNAL_PULL_30K_PULL			0x01
+#define INTERNAL_PULL_100K_PULL			0x02
+#define INTERNAL_PULL_400K_PULL			0x03
+
 /********************************
  *  USBIN Peripheral Registers  *
  ********************************/
@@ -214,10 +230,14 @@
 #define CMD_APSD_REG				(USBIN_BASE + 0x41)
 #define APSD_RERUN_BIT				BIT(0)
 
+#define CMD_ICL_OVERRIDE_REG			(USBIN_BASE + 0x42)
+#define ICL_OVERRIDE_BIT			BIT(0)
+
 #define CMD_HVDCP_2_REG				(USBIN_BASE + 0x43)
 #define FORCE_12V_BIT				BIT(5)
 #define FORCE_9V_BIT				BIT(4)
 #define FORCE_5V_BIT				BIT(3)
+#define IDLE_BIT				BIT(2)
 #define SINGLE_DECREMENT_BIT			BIT(1)
 #define SINGLE_INCREMENT_BIT			BIT(0)
 
@@ -282,6 +302,8 @@
 #define USB_ENG_SSUPPLY_USB2_REG		(USBIN_BASE + 0xC0)
 #define ENG_SSUPPLY_12V_OV_OPT_BIT		BIT(1)
 
+#define USBIN_5V_AICL_THRESHOLD_REG		(USBIN_BASE + 0x81)
+#define USBIN_CONT_AICL_THRESHOLD_REG		(USBIN_BASE + 0x84)
 /********************************
  *  DCIN Peripheral Registers   *
  ********************************/
@@ -292,6 +314,9 @@
 #define DCIN_CMD_IL_REG				(DCIN_BASE + 0x40)
 #define DCIN_SUSPEND_BIT			BIT(0)
 
+#define DCIN_LOAD_CFG_REG			(DCIN_BASE + 0x65)
+#define INPUT_MISS_POLL_EN_BIT			BIT(5)
+
 /********************************
  *  TYPEC Peripheral Registers  *
  ********************************/
@@ -329,6 +354,10 @@
 #define TYPEC_U_USB_STATUS_REG			(TYPEC_BASE + 0x0F)
 #define U_USB_GROUND_NOVBUS_BIT			BIT(6)
 #define U_USB_GROUND_BIT			BIT(4)
+#define U_USB_FMB1_BIT				BIT(3)
+#define U_USB_FLOAT1_BIT			BIT(2)
+#define U_USB_FMB2_BIT				BIT(1)
+#define U_USB_FLOAT2_BIT			BIT(0)
 
 #define TYPE_C_MODE_CFG_REG			(TYPEC_BASE + 0x44)
 #define TYPEC_TRY_MODE_MASK			GENMASK(4, 3)
@@ -349,6 +378,9 @@
 #define TYPEC_CCOUT_VALUE_BIT			BIT(1)
 #define TYPEC_CCOUT_SRC_BIT			BIT(0)
 
+#define DEBUG_ACCESS_SRC_CFG_REG		(TYPEC_BASE + 0x4C)
+#define EN_UNORIENTED_DEBUG_ACCESS_SRC_BIT	BIT(0)
+
 #define TYPE_C_CRUDE_SENSOR_CFG_REG		(TYPEC_BASE + 0x4e)
 #define EN_SRC_CRUDE_SENSOR_BIT			BIT(1)
 #define EN_SNK_CRUDE_SENSOR_BIT			BIT(0)
@@ -394,9 +426,16 @@
 #define SEL_SBU2_ISRC_VAL			0x01
 
 #define TYPEC_U_USB_CFG_REG			(TYPEC_BASE + 0x70)
+#define EN_MICRO_USB_FACTORY_MODE_BIT		BIT(1)
 #define EN_MICRO_USB_MODE_BIT			BIT(0)
 
-#define TYPEC_MICRO_USB_MODE_REG		(TYPEC_BASE + 0x73)
+#define PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG	(TYPEC_BASE + 0x72)
+#define TYPEC_U_USB_WATER_PROTECTION_CFG_REG		(TYPEC_BASE + 0x73)
+#define EN_MICRO_USB_WATER_PROTECTION_BIT		BIT(4)
+#define MICRO_USB_DETECTION_ON_TIME_CFG_MASK		GENMASK(3, 2)
+#define MICRO_USB_DETECTION_PERIOD_CFG_MASK		GENMASK(1, 0)
+
+#define PMI632_TYPEC_MICRO_USB_MODE_REG		(TYPEC_BASE + 0x73)
 #define MICRO_USB_MODE_ONLY_BIT			BIT(0)
 /********************************
  *  MISC Peripheral Registers  *
@@ -427,6 +466,7 @@
 #define BARK_BITE_WDOG_PET_BIT			BIT(0)
 
 #define AICL_CMD_REG				(MISC_BASE + 0x44)
+#define RESTART_AICL_BIT			BIT(1)
 #define RERUN_AICL_BIT				BIT(0)
 
 #define MISC_SMB_EN_CMD_REG			(MISC_BASE + 0x48)
@@ -457,7 +497,13 @@
 #define AICL_RERUN_TIME_12S_VAL			0x01
 
 #define MISC_THERMREG_SRC_CFG_REG		(MISC_BASE + 0x70)
+#define THERMREG_SW_ICL_ADJUST_BIT		BIT(7)
+#define DIE_ADC_SEL_BIT				BIT(6)
 #define THERMREG_SMB_ADC_SRC_EN_BIT		BIT(5)
+#define THERMREG_CONNECTOR_ADC_SRC_EN_BIT	BIT(4)
+#define SKIN_ADC_CFG_BIT			BIT(3)
+#define THERMREG_SKIN_ADC_SRC_EN_BIT		BIT(2)
+#define THERMREG_DIE_ADC_SRC_EN_BIT		BIT(1)
 #define THERMREG_DIE_CMP_SRC_EN_BIT		BIT(0)
 
 #define MISC_SMB_CFG_REG			(MISC_BASE + 0x90)
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index 23f835e..82e7eb8 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "QCOM-STEPCHG: %s: " fmt, __func__
@@ -44,6 +44,7 @@
 	ktime_t			jeita_last_update_time;
 	bool			step_chg_enable;
 	bool			sw_jeita_enable;
+	bool			jeita_arb_en;
 	bool			config_is_read;
 	bool			step_chg_cfg_valid;
 	bool			sw_jeita_cfg_valid;
@@ -595,7 +596,7 @@
 	 * Suspend USB input path if battery voltage is above
 	 * JEITA VFLOAT threshold.
 	 */
-	if (fv_uv > 0) {
+	if (chip->jeita_arb_en && fv_uv > 0) {
 		rc = power_supply_get_property(chip->batt_psy,
 				POWER_SUPPLY_PROP_VOLTAGE_NOW, &pval);
 		if (!rc && (pval.intval > fv_uv))
@@ -747,7 +748,7 @@
 }
 
 int qcom_step_chg_init(struct device *dev,
-		bool step_chg_enable, bool sw_jeita_enable)
+		bool step_chg_enable, bool sw_jeita_enable, bool jeita_arb_en)
 {
 	int rc;
 	struct step_chg_info *chip;
@@ -768,6 +769,7 @@
 	chip->dev = dev;
 	chip->step_chg_enable = step_chg_enable;
 	chip->sw_jeita_enable = sw_jeita_enable;
+	chip->jeita_arb_en = jeita_arb_en;
 	chip->step_index = -EINVAL;
 	chip->jeita_fcc_index = -EINVAL;
 	chip->jeita_fv_index = -EINVAL;
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
index f5431b6..9936e31 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.h
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __STEP_CHG_H__
@@ -22,7 +22,7 @@
 };
 
 int qcom_step_chg_init(struct device *dev,
-		bool step_chg_enable, bool sw_jeita_enable);
+		bool step_chg_enable, bool sw_jeita_enable, bool jeita_arb_en);
 void qcom_step_chg_deinit(void);
 int read_range_data_from_node(struct device_node *node,
 		const char *prop_str, struct range_data *ranges,
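
The new jeita_arb_en flag threaded through qcom_step_chg_init() makes the overvoltage USB suspend opt-in: the JEITA work now suspends the input path only when the caller enabled arbitration. A minimal sketch of the gated check; get_batt_voltage_uv() and suspend_usb_input() are hypothetical stand-ins for the power-supply property calls:

	/* Suspend input only when JEITA arbitration is enabled and the
	 * battery voltage exceeds the JEITA float-voltage threshold.
	 */
	static void jeita_check_input_suspend(struct step_chg_info *chip, int fv_uv)
	{
		int vbatt_uv;

		if (!chip->jeita_arb_en || fv_uv <= 0)
			return;		/* arbitration disabled: leave input alone */

		if (!get_batt_voltage_uv(chip, &vbatt_uv) && vbatt_uv > fv_uv)
			suspend_usb_input(chip, true);
	}
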
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 2012551..796eeff 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -228,7 +228,9 @@
 			pct->sec = ts.tv_sec;
 			pct->nsec = ts.tv_nsec;
 			pct++;
-			ptp->info->gettime64(ptp->info, &ts);
+			err = ptp->info->gettime64(ptp->info, &ts);
+			if (err)
+				goto out;
 			pct->sec = ts.tv_sec;
 			pct->nsec = ts.tv_nsec;
 			pct++;
@@ -281,6 +283,7 @@
 		break;
 	}
 
+out:
 	kfree(sysoff);
 	return err;
 }
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 7eacc1c..c64903a 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -253,8 +253,10 @@
 	ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
 					     ptp, ptp->pin_attr_groups,
 					     "ptp%d", ptp->index);
-	if (IS_ERR(ptp->dev))
+	if (IS_ERR(ptp->dev)) {
+		err = PTR_ERR(ptp->dev);
 		goto no_device;
+	}
 
 	/* Register a new PPS source. */
 	if (info->pps) {
@@ -265,6 +267,7 @@
 		pps.owner = info->owner;
 		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
 		if (!ptp->pps_source) {
+			err = -EINVAL;
 			pr_err("failed to register pps source\n");
 			goto no_pps;
 		}
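
Both PTP fixes apply the same rule: convert the failure into a concrete errno before jumping to the cleanup label, so the function cannot fall through and return a stale (or zero) err. A generic sketch of the pattern; every name here is a placeholder:

	static int probe_example(struct device *parent)
	{
		struct object *obj;
		struct source *src;
		int err;

		obj = create_object(parent);	/* ERR_PTR()-style failure */
		if (IS_ERR(obj))
			return PTR_ERR(obj);	/* capture the concrete errno */

		src = register_source(obj);	/* NULL-style failure */
		if (!src) {
			err = -EINVAL;		/* NULL carries no errno: pick one */
			goto out_destroy;
		}
		return 0;

	out_destroy:
		destroy_object(obj);
		return err;
	}
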
diff --git a/drivers/regulator/qpnp-amoled-regulator.c b/drivers/regulator/qpnp-amoled-regulator.c
index 69ffeff..c9c6b2d 100644
--- a/drivers/regulator/qpnp-amoled-regulator.c
+++ b/drivers/regulator/qpnp-amoled-regulator.c
@@ -35,16 +35,24 @@
 /* AB */
 #define AB_STATUS1(chip)		(chip->ab_base + 0x08)
 #define AB_LDO_SW_DBG_CTL(chip)		(chip->ab_base + 0x72)
-
-/* IBB */
-#define IBB_PS_CTL(chip)		(chip->ibb_base + 0x50)
-#define IBB_NLIMIT_DAC(chip)		(chip->ibb_base + 0x61)
-#define IBB_SMART_PS_CTL(chip)		(chip->ibb_base + 0x65)
+#define AB_LDO_PD_CTL(chip)		(chip->ab_base + 0x78)
 
 /* AB_STATUS1 */
 #define VREG_OK_BIT			BIT(6)
 #define VREG_OK_SHIFT			6
 
+/* AB_LDO_PD_CTL */
+#define PULLDN_EN_BIT			BIT(7)
+
+/* IBB */
+#define IBB_PD_CTL(chip)		(chip->ibb_base + 0x47)
+#define IBB_PS_CTL(chip)		(chip->ibb_base + 0x50)
+#define IBB_NLIMIT_DAC(chip)		(chip->ibb_base + 0x61)
+#define IBB_SMART_PS_CTL(chip)		(chip->ibb_base + 0x65)
+
+/* IBB_PD_CTL */
+#define ENABLE_PD_BIT			BIT(7)
+
 struct amoled_regulator {
 	struct regulator_desc	rdesc;
 	struct regulator_dev	*rdev;
@@ -65,6 +73,7 @@
 
 	/* DT params */
 	bool			swire_control;
+	bool			pd_control;
 };
 
 struct ibb_regulator {
@@ -72,6 +81,7 @@
 
 	/* DT params */
 	bool			swire_control;
+	bool			pd_control;
 };
 
 struct qpnp_amoled {
@@ -120,7 +130,7 @@
 	return rc;
 }
 
-int qpnp_amoled_masked_write(struct qpnp_amoled *chip,
+static int qpnp_amoled_masked_write(struct qpnp_amoled *chip,
 				u16 addr, u8 mask, u8 value)
 {
 	int rc = 0;
@@ -208,13 +218,28 @@
 	return 0;
 }
 
+static int qpnp_ab_pd_control(struct qpnp_amoled *chip, bool en)
+{
+	u8 val = en ? PULLDN_EN_BIT : 0;
+
+	return qpnp_amoled_write(chip, AB_LDO_PD_CTL(chip), &val, 1);
+}
+
 #define AB_VREG_OK_POLL_TRIES		50
+#define AB_VREG_OK_POLL_TIME_US		2000
+#define AB_VREG_OK_POLL_HIGH_TRIES	8
+#define AB_VREG_OK_POLL_HIGH_TIME_US	10000
+#define AB_VREG_OK_POLL_AGAIN_TRIES	10
+
 static int qpnp_ab_poll_vreg_ok(struct qpnp_amoled *chip, bool status)
 {
-	u32 i = AB_VREG_OK_POLL_TRIES, poll_us = 2000;
+	u32 i = AB_VREG_OK_POLL_TRIES, poll_us = AB_VREG_OK_POLL_TIME_US;
+	bool swire_high = false, poll_again = false, monitor = false;
+	u32 wait_time_us = 0;
 	int rc;
 	u8 val;
 
+loop:
 	while (i--) {
 		/* Write a dummy value before reading AB_STATUS1 */
 		rc = qpnp_amoled_write(chip, AB_STATUS1(chip), &val, 1);
@@ -225,19 +250,82 @@
 		if (rc < 0)
 			return rc;
 
+		wait_time_us += poll_us;
 		if (((val & VREG_OK_BIT) >> VREG_OK_SHIFT) == status) {
-			pr_debug("Waited for %d us\n",
-				(AB_VREG_OK_POLL_TRIES - i) * poll_us);
-			return 0;
+			pr_debug("Waited for %d us\n", wait_time_us);
+
+			/*
+			 * Return if we're polling for VREG_OK low. Else, poll
+			 * for VREG_OK high for at least 80 ms. If VREG_OK stays
+			 * high, then consider it as a valid SWIRE pulse.
+			 */
+
+			if (status) {
+				swire_high = true;
+				if (!poll_again && !monitor) {
+					pr_debug("SWIRE is high, start monitoring\n");
+					i = AB_VREG_OK_POLL_HIGH_TRIES;
+					poll_us = AB_VREG_OK_POLL_HIGH_TIME_US;
+					wait_time_us = 0;
+					monitor = true;
+				}
+
+				if (poll_again)
+					poll_again = false;
+			} else {
+				return 0;
+			}
+		} else {
+			/*
+			 * If we're here when polling for VREG_OK high, then it
+			 * is possibly because of an intermittent SWIRE pulse.
+			 * Ignore it and poll for a valid SWIRE pulse again.
+			 */
+			if (status && swire_high && monitor) {
+				pr_debug("SWIRE is low\n");
+				poll_again = true;
+				swire_high = false;
+				break;
+			}
+
+			if (poll_again)
+				poll_again = false;
 		}
 
 		usleep_range(poll_us, poll_us + 1);
 	}
 
+	/*
+	 * If poll_again is set, then VREG_OK should be polled for another
+	 * 100 ms for a valid SWIRE signal.
+	 */
+
+	if (poll_again) {
+		pr_debug("polling again for SWIRE\n");
+		i = AB_VREG_OK_POLL_AGAIN_TRIES;
+		poll_us = AB_VREG_OK_POLL_HIGH_TIME_US;
+		wait_time_us = 0;
+		goto loop;
+	}
+
+	/* If swire_high is set, then it's a valid SWIRE signal; return 0. */
+	if (swire_high) {
+		pr_debug("SWIRE is high\n");
+		return 0;
+	}
+
 	pr_err("AB_STATUS1: %x poll for VREG_OK %d timed out\n", val, status);
 	return -ETIMEDOUT;
 }
 
+static int qpnp_ibb_pd_control(struct qpnp_amoled *chip, bool en)
+{
+	u8 val = en ? ENABLE_PD_BIT : 0;
+
+	return qpnp_amoled_masked_write(chip, IBB_PD_CTL(chip), ENABLE_PD_BIT,
+					val);
+}
+
 static int qpnp_ibb_aod_config(struct qpnp_amoled *chip, bool aod)
 {
 	int rc;
@@ -300,18 +388,54 @@
 		rc = qpnp_ibb_aod_config(chip, false);
 		if (rc < 0)
 			goto error;
+
+		if (chip->ibb.pd_control) {
+			rc = qpnp_ibb_pd_control(chip, true);
+			if (rc < 0)
+				goto error;
+		}
+
+		if (chip->ab.pd_control) {
+			rc = qpnp_ab_pd_control(chip, true);
+			if (rc < 0)
+				goto error;
+		}
 	} else if (mode == REGULATOR_MODE_IDLE) {
 		/* poll for VREG_OK low */
 		rc = qpnp_ab_poll_vreg_ok(chip, false);
 		if (rc < 0)
 			goto error;
 
+		if (chip->ibb.pd_control) {
+			rc = qpnp_ibb_pd_control(chip, false);
+			if (rc < 0)
+				goto error;
+		}
+
+		if (chip->ab.pd_control) {
+			rc = qpnp_ab_pd_control(chip, false);
+			if (rc < 0)
+				goto error;
+		}
+
 		val = 0xF1;
 	} else if (mode == REGULATOR_MODE_STANDBY) {
 		/* Restore the normal configuration without any delay */
 		rc = qpnp_ibb_aod_config(chip, false);
 		if (rc < 0)
 			goto error;
+
+		if (chip->ibb.pd_control) {
+			rc = qpnp_ibb_pd_control(chip, true);
+			if (rc < 0)
+				goto error;
+		}
+
+		if (chip->ab.pd_control) {
+			rc = qpnp_ab_pd_control(chip, true);
+			if (rc < 0)
+				goto error;
+		}
 	}
 
 	rc = qpnp_amoled_write(chip, AB_LDO_SW_DBG_CTL(chip), &val, 1);
@@ -557,7 +681,6 @@
 {
 	struct device_node *temp, *node = chip->dev->of_node;
 	const __be32 *prop_addr;
-	bool swire_control;
 	int rc = 0;
 	u32 base, val;
 
@@ -579,23 +702,24 @@
 		case OLEDB_PERIPH_TYPE:
 			chip->oledb_base = base;
 			chip->oledb.vreg.node = temp;
-			swire_control = of_property_read_bool(temp,
-						"qcom,swire-control");
-			chip->oledb.swire_control = swire_control;
+			chip->oledb.swire_control = of_property_read_bool(temp,
+							"qcom,swire-control");
 			break;
 		case AB_PERIPH_TYPE:
 			chip->ab_base = base;
 			chip->ab.vreg.node = temp;
-			swire_control = of_property_read_bool(temp,
-						"qcom,swire-control");
-			chip->ab.swire_control = swire_control;
+			chip->ab.swire_control = of_property_read_bool(temp,
+							"qcom,swire-control");
+			chip->ab.pd_control = of_property_read_bool(temp,
+							"qcom,aod-pd-control");
 			break;
 		case IBB_PERIPH_TYPE:
 			chip->ibb_base = base;
 			chip->ibb.vreg.node = temp;
-			swire_control = of_property_read_bool(temp,
-						"qcom,swire-control");
-			chip->ibb.swire_control = swire_control;
+			chip->ibb.swire_control = of_property_read_bool(temp,
+							"qcom,swire-control");
+			chip->ibb.pd_control = of_property_read_bool(temp,
+							"qcom,aod-pd-control");
 			break;
 		default:
 			pr_err("Unknown peripheral type 0x%x\n", val);
@@ -687,7 +811,19 @@
 	.probe		= qpnp_amoled_regulator_probe,
 	.remove		= qpnp_amoled_regulator_remove,
 };
-module_platform_driver(qpnp_amoled_regulator_driver);
+
+static int __init qpnp_amoled_regulator_init(void)
+{
+	return platform_driver_register(&qpnp_amoled_regulator_driver);
+}
+
+static void __exit qpnp_amoled_regulator_exit(void)
+{
+	platform_driver_unregister(&qpnp_amoled_regulator_driver);
+}
 
 MODULE_DESCRIPTION("QPNP AMOLED regulator driver");
 MODULE_LICENSE("GPL v2");
+
+arch_initcall(qpnp_amoled_regulator_init);
+module_exit(qpnp_amoled_regulator_exit);
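
The reworked qpnp_ab_poll_vreg_ok() is a small debounce state machine: once VREG_OK goes high it is re-sampled for roughly 80 ms (8 polls of 10 ms), a transient drop restarts monitoring once with a longer ~100 ms window, and only a level that stays high is accepted as a valid SWIRE pulse. A simplified sketch of that logic; sample_vreg_ok() is a placeholder for the dummy-write-then-read of AB_STATUS1:

	static bool sample_vreg_ok(void);	/* placeholder: read AB_STATUS1 */

	static int wait_vreg_ok_stable(void)
	{
		int tries = 8;			/* 8 x 10 ms = ~80 ms monitoring window */
		bool retried = false;

		while (tries--) {
			if (!sample_vreg_ok()) {
				if (retried)
					return -ETIMEDOUT;	/* dropped twice: not a valid pulse */
				retried = true;
				tries = 10;			/* poll once more, ~100 ms */
				continue;
			}
			usleep_range(10000, 10001);
		}
		return 0;			/* stayed high: valid SWIRE pulse */
	}
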
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 770596e..d859315 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -22,6 +22,7 @@
 #include <linux/kthread.h>
 #include <linux/mailbox_client.h>
 #include <linux/ipc_logging.h>
+#include <soc/qcom/subsystem_notif.h>
 
 #include "rpmsg_internal.h"
 #include "qcom_glink_native.h"
@@ -284,13 +285,22 @@
 {
 	struct glink_channel *channel = container_of(ref, struct glink_channel,
 						     refcount);
+	struct glink_core_rx_intent *tmp;
 	unsigned long flags;
+	int iid;
 
 	CH_INFO(channel, "\n");
 	wake_up(&channel->intent_req_event);
 
 	spin_lock_irqsave(&channel->intent_lock, flags);
+	idr_for_each_entry(&channel->liids, tmp, iid) {
+		kfree(tmp->data);
+		kfree(tmp);
+	}
 	idr_destroy(&channel->liids);
+
+	idr_for_each_entry(&channel->riids, tmp, iid)
+		kfree(tmp);
 	idr_destroy(&channel->riids);
 	spin_unlock_irqrestore(&channel->intent_lock, flags);
 
@@ -347,6 +357,9 @@
 	if (tlen >= glink->tx_pipe->length)
 		return -EINVAL;
 
+	if (atomic_read(&glink->in_reset))
+		return -ECONNRESET;
+
 	spin_lock_irqsave(&glink->tx_lock, flags);
 
 	while (qcom_glink_tx_avail(glink) < tlen) {
@@ -813,9 +826,11 @@
 static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
 					 u32 cid, size_t size)
 {
-	struct glink_core_rx_intent *intent;
+	struct glink_core_rx_intent *intent = NULL;
+	struct glink_core_rx_intent *tmp;
 	struct glink_channel *channel;
 	unsigned long flags;
+	int iid;
 
 	spin_lock_irqsave(&glink->idr_lock, flags);
 	channel = idr_find(&glink->rcids, cid);
@@ -826,6 +841,19 @@
 		return;
 	}
 
+	spin_lock_irqsave(&channel->intent_lock, flags);
+	idr_for_each_entry(&channel->liids, tmp, iid) {
+		if (tmp->size >= size && tmp->reuse) {
+			intent = tmp;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&channel->intent_lock, flags);
+	if (intent) {
+		qcom_glink_send_intent_req_ack(glink, channel, !!intent);
+		return;
+	}
+
 	intent = qcom_glink_alloc_intent(glink, channel, size, false);
 	if (intent)
 		qcom_glink_advertise_intent(glink, channel, intent);
@@ -1362,9 +1390,6 @@
 	channel->ept.cb = NULL;
 	spin_unlock_irqrestore(&channel->recv_lock, flags);
 
-	/* Decouple the potential rpdev from the channel */
-	channel->rpdev = NULL;
-
 	qcom_glink_send_close_req(glink, channel);
 }
 
@@ -1426,6 +1451,8 @@
 	} __packed req;
 	int ret;
 	unsigned long flags;
+	int chunk_size = len;
+	int left_size = 0;
 
 	if (!glink->intentless) {
 		while (!intent) {
@@ -1448,6 +1475,9 @@
 			if (intent)
 				break;
 
+			if (atomic_read(&glink->in_reset))
+				return -ECONNRESET;
+
 			if (!wait)
 				return -EBUSY;
 
@@ -1459,18 +1489,46 @@
 		iid = intent->id;
 	}
 
+	if (wait && (chunk_size > SZ_8K)) {
+		chunk_size = SZ_8K;
+		left_size = len - chunk_size;
+	}
 	req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
 	req.msg.param1 = cpu_to_le16(channel->lcid);
 	req.msg.param2 = cpu_to_le32(iid);
-	req.chunk_size = cpu_to_le32(len);
-	req.left_size = cpu_to_le32(0);
+	req.chunk_size = cpu_to_le32(chunk_size);
+	req.left_size = cpu_to_le32(left_size);
 
-	ret = qcom_glink_tx(glink, &req, sizeof(req), data, len, wait);
+	ret = qcom_glink_tx(glink, &req, sizeof(req), data, chunk_size, wait);
 
 	/* Mark intent available if we failed */
-	if (ret && intent)
+	if (ret && intent) {
 		intent->in_use = false;
+		return ret;
+	}
 
+	while (left_size > 0) {
+		data = (void *)((char *)data + chunk_size);
+		chunk_size = left_size;
+		if (chunk_size > SZ_8K)
+			chunk_size = SZ_8K;
+		left_size -= chunk_size;
+
+		req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA_CONT);
+		req.msg.param1 = cpu_to_le16(channel->lcid);
+		req.msg.param2 = cpu_to_le32(iid);
+		req.chunk_size = cpu_to_le32(chunk_size);
+		req.left_size = cpu_to_le32(left_size);
+
+		ret = qcom_glink_tx(glink, &req, sizeof(req), data,
+				    chunk_size, wait);
+
+		/* Mark intent available if we failed */
+		if (ret && intent) {
+			intent->in_use = false;
+			break;
+		}
+	}
 	return ret;
 }
 
@@ -1614,7 +1672,7 @@
 
 		ret = rpmsg_register_device(rpdev);
 		if (ret)
-			goto free_rpdev;
+			goto rcid_remove;
 
 		channel->rpdev = rpdev;
 	}
@@ -1622,9 +1680,6 @@
 
 	return 0;
 
-free_rpdev:
-	CH_INFO(channel, "free_rpdev\n");
-	kfree(rpdev);
 rcid_remove:
 	CH_INFO(channel, "rcid_remove\n");
 	spin_lock_irqsave(&glink->idr_lock, flags);
@@ -1663,6 +1718,7 @@
 
 		rpmsg_unregister_device(glink->dev, &chinfo);
 	}
+	channel->rpdev = NULL;
 
 	qcom_glink_send_close_ack(glink, channel->rcid);
 
@@ -1676,6 +1732,7 @@
 
 static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
 {
+	struct rpmsg_channel_info chinfo;
 	struct glink_channel *channel;
 	unsigned long flags;
 
@@ -1691,6 +1748,16 @@
 	channel->lcid = 0;
 	spin_unlock_irqrestore(&glink->idr_lock, flags);
 
+	/* Decouple the potential rpdev from the channel */
+	if (channel->rpdev) {
+		strlcpy(chinfo.name, channel->name, sizeof(chinfo.name));
+		chinfo.src = RPMSG_ADDR_ANY;
+		chinfo.dst = RPMSG_ADDR_ANY;
+
+		rpmsg_unregister_device(glink->dev, &chinfo);
+	}
+	channel->rpdev = NULL;
+
 	kref_put(&channel->refcount, qcom_glink_channel_release);
 }
 
@@ -1816,6 +1883,23 @@
 		dev_err(glink->dev, "failed to set task affinity\n");
 }
 
+static void qcom_glink_notif_reset(void *data)
+{
+	struct qcom_glink *glink = data;
+	struct glink_channel *channel;
+	unsigned long flags;
+	int cid;
+
+	if (!glink)
+		return;
+	atomic_inc(&glink->in_reset);
+
+	spin_lock_irqsave(&glink->idr_lock, flags);
+	idr_for_each_entry(&glink->lcids, channel, cid) {
+		wake_up(&channel->intent_req_event);
+	}
+	spin_unlock_irqrestore(&glink->idr_lock, flags);
+}
 
 struct qcom_glink *qcom_glink_native_probe(struct device *dev,
 					   unsigned long features,
@@ -1874,6 +1958,11 @@
 		return ERR_CAST(glink->task);
 	}
 
+	ret = subsys_register_early_notifier(glink->name, XPORT_LAYER_NOTIF,
+					     qcom_glink_notif_reset, glink);
+	if (ret)
+		dev_err(dev, "failed to register early notif %d\n", ret);
+
 	irq = of_irq_get(dev->of_node, 0);
 	ret = devm_request_irq(dev, irq,
 			       qcom_glink_native_intr,
@@ -1881,7 +1970,7 @@
 			       "glink-native", glink);
 	if (ret) {
 		dev_err(dev, "failed to request IRQ\n");
-		return ERR_PTR(ret);
+		goto unregister;
 	}
 
 	glink->irq = irq;
@@ -1889,8 +1978,10 @@
 	size = of_property_count_u32_elems(dev->of_node, "cpu-affinity");
 	if (size > 0) {
 		arr = kmalloc_array(size, sizeof(u32), GFP_KERNEL);
-		if (!arr)
-			return ERR_PTR(-ENOMEM);
+		if (!arr) {
+			ret = -ENOMEM;
+			goto unregister;
+		}
 		ret = of_property_read_u32_array(dev->of_node, "cpu-affinity",
 						 arr, size);
 		if (!ret)
@@ -1901,7 +1992,7 @@
 	ret = qcom_glink_send_version(glink);
 	if (ret) {
 		dev_err(dev, "failed to send version %d\n", ret);
-		return ERR_PTR(ret);
+		goto unregister;
 	}
 
 	ret = qcom_glink_create_chrdev(glink);
@@ -1911,6 +2002,10 @@
 	glink->ilc = ipc_log_context_create(GLINK_LOG_PAGE_CNT, glink->name, 0);
 
 	return glink;
+
+unregister:
+	subsys_unregister_early_notifier(glink->name, XPORT_LAYER_NOTIF);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(qcom_glink_native_probe);
 
@@ -1928,17 +2023,11 @@
 	int ret;
 	unsigned long flags;
 
-	atomic_inc(&glink->in_reset);
+	subsys_unregister_early_notifier(glink->name, XPORT_LAYER_NOTIF);
+	qcom_glink_notif_reset(glink);
 	disable_irq(glink->irq);
 	cancel_work_sync(&glink->rx_work);
 
-	/* Signal all threads to cancel tx */
-	spin_lock_irqsave(&glink->idr_lock, flags);
-	idr_for_each_entry(&glink->lcids, channel, cid) {
-		wake_up(&channel->intent_req_event);
-	}
-	spin_unlock_irqrestore(&glink->idr_lock, flags);
-
 	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
 	if (ret)
 		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
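
__qcom_glink_send() now fragments large blocking writes: the first fragment goes out as RPM_CMD_TX_DATA with chunk_size capped at SZ_8K and left_size holding the remainder, and every follow-up fragment is an RPM_CMD_TX_DATA_CONT against the same intent id until left_size reaches zero. A standalone sketch of the fragmentation arithmetic; send_fragment() stands in for qcom_glink_tx() and the two command labels for the RPM_CMD_* opcodes:

	static int send_fragment(u16 cmd, const char *data, size_t chunk, size_t left);

	static int send_chunked(const char *data, size_t len)
	{
		size_t chunk = min_t(size_t, len, SZ_8K);
		size_t left = len - chunk;
		int ret;

		ret = send_fragment(CMD_TX_DATA, data, chunk, left);
		while (!ret && left) {
			data += chunk;
			chunk = min_t(size_t, left, SZ_8K);
			left -= chunk;
			ret = send_fragment(CMD_TX_DATA_CONT, data, chunk, left);
		}
		return ret;
	}
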
diff --git a/drivers/rpmsg/qcom_glink_rpm.c b/drivers/rpmsg/qcom_glink_rpm.c
index f64f45d..baadf7d 100644
--- a/drivers/rpmsg/qcom_glink_rpm.c
+++ b/drivers/rpmsg/qcom_glink_rpm.c
@@ -338,7 +338,7 @@
 {
 	return platform_driver_register(&glink_rpm_driver);
 }
-subsys_initcall(glink_rpm_init);
+postcore_initcall(glink_rpm_init);
 
 static void __exit glink_rpm_exit(void)
 {
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index ad03e2f..5808a1e 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -393,7 +393,7 @@
 	alrm->time.tm_min  = bcd2bin(alarmvals[3] & 0x7f);
 	alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
 	alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
-	alrm->time.tm_mon  = bcd2bin(alarmvals[0] & 0x3f);
+	alrm->time.tm_mon  = bcd2bin(alarmvals[0] & 0x3f) - 1;
 
 	alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
 	alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 194ffd5..039b207 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -60,7 +60,9 @@
 
 static void __ref sclp_cpu_change_notify(struct work_struct *work)
 {
+	lock_device_hotplug();
 	smp_rescan_cpus();
+	unlock_device_hotplug();
 }
 
 static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f039266..a57b969 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -249,7 +249,8 @@
 static inline int ap_test_config_card_id(unsigned int id)
 {
 	if (!ap_configuration)	/* QCI not supported */
-		return 1;
+		/* only ids 0...3F may be probed */
+		return id < 0x40 ? 1 : 0;
 	return ap_test_config(ap_configuration->apm, id);
 }
 
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 6f7ebc1..2e1a27b 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -52,6 +52,7 @@
 #define REP82_ERROR_FORMAT_FIELD	    0x29
 #define REP82_ERROR_INVALID_COMMAND	    0x30
 #define REP82_ERROR_MALFORMED_MSG	    0x40
+#define REP82_ERROR_INVALID_SPECIAL_CMD	    0x41
 #define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
 #define REP82_ERROR_RESERVED_FIELDO	    0x50 /* old value	*/
 #define REP82_ERROR_WORD_ALIGNMENT	    0x60
@@ -90,6 +91,7 @@
 	case REP88_ERROR_MESSAGE_MALFORMD:
 	case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
 	case REP82_ERROR_INVALID_DOMAIN_PENDING:
+	case REP82_ERROR_INVALID_SPECIAL_CMD:
 	//   REP88_ERROR_INVALID_KEY		// '82' CEX2A
 	//   REP88_ERROR_OPERAND		// '84' CEX2A
 	//   REP88_ERROR_OPERAND_EVEN_MOD	// '85' CEX2A
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 970654f..2d1f6a5 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -22,6 +22,7 @@
 #include <linux/hashtable.h>
 #include <linux/ip.h>
 #include <linux/refcount.h>
+#include <linux/workqueue.h>
 
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b03515d..56aacf3 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -565,6 +565,7 @@
 		QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
 			"rc=%i\n", dev_name(&card->gdev->dev), rc);
 		atomic_set(&channel->irq_pending, 0);
+		qeth_release_buffer(channel, iob);
 		card->read_or_write_problem = 1;
 		qeth_schedule_recovery(card);
 		wake_up(&card->wait_q);
@@ -1187,6 +1188,8 @@
 		rc = qeth_get_problem(cdev, irb);
 		if (rc) {
 			card->read_or_write_problem = 1;
+			if (iob)
+				qeth_release_buffer(iob->channel, iob);
 			qeth_clear_ipacmd_list(card);
 			qeth_schedule_recovery(card);
 			goto out;
@@ -1852,6 +1855,7 @@
 		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
 		atomic_set(&channel->irq_pending, 0);
+		qeth_release_buffer(channel, iob);
 		wake_up(&card->wait_q);
 		return rc;
 	}
@@ -1923,6 +1927,7 @@
 			rc);
 		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
 		atomic_set(&channel->irq_pending, 0);
+		qeth_release_buffer(channel, iob);
 		wake_up(&card->wait_q);
 		return rc;
 	}
@@ -2110,6 +2115,7 @@
 	}
 	reply = qeth_alloc_reply(card);
 	if (!reply) {
+		qeth_release_buffer(channel, iob);
 		return -ENOMEM;
 	}
 	reply->callback = reply_cb;
@@ -2448,11 +2454,12 @@
 	return 0;
 }
 
-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
+static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
 {
 	if (!q)
 		return;
 
+	qeth_clear_outq_buffers(q, 1);
 	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
 	kfree(q);
 }
@@ -2526,10 +2533,8 @@
 		card->qdio.out_qs[i]->bufs[j] = NULL;
 	}
 out_freeoutq:
-	while (i > 0) {
-		qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
-		qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
-	}
+	while (i > 0)
+		qeth_free_output_queue(card->qdio.out_qs[--i]);
 	kfree(card->qdio.out_qs);
 	card->qdio.out_qs = NULL;
 out_freepool:
@@ -2562,10 +2567,8 @@
 	qeth_free_buffer_pool(card);
 	/* free outbound qdio_qs */
 	if (card->qdio.out_qs) {
-		for (i = 0; i < card->qdio.no_out_queues; ++i) {
-			qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
-			qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
-		}
+		for (i = 0; i < card->qdio.no_out_queues; i++)
+			qeth_free_output_queue(card->qdio.out_qs[i]);
 		kfree(card->qdio.out_qs);
 		card->qdio.out_qs = NULL;
 	}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 76b2fba..b7513c5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -854,6 +854,8 @@
 
 	if (cgdev->state == CCWGROUP_ONLINE)
 		qeth_l2_set_offline(cgdev);
+
+	cancel_work_sync(&card->close_dev_work);
 	if (qeth_netdev_is_registered(card->dev))
 		unregister_netdev(card->dev);
 }
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b7f6a83..7f71ca0 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2611,6 +2611,7 @@
 	if (cgdev->state == CCWGROUP_ONLINE)
 		qeth_l3_set_offline(cgdev);
 
+	cancel_work_sync(&card->close_dev_work);
 	if (qeth_netdev_is_registered(card->dev))
 		unregister_netdev(card->dev);
 	qeth_l3_clear_ip_htable(card, 0);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 94f4d8f..d1b531f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -275,16 +275,16 @@
  */
 int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 {
-	while (atomic_read(&adapter->stat_miss) > 0)
+	while (atomic_add_unless(&adapter->stat_miss, -1, 0))
 		if (zfcp_fsf_status_read(adapter->qdio)) {
+			atomic_inc(&adapter->stat_miss); /* undo add -1 */
 			if (atomic_read(&adapter->stat_miss) >=
 			    adapter->stat_read_buf_num) {
 				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
 				return 1;
 			}
 			break;
-		} else
-			atomic_dec(&adapter->stat_miss);
+		}
 	return 0;
 }
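
The zfcp fix replaces a read-then-decrement pair with atomic_add_unless(&stat_miss, -1, 0), which refuses to decrement once the counter is zero and so closes the underflow window; a failed refill puts the claimed unit back with atomic_inc(). A userspace analogue of the claim/undo pattern in C11 atomics; refill_one() is a stand-in for zfcp_fsf_status_read():

	#include <stdatomic.h>
	#include <stdbool.h>

	static int refill_one(void);	/* placeholder: 0 on success */

	/* Atomically claim one unit of work unless the counter is already zero. */
	static bool take_unless_zero(atomic_int *v)
	{
		int old = atomic_load(v);

		while (old > 0)
			if (atomic_compare_exchange_weak(v, &old, old - 1))
				return true;
		return false;
	}

	static void refill_all(atomic_int *stat_miss)
	{
		while (take_unless_zero(stat_miss)) {
			if (refill_one() != 0) {
				atomic_fetch_add(stat_miss, 1);	/* undo the claim */
				break;
			}
		}
	}
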
 
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 6be77b3..ac79f20 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -295,7 +295,7 @@
 	if(tpnt->sdev_attrs == NULL)
 		tpnt->sdev_attrs = NCR_700_dev_attrs;
 
-	memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
+	memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
 				 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
 	if(memory == NULL) {
 		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6e1b022..3236240 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1304,8 +1304,9 @@
 				  ADD : DELETE;
 				break;
 			}
-			case AifBuManagerEvent:
-				aac_handle_aif_bu(dev, aifcmd);
+			break;
+		case AifBuManagerEvent:
+			aac_handle_aif_bu(dev, aifcmd);
 			break;
 		}
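
This aacraid hunk fixes a classic switch hazard: the case AifBuManagerEvent label followed the previous case's compound statement with no intervening break (and at misleading indentation), so the earlier case fell through into aac_handle_aif_bu(). An illustration of the hazard with placeholder names:

	switch (event) {
	case EVENT_A: {
		handle_a();
	}			/* missing break here ... */
	case EVENT_B:		/* ... so EVENT_A also runs handle_b() */
		handle_b();
		break;
	}
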
 
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 1391e5f..702da90 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -281,7 +281,7 @@
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			asd_dev_rev[asd_ha->revision_id]);
 }
-static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
+static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
 
 static ssize_t asd_show_dev_bios_build(struct device *dev,
 				       struct device_attribute *attr,char *buf)
@@ -478,7 +478,7 @@
 {
 	int err;
 
-	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
 	if (err)
 		return err;
 
@@ -500,13 +500,13 @@
 err_biosb:
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
 err_rev:
-	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
 	return err;
 }
 
 static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
 {
-	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f000458..3f97ec4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2371,7 +2371,7 @@
 	if (!interface) {
 		printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
 		rc = -ENOMEM;
-		goto ifput_err;
+		goto netdev_err;
 	}
 
 	if (is_vlan_dev(netdev)) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 350257c..bc9f2a2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -240,6 +240,7 @@
 		return NULL;
 	}
 
+	cmgr->hba = hba;
 	cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
 				  GFP_KERNEL);
 	if (!cmgr->free_list) {
@@ -256,7 +257,6 @@
 		goto mem_err;
 	}
 
-	cmgr->hba = hba;
 	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
 
 	for (i = 0; i < arr_sz; i++)  {
@@ -295,7 +295,7 @@
 
 	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
 	mem_size = num_ios * sizeof(struct io_bdt *);
-	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+	cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
 	if (!cmgr->io_bdt_pool) {
 		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
 		goto mem_err;
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 8a00403..9bd2bd8 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -594,12 +594,12 @@
 	}
 
 	fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+	ln->fc_vport = fc_vport;
 
 	if (csio_fcoe_alloc_vnp(hw, ln))
 		goto error;
 
 	*(struct csio_lnode **)fc_vport->dd_data = ln;
-	ln->fc_vport = fc_vport;
 	if (!fc_vport->node_name)
 		fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
 	if (!fc_vport->port_name)
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index bf07735..0fc382c 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1144,7 +1144,7 @@
 }
 
 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
-				       unsigned int tid, int pg_idx, bool reply)
+				unsigned int tid, int pg_idx)
 {
 	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
 					GFP_KERNEL);
@@ -1160,7 +1160,7 @@
 	req = (struct cpl_set_tcb_field *)skb->head;
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply = V_NO_REPLY(reply ? 0 : 1);
+	req->reply = V_NO_REPLY(1);
 	req->cpu_idx = 0;
 	req->word = htons(31);
 	req->mask = cpu_to_be64(0xF0000000);
@@ -1177,11 +1177,10 @@
  * @tid: connection id
  * @hcrc: header digest enabled
  * @dcrc: data digest enabled
- * @reply: request reply from h/w
  * set up the iscsi digest settings for a connection identified by tid
  */
 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
-			     int hcrc, int dcrc, int reply)
+				 int hcrc, int dcrc)
 {
 	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
 					GFP_KERNEL);
@@ -1197,7 +1196,7 @@
 	req = (struct cpl_set_tcb_field *)skb->head;
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply = V_NO_REPLY(reply ? 0 : 1);
+	req->reply = V_NO_REPLY(1);
 	req->cpu_idx = 0;
 	req->word = htons(31);
 	req->mask = cpu_to_be64(0x0F000000);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 211da1d..689d6c8 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1517,16 +1517,22 @@
 	struct cxgbi_sock *csk;
 
 	csk = lookup_tid(t, tid);
-	if (!csk)
+	if (!csk) {
 		pr_err("can't find conn. for tid %u.\n", tid);
+		return;
+	}
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
 		csk, csk->state, csk->flags, csk->tid, rpl->status);
 
-	if (rpl->status != CPL_ERR_NONE)
+	if (rpl->status != CPL_ERR_NONE) {
 		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
 			csk, tid, rpl->status);
+		csk->err = -EINVAL;
+	}
+
+	complete(&csk->cmpl);
 
 	__kfree_skb(skb);
 }
@@ -1903,7 +1909,7 @@
 }
 
 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
-				int pg_idx, bool reply)
+				int pg_idx)
 {
 	struct sk_buff *skb;
 	struct cpl_set_tcb_field *req;
@@ -1919,7 +1925,7 @@
 	req = (struct cpl_set_tcb_field *)skb->head;
 	INIT_TP_WR(req, csk->tid);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
-	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
 	req->word_cookie = htons(0);
 	req->mask = cpu_to_be64(0x3 << 8);
 	req->val = cpu_to_be64(pg_idx << 8);
@@ -1928,12 +1934,15 @@
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
 
+	reinit_completion(&csk->cmpl);
 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
-	return 0;
+	wait_for_completion(&csk->cmpl);
+
+	return csk->err;
 }
 
 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
-				 int hcrc, int dcrc, int reply)
+				 int hcrc, int dcrc)
 {
 	struct sk_buff *skb;
 	struct cpl_set_tcb_field *req;
@@ -1951,7 +1960,7 @@
 	req = (struct cpl_set_tcb_field *)skb->head;
 	INIT_TP_WR(req, tid);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
 	req->word_cookie = htons(0);
 	req->mask = cpu_to_be64(0x3 << 4);
 	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
@@ -1961,8 +1970,11 @@
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
 
+	reinit_completion(&csk->cmpl);
 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
-	return 0;
+	wait_for_completion(&csk->cmpl);
+
+	return csk->err;
 }
 
 static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 3f3af5e..f2c561c 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -573,6 +573,7 @@
 	skb_queue_head_init(&csk->receive_queue);
 	skb_queue_head_init(&csk->write_queue);
 	timer_setup(&csk->retry_timer, NULL, 0);
+	init_completion(&csk->cmpl);
 	rwlock_init(&csk->callback_lock);
 	csk->cdev = cdev;
 	csk->flags = 0;
@@ -2252,14 +2253,14 @@
 		if (!err && conn->hdrdgst_en)
 			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
 							conn->hdrdgst_en,
-							conn->datadgst_en, 0);
+							conn->datadgst_en);
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
 		err = iscsi_set_param(cls_conn, param, buf, buflen);
 		if (!err && conn->datadgst_en)
 			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
 							conn->hdrdgst_en,
-							conn->datadgst_en, 0);
+							conn->datadgst_en);
 		break;
 	case ISCSI_PARAM_MAX_R2T:
 		return iscsi_tcp_set_max_r2t(conn, buf);
@@ -2385,7 +2386,7 @@
 
 	ppm = csk->cdev->cdev2ppm(csk->cdev);
 	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
-					     ppm->tformat.pgsz_idx_dflt, 0);
+					     ppm->tformat.pgsz_idx_dflt);
 	if (err < 0)
 		return err;
 
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index dcb190e..3bf7414 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -146,6 +146,7 @@
 	struct sk_buff_head receive_queue;
 	struct sk_buff_head write_queue;
 	struct timer_list retry_timer;
+	struct completion cmpl;
 	int err;
 	rwlock_t callback_lock;
 	void *user_data;
@@ -487,9 +488,9 @@
 				  struct cxgbi_ppm *,
 				  struct cxgbi_task_tag_info *);
 	int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
-				unsigned int, int, int, int);
+				    unsigned int, int, int);
 	int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
-				unsigned int, int, bool);
+				   unsigned int, int);
 
 	void (*csk_release_offload_resources)(struct cxgbi_sock *);
 	int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
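
The cxgb4i change converts SET_TCB_FIELD from fire-and-forget into a synchronous request: the work request is now sent with NO_REPLY_V(0), the caller blocks on the csk->cmpl completion (initialized once in cxgbi_sock_create), and the CPL_SET_TCB_RPL handler records any failure in csk->err before signalling. The general shape of the pattern; send_wr() and the handler wiring are placeholders:

	struct sync_req {
		struct completion cmpl;	/* init_completion() once at creation */
		int err;
	};

	static void send_wr(struct sync_req *req);	/* placeholder: queue the WR */

	static int do_sync_request(struct sync_req *req)
	{
		req->err = 0;
		reinit_completion(&req->cmpl);	/* the object is reused across calls */
		send_wr(req);			/* hardware replies asynchronously */
		wait_for_completion(&req->cmpl);
		return req->err;
	}

	/* Invoked from the reply handler when CPL_SET_TCB_RPL arrives. */
	static void on_reply(struct sync_req *req, int status)
	{
		if (status)
			req->err = -EINVAL;
		complete(&req->cmpl);
	}
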
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 6637116..f987c40 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3694,6 +3694,7 @@
 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
 
 	cfg = shost_priv(host);
+	cfg->state = STATE_PROBING;
 	cfg->host = host;
 	rc = alloc_mem(cfg);
 	if (rc) {
@@ -3782,6 +3783,7 @@
 	return rc;
 
 out_remove:
+	cfg->state = STATE_PROBED;
 	cxlflash_remove(pdev);
 	goto out;
 }
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 687ff61..3922b17 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -492,7 +492,7 @@
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
 		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
 		hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
-
+		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
 		/* used for 12G negotiate */
 		hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
 	}
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 08c7b1e..dde84f7 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -588,6 +588,13 @@
 	shost->max_lun = ~0;
 	shost->max_cmd_len = MAX_COMMAND_SIZE;
 
+	/* turn on DIF support */
+	scsi_host_set_prot(shost,
+			   SHOST_DIF_TYPE1_PROTECTION |
+			   SHOST_DIF_TYPE2_PROTECTION |
+			   SHOST_DIF_TYPE3_PROTECTION);
+	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
 	err = scsi_add_host(shost, &pdev->dev);
 	if (err)
 		goto err_shost;
@@ -675,13 +682,6 @@
 			goto err_host_alloc;
 		}
 		pci_info->hosts[i] = h;
-
-		/* turn on DIF support */
-		scsi_host_set_prot(to_shost(h),
-				   SHOST_DIF_TYPE1_PROTECTION |
-				   SHOST_DIF_TYPE2_PROTECTION |
-				   SHOST_DIF_TYPE3_PROTECTION);
-		scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
 	}
 
 	err = isci_setup_interrupts(pdev);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index be83590..ff943f4 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1726,14 +1726,14 @@
 	    fc_frame_payload_op(fp) != ELS_LS_ACC) {
 		FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	flp = fc_frame_payload_get(fp, sizeof(*flp));
 	if (!flp) {
 		FC_LPORT_DBG(lport, "FLOGI bad response\n");
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1743,7 @@
 		FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
 			     "lport->mfs:%hu\n", mfs, lport->mfs);
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	if (mfs <= lport->mfs) {
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 372387a..1797e47 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -184,7 +184,6 @@
 	struct fc_rport_priv *rdata;
 
 	rdata = container_of(kref, struct fc_rport_priv, kref);
-	WARN_ON(!list_empty(&rdata->peers));
 	kfree_rcu(rdata, rcu);
 }
 EXPORT_SYMBOL(fc_rport_destroy);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index fadc99c..a1551ab 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -829,6 +829,7 @@
 		rphy = sas_end_device_alloc(phy->port);
 		if (!rphy)
 			goto out_free;
+		rphy->identify.phy_identifier = phy_id;
 
 		child->rphy = rphy;
 		get_device(&rphy->dev);
@@ -856,6 +857,7 @@
 
 		child->rphy = rphy;
 		get_device(&rphy->dev);
+		rphy->identify.phy_identifier = phy_id;
 		sas_fill_in_rphy(child, rphy);
 
 		list_add_tail(&child->disco_list_node, &parent->port->disco_list);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4dda969..0d214e6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -242,6 +242,8 @@
 		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
 		if (elscmd == ELS_CMD_FLOGI)
 			icmd->ulpTimeout = FF_DEF_RATOV * 2;
+		else if (elscmd == ELS_CMD_LOGO)
+			icmd->ulpTimeout = phba->fc_ratov;
 		else
 			icmd->ulpTimeout = phba->fc_ratov * 2;
 	} else {
@@ -2682,16 +2684,15 @@
 		goto out;
 	}
 
+	/* The LOGO will not be retried on failure.  A LOGO was
+	 * issued to the remote rport and an ACC, RJT, or no answer are
+	 * all acceptable.  Note the failure and move forward with
+	 * discovery.  The PLOGI will retry.
+	 */
 	if (irsp->ulpStatus) {
-		/* Check for retry */
-		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
-			/* ELS command is being retried */
-			skip_recovery = 1;
-			goto out;
-		}
 		/* LOGO failed */
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-				 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
+				 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
 				 ndlp->nlp_DID, irsp->ulpStatus,
 				 irsp->un.ulpWord[4]);
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
@@ -2737,7 +2738,8 @@
 	 * For any other port type, the rpi is unregistered as an implicit
 	 * LOGO.
 	 */
-	if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
+	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
+	    skip_recovery == 0) {
 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 		spin_lock_irqsave(shost->host_lock, flags);
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -2770,6 +2772,8 @@
  * will be stored into the context1 field of the IOCB for the completion
  * callback function to the LOGO ELS command.
  *
+ * Callers of this routine are expected to unregister the RPI first.
+ *
  * Return code
  *   0 - successfully issued logo
  *   1 - failed to issue logo
@@ -2811,22 +2815,6 @@
 		"Issue LOGO:      did:x%x",
 		ndlp->nlp_DID, 0, 0);
 
-	/*
-	 * If we are issuing a LOGO, we may try to recover the remote NPort
-	 * by issuing a PLOGI later. Even though we issue ELS cmds by the
-	 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
-	 * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
-	 * for that ELS cmd. To avoid this situation, lets get rid of the
-	 * RPI right now, before any ELS cmds are sent.
-	 */
-	spin_lock_irq(shost->host_lock);
-	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
-	spin_unlock_irq(shost->host_lock);
-	if (lpfc_unreg_rpi(vport, ndlp)) {
-		lpfc_els_free_iocb(phba, elsiocb);
-		return 0;
-	}
-
 	phba->fc_stat.elsXmitLOGO++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
 	spin_lock_irq(shost->host_lock);
@@ -2834,7 +2822,6 @@
 	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
 	spin_unlock_irq(shost->host_lock);
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-
 	if (rc == IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_LOGO_SND;
@@ -2842,6 +2829,11 @@
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 	}
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_prev_state = ndlp->nlp_state;
+	spin_unlock_irq(shost->host_lock);
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
 	return 0;
 }
 
@@ -5701,6 +5693,9 @@
 	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
 	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 
+	if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
+		stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	phba->fc_stat.elsXmitLSRJT++;
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -9502,7 +9497,8 @@
 				"rport in state 0x%x\n", ndlp->nlp_state);
 		return;
 	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+	lpfc_printf_log(phba, KERN_ERR,
+			LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
 			"3094 Start rport recovery on shost id 0x%x "
 			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
 			"flags 0x%x\n",
@@ -9515,8 +9511,8 @@
 	 */
 	spin_lock_irqsave(shost->host_lock, flags);
 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
 	spin_unlock_irqrestore(shost->host_lock, flags);
-	lpfc_issue_els_logo(vport, ndlp, 0);
-	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+	lpfc_unreg_rpi(vport, ndlp);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd9bce9..a6619fd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -836,7 +836,9 @@
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		spin_unlock_irq(shost->host_lock);
 		return 0;
 	}
 
@@ -851,7 +853,10 @@
 			return 1;
 		}
 	}
+
+	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+	spin_unlock_irq(shost->host_lock);
 	lpfc_unreg_rpi(vport, ndlp);
 	return 0;
 }
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 918ae18..ca62117 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -297,7 +297,8 @@
 			 lport);
 
 	/* release any threads waiting for the unreg to complete */
-	complete(&lport->lport_unreg_done);
+	if (lport->vport->localport)
+		complete(lport->lport_unreg_cmp);
 }
 
 /* lpfc_nvme_remoteport_delete
@@ -2556,7 +2557,8 @@
  */
 void
 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
-			   struct lpfc_nvme_lport *lport)
+			   struct lpfc_nvme_lport *lport,
+			   struct completion *lport_unreg_cmp)
 {
 #if (IS_ENABLED(CONFIG_NVME_FC))
 	u32 wait_tmo;
@@ -2568,8 +2570,7 @@
 	 */
 	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
 	while (true) {
-		ret = wait_for_completion_timeout(&lport->lport_unreg_done,
-						  wait_tmo);
+		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
 		if (unlikely(!ret)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
 					 "6176 Lport %p Localport %p wait "
@@ -2603,12 +2604,12 @@
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_nvme_ctrl_stat *cstat;
 	int ret;
+	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
 
 	if (vport->nvmei_support == 0)
 		return;
 
 	localport = vport->localport;
-	vport->localport = NULL;
 	lport = (struct lpfc_nvme_lport *)localport->private;
 	cstat = lport->cstat;
 
@@ -2619,13 +2620,14 @@
 	/* lport's rport list is clear.  Unregister
 	 * lport and release resources.
 	 */
-	init_completion(&lport->lport_unreg_done);
+	lport->lport_unreg_cmp = &lport_unreg_cmp;
 	ret = nvme_fc_unregister_localport(localport);
 
 	/* Wait for completion.  This either blocks
 	 * indefinitely or succeeds
 	 */
-	lpfc_nvme_lport_unreg_wait(vport, lport);
+	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
+	vport->localport = NULL;
 	kfree(cstat);
 
 	/* Regardless of the unregister upcall response, clear
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index cfd4719..b234d02 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -50,7 +50,7 @@
 /* Declare nvme-based local and remote port definitions. */
 struct lpfc_nvme_lport {
 	struct lpfc_vport *vport;
-	struct completion lport_unreg_done;
+	struct completion *lport_unreg_cmp;
 	/* Add stats counters here */
 	struct lpfc_nvme_ctrl_stat *cstat;
 	atomic_t fc4NvmeLsRequests;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index b766afe..e2575c8 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1003,7 +1003,8 @@
 	struct lpfc_nvmet_tgtport *tport = targetport->private;
 
 	/* release any threads waiting for the unreg to complete */
-	complete(&tport->tport_unreg_done);
+	if (tport->phba->targetport)
+		complete(tport->tport_unreg_cmp);
 }
 
 static void
@@ -1700,6 +1701,7 @@
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_queue *wq;
 	uint32_t qidx;
+	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
 
 	if (phba->nvmet_support == 0)
 		return;
@@ -1709,9 +1711,9 @@
 			wq = phba->sli4_hba.nvme_wq[qidx];
 			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
 		}
-		init_completion(&tgtp->tport_unreg_done);
+		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
 		nvmet_fc_unregister_targetport(phba->targetport);
-		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+		wait_for_completion_timeout(&tport_unreg_cmp, 5);
 		lpfc_nvmet_cleanup_io_context(phba);
 	}
 	phba->targetport = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 1aaff63..0ec1082 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -34,7 +34,7 @@
 /* Used for NVME Target */
 struct lpfc_nvmet_tgtport {
 	struct lpfc_hba *phba;
-	struct completion tport_unreg_done;
+	struct completion *tport_unreg_cmp;
 
 	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
 	atomic_t rcv_ls_req_in;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index be2bac9..a490e63 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -14221,7 +14221,8 @@
 			hw_page_size))/hw_page_size;
 
 	/* If needed, Adjust page count to match the max the adapter supports */
-	if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
+	if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
+	    (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
 		queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
 
 	INIT_LIST_HEAD(&queue->list);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 59ecbb3..a336285 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1266,7 +1266,7 @@
 
 	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
 		ld = MR_TargetIdToLdGet(ldCount, drv_map);
-		if (ld >= MAX_LOGICAL_DRIVES_EXT) {
+		if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
 			lbInfo[ldCount].loadBalanceFlag = 0;
 			continue;
 		}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index c7f95ba..f45c54f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2832,7 +2832,7 @@
 		device_id < instance->fw_supported_vd_count)) {
 
 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
-		if (ld >= instance->fw_supported_vd_count)
+		if (ld >= instance->fw_supported_vd_count - 1)
 			fp_possible = 0;
 		else {
 			raid = MR_LdRaidGet(ld, local_map_ptr);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 59d7844..b59bba3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3344,8 +3344,9 @@
 static inline void
 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
 {
+	wmb();
 	__raw_writeq(b, addr);
-	mmiowb();
+	barrier();
 }
 #else
 static inline void
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 53133cf..622832e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -9809,6 +9809,7 @@
 
 	/* release all the volumes */
 	_scsih_ir_shutdown(ioc);
+	sas_remove_host(shost);
 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
 	    list) {
 		if (raid_device->starget) {
@@ -9851,7 +9852,6 @@
 		ioc->sas_hba.num_phys = 0;
 	}
 
-	sas_remove_host(shost);
 	mpt3sas_base_detach(ioc);
 	spin_lock(&gioc_lock);
 	list_del(&ioc->list);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index f8cc267..20d3606 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -834,10 +834,13 @@
 			    mpt3sas_port->remote_identify.sas_address,
 			    mpt3sas_phy->phy_id);
 		mpt3sas_phy->phy_belongs_to_port = 0;
-		sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+		if (!ioc->remove_host)
+			sas_port_delete_phy(mpt3sas_port->port,
+						mpt3sas_phy->phy);
 		list_del(&mpt3sas_phy->port_siblings);
 	}
-	sas_port_delete(mpt3sas_port->port);
+	if (!ioc->remove_host)
+		sas_port_delete(mpt3sas_port->port);
 	kfree(mpt3sas_port);
 }
 
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 2f0a4f2..d4821b9 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -954,6 +954,7 @@
 
 	qedi_ep = ep->dd_data;
 	if (qedi_ep->state == EP_STATE_IDLE ||
+	    qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
 	    qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
 		return -1;
 
@@ -1036,6 +1037,7 @@
 
 	switch (qedi_ep->state) {
 	case EP_STATE_OFLDCONN_START:
+	case EP_STATE_OFLDCONN_NONE:
 		goto ep_release_conn;
 	case EP_STATE_OFLDCONN_FAILED:
 			break;
@@ -1226,6 +1228,7 @@
 
 	if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
 		QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+		qedi_ep->state = EP_STATE_OFLDCONN_NONE;
 		ret = -EIO;
 		goto set_path_exit;
 	}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 1126077..892d70d 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -59,6 +59,7 @@
 	EP_STATE_OFLDCONN_FAILED        = 0x2000,
 	EP_STATE_CONNECT_FAILED         = 0x4000,
 	EP_STATE_DISCONN_TIMEDOUT       = 0x8000,
+	EP_STATE_OFLDCONN_NONE          = 0x10000,
 };
 
 struct qedi_conn;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e5bd035..4de740d 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -952,6 +952,9 @@
 		cls_sess = iscsi_conn_to_session(cls_conn);
 		sess = cls_sess->dd_data;
 
+		if (!iscsi_is_session_online(cls_sess))
+			continue;
+
 		if (pri_ctrl_flags) {
 			if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
 			    !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ae9fd2d..42b8f0d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4808,10 +4808,10 @@
 			fcport->d_id = e->u.new_sess.id;
 			fcport->flags |= FCF_FABRIC_DEVICE;
 			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
-			if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
+			if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
 				fcport->fc4_type = FC4_TYPE_FCP_SCSI;
 
-			if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
+			if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
 				fcport->fc4_type = FC4_TYPE_OTHER;
 				fcport->fc4f_nvme = FC4_TYPE_NVME;
 			}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0e13349..575445c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -7237,6 +7237,8 @@
 
 	rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
 					   fw_ddb_entry);
+	if (rc)
+		goto free_sess;
 
 	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
 		   __func__, fnode_sess->dev.kobj.name);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 60bcc6d..65305b3 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -62,7 +62,7 @@
 
 /* make sure inq_product_rev string corresponds to this version */
 #define SDEBUG_VERSION "0188"	/* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20180128";
+static const char *sdebug_version_date = "20190125";
 
 #define MY_NAME "scsi_debug"
 
@@ -735,7 +735,7 @@
 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
 }
 
-static void *fake_store(unsigned long long lba)
+static void *lba2fake_store(unsigned long long lba)
 {
 	lba = do_div(lba, sdebug_store_sectors);
 
@@ -2514,8 +2514,8 @@
 	return ret;
 }
 
-/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into fake_store(lba,num) and return true. If comparison fails then
+/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
+ * arr into lba2fake_store(lba,num) and return true. If comparison fails then
  * return false. */
 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
 {
@@ -2643,7 +2643,7 @@
 		if (sdt->app_tag == cpu_to_be16(0xffff))
 			continue;
 
-		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+		ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
 		if (ret) {
 			dif_errors++;
 			return ret;
@@ -3261,10 +3261,12 @@
 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
 			   u32 ei_lba, bool unmap, bool ndob)
 {
+	int ret;
 	unsigned long iflags;
 	unsigned long long i;
-	int ret;
-	u64 lba_off;
+	u32 lb_size = sdebug_sector_size;
+	u64 block, lbaa;
+	u8 *fs1p;
 
 	ret = check_device_access_params(scp, lba, num);
 	if (ret)
@@ -3276,31 +3278,30 @@
 		unmap_region(lba, num);
 		goto out;
 	}
-
-	lba_off = lba * sdebug_sector_size;
+	lbaa = lba;
+	block = do_div(lbaa, sdebug_store_sectors);
 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
+	fs1p = fake_storep + (block * lb_size);
 	if (ndob) {
-		memset(fake_storep + lba_off, 0, sdebug_sector_size);
+		memset(fs1p, 0, lb_size);
 		ret = 0;
 	} else
-		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
-					  sdebug_sector_size);
+		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
 
 	if (-1 == ret) {
 		write_unlock_irqrestore(&atomic_rw, iflags);
 		return DID_ERROR << 16;
-	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
+	} else if (sdebug_verbose && !ndob && (ret < lb_size))
 		sdev_printk(KERN_INFO, scp->device,
 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
-			    my_name, "write same",
-			    sdebug_sector_size, ret);
+			    my_name, "write same", lb_size, ret);
 
 	/* Copy first sector to remaining blocks */
-	for (i = 1 ; i < num ; i++)
-		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
-		       fake_storep + lba_off,
-		       sdebug_sector_size);
-
+	for (i = 1 ; i < num ; i++) {
+		lbaa = lba + i;
+		block = do_div(lbaa, sdebug_store_sectors);
+		memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+	}
 	if (scsi_debug_lbp())
 		map_region(lba, num);
 out:
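
The resp_write_same() rework above replaces the raw lba * sector_size offset with
a do_div()-based wraparound into the fake store, so WRITE SAME requests past the
end of the store wrap instead of indexing out of bounds (and memmove() tolerates
the overlap wrapping can introduce). A minimal sketch of the mapping, with the
store size assumed:

#include <stdint.h>

static const uint64_t store_sectors = 0x8000;	/* assumed store size */

/* Kernel do_div(x, n) divides x in place and returns the remainder;
 * plain C's % expresses the same wraparound mapping here. */
static uint64_t lba_to_block(uint64_t lba)
{
	return lba % store_sectors;
}

Because lba and lba + store_sectors alias the same backing memory, the copy loop
above recomputes the block on every iteration rather than assuming contiguity.
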
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e229f2f..c678bf9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -761,6 +761,7 @@
 		set_host_byte(cmd, DID_OK);
 		return BLK_STS_TARGET;
 	case DID_NEXUS_FAILURE:
+		set_host_byte(cmd, DID_OK);
 		return BLK_STS_NEXUS;
 	case DID_ALLOC_FAILURE:
 		set_host_byte(cmd, DID_OK);
@@ -2244,6 +2245,8 @@
 	if (!shost->use_clustering)
 		q->limits.cluster = 0;
 
+	if (shost->inlinecrypt_support)
+		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
 	/*
 	 * Set a reasonable default alignment:  The larger of 32-byte (dword),
 	 * which is a common minimum for HBAs, and the minimum DMA alignment,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ade9adc..a73cbd8 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -132,6 +132,7 @@
 
 static struct kmem_cache *sd_cdb_cache;
 static mempool_t *sd_cdb_pool;
+static mempool_t *sd_page_pool;
 
 static const char *sd_cache_types[] = {
 	"write through", "none", "write back",
@@ -204,6 +205,12 @@
 	sp = buffer_data[0] & 0x80 ? 1 : 0;
 	buffer_data[0] &= ~0x80;
 
+	/*
+	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
+	 * received mode parameter buffer before doing MODE SELECT.
+	 */
+	data.device_specific = 0;
+
 	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
 			     SD_MAX_RETRIES, &data, &sshdr)) {
 		if (scsi_sense_valid(&sshdr))
@@ -758,9 +765,10 @@
 	unsigned int data_len = 24;
 	char *buf;
 
-	rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
 	if (!rq->special_vec.bv_page)
 		return BLKPREP_DEFER;
+	clear_highpage(rq->special_vec.bv_page);
 	rq->special_vec.bv_offset = 0;
 	rq->special_vec.bv_len = data_len;
 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -791,9 +799,10 @@
 	u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
 	u32 data_len = sdp->sector_size;
 
-	rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
 	if (!rq->special_vec.bv_page)
 		return BLKPREP_DEFER;
+	clear_highpage(rq->special_vec.bv_page);
 	rq->special_vec.bv_offset = 0;
 	rq->special_vec.bv_len = data_len;
 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -821,9 +830,10 @@
 	u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
 	u32 data_len = sdp->sector_size;
 
-	rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
 	if (!rq->special_vec.bv_page)
 		return BLKPREP_DEFER;
+	clear_highpage(rq->special_vec.bv_page);
 	rq->special_vec.bv_offset = 0;
 	rq->special_vec.bv_len = data_len;
 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -1290,7 +1300,7 @@
 	u8 *cmnd;
 
 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
-		__free_page(rq->special_vec.bv_page);
+		mempool_free(rq->special_vec.bv_page, sd_page_pool);
 
 	if (SCpnt->cmnd != scsi_req(rq)->cmd) {
 		cmnd = SCpnt->cmnd;
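
The sd.c changes above swap alloc_page(GFP_ATOMIC | __GFP_ZERO) for a dedicated
page mempool, guaranteeing forward progress for the discard/write-same payload
page under memory pressure; since mempool pages are recycled rather than freshly
allocated, each user must clear the page explicitly. A sketch of the pattern,
with the pool size assumed:

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mempool.h>

static mempool_t *page_pool;

static int pool_init(void)
{
	page_pool = mempool_create_page_pool(8 /* assumed min_nr */, 0);
	return page_pool ? 0 : -ENOMEM;
}

static struct page *get_zeroed_payload_page(void)
{
	struct page *page = mempool_alloc(page_pool, GFP_ATOMIC);

	if (page)
		clear_highpage(page);	/* recycled pages may hold stale data */
	return page;			/* release with mempool_free() */
}
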
@@ -2871,9 +2881,6 @@
 	if (rot == 1) {
 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
-	} else {
-		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
-		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 
 	if (sdkp->device->type == TYPE_ZBC) {
@@ -3010,6 +3017,15 @@
 	if (sdkp->media_present) {
 		sd_read_capacity(sdkp, buffer);
 
+		/*
+		 * set the default to rotational.  All non-rotational devices
+		 * support the block characteristics VPD page, which will
+		 * cause this to be updated correctly and any device which
+		 * doesn't support it should be treated as rotational.
+		 */
+		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+
 		if (scsi_device_supports_vpd(sdp)) {
 			sd_read_block_provisioning(sdkp);
 			sd_read_block_limits(sdkp);
@@ -3546,6 +3562,13 @@
 		goto err_out_cache;
 	}
 
+	sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
+	if (!sd_page_pool) {
+		printk(KERN_ERR "sd: can't init discard page pool\n");
+		err = -ENOMEM;
+		goto err_out_ppool;
+	}
+
 	err = scsi_register_driver(&sd_template.gendrv);
 	if (err)
 		goto err_out_driver;
@@ -3553,6 +3576,9 @@
 	return 0;
 
 err_out_driver:
+	mempool_destroy(sd_page_pool);
+
+err_out_ppool:
 	mempool_destroy(sd_cdb_pool);
 
 err_out_cache:
@@ -3579,6 +3605,7 @@
 
 	scsi_unregister_driver(&sd_template.gendrv);
 	mempool_destroy(sd_cdb_pool);
+	mempool_destroy(sd_page_pool);
 	kmem_cache_destroy(sd_cdb_cache);
 
 	class_unregister(&sd_disk_class);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 2112ea6..3781e81 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -653,6 +653,7 @@
 	u8	driver_version_tag[2];
 	__le16	driver_version_length;
 	char	driver_version[32];
+	u8	dont_write_tag[2];
 	u8	end_tag[2];
 };
 
@@ -682,6 +683,8 @@
 	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
 		sizeof(buffer->driver_version) - 1);
 	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
+	buffer->dont_write_tag[0] = 'D';
+	buffer->dont_write_tag[1] = 'W';
 	buffer->end_tag[0] = 'Z';
 	buffer->end_tag[1] = 'Z';
 
@@ -1181,6 +1184,9 @@
 	if (rc)
 		goto out;
 
+	if (vpd->page_code != CISS_VPD_LV_STATUS)
+		goto out;
+
 	page_length = offsetof(struct ciss_vpd_logical_volume_status,
 		volume_status) + vpd->page_length;
 	if (page_length < sizeof(*vpd))
@@ -2720,6 +2726,9 @@
 		switch (response->header.iu_type) {
 		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
 		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
+			if (io_request->scmd)
+				io_request->scmd->result = 0;
+			/* fall through */
 		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
 			break;
 		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
@@ -6686,6 +6695,7 @@
 	 * storage.
 	 */
 	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
+	pqi_free_interrupts(ctrl_info);
 	pqi_reset(ctrl_info);
 	if (rc == 0)
 		return;
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 5141bd4..ca7dfb3 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -59,7 +59,7 @@
 
 #define SIS_CTRL_KERNEL_UP			0x80
 #define SIS_CTRL_KERNEL_PANIC			0x100
-#define SIS_CTRL_READY_TIMEOUT_SECS		30
+#define SIS_CTRL_READY_TIMEOUT_SECS		180
 #define SIS_CTRL_READY_RESUME_TIMEOUT_SECS	90
 #define SIS_CTRL_READY_POLL_INTERVAL_MSECS	10
 
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index d2b042e..95c212c 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -938,8 +938,9 @@
 	else
 		return 0;
 
-	/* Use request LBA as the DUN value */
+	/* Use the request LBA or the bio-provided DUN as the DUN value */
 	if (req->bio) {
+#ifdef CONFIG_PFK
 		if (bio_dun(req->bio)) {
 			/* dun @bio can be split, so we have to adjust offset */
 			*dun = bio_dun(req->bio);
@@ -947,8 +948,11 @@
 			*dun = req->bio->bi_iter.bi_sector;
 			*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
 		}
+#else
+		*dun = req->bio->bi_iter.bi_sector;
+		*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+#endif
 	}
-
 	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
 
 	return ret;
@@ -2191,6 +2195,8 @@
 		dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
 			__func__, err);
 		goto out_variant_clear;
+	} else {
+		hba->host->inlinecrypt_support = 1;
 	}
 
 	host->generic_phy = devm_phy_get(dev, "ufsphy");
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 001b719..d790923 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -141,7 +141,7 @@
 	QUERY_DESC_CONFIGURATION_DEF_SIZE	= 0x90,
 	QUERY_DESC_UNIT_DEF_SIZE		= 0x23,
 	QUERY_DESC_INTERCONNECT_DEF_SIZE	= 0x06,
-	QUERY_DESC_GEOMETRY_DEF_SIZE		= 0x44,
+	QUERY_DESC_GEOMETRY_DEF_SIZE		= 0x48,
 	QUERY_DESC_POWER_DEF_SIZE		= 0x62,
 	QUERY_DESC_HEALTH_DEF_SIZE		= 0x25,
 };
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9d0828f..d739dc8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4044,6 +4044,7 @@
 	int tag;
 	struct completion wait;
 	unsigned long flags;
+	bool has_read_lock = false;
 
 	/*
 	 * May get invoked from shutdown and IOCTL contexts.
@@ -4051,8 +4052,10 @@
 	 * In error recovery context, it may come with lock acquired.
 	 */
 
-	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba)) {
 		down_read(&hba->lock);
+		has_read_lock = true;
+	}
 
 	/*
 	 * Get free slot, sleep if slots are unavailable.
@@ -4090,7 +4093,7 @@
 out_put_tag:
 	ufshcd_put_dev_cmd_tag(hba, tag);
 	wake_up(&hba->dev_cmd.tag_wq);
-	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+	if (has_read_lock)
 		up_read(&hba->lock);
 	return err;
 }
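
The has_read_lock flag above guards against a subtle imbalance: the original code
evaluated the same condition before down_read() and again before up_read(), but
shutdown or error-handling state can change in between, leaving the rwsem taken
and never released (or released without being taken). The general shape of the
fix, as a sketch:

#include <linux/rwsem.h>

static int do_dev_cmd(struct rw_semaphore *lock, bool (*lock_needed)(void))
{
	bool has_read_lock = false;

	if (lock_needed()) {
		down_read(lock);
		has_read_lock = true;
	}

	/* ... issue the command; EH state may change meanwhile ... */

	if (has_read_lock)	/* not: if (lock_needed()) -- may differ now */
		up_read(lock);
	return 0;
}
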
@@ -8308,6 +8311,9 @@
 	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
 				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
 
+	dev_desc->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
+				  desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+
 	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 
 	/* Zero-pad entire buffer for string termination. */
@@ -8329,9 +8335,6 @@
 	/* Null terminate the model string */
 	dev_desc->model[MAX_MODEL_LEN] = '\0';
 
-	dev_desc->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
-				  desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
-
 out:
 	kfree(desc_buf);
 	return err;
@@ -9207,7 +9210,7 @@
 		switch (ioctl_data->idn) {
 		case QUERY_ATTR_IDN_BOOT_LU_EN:
 			index = 0;
-			if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
+			if (!att || att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
 				dev_err(hba->dev,
 					"%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
 					__func__, ioctl_data->opcode,
@@ -10458,6 +10461,8 @@
 	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
+	if (!ret)
+		hba->is_sys_suspended = false;
 	return ret;
 }
 EXPORT_SYMBOL(ufshcd_system_resume);
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index 1418545..bf9123f 100644
--- a/drivers/soc/bcm/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -31,13 +31,17 @@
 
 bool soc_is_brcmstb(void)
 {
+	const struct of_device_id *match;
 	struct device_node *root;
 
 	root = of_find_node_by_path("/");
 	if (!root)
 		return false;
 
-	return of_match_node(brcmstb_machine_match, root) != NULL;
+	match = of_match_node(brcmstb_machine_match, root);
+	of_node_put(root);
+
+	return match != NULL;
 }
 
 u32 brcmstb_get_family_id(void)
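
The soc_is_brcmstb() fix is the standard OF refcount pattern: of_find_node_by_path()
returns the node with an elevated reference count, so every successful lookup must
be paired with of_node_put() before returning. In general form:

#include <linux/of.h>

static bool root_matches(const struct of_device_id *table)
{
	struct device_node *root = of_find_node_by_path("/");
	const struct of_device_id *match;

	if (!root)
		return false;

	match = of_match_node(table, root);
	of_node_put(root);	/* drop the reference taken by the lookup */

	return match != NULL;
}
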
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 8cc0151..a4ac607 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1081,18 +1081,19 @@
 static irqreturn_t portal_isr(int irq, void *ptr)
 {
 	struct qman_portal *p = ptr;
-
-	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
 	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+	u32 clear = 0;
 
 	if (unlikely(!is))
 		return IRQ_NONE;
 
 	/* DQRR-handling if it's interrupt-driven */
-	if (is & QM_PIRQ_DQRI)
+	if (is & QM_PIRQ_DQRI) {
 		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+		clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
+	}
 	/* Handling of anything else that's interrupt-driven */
-	clear |= __poll_portal_slow(p, is);
+	clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
 	qm_out(&p->p, QM_REG_ISR, clear);
 	return IRQ_HANDLED;
 }
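
The portal_isr() change follows the usual rule for write-1-to-clear status
registers: build the clear mask only from sources that were actually serviced,
rather than unconditionally folding in QM_DQAVAIL_MASK, which could acknowledge
pending DQRR work without handling it. An illustrative skeleton with placeholder
bit layouts and handlers:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/irqreturn.h>

#define FAST_SOURCE	BIT(0)		/* placeholder bit layout */
#define FAST_AVAIL	GENMASK(7, 4)
#define SLOW_SOURCES	GENMASK(15, 8)

static void handle_fast(void) { }
static u32 handle_slow(u32 is) { return is & SLOW_SOURCES; }

static irqreturn_t example_isr(void __iomem *isr_reg, u32 irq_sources)
{
	u32 is = readl(isr_reg) & irq_sources;
	u32 clear = 0;

	if (!is)
		return IRQ_NONE;

	if (is & FAST_SOURCE) {
		handle_fast();
		/* clear the avail bits only when they were serviced */
		clear |= FAST_SOURCE | FAST_AVAIL;
	}
	clear |= handle_slow(is) & SLOW_SOURCES;
	writel(clear, isr_reg);		/* write-1-to-clear */
	return IRQ_HANDLED;
}
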
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index b18bdde..345ccf3 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -60,6 +60,23 @@
 	  the entities if corruption is suspected.
 	  If unsure, say N
 
+config QCOM_RUN_QUEUE_STATS
+       bool "Enable collection and exporting of QTI Run Queue stats to userspace"
+       help
+        This option enables the driver to periodically collect kernel run
+        queue statistics and calculate the load of the system. This
+        information is exported to userspace via sysfs entries, and userspace
+        algorithms use it to decide when to turn CPU cores on or off.
+
+config MSM_QBT_HANDLER
+	bool "Event Handler for QTI Ultrasonic Fingerprint Sensor"
+	help
+	  This driver acts as an interrupt handler, where the interrupt is generated
+	  by the QTI Ultrasonic Fingerprint Sensor. It queues the events for each
+	  interrupt in an event queue and notifies the userspace to read the events
+	  from the queue. It also creates an input device to send key events such as
+	  KEY_POWER, KEY_HOME.
+
 config QCOM_GSBI
         tristate "QCOM General Serial Bus Interface"
         depends on ARCH_QCOM
@@ -386,6 +403,15 @@
 	  subsystems within the SoC about other subsystems' power-up/down
 	  state-changes.
 
+config MSM_SYSMON_QMI_COMM
+       bool "MSM System Monitor communication support using QMI transport"
+       depends on QCOM_QMI_HELPERS && MSM_SUBSYSTEM_RESTART
+       help
+         This option adds support for MSM System Monitor APIs using the
+         QMI layer. The APIs provided may be used for notifying
+         subsystems within the SoC about other subsystems' power-up/down
+         state-changes.
+
 config MSM_PIL_SSR_GENERIC
 	tristate "MSM Subsystem Boot Support"
 	depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 8d0792a..f171fe6 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,4 +1,3 @@
-# SPDX-License-Identifier: GPL-2.0
 CFLAGS_rpmh-rsc.o := -I$(src)
 obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
 obj-$(CONFIG_QCOM_GLINK_SSR) +=	glink_ssr.o
@@ -41,7 +40,8 @@
 obj-$(CONFIG_QCOM_DCC_V2) += dcc_v2.o
 obj-$(CONFIG_MSM_SERVICE_NOTIFIER) += service-notifier.o
 obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
-obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
+obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o
+obj-$(CONFIG_MSM_SYSMON_QMI_COMM) += sysmon-qmi.o
 obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
 obj-$(CONFIG_MEM_SHARE_QMI_SERVICE)		+= memshare/
 obj-$(CONFIG_MSM_PIL)   +=      peripheral-loader.o
@@ -49,6 +49,7 @@
 obj-$(CONFIG_MSM_CDSP_LOADER) += qdsp6v2/
 obj-$(CONFIG_MSM_JTAGV8) += jtagv8.o jtagv8-etm.o
 obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
+obj-$(CONFIG_QCOM_RUN_QUEUE_STATS) += rq_stats.o
 
 ifdef CONFIG_MSM_SUBSYSTEM_RESTART
 	obj-y += subsystem_notif.o
@@ -65,6 +66,7 @@
 obj-$(CONFIG_QCOM_GLINK) += glink_probe.o
 obj-$(CONFIG_QCOM_GLINK_PKT) += glink_pkt.o
 obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
+obj-$(CONFIG_MSM_QBT_HANDLER) += qbt_handler.o
 obj-$(CONFIG_QSEE_IPC_IRQ) += qsee_ipc_irq.o
 obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
 obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 7101500..c074d79 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -145,6 +145,7 @@
 	uint32_t		nr_config[DCC_MAX_LINK_LIST];
 	uint8_t			curr_list;
 	uint8_t			cti_trig;
+	uint8_t			loopoff;
 };
 
 static bool dcc_ready(struct dcc_drvdata *drvdata)
@@ -250,7 +251,6 @@
 				/* write new offset = 1 to continue
 				 * processing the list
 				 */
-				link |= ((0x1 << 8) & BM(8, 14));
 				dcc_sram_writel(drvdata, link, sram_offset);
 				sram_offset += 4;
 				/* Reset link and prev_off */
@@ -283,7 +283,8 @@
 
 			if (loop_start) {
 				loop = (sram_offset - loop_off) / 4;
-				loop |= (loop_cnt << 13) & BM(13, 27);
+				loop |= (loop_cnt << drvdata->loopoff) &
+					BM(drvdata->loopoff, 27);
 				loop |= DCC_LOOP_DESCRIPTOR;
 				total_len += (total_len - loop_len) * loop_cnt;
 
@@ -315,7 +316,6 @@
 				/* write new offset = 1 to continue
 				 * processing the list
 				 */
-				link |= ((0x1 << 8) & BM(8, 14));
 				dcc_sram_writel(drvdata, link, sram_offset);
 				sram_offset += 4;
 				/* Reset link and prev_off */
@@ -1624,6 +1624,8 @@
 	if (ret)
 		return -EINVAL;
 
+	drvdata->loopoff = get_bitmask_order((drvdata->ram_size +
+				drvdata->ram_offset) / 4 - 1);
 	mutex_init(&drvdata->mutex);
 
 	for (i = 0; i < DCC_MAX_LINK_LIST; i++) {
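
The new loopoff field replaces the hard-coded shift of 13 with one derived from
the SRAM size: get_bitmask_order(n) is fls(n), the number of bits needed to
represent n, so the loop count is shifted past however many bits the largest SRAM
word offset occupies. A quick check of the arithmetic:

#include <linux/bitops.h>

static unsigned int loop_shift(u32 ram_size, u32 ram_offset)
{
	/* highest addressable SRAM word index */
	u32 max_word = (ram_size + ram_offset) / 4 - 1;

	/* e.g. a 0x2000-word SRAM gives max_word 0x1FFF -> 13,
	 * matching the previous hard-coded constant */
	return get_bitmask_order(max_word);
}
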
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
index 0304fcc..8fae3f1 100644
--- a/drivers/soc/qcom/dfc_qmi.c
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -1,20 +1,25 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/rtnetlink.h>
 #include <net/pkt_sched.h>
 #include <linux/soc/qcom/qmi.h>
 #include <soc/qcom/rmnet_qmi.h>
+#include <soc/qcom/qmi_rmnet.h>
 
 #include "qmi_rmnet_i.h"
 #define CREATE_TRACE_POINTS
 #include <trace/events/dfc.h>
 
+#define DFC_MASK_TCP_BIDIR 0x1
+#define DFC_MASK_RAT_SWITCH 0x2
+#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
+#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
+
 #define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
 
-#define DFC_MAX_BEARERS_V01 16
 #define DFC_MAX_QOS_ID_V01 2
 
 #define DFC_ACK_TYPE_DISABLE 1
@@ -50,17 +55,20 @@
 	struct work_struct svc_arrive;
 	struct qmi_handle handle;
 	struct sockaddr_qrtr ssctl;
+	struct svc_info svc;
+	struct work_struct qmi_ind_work;
+	struct list_head qmi_ind_q;
+	spinlock_t qmi_ind_lock;
 	int index;
 	int restart_state;
 };
 
 static void dfc_svc_init(struct work_struct *work);
-static void dfc_do_burst_flow_control(struct work_struct *work);
 
 /* **************************************************** */
 #define DFC_SERVICE_ID_V01 0x4E
 #define DFC_SERVICE_VERS_V01 0x01
-#define DFC_TIMEOUT_MS 10000
+#define DFC_TIMEOUT_JF msecs_to_jiffies(1000)
 
 #define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020
 #define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020
@@ -75,6 +83,11 @@
 #define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
 #define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 540
 
+#define QMI_DFC_GET_FLOW_STATUS_REQ_V01 0x0023
+#define QMI_DFC_GET_FLOW_STATUS_RESP_V01 0x0023
+#define QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN 20
+#define QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN 543
+
 struct dfc_bind_client_req_msg_v01 {
 	u8 ep_id_valid;
 	struct data_ep_id_type_v01 ep_id;
@@ -298,9 +311,21 @@
 	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
 };
 
+struct dfc_get_flow_status_req_msg_v01 {
+	u8 bearer_id_list_valid;
+	u8 bearer_id_list_len;
+	u8 bearer_id_list[DFC_MAX_BEARERS_V01];
+};
+
+struct dfc_get_flow_status_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 flow_status_valid;
+	u8 flow_status_len;
+	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
+};
+
 struct dfc_svc_ind {
-	struct work_struct work;
-	struct dfc_qmi_data *data;
+	struct list_head list;
 	struct dfc_flow_status_ind_msg_v01 dfc_info;
 };
 
@@ -497,6 +522,100 @@
 	},
 };
 
+static struct qmi_elem_info dfc_get_flow_status_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_req_msg_v01,
+					   bearer_id_list_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_req_msg_v01,
+					   bearer_id_list_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(u8),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_req_msg_v01,
+					   bearer_id_list),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_get_flow_status_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   flow_status_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   flow_status_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(struct
+					 dfc_flow_status_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   flow_status),
+		.ei_array	= dfc_flow_status_info_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
 static int
 dfc_bind_client_req(struct qmi_handle *dfc_handle,
 		    struct sockaddr_qrtr *ssctl, struct svc_info *svc)
@@ -538,7 +657,7 @@
 		goto out;
 	}
 
-	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_MS);
+	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
 	if (ret < 0) {
 		pr_err("%s() Response waiting failed, err: %d\n",
 			__func__, ret);
@@ -594,7 +713,7 @@
 		goto out;
 	}
 
-	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_MS);
+	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
 	if (ret < 0) {
 		pr_err("%s() Response waiting failed, err: %d\n",
 			__func__, ret);
@@ -610,12 +729,65 @@
 	return ret;
 }
 
-static int dfc_init_service(struct dfc_qmi_data *data, struct qmi_info *qmi)
+static int
+dfc_get_flow_status_req(struct qmi_handle *dfc_handle,
+			struct sockaddr_qrtr *ssctl,
+			struct dfc_get_flow_status_resp_msg_v01 *resp)
+{
+	struct dfc_get_flow_status_req_msg_v01 *req;
+	struct qmi_txn *txn;
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	txn = kzalloc(sizeof(*txn), GFP_ATOMIC);
+	if (!txn) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(dfc_handle, txn,
+			   dfc_get_flow_status_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(dfc_handle, ssctl, txn,
+			       QMI_DFC_GET_FLOW_STATUS_REQ_V01,
+			       QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN,
+			       dfc_get_flow_status_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(txn);
+		pr_err("%s() Failed sending request, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(txn, DFC_TIMEOUT_JF);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, err: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(txn);
+	kfree(req);
+	return ret;
+}
+
+static int dfc_init_service(struct dfc_qmi_data *data)
 {
 	int rc;
 
-	rc = dfc_bind_client_req(&data->handle, &data->ssctl,
-				 &qmi->fc_info[data->index].svc);
+	rc = dfc_bind_client_req(&data->handle, &data->ssctl, &data->svc);
 	if (rc < 0)
 		return rc;
 
@@ -666,21 +838,18 @@
 			       struct rmnet_bearer_map *bearer,
 			       struct qos_info *qos)
 {
-	struct list_head *p;
 	struct rmnet_flow_map *itm;
 	int rc = 0, qlen;
 	int enable;
 
 	enable = bearer->grant_size ? 1 : 0;
 
-	list_for_each(p, &qos->flow_head) {
-		itm = list_entry(p, struct rmnet_flow_map, list);
-
+	list_for_each_entry(itm, &qos->flow_head, list) {
 		if (itm->bearer_id == bearer->bearer_id) {
 			/*
 			 * Do not flow disable ancillary q if ancillary is true
 			 */
-			if (bearer->ancillary && enable == 0 &&
+			if (bearer->tcp_bidir && enable == 0 &&
 					DFC_IS_ANCILLARY(itm->ip_type))
 				continue;
 
@@ -705,36 +874,39 @@
 				struct qos_info *qos, u8 ack_req, u32 ancillary,
 				struct dfc_flow_status_info_type_v01 *fc_info)
 {
-	struct list_head *p;
-	struct rmnet_bearer_map *bearer_itm = NULL;
-	int enable;
+	struct rmnet_bearer_map *bearer_itm;
+	struct rmnet_flow_map *flow_itm;
+	int rc = 0, qlen;
+	bool enable;
 
-	list_for_each(p, &qos->bearer_head) {
-		bearer_itm = list_entry(p, struct rmnet_bearer_map, list);
+	enable = fc_info->num_bytes > 0 ? 1 : 0;
 
+	list_for_each_entry(bearer_itm, &qos->bearer_head, list) {
 		bearer_itm->grant_size = fc_info->num_bytes;
 		bearer_itm->grant_thresh =
 			qmi_rmnet_grant_per(bearer_itm->grant_size);
 		bearer_itm->seq = fc_info->seq_num;
 		bearer_itm->ack_req = ack_req;
-		bearer_itm->ancillary = ancillary;
+		bearer_itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+		bearer_itm->last_grant = fc_info->num_bytes;
+		bearer_itm->last_seq = fc_info->seq_num;
 	}
 
-	enable = fc_info->num_bytes > 0 ? 1 : 0;
-
-	if (enable)
-		netif_tx_wake_all_queues(dev);
-	else
-		netif_tx_stop_all_queues(dev);
-
-	trace_dfc_qmi_tc(dev->name, 0xFF, 0, fc_info->num_bytes, 0, 0, enable);
+	list_for_each_entry(flow_itm, &qos->flow_head, list) {
+		qlen = qmi_rmnet_flow_control(dev, flow_itm->tcm_handle,
+					      enable);
+		trace_dfc_qmi_tc(dev->name, flow_itm->bearer_id,
+				 flow_itm->flow_id, fc_info->num_bytes,
+				 qlen, flow_itm->tcm_handle, enable);
+		rc++;
+	}
 
 	if (enable == 0 && ack_req)
 		dfc_send_ack(dev, fc_info->bearer_id,
 			     fc_info->seq_num, fc_info->mux_id,
 			     DFC_ACK_TYPE_DISABLE);
 
-	return 0;
+	return rc;
 }
 
 static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
@@ -743,33 +915,42 @@
 {
 	struct rmnet_bearer_map *itm = NULL;
 	int rc = 0;
-	int action = -1;
+	bool action = false;
 
 	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
 	if (itm) {
-		if (itm->grant_size == 0 && fc_info->num_bytes > 0)
-			action = 1;
-		else if (itm->grant_size > 0 && fc_info->num_bytes == 0)
-			action = 0;
+		/* The RAT switch flag indicates the start and end of
+		 * the switch. Ignore indications in between.
+		 */
+		if (DFC_IS_RAT_SWITCH(ancillary))
+			itm->rat_switch = !fc_info->num_bytes;
+		else
+			if (itm->rat_switch)
+				return 0;
+
+		if ((itm->grant_size == 0 && fc_info->num_bytes > 0) ||
+		    (itm->grant_size > 0 && fc_info->num_bytes == 0))
+			action = true;
 
 		itm->grant_size = fc_info->num_bytes;
 		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
 		itm->seq = fc_info->seq_num;
 		itm->ack_req = ack_req;
-		itm->ancillary = ancillary;
+		itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+		itm->last_grant = fc_info->num_bytes;
+		itm->last_seq = fc_info->seq_num;
 
-		if (action != -1)
+		if (action)
 			rc = dfc_bearer_flow_ctl(dev, itm, qos);
 	} else {
-		pr_debug("grant %u before flow activate\n", fc_info->num_bytes);
 		qos->default_grant = fc_info->num_bytes;
 	}
 	return rc;
 }
 
-static void dfc_do_burst_flow_control(struct work_struct *work)
+static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+				      struct dfc_svc_ind *svc_ind)
 {
-	struct dfc_svc_ind *svc_ind = (struct dfc_svc_ind *)work;
 	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->dfc_info;
 	struct net_device *dev;
 	struct qos_info *qos;
@@ -779,11 +960,6 @@
 	u32 ancillary;
 	int i, j;
 
-	if (unlikely(svc_ind->data->restart_state)) {
-		kfree(svc_ind);
-		return;
-	}
-
 	rcu_read_lock();
 
 	for (i = 0; i < ind->flow_status_len; i++) {
@@ -801,7 +977,7 @@
 			}
 		}
 
-		trace_dfc_flow_ind(svc_ind->data->index,
+		trace_dfc_flow_ind(dfc->index,
 				   i, flow_status->mux_id,
 				   flow_status->bearer_id,
 				   flow_status->num_bytes,
@@ -809,7 +985,7 @@
 				   ack_req,
 				   ancillary);
 
-		dev = rmnet_get_rmnet_dev(svc_ind->data->rmnet_port,
+		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
 					  flow_status->mux_id);
 		if (!dev)
 			goto clean_out;
@@ -832,7 +1008,38 @@
 
 clean_out:
 	rcu_read_unlock();
-	kfree(svc_ind);
+}
+
+static void dfc_qmi_ind_work(struct work_struct *work)
+{
+	struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data,
+						qmi_ind_work);
+	struct dfc_svc_ind *svc_ind;
+	unsigned long flags;
+
+	if (!dfc)
+		return;
+
+	local_bh_disable();
+
+	do {
+		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
+		svc_ind = list_first_entry_or_null(&dfc->qmi_ind_q,
+						   struct dfc_svc_ind, list);
+		if (svc_ind)
+			list_del(&svc_ind->list);
+		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
+
+		if (svc_ind) {
+			if (!dfc->restart_state)
+				dfc_do_burst_flow_control(dfc, svc_ind);
+			kfree(svc_ind);
+		}
+	} while (svc_ind != NULL);
+
+	local_bh_enable();
+
+	qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
 }
 
 static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
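
The restructuring above replaces the per-indication work_struct (allocated in the
QMI callback) with a single work item draining a spinlock-protected list: the
callback only copies the indication and queues it, while dfc_qmi_ind_work()
consumes entries until the list is empty, and the restart_state check moves into
the consumer so stale indications are simply freed. A minimal sketch of the
producer/consumer shape:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct ind_item {
	struct list_head list;
	/* payload copied from the QMI indication would live here */
};

static LIST_HEAD(ind_q);
static DEFINE_SPINLOCK(ind_lock);

/* consumer: one work item drains everything queued so far */
static void ind_work_fn(struct work_struct *work)
{
	struct ind_item *item;
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&ind_lock, flags);
		item = list_first_entry_or_null(&ind_q, struct ind_item, list);
		if (item)
			list_del(&item->list);
		spin_unlock_irqrestore(&ind_lock, flags);

		if (!item)
			break;
		/* process the indication outside the lock */
		kfree(item);
	}
}
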
@@ -842,6 +1049,7 @@
 						handle);
 	struct dfc_flow_status_ind_msg_v01 *ind_msg;
 	struct dfc_svc_ind *svc_ind;
+	unsigned long flags;
 
 	if (qmi != &dfc->handle)
 		return;
@@ -858,13 +1066,13 @@
 		if (!svc_ind)
 			return;
 
-		INIT_WORK((struct work_struct *)svc_ind,
-			  dfc_do_burst_flow_control);
-
 		memcpy(&svc_ind->dfc_info, ind_msg, sizeof(*ind_msg));
-		svc_ind->data = dfc;
 
-		queue_work(dfc->dfc_wq, (struct work_struct *)svc_ind);
+		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
+		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
+		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
+
+		queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
 	}
 }
 
@@ -875,25 +1083,32 @@
 						 svc_arrive);
 	struct qmi_info *qmi;
 
+	if (data->restart_state == 1)
+		return;
+
+	rc = dfc_init_service(data);
+	if (rc < 0) {
+		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
+		return;
+	}
+
+	rtnl_lock();
 	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
-	if (!qmi)
-		goto clean_out;
+	if (!qmi) {
+		rtnl_unlock();
+		return;
+	}
 
-	rc = dfc_init_service(data, qmi);
-	if (rc < 0)
-		goto clean_out;
-
-	qmi->fc_info[data->index].dfc_client = (void *)data;
+	qmi->dfc_pending[data->index] = NULL;
+	qmi->dfc_clients[data->index] = (void *)data;
 	trace_dfc_client_state_up(data->index,
-				  qmi->fc_info[data->index].svc.instance,
-				  qmi->fc_info[data->index].svc.ep_type,
-				  qmi->fc_info[data->index].svc.iface_id);
-	return;
+				  data->svc.instance,
+				  data->svc.ep_type,
+				  data->svc.iface_id);
 
-clean_out:
-	qmi_handle_release(&data->handle);
-	destroy_workqueue(data->dfc_wq);
-	kfree(data);
+	rtnl_unlock();
+
+	pr_info("Connection established with the DFC Service\n");
 }
 
 static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
@@ -935,11 +1150,15 @@
 	{},
 };
 
-int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi)
+int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+			struct qmi_info *qmi)
 {
 	struct dfc_qmi_data *data;
 	int rc = -ENOMEM;
 
+	if (!port || !qmi)
+		return -EINVAL;
+
 	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -947,6 +1166,11 @@
 	data->rmnet_port = port;
 	data->index = index;
 	data->restart_state = 0;
+	memcpy(&data->svc, psvc, sizeof(data->svc));
+
+	INIT_WORK(&data->qmi_ind_work, dfc_qmi_ind_work);
+	INIT_LIST_HEAD(&data->qmi_ind_q);
+	spin_lock_init(&data->qmi_ind_lock);
 
 	data->dfc_wq = create_singlethread_workqueue("dfc_wq");
 	if (!data->dfc_wq) {
@@ -956,7 +1180,7 @@
 
 	INIT_WORK(&data->svc_arrive, dfc_svc_init);
 	rc = qmi_handle_init(&data->handle,
-			     QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN,
+			     QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN,
 			     &server_ops, qmi_indication_handler);
 	if (rc < 0) {
 		pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc);
@@ -965,12 +1189,14 @@
 
 	rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01,
 			    DFC_SERVICE_VERS_V01,
-			    qmi->fc_info[index].svc.instance);
+			    psvc->instance);
 	if (rc < 0) {
 		pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc);
 		goto err2;
 	}
 
+	qmi->dfc_pending[index] = (void *)data;
+
 	return 0;
 
 err2:
@@ -1049,8 +1275,45 @@
 	int i;
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
-		dfc_data = (struct dfc_qmi_data *)(qmi->fc_info[i].dfc_client);
+		dfc_data = (struct dfc_qmi_data *)(qmi->dfc_clients[i]);
 		if (dfc_data)
 			flush_workqueue(dfc_data->dfc_wq);
 	}
 }
+
+void dfc_qmi_query_flow(void *dfc_data)
+{
+	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
+	struct dfc_get_flow_status_resp_msg_v01 *resp;
+	struct dfc_svc_ind *svc_ind;
+	int rc;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp)
+		return;
+
+	svc_ind = kzalloc(sizeof(*svc_ind), GFP_ATOMIC);
+	if (!svc_ind) {
+		kfree(resp);
+		return;
+	}
+
+	if (!data)
+		goto done;
+
+	rc = dfc_get_flow_status_req(&data->handle, &data->ssctl, resp);
+
+	if (rc < 0 || !resp->flow_status_valid || resp->flow_status_len < 1 ||
+	    resp->flow_status_len > DFC_MAX_BEARERS_V01)
+		goto done;
+
+	svc_ind->dfc_info.flow_status_valid = resp->flow_status_valid;
+	svc_ind->dfc_info.flow_status_len = resp->flow_status_len;
+	memcpy(&svc_ind->dfc_info.flow_status, resp->flow_status,
+		sizeof(resp->flow_status[0]) * resp->flow_status_len);
+	dfc_do_burst_flow_control(data, svc_ind);
+
+done:
+	kfree(svc_ind);
+	kfree(resp);
+}
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index dad62a7..83970eb 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/sysfs.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/bitops.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
@@ -22,6 +23,7 @@
 #include <linux/serial.h>
 #include <linux/workqueue.h>
 #include <linux/power_supply.h>
+#include <soc/qcom/scm.h>
 
 #define EUD_ENABLE_CMD 1
 #define EUD_DISABLE_CMD 0
@@ -32,6 +34,7 @@
 #define EUD_REG_COM_RX_ID	0x000C
 #define EUD_REG_COM_RX_LEN	0x0010
 #define EUD_REG_COM_RX_DAT	0x0014
+#define EUD_REG_EUD_EN2		0x0000
 #define EUD_REG_INT1_EN_MASK	0x0024
 #define EUD_REG_INT_STATUS_1	0x0044
 #define EUD_REG_CTL_OUT_1	0x0074
@@ -65,6 +68,9 @@
 	struct extcon_dev		*extcon;
 	struct uart_port		port;
 	struct work_struct		eud_work;
+	struct power_supply		*batt_psy;
+	bool				secure_eud_en;
+	phys_addr_t			eud_mode_mgr2_phys_base;
 };
 
 static const unsigned int eud_extcon_cable[] = {
@@ -119,6 +125,14 @@
 		/* Enable vbus, chgr & safe mode warning interrupts */
 		writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
 				priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
+		/* Enable secure eud if supported */
+		if (priv->secure_eud_en) {
+			ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
+					   EUD_REG_EUD_EN2, EUD_ENABLE_CMD);
+			if (ret)
+				dev_err(&pdev->dev,
+				"scm_io_write failed with rc:%d\n", ret);
+		}
 
 		/* Ensure Register Writes Complete */
 		wmb();
@@ -142,10 +156,21 @@
 static void disable_eud(struct platform_device *pdev)
 {
 	struct eud_chip *priv = platform_get_drvdata(pdev);
+	int ret;
 
 	/* write into CSR to disable EUD */
 	writel_relaxed(0, priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
-	dev_dbg(&pdev->dev, "%s: EUD is Disabled\n", __func__);
+
+	/* Disable secure eud if supported */
+	if (priv->secure_eud_en) {
+		ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
+				   EUD_REG_EUD_EN2, EUD_DISABLE_CMD);
+		if (ret)
+			dev_err(&pdev->dev,
+			"scm_io_write failed with rc:%d\n", ret);
+	}
+
+	dev_dbg(&pdev->dev, "%s: EUD Disabled!\n", __func__);
 }
 
 static int param_eud_set(const char *val, const struct kernel_param *kp)
@@ -180,17 +205,33 @@
 
 module_param_cb(enable, &eud_param_ops, &enable, 0644);
 
+static bool is_batt_available(struct eud_chip *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
 static void eud_event_notifier(struct work_struct *eud_work)
 {
 	struct eud_chip *chip = container_of(eud_work, struct eud_chip,
 					eud_work);
+	union power_supply_propval pval;
 
 	if (chip->int_status == EUD_INT_VBUS)
 		extcon_set_state_sync(chip->extcon, chip->extcon_id,
 					chip->usb_attach);
-	else if (chip->int_status == EUD_INT_CHGR)
-		extcon_set_state_sync(chip->extcon, chip->extcon_id,
-					chip->chgr_enable);
+	else if (chip->int_status == EUD_INT_CHGR) {
+		if (is_batt_available(chip)) {
+			pval.intval = !chip->chgr_enable;
+			power_supply_set_property(chip->batt_psy,
+				POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+		}
+	}
 }
 
 static void usb_attach_detach(struct eud_chip *chip)
@@ -508,6 +549,22 @@
 
 	chip->eud_irq = platform_get_irq_byname(pdev, "eud_irq");
 
+	chip->secure_eud_en = of_property_read_bool(pdev->dev.of_node,
+			      "qcom,secure-eud-en");
+	if (chip->secure_eud_en) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "eud_mode_mgr2");
+		if (!res) {
+			dev_err(chip->dev,
+			"%s: failed to get resource eud_mode_mgr2\n",
+			__func__);
+			ret = -ENOMEM;
+			return ret;
+		}
+
+		chip->eud_mode_mgr2_phys_base = res->start;
+	}
+
 	ret = devm_request_irq(&pdev->dev, chip->eud_irq, handle_eud_irq,
 				IRQF_TRIGGER_HIGH, "eud_irq", chip);
 	if (ret) {
diff --git a/drivers/soc/qcom/fsa4480-i2c.c b/drivers/soc/qcom/fsa4480-i2c.c
index ca57325..707d249 100644
--- a/drivers/soc/qcom/fsa4480-i2c.c
+++ b/drivers/soc/qcom/fsa4480-i2c.c
@@ -150,20 +150,27 @@
 	dev_dbg(dev, "%s: setting GPIOs active = %d\n",
 		__func__, mode.intval != POWER_SUPPLY_TYPEC_NONE);
 
-	if (mode.intval != POWER_SUPPLY_TYPEC_NONE) {
+	switch (mode.intval) {
+	/* add all modes FSA should notify for in here */
+	case POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER:
 		/* activate switches */
 		fsa4480_usbc_update_settings(fsa_priv, 0x00, 0x9F);
 
 		/* notify call chain on event */
 		blocking_notifier_call_chain(&fsa_priv->fsa4480_notifier,
-		POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER, NULL);
-	} else {
+		mode.intval, NULL);
+		break;
+	case POWER_SUPPLY_TYPEC_NONE:
 		/* notify call chain on event */
 		blocking_notifier_call_chain(&fsa_priv->fsa4480_notifier,
 				POWER_SUPPLY_TYPEC_NONE, NULL);
 
 		/* deactivate switches */
 		fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
+		break;
+	default:
+		/* ignore other usb connection modes */
+		break;
 	}
 
 done:
diff --git a/drivers/soc/qcom/llcc-kona.c b/drivers/soc/qcom/llcc-kona.c
index 35245bf..dbbd79f 100644
--- a/drivers/soc/qcom/llcc-kona.c
+++ b/drivers/soc/qcom/llcc-kona.c
@@ -56,7 +56,7 @@
 	SCT_ENTRY(LLCC_AUDIO,    6, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0),
 	SCT_ENTRY(LLCC_CMPT,    10, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0),
 	SCT_ENTRY(LLCC_GPUHTW,  11, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_GPU,     12, 2560, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_GPU,     12, 2048, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1, 0),
 	SCT_ENTRY(LLCC_MMUHWT,  13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1),
 	SCT_ENTRY(LLCC_CMPTDMA, 15, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
 	SCT_ENTRY(LLCC_DISP,    16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
@@ -76,7 +76,7 @@
 }
 
 static const struct of_device_id kona_qcom_llcc_of_match[] = {
-	{ .compatible = "qcom,kona-llcc", },
+	{ .compatible = "qcom,llcc-v2", },
 	{ },
 };
 
diff --git a/drivers/soc/qcom/llcc-lito.c b/drivers/soc/qcom/llcc-lito.c
index dad23bf..d2fecb3 100644
--- a/drivers/soc/qcom/llcc-lito.c
+++ b/drivers/soc/qcom/llcc-lito.c
@@ -78,7 +78,7 @@
 }
 
 static const struct of_device_id lito_qcom_llcc_of_match[] = {
-	{ .compatible = "qcom,lito-llcc", },
+	{ .compatible = "qcom,llcc-v1", },
 	{ },
 };
 
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index 6a82b5d..833f047 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -42,6 +42,10 @@
 #define LLCC_TRP_ATTR0_CFGn(n)        (0x21000 + SZ_8 * n)
 #define LLCC_TRP_ATTR1_CFGn(n)        (0x21004 + SZ_8 * n)
 
+#define LLCC_TRP_C_AS_NC	      0x21F90
+#define LLCC_TRP_NC_AS_C	      0x21F94
+#define LLCC_FEAC_C_AS_NC	      0x35030
+#define LLCC_FEAC_NC_AS_C	      0x35034
 #define LLCC_TRP_WRSC_EN              0x21F20
 #define LLCC_WRSC_SCID_EN(n)          BIT(n)
 
@@ -232,15 +236,44 @@
 	u32 sz;
 	u32 pcb = 0;
 	u32 cad = 0;
+	u32 wren = 0;
 	int ret = 0;
 	const struct llcc_slice_config *llcc_table;
 	struct llcc_slice_desc desc;
 	bool cap_based_alloc_and_pwr_collapse =
 		drv_data->cap_based_alloc_and_pwr_collapse;
+	uint32_t mask = ~0;
+	int v2_ver = of_device_is_compatible(pdev->dev.of_node,
+							 "qcom,llcc-v2");
 
 	sz = drv_data->cfg_size;
 	llcc_table = drv_data->cfg;
 
+	/* Disable the Cache as Non-Cache override and enable
+	 * the Non-Cache as Cache override
+	 */
+	if (v2_ver) {
+		ret  = regmap_write(drv_data->bcast_regmap,
+						 LLCC_TRP_C_AS_NC, 0);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(drv_data->bcast_regmap,
+						 LLCC_TRP_NC_AS_C, mask);
+		if (ret)
+			return ret;
+	} else {
+		ret  = regmap_write(drv_data->bcast_regmap,
+						 LLCC_FEAC_C_AS_NC, 0);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(drv_data->bcast_regmap,
+						 LLCC_FEAC_NC_AS_C, mask);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < sz; i++) {
 		attr1_cfg = LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
 		attr0_cfg = LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
@@ -277,10 +310,11 @@
 		if (ret)
 			return ret;
 
-		if (llcc_table[i].write_scid_en) {
+		if (v2_ver) {
+			wren |= llcc_table[i].write_scid_en <<
+						llcc_table[i].slice_id;
 			ret = regmap_write(drv_data->bcast_regmap,
-				LLCC_TRP_WRSC_EN,
-				LLCC_WRSC_SCID_EN(llcc_table[i].slice_id));
+				LLCC_TRP_WRSC_EN, wren);
 			if (ret)
 				return ret;
 		}
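
The write-sub-cache enable change fixes an overwrite bug: LLCC_WRSC_SCID_EN()
wrote a single slice's bit per iteration, clobbering bits set for earlier slices,
whereas the new code accumulates all enable bits in wren before each write,
mirroring the existing cad/pcb handling. The shape of the accumulation, as a
sketch:

#include <linux/regmap.h>

struct slice_cfg {
	u32 slice_id;
	u32 write_scid_en;	/* 0 or 1 */
};

static int program_wrsc(struct regmap *map, u32 reg,
			const struct slice_cfg *tbl, int n)
{
	u32 wren = 0;
	int i, ret;

	for (i = 0; i < n; i++) {
		/* OR in each slice's bit; earlier slices stay enabled */
		wren |= tbl[i].write_scid_en << tbl[i].slice_id;
		ret = regmap_write(map, reg, wren);
		if (ret)
			return ret;
	}
	return 0;
}
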
@@ -288,13 +322,17 @@
 		if (cap_based_alloc_and_pwr_collapse) {
 			cad |= llcc_table[i].dis_cap_alloc <<
 				llcc_table[i].slice_id;
-			regmap_write(drv_data->bcast_regmap,
+			ret = regmap_write(drv_data->bcast_regmap,
 					LLCC_TRP_SCID_DIS_CAP_ALLOC, cad);
+			if (ret)
+				return ret;
 
 			pcb |= llcc_table[i].retain_on_pc <<
 					llcc_table[i].slice_id;
-			regmap_write(drv_data->bcast_regmap,
-					LLCC_TRP_PCB_ACT, pcb);
+			ret = regmap_write(drv_data->bcast_regmap,
+						LLCC_TRP_PCB_ACT, pcb);
+			if (ret)
+				return ret;
 		}
 
 		if (llcc_table[i].activate_on_init) {
@@ -380,8 +418,10 @@
 	platform_set_drvdata(pdev, drv_data);
 
 	ret = qcom_llcc_cfg_program(pdev);
-	if (ret)
+	if (ret) {
+		pr_err("llcc configuration failed!!\n");
 		return ret;
+	}
 
 	drv_data->ecc_irq = platform_get_irq(pdev, 0);
 	llcc_edac = platform_device_register_data(&pdev->dev,
diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c
index 02fdf01..711c3c5 100644
--- a/drivers/soc/qcom/mem-offline.c
+++ b/drivers/soc/qcom/mem-offline.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/memory.h>
@@ -92,7 +92,7 @@
 
 	pkt.size = MAX_LEN;
 	pkt.data = mbox_msg;
-	return mbox_send_message(mailbox.mbox, &pkt);
+	return (mbox_send_message(mailbox.mbox, &pkt) < 0);
 }
 
 static int mem_event_callback(struct notifier_block *self,
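
mbox_send_message() returns a non-negative ticket on success and a negative errno
on failure, so the change above folds the result to a pass/fail value instead of
propagating the ticket as if it were an error code. An equivalent illustrative
wrapper:

#include <linux/mailbox_client.h>

/* 0 on success, 1 on send failure -- callers only test truthiness */
static int send_mbox_pkt(struct mbox_chan *chan, void *pkt)
{
	return mbox_send_message(chan, pkt) < 0;
}
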
@@ -346,8 +346,9 @@
 
 	mailbox.mbox = mbox_request_channel(&mailbox.cl, 0);
 	if (IS_ERR(mailbox.mbox)) {
-		pr_err("mem-offline: failed to get mailbox channel %pK %d\n",
-			mailbox.mbox, PTR_ERR(mailbox.mbox));
+		if (PTR_ERR(mailbox.mbox) != -EPROBE_DEFER)
+			pr_err("mem-offline: failed to get mailbox channel %pK %ld\n",
+				mailbox.mbox, PTR_ERR(mailbox.mbox));
 		return PTR_ERR(mailbox.mbox);
 	}
 
@@ -363,8 +364,9 @@
 {
 	int ret;
 
-	if (mem_parse_dt(pdev))
-		return -ENODEV;
+	ret = mem_parse_dt(pdev);
+	if (ret)
+		return ret;
 
 	ret = mem_online_remaining_blocks();
 	if (ret < 0)
diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile
index a217fd4..a764258 100644
--- a/drivers/soc/qcom/msm_bus/Makefile
+++ b/drivers/soc/qcom/msm_bus/Makefile
@@ -8,7 +8,7 @@
 
 ifdef CONFIG_QCOM_BUS_CONFIG_RPMH
 	obj-y += msm_bus_fabric_rpmh.o msm_bus_arb_rpmh.o msm_bus_rules.o \
-		msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o
+		msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o msm_bus_proxy_client.o
 	obj-$(CONFIG_OF) += msm_bus_of_rpmh.o
 else
 	obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o \
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 73f4258..83b1737 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -272,7 +272,6 @@
 
 	cmd->addr = cur_bcm->bcmdev->addr;
 	cmd->data = BCM_TCS_CMD(commit, valid, vec_a, vec_b);
-	cmd->wait = commit;
 
 	return ret;
 }
@@ -320,6 +319,7 @@
 			commit = false;
 			if (list_is_last(&cur_bcm->link,
 						&cur_bcm_clist[i])) {
+				cmdlist_active[k].wait = true;
 				commit = true;
 				idx++;
 			}
@@ -369,6 +369,11 @@
 				idx++;
 			}
 
+			if (cur_rsc->node_info->id == MSM_BUS_RSC_DISP) {
+				cmdlist_wake[last_tcs].wait = false;
+				cmdlist_sleep[last_tcs].wait = false;
+			}
+
 			tcs_cmd_gen(cur_bcm, &cmdlist_wake[k],
 				cur_bcm->node_vec[ACTIVE_CTX].vec_a,
 				cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c b/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c
new file mode 100644
index 0000000..d3ca18f
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/msm-bus.h>
+
+struct proxy_client {
+	struct msm_bus_scale_pdata *pdata;
+	unsigned int client_handle;
+};
+
+static struct proxy_client proxy_client_info;
+
+static int msm_bus_device_proxy_client_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	proxy_client_info.pdata = msm_bus_cl_get_pdata(pdev);
+
+	if (!proxy_client_info.pdata)
+		return 0;
+
+	proxy_client_info.client_handle =
+		msm_bus_scale_register_client(proxy_client_info.pdata);
+
+	if (!proxy_client_info.client_handle) {
+		dev_err(&pdev->dev, "Unable to register bus client\n");
+		return -ENODEV;
+	}
+
+	ret = msm_bus_scale_client_update_request(
+					proxy_client_info.client_handle, 1);
+	if (ret)
+		dev_err(&pdev->dev, "Bandwidth update failed (%d)\n", ret);
+
+	return ret;
+}
+
+static const struct of_device_id proxy_client_match[] = {
+	{.compatible = "qcom,bus-proxy-client"},
+	{}
+};
+
+static struct platform_driver msm_bus_proxy_client_driver = {
+	.probe = msm_bus_device_proxy_client_probe,
+	.driver = {
+		.name = "msm_bus_proxy_client_device",
+		.of_match_table = proxy_client_match,
+	},
+};
+
+static int __init msm_bus_proxy_client_init_driver(void)
+{
+	int rc;
+
+	rc =  platform_driver_register(&msm_bus_proxy_client_driver);
+	if (rc) {
+		pr_err("Failed to register proxy client device driver\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __init msm_bus_proxy_client_unvote(void)
+{
+	int ret;
+
+	if (!proxy_client_info.pdata || !proxy_client_info.client_handle)
+		return 0;
+
+	ret = msm_bus_scale_client_update_request(
+					proxy_client_info.client_handle, 0);
+	if (ret)
+		pr_err("%s: bandwidth update request failed (%d)\n",
+			__func__, ret);
+
+	msm_bus_scale_unregister_client(proxy_client_info.client_handle);
+
+	return 0;
+}
+
+subsys_initcall_sync(msm_bus_proxy_client_init_driver);
+late_initcall_sync(msm_bus_proxy_client_unvote);
diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c
index d4b772c..4c98eec 100644
--- a/drivers/soc/qcom/msm_performance.c
+++ b/drivers/soc/qcom/msm_performance.c
@@ -31,7 +31,7 @@
 	unsigned int min;
 	unsigned int max;
 };
-static DEFINE_PER_CPU(struct cpu_status, cpu_stats);
+static DEFINE_PER_CPU(struct cpu_status, msm_perf_cpu_stats);
 
 struct events {
 	spinlock_t cpu_hotplug_lock;
@@ -69,7 +69,7 @@
 		if (cpu > (num_present_cpus() - 1))
 			return -EINVAL;
 
-		i_cpu_stats = &per_cpu(cpu_stats, cpu);
+		i_cpu_stats = &per_cpu(msm_perf_cpu_stats, cpu);
 
 		i_cpu_stats->min = val;
 		cpumask_set_cpu(cpu, limit_mask);
@@ -87,7 +87,7 @@
 	 */
 	get_online_cpus();
 	for_each_cpu(i, limit_mask) {
-		i_cpu_stats = &per_cpu(cpu_stats, i);
+		i_cpu_stats = &per_cpu(msm_perf_cpu_stats, i);
 
 		if (cpufreq_get_policy(&policy, i))
 			continue;
@@ -109,7 +109,8 @@
 
 	for_each_present_cpu(cpu) {
 		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
-				"%d:%u ", cpu, per_cpu(cpu_stats, cpu).min);
+				"%d:%u ", cpu,
+				per_cpu(msm_perf_cpu_stats, cpu).min);
 	}
 	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
 	return cnt;
@@ -145,7 +146,7 @@
 		if (cpu > (num_present_cpus() - 1))
 			return -EINVAL;
 
-		i_cpu_stats = &per_cpu(cpu_stats, cpu);
+		i_cpu_stats = &per_cpu(msm_perf_cpu_stats, cpu);
 
 		i_cpu_stats->max = val;
 		cpumask_set_cpu(cpu, limit_mask);
@@ -156,7 +157,7 @@
 
 	get_online_cpus();
 	for_each_cpu(i, limit_mask) {
-		i_cpu_stats = &per_cpu(cpu_stats, i);
+		i_cpu_stats = &per_cpu(msm_perf_cpu_stats, i);
 		if (cpufreq_get_policy(&policy, i))
 			continue;
 
@@ -177,7 +178,8 @@
 
 	for_each_present_cpu(cpu) {
 		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
-				"%d:%u ", cpu, per_cpu(cpu_stats, cpu).max);
+				"%d:%u ", cpu,
+				per_cpu(msm_perf_cpu_stats, cpu).max);
 	}
 	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
 	return cnt;
@@ -245,7 +247,7 @@
 {
 	struct cpufreq_policy *policy = data;
 	unsigned int cpu = policy->cpu;
-	struct cpu_status *cpu_st = &per_cpu(cpu_stats, cpu);
+	struct cpu_status *cpu_st = &per_cpu(msm_perf_cpu_stats, cpu);
 	unsigned int min = cpu_st->min, max = cpu_st->max;
 
 
@@ -444,7 +446,7 @@
 	cpufreq_register_notifier(&perf_cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
 
 	for_each_present_cpu(cpu)
-		per_cpu(cpu_stats, cpu).max = UINT_MAX;
+		per_cpu(msm_perf_cpu_stats, cpu).max = UINT_MAX;
 
 	rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE,
 		"msm_performance_cpu_hotplug",
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 7f63c1f..0b8b7d1 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -1121,6 +1121,7 @@
 	}
 
 	trace_pil_event("before_auth_reset", desc);
+	notify_before_auth_and_reset(desc->dev);
 	ret = desc->ops->auth_and_reset(desc);
 	if (ret) {
 		pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
diff --git a/drivers/soc/qcom/qbt_handler.c b/drivers/soc/qcom/qbt_handler.c
new file mode 100644
index 0000000..0c93e1b
--- /dev/null
+++ b/drivers/soc/qcom/qbt_handler.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define DEBUG
+#define pr_fmt(fmt) "qbt:%s: " fmt, __func__
+
+#include <linux/input.h>
+#include <linux/ktime.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/of_gpio.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <uapi/linux/qbt_handler.h>
+
+#define QBT_DEV "qbt"
+#define MAX_FW_EVENTS 128
+#define MINOR_NUM_FD 0
+#define MINOR_NUM_IPC 1
+#define QBT_INPUT_DEV_NAME "qbt_key_input"
+#define QBT_INPUT_DEV_VERSION 0x0100
+
+struct finger_detect_gpio {
+	int gpio;
+	int active_low;
+	int irq;
+	struct work_struct work;
+	int last_gpio_state;
+	int event_reported;
+	bool irq_enabled;
+};
+
+struct fw_event_desc {
+	enum qbt_fw_event ev;
+};
+
+struct fw_ipc_info {
+	int gpio;
+	int irq;
+	bool irq_enabled;
+	struct work_struct work;
+};
+
+struct qbt_drvdata {
+	struct class	*qbt_class;
+	struct cdev	qbt_fd_cdev;
+	struct cdev	qbt_ipc_cdev;
+	struct input_dev	*in_dev;
+	struct device	*dev;
+	char		*qbt_node;
+	atomic_t	fd_available;
+	atomic_t	ipc_available;
+	struct mutex	mutex;
+	struct mutex	fd_events_mutex;
+	struct mutex	ipc_events_mutex;
+	struct fw_ipc_info	fw_ipc;
+	struct finger_detect_gpio fd_gpio;
+	DECLARE_KFIFO(fd_events, struct fw_event_desc, MAX_FW_EVENTS);
+	DECLARE_KFIFO(ipc_events, struct fw_event_desc, MAX_FW_EVENTS);
+	wait_queue_head_t read_wait_queue_fd;
+	wait_queue_head_t read_wait_queue_ipc;
+	bool is_wuhb_connected;
+};
+
+/**
+ * qbt_open() - Function called when user space opens device.
+ * Successful if driver not currently open.
+ * @inode:	ptr to inode object
+ * @file:	ptr to file object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_open(struct inode *inode, struct file *file)
+{
+	struct qbt_drvdata *drvdata = NULL;
+	int rc = 0;
+	int minor_no = -1;
+
+	if (!inode || !inode->i_cdev || !file) {
+		pr_err("NULL pointer passed\n");
+		return -EINVAL;
+	}
+	minor_no = iminor(inode);
+	if (minor_no == MINOR_NUM_FD) {
+		drvdata = container_of(inode->i_cdev,
+				struct qbt_drvdata, qbt_fd_cdev);
+	} else if (minor_no == MINOR_NUM_IPC) {
+		drvdata = container_of(inode->i_cdev,
+				struct qbt_drvdata, qbt_ipc_cdev);
+	} else {
+		pr_err("Invalid minor number\n");
+		return -EINVAL;
+	}
+
+	file->private_data = drvdata;
+
+	pr_debug("entry minor_no=%d\n", minor_no);
+
+	/* disallowing concurrent opens */
+	if (minor_no == MINOR_NUM_FD &&
+			!atomic_dec_and_test(&drvdata->fd_available)) {
+		atomic_inc(&drvdata->fd_available);
+		rc = -EBUSY;
+	} else if (minor_no == MINOR_NUM_IPC &&
+			!atomic_dec_and_test(&drvdata->ipc_available)) {
+		atomic_inc(&drvdata->ipc_available);
+		rc = -EBUSY;
+	}
+
+	pr_debug("exit : %d\n", rc);
+	return rc;
+}
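+
+/* The fd_available/ipc_available counters above implement a lock-free
+ * single-open guard: each counter starts at 1, the first opener's
+ * atomic_dec_and_test() hits zero and wins, and any concurrent opener
+ * restores the counter and backs off with -EBUSY. In miniature
+ * (illustrative sketch, not part of this driver):
+ *
+ *	static atomic_t available = ATOMIC_INIT(1);
+ *
+ *	static int try_open(void)
+ *	{
+ *		if (!atomic_dec_and_test(&available)) {
+ *			atomic_inc(&available);	// undo our decrement
+ *			return -EBUSY;
+ *		}
+ *		return 0;	// paired with atomic_inc() on release
+ *	}
+ */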
+
+/**
+ * qbt_release() - Function called when user space closes device.
+ *
+ * @inode:	ptr to inode object
+ * @file:	ptr to file object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_release(struct inode *inode, struct file *file)
+{
+	struct qbt_drvdata *drvdata;
+	int minor_no = -1;
+
+	if (!file || !file->private_data || !inode) {
+		pr_err("NULL pointer passed\n");
+		return -EINVAL;
+	}
+	drvdata = file->private_data;
+	minor_no = iminor(inode);
+	if (minor_no == MINOR_NUM_FD) {
+		atomic_inc(&drvdata->fd_available);
+	} else if (minor_no == MINOR_NUM_IPC) {
+		atomic_inc(&drvdata->ipc_available);
+	} else {
+		pr_err("Invalid minor number\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * qbt_ioctl() - Function called when user space calls ioctl.
+ * @file:	struct file - not used
+ * @cmd:	cmd identifier such as QBT_IS_WUHB_CONNECTED
+ * @arg:	ptr to relevant structure: either qbt_app or
+ *              qbt_send_tz_cmd depending on which cmd is passed
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static long qbt_ioctl(
+		struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int rc = 0;
+	void __user *priv_arg = (void __user *)arg;
+	struct qbt_drvdata *drvdata;
+
+	if (!file || !file->private_data) {
+		pr_err("NULL pointer passed\n");
+		return -EINVAL;
+	}
+
+	drvdata = file->private_data;
+
+	if (IS_ERR(priv_arg)) {
+		dev_err(drvdata->dev, "%s: invalid user space pointer %lu\n",
+			__func__, arg);
+		return -EINVAL;
+	}
+
+	mutex_lock(&drvdata->mutex);
+
+	pr_debug("cmd received %d\n", cmd);
+
+	switch (cmd) {
+	case QBT_ENABLE_IPC:
+	{
+		if (!drvdata->fw_ipc.irq_enabled) {
+			enable_irq(drvdata->fw_ipc.irq);
+			drvdata->fw_ipc.irq_enabled = true;
+			pr_debug("%s: QBT_ENABLE_IPC\n", __func__);
+		}
+		break;
+	}
+	case QBT_DISABLE_IPC:
+	{
+		if (drvdata->fw_ipc.irq_enabled) {
+			disable_irq(drvdata->fw_ipc.irq);
+			drvdata->fw_ipc.irq_enabled = false;
+			pr_debug("%s: QBT_DISABLE_IPC\n", __func__);
+		}
+		break;
+	}
+	case QBT_ENABLE_FD:
+	{
+		if (drvdata->is_wuhb_connected &&
+				!drvdata->fd_gpio.irq_enabled) {
+			enable_irq(drvdata->fd_gpio.irq);
+			drvdata->fd_gpio.irq_enabled = true;
+			pr_debug("%s: QBT_ENABLE_FD\n", __func__);
+		}
+		break;
+	}
+	case QBT_DISABLE_FD:
+	{
+		if (drvdata->is_wuhb_connected &&
+				drvdata->fd_gpio.irq_enabled) {
+			disable_irq(drvdata->fd_gpio.irq);
+			drvdata->fd_gpio.irq_enabled = false;
+			pr_debug("%s: QBT_DISABLE_FD\n", __func__);
+		}
+		break;
+	}
+	case QBT_IS_WUHB_CONNECTED:
+	{
+		struct qbt_wuhb_connected_status wuhb_connected_status;
+
+		wuhb_connected_status.is_wuhb_connected =
+				drvdata->is_wuhb_connected;
+		rc = copy_to_user((void __user *)priv_arg,
+				&wuhb_connected_status,
+				sizeof(wuhb_connected_status));
+
+		if (rc != 0) {
+			pr_err("Failed to copy wuhb connected status: %d\n",
+					rc);
+			rc = -EFAULT;
+			goto end;
+		}
+
+		break;
+	}
+	case QBT_SEND_KEY_EVENT:
+	{
+		struct qbt_key_event key_event;
+
+		if (copy_from_user(&key_event, priv_arg,
+			sizeof(key_event))
+				!= 0) {
+			rc = -EFAULT;
+			pr_err("failed copy from user space %d\n", rc);
+			goto end;
+		}
+
+		input_event(drvdata->in_dev, EV_KEY,
+				key_event.key, key_event.value);
+		input_sync(drvdata->in_dev);
+		break;
+	}
+	default:
+		pr_err("invalid cmd %d\n", cmd);
+		rc = -ENOIOCTLCMD;
+		goto end;
+	}
+
+end:
+	mutex_unlock(&drvdata->mutex);
+	return rc;
+}
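+
+/*
+ * Illustrative user-space use of the ioctl interface (not part of the
+ * driver). The node path assumes QBT_DEV expands to "qbt"; the real
+ * name follows the "%s_fd" pattern created in qbt_dev_register().
+ *
+ *	struct qbt_wuhb_connected_status st;
+ *	int fd = open("/dev/qbt_fd", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, QBT_IS_WUHB_CONNECTED, &st) == 0)
+ *		printf("WUHB connected: %d\n", st.is_wuhb_connected);
+ */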
+
+static int get_events_fifo_len_locked(
+		struct qbt_drvdata *drvdata, int minor_no)
+{
+	int len = 0;
+
+	if (minor_no == MINOR_NUM_FD) {
+		mutex_lock(&drvdata->fd_events_mutex);
+		len = kfifo_len(&drvdata->fd_events);
+		mutex_unlock(&drvdata->fd_events_mutex);
+	} else if (minor_no == MINOR_NUM_IPC) {
+		mutex_lock(&drvdata->ipc_events_mutex);
+		len = kfifo_len(&drvdata->ipc_events);
+		mutex_unlock(&drvdata->ipc_events_mutex);
+	}
+
+	return len;
+}
+
+static ssize_t qbt_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct fw_event_desc fw_event;
+	struct qbt_drvdata *drvdata;
+	wait_queue_head_t *read_wait_queue = NULL;
+	int rc = 0;
+	int minor_no = -1;
+	int fifo_len;
+
+	pr_debug("entry with numBytes = %zd, minor_no = %d\n", cnt, minor_no);
+
+	if (!filp || !filp->private_data) {
+		pr_err("NULL pointer passed\n");
+		return -EINVAL;
+	}
+	drvdata = filp->private_data;
+
+	if (cnt < sizeof(fw_event.ev)) {
+		pr_err("Num bytes to read is too small\n");
+		return -EINVAL;
+	}
+
+	minor_no = iminor(filp->f_path.dentry->d_inode);
+	if (minor_no == MINOR_NUM_FD) {
+		read_wait_queue = &drvdata->read_wait_queue_fd;
+	} else if (minor_no == MINOR_NUM_IPC) {
+		read_wait_queue = &drvdata->read_wait_queue_ipc;
+	} else {
+		pr_err("Invalid minor number\n");
+		return -EINVAL;
+	}
+
+	fifo_len = get_events_fifo_len_locked(drvdata, minor_no);
+	while (fifo_len == 0) {
+		if (filp->f_flags & O_NONBLOCK) {
+			pr_debug("fw_events fifo: empty, returning\n");
+			return -EAGAIN;
+		}
+		pr_debug("fw_events fifo: empty, waiting\n");
+		if (wait_event_interruptible(*read_wait_queue,
+				(get_events_fifo_len_locked(
+				drvdata, minor_no) > 0)))
+			return -ERESTARTSYS;
+		fifo_len = get_events_fifo_len_locked(drvdata, minor_no);
+	}
+
+	if (minor_no == MINOR_NUM_FD) {
+		mutex_lock(&drvdata->fd_events_mutex);
+		rc = kfifo_get(&drvdata->fd_events, &fw_event);
+		mutex_unlock(&drvdata->fd_events_mutex);
+	} else if (minor_no == MINOR_NUM_IPC) {
+		mutex_lock(&drvdata->ipc_events_mutex);
+		rc = kfifo_get(&drvdata->ipc_events, &fw_event);
+		mutex_unlock(&drvdata->ipc_events_mutex);
+	} else {
+		pr_err("Invalid minor number\n");
+	}
+
+	if (!rc) {
+		pr_err("fw_events fifo: unexpectedly empty\n");
+		return -EINVAL;
+	}
+
+	pr_debug("Firmware event %d at minor no %d read at time %lu uS\n",
+			(int)fw_event.ev, minor_no,
+			(unsigned long)ktime_to_us(ktime_get()));
+	if (copy_to_user(ubuf, &fw_event.ev, sizeof(fw_event.ev)) != 0)
+		return -EFAULT;
+
+	return sizeof(fw_event.ev);
+}
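+
+/*
+ * Illustrative user-space read loop (not part of the driver), again
+ * assuming QBT_DEV expands to "qbt". Each successful read yields one
+ * fw_event.ev value, blocking until an event is queued unless the
+ * file was opened with O_NONBLOCK.
+ *
+ *	int ev;
+ *	int fd = open("/dev/qbt_ipc", O_RDONLY);
+ *
+ *	while (read(fd, &ev, sizeof(ev)) >= 0)
+ *		printf("firmware event %d\n", ev);
+ */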
+
+static unsigned int qbt_poll(struct file *filp,
+	struct poll_table_struct *wait)
+{
+	struct qbt_drvdata *drvdata;
+	unsigned int mask = 0;
+	int minor_no = -1;
+
+	if (!filp || !filp->private_data) {
+		pr_err("NULL pointer passed\n");
+		return -EINVAL;
+	}
+	drvdata = filp->private_data;
+
+	minor_no = iminor(filp->f_path.dentry->d_inode);
+	if (minor_no == MINOR_NUM_FD) {
+		poll_wait(filp, &drvdata->read_wait_queue_fd, wait);
+		if (kfifo_len(&drvdata->fd_events) > 0)
+			mask |= (POLLIN | POLLRDNORM);
+	} else if (minor_no == MINOR_NUM_IPC) {
+		poll_wait(filp, &drvdata->read_wait_queue_ipc, wait);
+		if (kfifo_len(&drvdata->ipc_events) > 0)
+			mask |= (POLLIN | POLLRDNORM);
+	} else {
+		pr_err("Invalid minor number\n");
+		return -EINVAL;
+	}
+
+	return mask;
+}
+
+static const struct file_operations qbt_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qbt_ioctl,
+	.open = qbt_open,
+	.release = qbt_release,
+	.read = qbt_read,
+	.poll = qbt_poll
+};
+
+static int qbt_dev_register(struct qbt_drvdata *drvdata)
+{
+	dev_t dev_no, major_no;
+	int ret = 0;
+	size_t node_size;
+	char *node_name = QBT_DEV;
+	struct device *dev = drvdata->dev;
+	struct device *device;
+
+	node_size = strlen(node_name) + 1;
+
+	drvdata->qbt_node = devm_kzalloc(dev, node_size, GFP_KERNEL);
+	if (!drvdata->qbt_node) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	strlcpy(drvdata->qbt_node, node_name, node_size);
+
+	ret = alloc_chrdev_region(&dev_no, 0, 2, drvdata->qbt_node);
+	if (ret) {
+		pr_err("alloc_chrdev_region failed %d\n", ret);
+		goto err_alloc;
+	}
+	major_no = MAJOR(dev_no);
+
+	cdev_init(&drvdata->qbt_fd_cdev, &qbt_fops);
+
+	drvdata->qbt_fd_cdev.owner = THIS_MODULE;
+	ret = cdev_add(&drvdata->qbt_fd_cdev,
+			MKDEV(major_no, MINOR_NUM_FD), 1);
+	if (ret) {
+		pr_err("cdev_add failed for fd %d\n", ret);
+		goto err_cdev_add;
+	}
+	cdev_init(&drvdata->qbt_ipc_cdev, &qbt_fops);
+
+	drvdata->qbt_ipc_cdev.owner = THIS_MODULE;
+	ret = cdev_add(&drvdata->qbt_ipc_cdev,
+			MKDEV(major_no, MINOR_NUM_IPC), 1);
+	if (ret) {
+		pr_err("cdev_add failed for ipc %d\n", ret);
+		goto err_cdev_add;
+	}
+
+	drvdata->qbt_class = class_create(THIS_MODULE,
+					   drvdata->qbt_node);
+	if (IS_ERR(drvdata->qbt_class)) {
+		ret = PTR_ERR(drvdata->qbt_class);
+		pr_err("class_create failed %d\n", ret);
+		goto err_class_create;
+	}
+
+	device = device_create(drvdata->qbt_class, NULL,
+			       drvdata->qbt_fd_cdev.dev, drvdata,
+			       "%s_fd", drvdata->qbt_node);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		pr_err("fd device_create failed %d\n", ret);
+		goto err_dev_create;
+	}
+
+	device = device_create(drvdata->qbt_class, NULL,
+				drvdata->qbt_ipc_cdev.dev, drvdata,
+				"%s_ipc", drvdata->qbt_node);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		pr_err("ipc device_create failed %d\n", ret);
+		goto err_dev_create;
+	}
+
+	return 0;
+err_dev_create:
+	class_destroy(drvdata->qbt_class);
+err_class_create:
+	cdev_del(&drvdata->qbt_fd_cdev);
+	cdev_del(&drvdata->qbt_ipc_cdev);
+err_cdev_add:
+	unregister_chrdev_region(drvdata->qbt_fd_cdev.dev, 1);
+	unregister_chrdev_region(drvdata->qbt_ipc_cdev.dev, 1);
+err_alloc:
+	return ret;
+}
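+
+/*
+ * Both cdevs registered above share qbt_fops; qbt_open(), qbt_read()
+ * and qbt_poll() demultiplex with iminor(), so one driver instance
+ * exposes two nodes, <QBT_DEV>_fd (minor MINOR_NUM_FD) and
+ * <QBT_DEV>_ipc (minor MINOR_NUM_IPC), on top of the same drvdata.
+ */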
+
+/**
+ * qbt_create_input_device() - Function allocates an input
+ * device, configures it for key events and registers it
+ *
+ * @drvdata:	ptr to driver data
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_create_input_device(struct qbt_drvdata *drvdata)
+{
+	int rc = 0;
+
+	drvdata->in_dev = input_allocate_device();
+	if (drvdata->in_dev == NULL) {
+		dev_err(drvdata->dev, "%s: input_allocate_device() failed\n",
+			__func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	drvdata->in_dev->name = QBT_INPUT_DEV_NAME;
+	drvdata->in_dev->phys = NULL;
+	drvdata->in_dev->id.bustype = BUS_HOST;
+	drvdata->in_dev->id.vendor  = 0x0001;
+	drvdata->in_dev->id.product = 0x0001;
+	drvdata->in_dev->id.version = QBT_INPUT_DEV_VERSION;
+
+	drvdata->in_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+	drvdata->in_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+	drvdata->in_dev->keybit[BIT_WORD(KEY_HOMEPAGE)] |=
+		BIT_MASK(KEY_HOMEPAGE);
+	drvdata->in_dev->keybit[BIT_WORD(KEY_VOLUMEDOWN)] |=
+		BIT_MASK(KEY_VOLUMEDOWN);
+	drvdata->in_dev->keybit[BIT_WORD(KEY_POWER)] |=
+		BIT_MASK(KEY_POWER);
+
+	input_set_abs_params(drvdata->in_dev, ABS_X,
+			     0,
+			     1000,
+			     0, 0);
+	input_set_abs_params(drvdata->in_dev, ABS_Y,
+			     0,
+			     1000,
+			     0, 0);
+
+	rc = input_register_device(drvdata->in_dev);
+	if (rc) {
+		dev_err(drvdata->dev, "%s: input_reg_dev() failed %d\n",
+			__func__, rc);
+		goto end;
+	}
+
+end:
+	if (rc)
+		input_free_device(drvdata->in_dev);
+	return rc;
+}
+
+static void qbt_fd_report_event(struct qbt_drvdata *drvdata, int state)
+{
+	struct fw_event_desc fw_event;
+
+	if (!drvdata->is_wuhb_connected) {
+		pr_err("Skipping as WUHB_INT is disconnected\n");
+		return;
+	}
+
+	if (drvdata->fd_gpio.event_reported
+			&& state == drvdata->fd_gpio.last_gpio_state)
+		return;
+
+	pr_debug("gpio %d: report state %d current_time %lu uS\n",
+		drvdata->fd_gpio.gpio, state,
+		(unsigned long)ktime_to_us(ktime_get()));
+
+	drvdata->fd_gpio.event_reported = 1;
+	drvdata->fd_gpio.last_gpio_state = state;
+
+	fw_event.ev = (state ? FW_EVENT_FINGER_DOWN : FW_EVENT_FINGER_UP);
+
+	mutex_lock(&drvdata->fd_events_mutex);
+
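+	/*
+	 * Only the most recent finger-detect state is of interest, so
+	 * discard anything still queued before adding the new event.
+	 */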
+	kfifo_reset(&drvdata->fd_events);
+
+	if (!kfifo_put(&drvdata->fd_events, fw_event)) {
+		pr_err("FD events fifo: error adding item\n");
+	} else {
+		pr_debug("FD event %d queued at time %lu uS\n", fw_event.ev,
+				(unsigned long)ktime_to_us(ktime_get()));
+	}
+	mutex_unlock(&drvdata->fd_events_mutex);
+	wake_up_interruptible(&drvdata->read_wait_queue_fd);
+}
+
+static void qbt_gpio_work_func(struct work_struct *work)
+{
+	int state;
+	struct qbt_drvdata *drvdata;
+
+	if (!work) {
+		pr_err("NULL pointer passed\n");
+		return;
+	}
+
+	drvdata = container_of(work, struct qbt_drvdata, fd_gpio.work);
+
+	state = (__gpio_get_value(drvdata->fd_gpio.gpio) ? 1 : 0)
+			^ drvdata->fd_gpio.active_low;
+
+	qbt_fd_report_event(drvdata, state);
+
+	pm_relax(drvdata->dev);
+}
+
+static irqreturn_t qbt_gpio_isr(int irq, void *dev_id)
+{
+	struct qbt_drvdata *drvdata = dev_id;
+
+	if (!drvdata) {
+		pr_err("NULL pointer passed\n");
+		return IRQ_HANDLED;
+	}
+
+	if (irq != drvdata->fd_gpio.irq) {
+		pr_warn("invalid irq %d (expected %d)\n",
+			irq, drvdata->fd_gpio.irq);
+		return IRQ_HANDLED;
+	}
+
+	pr_debug("FD event received at time %lu uS\n",
+			(unsigned long)ktime_to_us(ktime_get()));
+
+	pm_stay_awake(drvdata->dev);
+	schedule_work(&drvdata->fd_gpio.work);
+
+	return IRQ_HANDLED;
+}
+
+static void qbt_irq_report_event(struct work_struct *work)
+{
+	struct qbt_drvdata *drvdata;
+	struct fw_event_desc fw_ev_des;
+
+	if (!work) {
+		pr_err("NULL pointer passed\n");
+		return;
+	}
+	drvdata = container_of(work, struct qbt_drvdata, fw_ipc.work);
+
+	fw_ev_des.ev = FW_EVENT_IPC;
+	mutex_lock(&drvdata->ipc_events_mutex);
+	if (!kfifo_put(&drvdata->ipc_events, fw_ev_des)) {
+		pr_err("ipc events: fifo full, drop event %d\n",
+				(int) fw_ev_des.ev);
+	} else {
+		pr_debug("IPC event %d queued at time %lu uS\n", fw_ev_des.ev,
+				(unsigned long)ktime_to_us(ktime_get()));
+	}
+	mutex_unlock(&drvdata->ipc_events_mutex);
+	wake_up_interruptible(&drvdata->read_wait_queue_ipc);
+	pm_relax(drvdata->dev);
+}
+
+/**
+ * qbt_ipc_irq_handler() - function processes IPC
+ * interrupts on its own thread
+ * @irq:	the interrupt that occurred
+ * @dev_id: pointer to the qbt_drvdata
+ *
+ * Return: IRQ_HANDLED when complete
+ */
+static irqreturn_t qbt_ipc_irq_handler(int irq, void *dev_id)
+{
+	struct qbt_drvdata *drvdata = (struct qbt_drvdata *)dev_id;
+
+	if (!drvdata) {
+		pr_err("NULL pointer passed\n");
+		return IRQ_HANDLED;
+	}
+
+	if (irq != drvdata->fw_ipc.irq) {
+		pr_warn("invalid irq %d (expected %d)\n",
+			irq, drvdata->fw_ipc.irq);
+		return IRQ_HANDLED;
+	}
+
+	pr_debug("IPC event received at time %lu uS\n",
+			(unsigned long)ktime_to_us(ktime_get()));
+
+	pm_stay_awake(drvdata->dev);
+	schedule_work(&drvdata->fw_ipc.work);
+
+	return IRQ_HANDLED;
+}
+
+static int setup_fd_gpio_irq(struct platform_device *pdev,
+		struct qbt_drvdata *drvdata)
+{
+	int rc = 0;
+	int irq;
+	const char *desc = "qbt_finger_detect";
+
+	if (!drvdata->is_wuhb_connected) {
+		pr_err("Skipping as WUHB_INT is disconnected\n");
+		goto end;
+	}
+
+	rc = devm_gpio_request_one(&pdev->dev, drvdata->fd_gpio.gpio,
+		GPIOF_IN, desc);
+	if (rc < 0) {
+		pr_err("failed to request gpio %d, error %d\n",
+			drvdata->fd_gpio.gpio, rc);
+		goto end;
+	}
+
+	irq = gpio_to_irq(drvdata->fd_gpio.gpio);
+	if (irq < 0) {
+		rc = irq;
+		pr_err("unable to get irq number for gpio %d, error %d\n",
+			drvdata->fd_gpio.gpio, rc);
+		goto end;
+	}
+
+	drvdata->fd_gpio.irq = irq;
+	INIT_WORK(&drvdata->fd_gpio.work, qbt_gpio_work_func);
+
+	rc = devm_request_any_context_irq(&pdev->dev, drvdata->fd_gpio.irq,
+		qbt_gpio_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+		desc, drvdata);
+
+	if (rc < 0) {
+		pr_err("unable to claim irq %d; error %d\n",
+			drvdata->fd_gpio.irq, rc);
+		goto end;
+	}
+
+end:
+	pr_debug("rc %d\n", rc);
+	return rc;
+}
+
+static int setup_ipc_irq(struct platform_device *pdev,
+	struct qbt_drvdata *drvdata)
+{
+	int rc = 0;
+	const char *desc = "qbt_ipc";
+
+	drvdata->fw_ipc.irq = gpio_to_irq(drvdata->fw_ipc.gpio);
+	INIT_WORK(&drvdata->fw_ipc.work, qbt_irq_report_event);
+	pr_debug("irq %d gpio %d\n",
+			drvdata->fw_ipc.irq, drvdata->fw_ipc.gpio);
+
+	if (drvdata->fw_ipc.irq < 0) {
+		rc = drvdata->fw_ipc.irq;
+		pr_err("no irq for gpio %d, error=%d\n",
+		  drvdata->fw_ipc.gpio, rc);
+		goto end;
+	}
+
+	rc = devm_gpio_request_one(&pdev->dev, drvdata->fw_ipc.gpio,
+			GPIOF_IN, desc);
+
+	if (rc < 0) {
+		pr_err("failed to request gpio %d, error %d\n",
+			drvdata->fw_ipc.gpio, rc);
+		goto end;
+	}
+
+	rc = devm_request_threaded_irq(&pdev->dev,
+		drvdata->fw_ipc.irq,
+		NULL,
+		qbt_ipc_irq_handler,
+		IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+		desc,
+		drvdata);
+
+	if (rc < 0) {
+		pr_err("failed to register for ipc irq %d, rc = %d\n",
+			drvdata->fw_ipc.irq, rc);
+		goto end;
+	}
+
+end:
+	return rc;
+}
+
+/**
+ * qbt_read_device_tree() - Function reads device tree
+ * properties into driver data
+ * @pdev:	ptr to platform device object
+ * @drvdata:	ptr to driver data
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_read_device_tree(struct platform_device *pdev,
+	struct qbt_drvdata *drvdata)
+{
+	int rc = 0;
+	int gpio;
+	enum of_gpio_flags flags;
+
+	/* read IPC gpio */
+	drvdata->fw_ipc.gpio = of_get_named_gpio(pdev->dev.of_node,
+		"qcom,ipc-gpio", 0);
+	if (drvdata->fw_ipc.gpio < 0) {
+		rc = drvdata->fw_ipc.gpio;
+		pr_err("ipc gpio not found, error=%d\n", rc);
+		goto end;
+	}
+
+	gpio = of_get_named_gpio_flags(pdev->dev.of_node,
+				"qcom,finger-detect-gpio", 0, &flags);
+	if (gpio < 0) {
+		pr_err("failed to get gpio flags\n");
+		drvdata->is_wuhb_connected = 0;
+		goto end;
+	}
+
+	drvdata->is_wuhb_connected = 1;
+	drvdata->fd_gpio.gpio = gpio;
+	drvdata->fd_gpio.active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+end:
+	return rc;
+}
+
+/**
+ * qbt_probe() - Function loads hardware config from device tree
+ * @pdev:	ptr to platform device object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct qbt_drvdata *drvdata;
+	int rc = 0;
+
+	pr_debug("entry\n");
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	rc = qbt_read_device_tree(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+
+	atomic_set(&drvdata->fd_available, 1);
+	atomic_set(&drvdata->ipc_available, 1);
+
+	mutex_init(&drvdata->mutex);
+	mutex_init(&drvdata->fd_events_mutex);
+	mutex_init(&drvdata->ipc_events_mutex);
+
+	rc = qbt_dev_register(drvdata);
+	if (rc < 0)
+		goto end;
+	rc = qbt_create_input_device(drvdata);
+	if (rc < 0)
+		goto end;
+	INIT_KFIFO(drvdata->fd_events);
+	INIT_KFIFO(drvdata->ipc_events);
+	init_waitqueue_head(&drvdata->read_wait_queue_fd);
+	init_waitqueue_head(&drvdata->read_wait_queue_ipc);
+
+	rc = setup_fd_gpio_irq(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+	drvdata->fd_gpio.irq_enabled = false;
+	disable_irq(drvdata->fd_gpio.irq);
+
+	rc = setup_ipc_irq(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+	drvdata->fw_ipc.irq_enabled = false;
+	disable_irq(drvdata->fw_ipc.irq);
+
+	rc = device_init_wakeup(&pdev->dev, 1);
+	if (rc < 0)
+		goto end;
+
+end:
+	pr_debug("exit : %d\n", rc);
+	return rc;
+}
+
+static int qbt_remove(struct platform_device *pdev)
+{
+	struct qbt_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	mutex_destroy(&drvdata->mutex);
+	mutex_destroy(&drvdata->fd_events_mutex);
+	mutex_destroy(&drvdata->ipc_events_mutex);
+
+	device_destroy(drvdata->qbt_class, drvdata->qbt_fd_cdev.dev);
+	device_destroy(drvdata->qbt_class, drvdata->qbt_ipc_cdev.dev);
+
+	class_destroy(drvdata->qbt_class);
+	cdev_del(&drvdata->qbt_fd_cdev);
+	cdev_del(&drvdata->qbt_ipc_cdev);
+	unregister_chrdev_region(drvdata->qbt_fd_cdev.dev, 1);
+	unregister_chrdev_region(drvdata->qbt_ipc_cdev.dev, 1);
+
+	device_init_wakeup(&pdev->dev, 0);
+
+	return 0;
+}
+
+static int qbt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int rc = 0;
+	struct qbt_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	/*
+	 * Refuse suspend while the driver is in the middle of a TZ call.
+	 * The purpose of this driver is to ensure that the clocks are on
+	 * while making a TZ call, so if the mutex is already held a call
+	 * is in flight and suspend is rejected.
+	 */
+	if (!mutex_trylock(&drvdata->mutex))
+		return -EBUSY;
+
+	if (drvdata->is_wuhb_connected)
+		enable_irq_wake(drvdata->fd_gpio.irq);
+
+	enable_irq_wake(drvdata->fw_ipc.irq);
+
+	mutex_unlock(&drvdata->mutex);
+
+	return rc;
+}
+
+static int qbt_resume(struct platform_device *pdev)
+{
+	struct qbt_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	if (drvdata->is_wuhb_connected)
+		disable_irq_wake(drvdata->fd_gpio.irq);
+
+	disable_irq_wake(drvdata->fw_ipc.irq);
+
+	return 0;
+}
+
+static const struct of_device_id qbt_match[] = {
+	{ .compatible = "qcom,qbt-handler" },
+	{}
+};
+
+static struct platform_driver qbt_plat_driver = {
+	.probe = qbt_probe,
+	.remove = qbt_remove,
+	.suspend = qbt_suspend,
+	.resume = qbt_resume,
+	.driver = {
+		.name = "qbt_handler",
+		.of_match_table = qbt_match,
+	},
+};
+
+module_platform_driver(qbt_plat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. QBT HANDLER");
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index ed2ee73..2d1e465 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #define KMSG_COMPONENT "QDSS diag bridge"
@@ -18,17 +18,24 @@
 #include <linux/mhi.h>
 #include <linux/usb/usb_qdss.h>
 #include <linux/of.h>
+#include <linux/delay.h>
 #include "qdss_bridge.h"
 
 #define MODULE_NAME "qdss_bridge"
-
-#define QDSS_BUF_SIZE		(16*1024)
-#define MHI_CLIENT_QDSS_IN	9
+#define INIT_STATUS -1
 
 /* Max number of objects needed */
 static int poolsize = 32;
 
 static struct class *mhi_class;
+static enum mhi_dev_state dev_state = INIT_STATUS;
+static enum mhi_ch curr_chan;
+
+static const char * const str_mhi_curr_chan[] = {
+		[QDSS]			= "QDSS",
+		[QDSS_HW]		= "IP_HW_QDSS",
+		[EMPTY]			= "EMPTY",
+};
 
 static const char * const str_mhi_transfer_mode[] = {
 		[MHI_TRANSFER_TYPE_USB]			= "usb",
@@ -136,6 +143,20 @@
 	return NULL;
 }
 
+static int qdss_check_entry(struct qdss_bridge_drvdata *drvdata)
+{
+	struct qdss_buf_tbl_lst *entry;
+	int ret = 0;
+
+	list_for_each_entry(entry, &drvdata->buf_tbl, link) {
+		if (atomic_read(&entry->available) == 0) {
+			ret = 1;
+			return ret;
+		}
+	}
+
+	return ret;
+}
 
 static void qdss_del_buf_tbl_entry(struct qdss_bridge_drvdata *drvdata,
 				void *buf)
@@ -152,7 +173,6 @@
 			return;
 		}
 	}
-
 	spin_unlock_bh(&drvdata->lock);
 }
 
@@ -206,6 +226,14 @@
 			str_mhi_transfer_mode[drvdata->mode]);
 }
 
+static ssize_t curr_chan_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	if (curr_chan < QDSS || curr_chan > EMPTY)
+		return -EINVAL;
+	return scnprintf(buf, PAGE_SIZE, "%s\n", str_mhi_curr_chan[curr_chan]);
+}
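+
+/*
+ * Reading curr_chan reports which MHI channel the bridge is currently
+ * bound to: one of "QDSS", "IP_HW_QDSS" or "EMPTY". Illustrative
+ * usage (the exact sysfs path depends on the created device name):
+ *
+ *	$ cat /sys/class/.../curr_chan
+ *	IP_HW_QDSS
+ */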
+
 static ssize_t mode_store(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t size)
@@ -275,6 +303,7 @@
 }
 
 static DEVICE_ATTR_RW(mode);
+static DEVICE_ATTR_RO(curr_chan);
 
 static void mhi_read_work_fn(struct work_struct *work)
 {
@@ -776,6 +805,13 @@
 	drvdata = mhi_dev->priv_data;
 	if (!drvdata)
 		return;
+
+	pr_debug("remove dev state: %d\n", mhi_dev->mhi_cntrl->dev_state);
+
+	dev_state = mhi_dev->mhi_cntrl->dev_state;
+	if (mhi_dev->mhi_cntrl->dev_state != MHI_STATE_RESET)
+		curr_chan = EMPTY;
+
 	spin_lock_bh(&drvdata->lock);
 	if (drvdata->opened == ENABLE) {
 		drvdata->opened = SSR;
@@ -787,9 +823,11 @@
 			spin_unlock_bh(&drvdata->lock);
 			if (drvdata->usb_ch && drvdata->usb_ch->priv_usb)
 				usb_qdss_close(drvdata->usb_ch);
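+			/*
+			 * Poll until every buffer-table entry has been
+			 * handed back (qdss_check_entry() returns 0)
+			 * before closing the MHI channel below.
+			 */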
+			do {
+				msleep(20);
+			} while (qdss_check_entry(drvdata));
 		}
 		mhi_ch_close(drvdata);
-
 	} else
 		spin_unlock_bh(&drvdata->lock);
 
@@ -823,11 +861,40 @@
 				const struct mhi_device_id *id)
 {
 	int ret;
+	bool def = false;
 	unsigned int baseminor = 0;
 	unsigned int count = 1;
 	struct qdss_bridge_drvdata *drvdata;
+	struct device_node *of_node = mhi_dev->dev.of_node;
 	dev_t dev;
 
+	pr_debug("probe dev state: %d chan: %s curr_chan: %d\n",
+		  mhi_dev->mhi_cntrl->dev_state,
+		  id->chan,
+		  curr_chan);
+
+	def = of_property_read_bool(of_node, "mhi,default-channel");
+	if (dev_state == INIT_STATUS) {
+		if (!def)
+			return -EINVAL;
+		if (!strcmp(id->chan, "QDSS"))
+			curr_chan = QDSS;
+		if (!strcmp(id->chan, "QDSS_HW"))
+			curr_chan = QDSS_HW;
+	} else if (dev_state == MHI_STATE_RESET) {
+		if (strcmp(id->chan, str_mhi_curr_chan[curr_chan]))
+			return -EINVAL;
+	} else {
+		if (curr_chan != EMPTY) {
+			pr_err("Need unbind another channel before bind.\n");
+			return -EINVAL;
+		}
+		if (!strcmp(id->chan, "QDSS"))
+			curr_chan = QDSS;
+		if (!strcmp(id->chan, "QDSS_HW"))
+			curr_chan = QDSS_HW;
+	}
+
 	drvdata = devm_kzalloc(&mhi_dev->dev, sizeof(*drvdata), GFP_KERNEL);
 	if (!drvdata) {
 		ret = -ENOMEM;
@@ -865,7 +932,12 @@
 
 	ret = device_create_file(drvdata->dev, &dev_attr_mode);
 	if (ret) {
-		pr_err("sysfs node create failed error:%d\n", ret);
+		pr_err("mode sysfs node create failed error:%d\n", ret);
+		goto exit_destroy_device;
+	}
+	ret = device_create_file(drvdata->dev, &dev_attr_curr_chan);
+	if (ret) {
+		pr_err("curr_chan sysfs node create failed error:%d\n", ret);
 		goto exit_destroy_device;
 	}
 
@@ -891,6 +963,7 @@
 
 static const struct mhi_device_id qdss_mhi_match_table[] = {
 	{ .chan = "QDSS", .driver_data = 0x4000 },
+	{ .chan = "IP_HW_QDSS", .driver_data = 0x4000 },
 	{},
 };
 
diff --git a/drivers/soc/qcom/qdss_bridge.h b/drivers/soc/qcom/qdss_bridge.h
index 0967aea..81f096f 100644
--- a/drivers/soc/qcom/qdss_bridge.h
+++ b/drivers/soc/qcom/qdss_bridge.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _QDSS_BRIDGE_H
@@ -30,6 +30,12 @@
 	SSR,
 };
 
+enum mhi_ch {
+	QDSS,
+	QDSS_HW,
+	EMPTY,
+};
+
 struct qdss_bridge_drvdata {
 	int alias;
 	enum open_status opened;
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
index 3aaab71..331d67f 100644
--- a/drivers/soc/qcom/qmi_encdec.c
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2019 The Linux Foundation. All rights reserved.
  * Copyright (C) 2017 Linaro Ltd.
  */
 #include <linux/slab.h>
@@ -534,8 +534,8 @@
 		decoded_bytes += rc;
 	}
 
-	if (string_len > temp_ei->elem_len) {
-		pr_err("%s: String len %d > Max Len %d\n",
+	if (string_len >= temp_ei->elem_len) {
+		pr_err("%s: String len %d >= Max Len %d\n",
 		       __func__, string_len, temp_ei->elem_len);
 		return -ETOOSMALL;
 	} else if (string_len > tlv_len) {
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 4400f51..a391dae 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -347,6 +347,13 @@
 
 	ret = wait_for_completion_timeout(&txn->completion, timeout);
 
+	mutex_lock(&txn->lock);
+	if (txn->result == -ENETRESET) {
+		mutex_unlock(&txn->lock);
+		return txn->result;
+	}
+	mutex_unlock(&txn->lock);
+
 	mutex_lock(&qmi->txn_lock);
 	mutex_lock(&txn->lock);
 	idr_remove(&qmi->txns, txn->id);
@@ -446,17 +453,18 @@
 	if (IS_ERR(sock))
 		return;
 
-	mutex_lock(&qmi->sock_lock);
-	sock_release(qmi->sock);
-	qmi->sock = NULL;
-	mutex_unlock(&qmi->sock_lock);
-
 	qmi_recv_del_server(qmi, -1, -1);
 
 	if (qmi->ops.net_reset)
 		qmi->ops.net_reset(qmi);
 
 	mutex_lock(&qmi->sock_lock);
+	/* qmi_handle_release() has already started */
+	if (!qmi->sock) {
+		sock_release(sock);
+		return;
+	}
+	sock_release(qmi->sock);
 	qmi->sock = sock;
 	qmi->sq = sq;
 	mutex_unlock(&qmi->sock_lock);
@@ -570,16 +578,21 @@
 
 static void qmi_data_ready(struct sock *sk)
 {
-	struct qmi_handle *qmi = sk->sk_user_data;
+	struct qmi_handle *qmi = NULL;
 
 	/*
 	 * This will be NULL if we receive data while being in
 	 * qmi_handle_release()
 	 */
-	if (!qmi)
+	read_lock_bh(&sk->sk_callback_lock);
+	qmi = sk->sk_user_data;
+	if (!qmi) {
+		read_unlock_bh(&sk->sk_callback_lock);
 		return;
+	}
 
 	queue_work(qmi->wq, &qmi->work);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static struct socket *qmi_sock_create(struct qmi_handle *qmi,
@@ -602,6 +615,7 @@
 	sock->sk->sk_user_data = qmi;
 	sock->sk->sk_data_ready = qmi_data_ready;
 	sock->sk->sk_error_report = qmi_data_ready;
+	sock->sk->sk_sndtimeo = HZ * 10;
 
 	return sock;
 }
@@ -682,21 +696,35 @@
  */
 void qmi_handle_release(struct qmi_handle *qmi)
 {
-	struct socket *sock = qmi->sock;
+	struct socket *sock;
 	struct qmi_service *svc, *tmp;
-
-	sock->sk->sk_user_data = NULL;
-	cancel_work_sync(&qmi->work);
-
-	qmi_recv_del_server(qmi, -1, -1);
+	struct qmi_txn *txn;
+	int txn_id;
 
 	mutex_lock(&qmi->sock_lock);
+	sock = qmi->sock;
+	write_lock_bh(&sock->sk->sk_callback_lock);
+	sock->sk->sk_user_data = NULL;
+	write_unlock_bh(&sock->sk->sk_callback_lock);
 	sock_release(sock);
 	qmi->sock = NULL;
 	mutex_unlock(&qmi->sock_lock);
 
+	cancel_work_sync(&qmi->work);
+
+	qmi_recv_del_server(qmi, -1, -1);
+
 	destroy_workqueue(qmi->wq);
 
+	mutex_lock(&qmi->txn_lock);
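+	/*
+	 * Fail any transactions still blocked in qmi_txn_wait(): mark
+	 * them -ENETRESET and complete them so their callers return
+	 * instead of timing out against a socket that no longer exists.
+	 */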
+	idr_for_each_entry(&qmi->txns, txn, txn_id) {
+		mutex_lock(&txn->lock);
+		idr_remove(&qmi->txns, txn->id);
+		txn->result = -ENETRESET;
+		complete(&txn->completion);
+		mutex_unlock(&txn->lock);
+	}
+	mutex_unlock(&qmi->txn_lock);
 	idr_destroy(&qmi->txns);
 
 	kfree(qmi->recv_buf);
@@ -761,7 +789,7 @@
 	if (qmi->sock) {
 		ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len);
 		if (ret < 0)
-			pr_err("failed to send QMI message\n");
+			pr_info("failed to send QMI message %d\n", ret);
 	} else {
 		ret = -EPIPE;
 	}
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 9e00f79..047de99 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <soc/qcom/qmi_rmnet.h>
@@ -25,13 +25,16 @@
 #define FLAG_POWERSAVE_MASK 0x0010
 #define DFC_MODE_MULTIQ 2
 
-unsigned int rmnet_wq_frequency __read_mostly = 4;
+unsigned int rmnet_wq_frequency __read_mostly = 1000;
 
 #define PS_WORK_ACTIVE_BIT 0
-#define PS_INTERVAL (((!rmnet_wq_frequency) ? 1 : rmnet_wq_frequency) * HZ)
+#define PS_INTERVAL (((!rmnet_wq_frequency) ?                             \
+					1 : rmnet_wq_frequency/10) * (HZ/100))
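+/*
+ * With the default rmnet_wq_frequency of 1000 this works out to
+ * (1000 / 10) * (HZ / 100) == HZ (modulo the integer truncation of
+ * HZ / 100), i.e. the power-save worker runs about once per second.
+ */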
 #define NO_DELAY (0x0000 * HZ)
 
+#ifdef CONFIG_QCOM_QMI_DFC
 static unsigned int qmi_rmnet_scale_factor = 5;
+#endif
 
 struct qmi_elem_info data_ep_id_type_v01_ei[] = {
 	{
@@ -74,8 +77,8 @@
 		return NULL;
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
-		if (qmi->fc_info[i].dfc_client)
-			return qmi->fc_info[i].dfc_client;
+		if (qmi->dfc_clients[i])
+			return qmi->dfc_clients[i];
 	}
 
 	return NULL;
@@ -90,6 +93,22 @@
 	return qmi_rmnet_has_dfc_client(qmi) ? 1 : 0;
 }
 
+static int
+qmi_rmnet_has_pending(struct qmi_info *qmi)
+{
+	int i;
+
+	if (qmi->wda_pending)
+		return 1;
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		if (qmi->dfc_pending[i])
+			return 1;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_QCOM_QMI_DFC
 static void
 qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
@@ -295,6 +314,17 @@
 
 	return 0;
 }
+
+static void qmi_rmnet_query_flows(struct qmi_info *qmi)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		if (qmi->dfc_clients[i])
+			dfc_qmi_query_flow(qmi->dfc_clients[i]);
+	}
+}
+
 #else
 static inline void
 qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
@@ -325,12 +355,17 @@
 {
 	return -EINVAL;
 }
+
+static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
+{
+}
 #endif
 
 static int
 qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 {
 	int idx, rc, err = 0;
+	struct svc_info svc;
 
 	ASSERT_RTNL();
 
@@ -341,7 +376,7 @@
 	idx = (tcm->tcm_handle == 0) ? 0 : 1;
 
 	if (!qmi) {
-		qmi = kzalloc(sizeof(struct qmi_info), GFP_KERNEL);
+		qmi = kzalloc(sizeof(struct qmi_info), GFP_ATOMIC);
 		if (!qmi)
 			return -ENOMEM;
 
@@ -349,20 +384,20 @@
 	}
 
 	qmi->flag = tcm->tcm_ifindex;
-	qmi->fc_info[idx].svc.instance = tcm->tcm_handle;
-	qmi->fc_info[idx].svc.ep_type = tcm->tcm_info;
-	qmi->fc_info[idx].svc.iface_id = tcm->tcm_parent;
+	svc.instance = tcm->tcm_handle;
+	svc.ep_type = tcm->tcm_info;
+	svc.iface_id = tcm->tcm_parent;
 
 	if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) &&
-	    (qmi->fc_info[idx].dfc_client == NULL)) {
-		rc = dfc_qmi_client_init(port, idx, qmi);
+	    !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
+		rc = dfc_qmi_client_init(port, idx, &svc, qmi);
 		if (rc < 0)
 			err = rc;
 	}
 
 	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
-	    (idx == 0) && (qmi->wda_client == NULL)) {
-		rc = wda_qmi_client_init(port, tcm->tcm_handle);
+	    (idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
+		rc = wda_qmi_client_init(port, &svc, qmi);
 		if (rc < 0)
 			err = rc;
 	}
@@ -373,15 +408,22 @@
 static int
 __qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
 {
+	void *data = NULL;
 
 	ASSERT_RTNL();
 
-	if (qmi->fc_info[idx].dfc_client) {
-		dfc_qmi_client_exit(qmi->fc_info[idx].dfc_client);
-		qmi->fc_info[idx].dfc_client = NULL;
+	if (qmi->dfc_clients[idx])
+		data = qmi->dfc_clients[idx];
+	else if (qmi->dfc_pending[idx])
+		data = qmi->dfc_pending[idx];
+
+	if (data) {
+		dfc_qmi_client_exit(data);
+		qmi->dfc_clients[idx] = NULL;
+		qmi->dfc_pending[idx] = NULL;
 	}
 
-	if (!qmi_rmnet_has_client(qmi)) {
+	if (!qmi_rmnet_has_client(qmi) && !qmi_rmnet_has_pending(qmi)) {
 		rmnet_reset_qmi_pt(port);
 		kfree(qmi);
 		return 0;
@@ -394,15 +436,21 @@
 qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 {
 	int idx;
+	void *data = NULL;
 
 	/* client delete: tcm->tcm_handle - instance*/
 	idx = (tcm->tcm_handle == 0) ? 0 : 1;
 
 	ASSERT_RTNL();
+	if (qmi->wda_client)
+		data = qmi->wda_client;
+	else if (qmi->wda_pending)
+		data = qmi->wda_pending;
 
-	if ((idx == 0) && qmi->wda_client) {
-		wda_qmi_client_exit(qmi->wda_client);
+	if ((idx == 0) && data) {
+		wda_qmi_client_exit(data);
 		qmi->wda_client = NULL;
+		qmi->wda_pending = NULL;
 	}
 
 	__qmi_rmnet_delete_client(port, qmi, idx);
@@ -433,12 +481,15 @@
 			return;
 
 		if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) {
-			if (!qmi_rmnet_has_client(qmi)) {
-				kfree(qmi);
+			/* retrieve qmi again as it could have been changed */
+			qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+			if (qmi &&
+			    !qmi_rmnet_has_client(qmi) &&
+			    !qmi_rmnet_has_pending(qmi)) {
 				rmnet_reset_qmi_pt(port);
+				kfree(qmi);
 			}
-		}
-		if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
+		} else if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
 			qmi_rmnet_work_init(port);
 			rmnet_set_powersave_format(port);
 		}
@@ -446,7 +497,7 @@
 	case NLMSG_CLIENT_DELETE:
 		if (!qmi)
 			return;
-		if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
+		if (tcm->tcm_handle == 0) { /* instance 0 */
 			rmnet_clear_powersave_format(port);
 			qmi_rmnet_work_exit(port);
 		}
@@ -471,6 +522,7 @@
 {
 	struct qmi_info *qmi = (struct qmi_info *)qmi_pt;
 	int i;
+	void *data = NULL;
 
 	if (!qmi)
 		return;
@@ -479,9 +531,15 @@
 
 	qmi_rmnet_work_exit(port);
 
-	if (qmi->wda_client) {
-		wda_qmi_client_exit(qmi->wda_client);
+	if (qmi->wda_client)
+		data = qmi->wda_client;
+	else if (qmi->wda_pending)
+		data = qmi->wda_pending;
+
+	if (data) {
+		wda_qmi_client_exit(data);
 		qmi->wda_client = NULL;
+		qmi->wda_pending = NULL;
 	}
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -495,7 +553,7 @@
 {
 	struct qos_info *qos;
 	struct rmnet_bearer_map *bearer;
-	int do_wake = 0;
+	bool do_wake = false;
 
 	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
 	if (!qos)
@@ -504,14 +562,14 @@
 	spin_lock_bh(&qos->qos_lock);
 
 	list_for_each_entry(bearer, &qos->bearer_head, list) {
-		bearer->grant_before_ps = bearer->grant_size;
-		bearer->seq_before_ps = bearer->seq;
+		if (!bearer->grant_size)
+			do_wake = true;
 		bearer->grant_size = DEFAULT_GRANT;
-		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
+		bearer->grant_thresh = DEFAULT_GRANT;
 		bearer->seq = 0;
 		bearer->ack_req = 0;
-		bearer->ancillary = 0;
-		do_wake = 1;
+		bearer->tcp_bidir = false;
+		bearer->rat_switch = false;
 	}
 
 	if (do_wake) {
@@ -523,6 +581,31 @@
 }
 EXPORT_SYMBOL(qmi_rmnet_enable_all_flows);
 
+bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
+{
+	struct qos_info *qos;
+	struct rmnet_bearer_map *bearer;
+	bool ret = true;
+
+	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+	if (!qos)
+		return true;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		if (!bearer->grant_size) {
+			ret = false;
+			break;
+		}
+	}
+
+	spin_unlock_bh(&qos->qos_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);
+
 #ifdef CONFIG_QCOM_QMI_DFC
 void qmi_rmnet_burst_fc_check(struct net_device *dev,
 			      int ip_type, u32 mark, unsigned int len)
@@ -633,7 +716,7 @@
 #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
 static struct workqueue_struct  *rmnet_ps_wq;
 static struct rmnet_powersave_work *rmnet_work;
-static struct list_head ps_list;
+static LIST_HEAD(ps_list);
 
 struct rmnet_powersave_work {
 	struct delayed_work work;
@@ -711,18 +794,19 @@
 
 	if (enable)
 		dfc_qmi_wq_flush(qmi);
+	else
+		qmi_rmnet_query_flows(qmi);
 
 	return 0;
 }
 EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);
 
-void qmi_rmnet_work_restart(void *port)
+static void qmi_rmnet_work_restart(void *port)
 {
 	if (!rmnet_ps_wq || !rmnet_work)
 		return;
 	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
 }
-EXPORT_SYMBOL(qmi_rmnet_work_restart);
 
 static void qmi_rmnet_check_stats(struct work_struct *work)
 {
@@ -730,6 +814,7 @@
 	struct qmi_info *qmi;
 	u64 rxd, txd;
 	u64 rx, tx;
+	bool dl_msg_active;
 
 	real_work = container_of(to_delayed_work(work),
 				 struct rmnet_powersave_work, work);
@@ -742,17 +827,15 @@
 		return;
 
 	if (qmi->ps_enabled) {
-		/* Retry after small delay if qmi error
-		 * This resumes UL grants by disabling
-		 * powersave mode if successful.
-		 */
+		/* Register to get QMI DFC and DL marker */
 		if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) {
+			/* If this fails, retry quickly */
 			queue_delayed_work(rmnet_ps_wq,
 					   &real_work->work, HZ / 50);
 			return;
 
 		}
-		qmi->ps_enabled = 0;
+		qmi->ps_enabled = false;
 
 		if (rmnet_get_powersave_notif(real_work->port))
 			qmi_rmnet_ps_off_notify(real_work->port);
@@ -767,18 +850,29 @@
 	real_work->old_rx_pkts = rx;
 	real_work->old_tx_pkts = tx;
 
+	dl_msg_active = qmi->dl_msg_active;
+	qmi->dl_msg_active = false;
+
 	if (!rxd && !txd) {
+		/* If no DL msg received and there is a flow disabled,
+		 * (likely in RLF), no need to enter powersave
+		 */
+		if (!dl_msg_active &&
+		    !rmnet_all_flows_enabled(real_work->port))
+			goto end;
+
+		/* Deregister to suppress QMI DFC and DL marker */
 		if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) {
 			queue_delayed_work(rmnet_ps_wq,
 					   &real_work->work, PS_INTERVAL);
 			return;
 		}
-		qmi->ps_enabled = 1;
-		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+		qmi->ps_enabled = true;
 
-		/* Enable flow after clear the bit so a new
-		 * work can be triggered.
+		/* Clear the bit before enabling flow so pending packets
+		 * can trigger the work again
 		 */
+		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
 		rmnet_enable_all_flows(real_work->port);
 
 		if (rmnet_get_powersave_notif(real_work->port))
@@ -818,7 +912,6 @@
 		rmnet_ps_wq = NULL;
 		return;
 	}
-	INIT_LIST_HEAD(&ps_list);
 	INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
 	rmnet_work->port = port;
 	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
@@ -854,4 +947,16 @@
 	rmnet_work = NULL;
 }
 EXPORT_SYMBOL(qmi_rmnet_work_exit);
+
+void qmi_rmnet_set_dl_msg_active(void *port)
+{
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	if (unlikely(!qmi))
+		return;
+
+	qmi->dl_msg_active = true;
+}
+EXPORT_SYMBOL(qmi_rmnet_set_dl_msg_active);
 #endif
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index 0efff48..7fe4862 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _RMNET_QMI_I_H
@@ -14,7 +14,8 @@
 
 #define MAX_CLIENT_NUM 2
 #define MAX_FLOW_NUM 32
-#define DEFAULT_GRANT 10240
+#define DEFAULT_GRANT 1
+#define DFC_MAX_BEARERS_V01 16
 
 struct rmnet_flow_map {
 	struct list_head list;
@@ -32,9 +33,10 @@
 	u32 grant_thresh;
 	u16 seq;
 	u8  ack_req;
-	u32 grant_before_ps;
-	u16 seq_before_ps;
-	u32 ancillary;
+	u32 last_grant;
+	u16 last_seq;
+	bool tcp_bidir;
+	bool rat_switch;
 };
 
 struct svc_info {
@@ -43,11 +45,6 @@
 	u32 iface_id;
 };
 
-struct fc_info {
-	struct svc_info svc;
-	void *dfc_client;
-};
-
 struct qos_info {
 	u8 mux_id;
 	struct net_device *real_dev;
@@ -66,9 +63,12 @@
 struct qmi_info {
 	int flag;
 	void *wda_client;
-	struct fc_info fc_info[MAX_CLIENT_NUM];
+	void *wda_pending;
+	void *dfc_clients[MAX_CLIENT_NUM];
+	void *dfc_pending[MAX_CLIENT_NUM];
 	unsigned long ps_work_active;
-	int ps_enabled;
+	bool ps_enabled;
+	bool dl_msg_active;
 };
 
 enum data_ep_type_enum_v01 {
@@ -101,7 +101,8 @@
 
 unsigned int qmi_rmnet_grant_per(unsigned int grant);
 
-int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi);
+int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+			struct qmi_info *qmi);
 
 void dfc_qmi_client_exit(void *dfc_data);
 
@@ -112,6 +113,7 @@
 
 void dfc_qmi_wq_flush(struct qmi_info *qmi);
 
+void dfc_qmi_query_flow(void *dfc_data);
 #else
 static inline struct rmnet_flow_map *
 qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -121,13 +123,14 @@
 }
 
 static inline struct rmnet_bearer_map *
-qmi_rmnet_get_bearer_map(struct qos_info *qos_info, uint8_t bearer_id)
+qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id)
 {
 	return NULL;
 }
 
 static inline int
-dfc_qmi_client_init(void *port, int modem, struct qmi_info *qmi)
+dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+		    struct qmi_info *qmi)
 {
 	return -EINVAL;
 }
@@ -146,14 +149,21 @@
 dfc_qmi_wq_flush(struct qmi_info *qmi)
 {
 }
+
+static inline void
+dfc_qmi_query_flow(void *dfc_data)
+{
+}
 #endif
 
 #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
-int wda_qmi_client_init(void *port, uint32_t instance);
+int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi);
 void wda_qmi_client_exit(void *wda_data);
-int wda_set_powersave_mode(void *wda_data, uint8_t enable);
+int wda_set_powersave_mode(void *wda_data, u8 enable);
 #else
-static inline int wda_qmi_client_init(void *port, uint32_t instance)
+static inline int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
 {
 	return -EINVAL;
 }
@@ -162,7 +172,7 @@
 {
 }
 
-static inline int wda_set_powersave_mode(void *wda_data, uint8_t enable)
+static inline int wda_set_powersave_mode(void *wda_data, u8 enable)
 {
 	return -EINVAL;
 }
diff --git a/drivers/soc/qcom/qtee_shmbridge.c b/drivers/soc/qcom/qtee_shmbridge.c
index 13339b4..f021b7c 100644
--- a/drivers/soc/qcom/qtee_shmbridge.c
+++ b/drivers/soc/qcom/qtee_shmbridge.c
@@ -13,7 +13,6 @@
 #include <soc/qcom/scm.h>
 #include <soc/qcom/qseecomi.h>
 #include <soc/qcom/qtee_shmbridge.h>
-#include <soc/qcom/secure_buffer.h>
 
 #define DEFAULT_BRIDGE_SIZE	SZ_4M	/*4M*/
 /*
@@ -52,19 +51,26 @@
 	TZ_SYSCALL_CREATE_PARAM_ID_1( \
 	TZ_SYSCALL_PARAM_TYPE_VAL)
 
+#define MAXSHMVMS 4
 #define PERM_BITS 3
 #define VM_BITS 16
 #define SELF_OWNER_BIT 1
 #define SHM_NUM_VM_SHIFT 9
+#define SHM_VM_MASK 0xFFFF
+#define SHM_PERM_MASK 0x7
 
 #define VM_PERM_R PERM_READ
 #define VM_PERM_W PERM_WRITE
 
-/* ns_vmids = ns_vmid as destination number is only 1 */
-#define UPDATE_NS_VMIDS(ns_vmid)	((uint64_t)(ns_vmid))
+/* ns_vmids */
+#define UPDATE_NS_VMIDS(ns_vmids, id)	\
+				(((uint64_t)(ns_vmids) << VM_BITS) \
+				| ((uint64_t)(id) & SHM_VM_MASK))
 
-/* ns_perms = ns_vm_perm as destination number is only 1 */
-#define UPDATE_NS_PERMS(ns_vm_perm)	((uint64_t)(ns_vm_perm))
+/* ns_perms */
+#define UPDATE_NS_PERMS(ns_perms, perm)	\
+				(((uint64_t)(ns_perms) << PERM_BITS) \
+				| ((uint64_t)(perm) & SHM_PERM_MASK))
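+
+/*
+ * Illustrative packing: registering two NS VMs with ids A, B and
+ * permissions pA, pB folds down to ns_vmids = (A << 16) | B and
+ * ns_perms = (pA << 3) | pB; VM_BITS (16) per VM id, PERM_BITS (3)
+ * per permission, with the first VM in the upper field.
+ */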
 
 /* pfn_and_ns_perm_flags = paddr | ns_perms */
 #define UPDATE_PFN_AND_NS_PERM_FLAGS(paddr, ns_perms)	\
@@ -106,7 +112,7 @@
 	desc.arginfo = TZ_SHM_BRIDGE_ENABLE_PARAM_ID;
 	ret = scm_call2(TZ_SHM_BRIDGE_ENABLE, &desc);
 	if (ret) {
-		pr_err("Failed to enable shmbridge, rsp = %d, ret = %d\n",
+		pr_err("Failed to enable shmbridge, rsp = %lld, ret = %d\n",
 			desc.ret[0], ret);
 		return -EINVAL;
 	}
@@ -126,40 +132,44 @@
 int32_t qtee_shmbridge_register(
 		phys_addr_t paddr,
 		size_t size,
-		uint32_t ns_vmid,
-		uint32_t ns_vm_perm,
+		uint32_t *ns_vmid_list,
+		uint32_t *ns_vm_perm_list,
+		uint32_t ns_vmid_num,
 		uint32_t tz_perm,
 		uint64_t *handle)
 
 {
 	int32_t ret = 0;
 	uint64_t ns_perms = 0;
-	uint64_t destnum = 1;
+	uint64_t ns_vmids = 0;
 	struct scm_desc desc = {0};
+	int i = 0;
 
-	if (!handle) {
-		pr_err("shmb handle pointer is NULL\n");
+	if (!handle || !ns_vmid_list || !ns_vm_perm_list ||
+				ns_vmid_num > MAXSHMVMS) {
+		pr_err("invalid input parameters\n");
 		return -EINVAL;
 	}
-	pr_debug("%s: paddr %lx, size %zu, ns_vmid %x, ns_vm_perm %x, ns_perms %s, tz_perm %x\n",
-			__func__, (uint64_t)paddr, size, ns_vmid,
-			ns_vm_perm, ns_perms, tz_perm);
 
-	ns_perms = UPDATE_NS_PERMS(ns_vm_perm);
+	for (i = 0; i < ns_vmid_num; i++) {
+		ns_perms = UPDATE_NS_PERMS(ns_perms, ns_vm_perm_list[i]);
+		ns_vmids = UPDATE_NS_VMIDS(ns_vmids, ns_vmid_list[i]);
+	}
+
 	desc.arginfo = TZ_SHM_BRIDGE_CREATE_PARAM_ID;
 	desc.args[0] = UPDATE_PFN_AND_NS_PERM_FLAGS(paddr, ns_perms);
 	desc.args[1] = UPDATE_IPFN_AND_S_PERM_FLAGS(paddr, tz_perm);
-	desc.args[2] = UPDATE_SIZE_AND_FLAGS(size, destnum);
-	desc.args[3] = UPDATE_NS_VMIDS(ns_vmid);
+	desc.args[2] = UPDATE_SIZE_AND_FLAGS(size, ns_vmid_num);
+	desc.args[3] = ns_vmids;
 
-	pr_debug("%s: arginfo %lx, desc.args[0] %lx, args[1] %lx, args[2] %lx, args[3] %lx\n",
+	pr_debug("%s: arginfo %x, desc.args[0] %llx, args[1] %llx, args[2] %llx, args[3] %llx\n",
 			__func__, desc.arginfo, desc.args[0],
 			desc.args[1], desc.args[2], desc.args[3]);
 	ret = scm_call2(TZ_SHM_BRIDGE_CREATE, &desc);
 	if (ret || desc.ret[0]) {
-		pr_err("create shmbridge failed, ret = %d, status = %x\n",
+		pr_err("create shmbridge failed, ret = %d, status = %llx\n",
 				ret, desc.ret[0]);
-		return ret;
+		return -EINVAL;
 	}
 	*handle = desc.ret[1];
 	return 0;
@@ -191,7 +201,7 @@
 	unsigned long va;
 
 	if (size > DEFAULT_BRIDGE_SIZE) {
-		pr_err("requestd size %zu is larger than bridge size %zu\n",
+		pr_err("requestd size %zu is larger than bridge size %d\n",
 			size, DEFAULT_BRIDGE_SIZE);
 		ret = -EINVAL;
 		goto exit;
@@ -216,7 +226,7 @@
 	shm->paddr = gen_pool_virt_to_phys(default_bridge.genpool, va);
 	shm->size = size;
 
-	pr_debug("%s: shm->paddr %lx, size %zu\n",
+	pr_debug("%s: shm->paddr %llx, size %zu\n",
 			__func__, (uint64_t)shm->paddr, shm->size);
 
 exit:
@@ -242,6 +252,8 @@
 static int __init qtee_shmbridge_init(void)
 {
 	int ret = 0;
+	uint32_t ns_vm_ids[] = {VMID_HLOS};
+	uint32_t ns_vm_perms[] = {VM_PERM_R|VM_PERM_W};
 
 	if (default_bridge.vaddr) {
 		pr_warn("qtee shmbridge is already initialized\n");
@@ -264,8 +276,8 @@
 
 	/*register default bridge*/
 	ret = qtee_shmbridge_register(default_bridge.paddr,
-			default_bridge.size, VMID_HLOS,
-			VM_PERM_R|VM_PERM_W, VM_PERM_R|VM_PERM_W,
+			default_bridge.size, ns_vm_ids,
+			ns_vm_perms, 1, VM_PERM_R|VM_PERM_W,
 			&default_bridge.handle);
 	if (ret) {
 		pr_err("Failed to register default bridge, size %zu\n",
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
index 5de7d32..a4798fe 100644
--- a/drivers/soc/qcom/ramdump.c
+++ b/drivers/soc/qcom/ramdump.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -17,6 +17,7 @@
 #include <linux/elf.h>
 #include <linux/wait.h>
 #include <linux/cdev.h>
+#include <linux/atomic.h>
 #include <soc/qcom/ramdump.h>
 #include <linux/dma-mapping.h>
 #include <linux/of.h>
@@ -34,14 +35,22 @@
 #define MAX_STRTBL_SIZE 512
 #define MAX_NAME_LENGTH 16
 
+struct consumer_entry {
+	bool data_ready;
+	struct ramdump_device *rd_dev;
+	struct list_head list;
+};
+
 struct ramdump_device {
 	char name[256];
 
-	unsigned int data_ready;
-	unsigned int consumer_present;
+	unsigned int consumers;
+	atomic_t readers_left;
 	int ramdump_status;
 
 	struct completion ramdump_complete;
+	struct mutex consumer_lock;
+	struct list_head consumer_list;
 	struct cdev cdev;
 	struct device *dev;
 
@@ -58,20 +67,51 @@
 {
 	struct ramdump_device *rd_dev = container_of(inode->i_cdev,
 					struct ramdump_device, cdev);
-	rd_dev->consumer_present = 1;
+	struct consumer_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+	if (!entry)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&entry->list);
+	entry->rd_dev = rd_dev;
+	mutex_lock(&rd_dev->consumer_lock);
+	rd_dev->consumers++;
 	rd_dev->ramdump_status = 0;
-	filep->private_data = rd_dev;
+	list_add_tail(&entry->list, &rd_dev->consumer_list);
+	mutex_unlock(&rd_dev->consumer_lock);
+	filep->private_data = entry;
 	return 0;
 }
 
+static void reset_ramdump_entry(struct consumer_entry *entry)
+{
+	struct ramdump_device *rd_dev = entry->rd_dev;
+
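+	/*
+	 * The last reader to drop readers_left completes ramdump_complete,
+	 * letting the dump producer blocked in wait_for_completion_timeout()
+	 * finish the session.
+	 */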
+	entry->data_ready = false;
+	if (atomic_dec_return(&rd_dev->readers_left) == 0)
+		complete(&rd_dev->ramdump_complete);
+}
+
 static int ramdump_release(struct inode *inode, struct file *filep)
 {
 
 	struct ramdump_device *rd_dev = container_of(inode->i_cdev,
 					struct ramdump_device, cdev);
-	rd_dev->consumer_present = 0;
-	rd_dev->data_ready = 0;
-	complete(&rd_dev->ramdump_complete);
+	struct consumer_entry *entry = filep->private_data;
+
+	mutex_lock(&rd_dev->consumer_lock);
+	/*
+	 * Avoid double decrementing in cases where we finish reading the dump
+	 * and then close the file, but there are other readers that have not
+	 * yet finished.
+	 */
+	if (entry->data_ready)
+		reset_ramdump_entry(entry);
+	rd_dev->consumers--;
+	list_del(&entry->list);
+	mutex_unlock(&rd_dev->consumer_lock);
+	entry->rd_dev = NULL;
+	kfree(entry);
 	return 0;
 }
 
@@ -112,7 +152,8 @@
 static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
 			loff_t *pos)
 {
-	struct ramdump_device *rd_dev = filep->private_data;
+	struct consumer_entry *entry = filep->private_data;
+	struct ramdump_device *rd_dev = entry->rd_dev;
 	void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
 	unsigned long data_left = 0, bytes_before, bytes_after;
 	unsigned long addr = 0;
@@ -121,10 +162,10 @@
 	int ret = 0;
 	loff_t orig_pos = *pos;
 
-	if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
+	if ((filep->f_flags & O_NONBLOCK) && !entry->data_ready)
 		return -EAGAIN;
 
-	ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
+	ret = wait_event_interruptible(rd_dev->dump_wait_q, entry->data_ready);
 	if (ret)
 		return ret;
 
@@ -224,19 +265,19 @@
 		dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);
 
 	kfree(finalbuf);
-	rd_dev->data_ready = 0;
 	*pos = 0;
-	complete(&rd_dev->ramdump_complete);
+	reset_ramdump_entry(entry);
 	return ret;
 }
 
 static unsigned int ramdump_poll(struct file *filep,
 					struct poll_table_struct *wait)
 {
-	struct ramdump_device *rd_dev = filep->private_data;
+	struct consumer_entry *entry = filep->private_data;
+	struct ramdump_device *rd_dev = entry->rd_dev;
 	unsigned int mask = 0;
 
-	if (rd_dev->data_ready)
+	if (entry->data_ready)
 		mask |= (POLLIN | POLLRDNORM);
 
 	poll_wait(filep, &rd_dev->dump_wait_q, wait);
@@ -312,6 +353,7 @@
 			"for %s segments only will be dumped.", dev_name);
 	}
 
+	INIT_LIST_HEAD(&rd_dev->consumer_list);
 	init_waitqueue_head(&rd_dev->dump_wait_q);
 
 	rd_dev->dev = device_create(ramdump_class, parent,
@@ -324,6 +366,8 @@
 		goto fail_return_minor;
 	}
 
+	mutex_init(&rd_dev->consumer_lock);
+	atomic_set(&rd_dev->readers_left, 0);
 	cdev_init(&rd_dev->cdev, &ramdump_file_ops);
 
 	ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1);
@@ -336,6 +380,7 @@
 	return (void *)rd_dev;
 
 fail_cdev_add:
+	mutex_destroy(&rd_dev->consumer_lock);
 	device_unregister(rd_dev->dev);
 fail_return_minor:
 	ida_simple_remove(&rd_minor_id, minor);
@@ -365,12 +410,27 @@
 {
 	int ret, i;
 	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+	struct consumer_entry *entry;
 	Elf32_Phdr *phdr;
 	Elf32_Ehdr *ehdr;
 	unsigned long offset;
 
-	if (!rd_dev->consumer_present) {
+	/*
+	 * Acquire the consumer lock here, and hold the lock until we are done
+	 * preparing the data structures required for the ramdump session, and
+	 * have woken all readers. This essentially freezes the current readers
+	 * when the lock is taken here, such that the readers at that time are
+	 * the only ones that will participate in the ramdump session. After
+	 * the current list of readers has been awoken, new readers that add
+	 * themselves to the reader list will not participate in the current
+	 * ramdump session. This allows for the lock to be free while the
+	 * ramdump is occurring, which prevents stalling readers who want to
+	 * close the ramdump node or new readers that want to open it.
+	 */
+	mutex_lock(&rd_dev->consumer_lock);
+	if (!rd_dev->consumers) {
 		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
+		mutex_unlock(&rd_dev->consumer_lock);
 		return -EPIPE;
 	}
 
@@ -388,8 +448,10 @@
 				       sizeof(*phdr) * nsegments;
 		ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
 		rd_dev->elfcore_buf = (char *)ehdr;
-		if (!rd_dev->elfcore_buf)
+		if (!rd_dev->elfcore_buf) {
+			mutex_unlock(&rd_dev->consumer_lock);
 			return -ENOMEM;
+		}
 
 		memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
 		ehdr->e_ident[EI_CLASS] = ELFCLASS32;
@@ -415,13 +477,16 @@
 		}
 	}
 
-	rd_dev->data_ready = 1;
+	list_for_each_entry(entry, &rd_dev->consumer_list, list)
+		entry->data_ready = true;
 	rd_dev->ramdump_status = -1;
 
 	reinit_completion(&rd_dev->ramdump_complete);
+	atomic_set(&rd_dev->readers_left, rd_dev->consumers);
 
 	/* Tell userspace that the data is ready */
 	wake_up(&rd_dev->dump_wait_q);
+	mutex_unlock(&rd_dev->consumer_lock);
 
 	/* Wait (with a timeout) to let the ramdump complete */
 	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
@@ -434,7 +499,6 @@
 	} else
 		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
 
-	rd_dev->data_ready = 0;
 	rd_dev->elfcore_size = 0;
 	kfree(rd_dev->elfcore_buf);
 	rd_dev->elfcore_buf = NULL;
@@ -465,12 +529,27 @@
 {
 	int ret, i;
 	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+	struct consumer_entry *entry;
 	struct elfhdr *ehdr;
 	struct elf_shdr *shdr;
 	unsigned long offset, strtbl_off;
 
-	if (!rd_dev->consumer_present) {
+	/*
+	 * Acquire the consumer lock here, and hold the lock until we are done
+	 * preparing the data structures required for the ramdump session, and
+	 * have woken all readers. This essentially freezes the current readers
+	 * when the lock is taken here, such that the readers at that time are
+	 * the only ones that will participate in the ramdump session. After
+	 * the current list of readers has been awoken, new readers that add
+	 * themselves to the reader list will not participate in the current
+	 * ramdump session. This allows for the lock to be free while the
+	 * ramdump is occurring, which prevents stalling readers who want to
+	 * close the ramdump node or new readers that want to open it.
+	 */
+	mutex_lock(&rd_dev->consumer_lock);
+	if (!rd_dev->consumers) {
 		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
+		mutex_unlock(&rd_dev->consumer_lock);
 		return -EPIPE;
 	}
 
@@ -481,8 +560,10 @@
 			(sizeof(*shdr) * (nsegments + 2)) + MAX_STRTBL_SIZE;
 	ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
 	rd_dev->elfcore_buf = (char *)ehdr;
-	if (!rd_dev->elfcore_buf)
+	if (!rd_dev->elfcore_buf) {
+		mutex_unlock(&rd_dev->consumer_lock);
 		return -ENOMEM;
+	}
 
 	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
 	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
@@ -523,13 +604,16 @@
 	}
 	ehdr->e_shnum = nsegments + 2;
 
-	rd_dev->data_ready = 1;
+	list_for_each_entry(entry, &rd_dev->consumer_list, list)
+		entry->data_ready = true;
 	rd_dev->ramdump_status = -1;
 
 	reinit_completion(&rd_dev->ramdump_complete);
+	atomic_set(&rd_dev->readers_left, rd_dev->consumers);
 
 	/* Tell userspace that the data is ready */
 	wake_up(&rd_dev->dump_wait_q);
+	mutex_unlock(&rd_dev->consumer_lock);
 
 	/* Wait (with a timeout) to let the ramdump complete */
 	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
@@ -543,7 +627,6 @@
 		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
 	}
 
-	rd_dev->data_ready = 0;
 	rd_dev->elfcore_size = 0;
 	kfree(rd_dev->elfcore_buf);
 	rd_dev->elfcore_buf = NULL;
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
index dfc3e91..4b477cb 100644
--- a/drivers/soc/qcom/rpmh-internal.h
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 
@@ -95,6 +95,7 @@
  * @lock:       synchronize state of the controller
  * @client:     handle to the DRV's client.
  * @irq:        IRQ at gic
+ * @ipc_log_ctx: IPC logger handle
  */
 struct rsc_drv {
 	const char *name;
@@ -108,6 +109,7 @@
 	spinlock_t lock;
 	struct rpmh_ctrlr client;
 	int irq;
+	void *ipc_log_ctx;
 };
 
 extern bool rpmh_standalone;
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index ab950d8..7537d18 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
@@ -9,6 +9,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/ipc_logging.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/of.h>
@@ -27,6 +28,8 @@
 #define CREATE_TRACE_POINTS
 #include "trace-rpmh.h"
 
+#define RSC_DRV_IPC_LOG_SIZE		2
+
 #define RSC_DRV_TCS_OFFSET		672
 #define RSC_DRV_CMD_OFFSET		20
 
@@ -66,6 +69,13 @@
 #define RSC_PDC_DRV_DATA		0x38
 #define RSC_PDC_DATA_OFFSET		0x08
 
+#define ACCL_TYPE(addr)			((addr >> 16) & 0xF)
+#define NR_ACCL_TYPES			3
+
+static const char * const accl_str[] = {
+	"", "", "", "CLK", "VREG", "BUS",
+};
+
 bool rpmh_standalone;
 static struct rsc_drv *__rsc_drv[2];
 static int __rsc_count;
@@ -228,7 +238,8 @@
 		enable = TCS_AMC_MODE_ENABLE;
 		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
 		enable |= TCS_AMC_MODE_TRIGGER;
-		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+		write_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, enable);
+		ipc_log_string(drv->ipc_log_ctx, "TCS trigger: m=%d", tcs_id);
 	}
 }
 
@@ -280,6 +291,8 @@
 		}
 
 		trace_rpmh_tx_done(drv, i, req, err);
+		ipc_log_string(drv->ipc_log_ctx,
+			       "IRQ response: m=%d err=%d", i, err);
 
 		/*
 		 * if wake tcs was re-purposed for sending active
@@ -334,6 +347,10 @@
 		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
 		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
 		trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
+		ipc_log_string(drv->ipc_log_ctx,
+			       "TCS write: m=%d n=%d msgid=%#x addr=%#x data=%#x wait=%d",
+			       tcs_id, j, msgid, cmd->addr,
+			       cmd->data, cmd->wait);
 	}
 
 	write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
@@ -603,15 +620,19 @@
 {
 	int i;
 	void __iomem *addr = drv->base + RSC_PDC_DRV_DATA;
+	struct tcs_cmd *cmd;
 
 	if (!msg || !msg->cmds || msg->num_cmds != RSC_PDC_DATA_SIZE)
 		return -EINVAL;
 
 	for (i = 0; i < msg->num_cmds; i++) {
+		cmd = &msg->cmds[i];
 		/* Only data is write capable */
-		writel_relaxed(msg->cmds[i].data, addr);
-		trace_rpmh_send_msg(drv, RSC_PDC_DRV_DATA, i, 0,
-				    &msg->cmds[i]);
+		writel_relaxed(cmd->data, addr);
+		trace_rpmh_send_msg(drv, RSC_PDC_DRV_DATA, i, 0, cmd);
+		ipc_log_string(drv->ipc_log_ctx,
+			       "PDC write: n=%d addr=%#x data=%#x",
+			       i, cmd->addr, cmd->data);
 		addr += RSC_PDC_DATA_OFFSET;
 	}
 
@@ -630,11 +651,10 @@
 	return NULL;
 }
 
-static void print_tcs_info(struct rsc_drv *drv, int tcs_id)
+static void print_tcs_info(struct rsc_drv *drv, int tcs_id, unsigned long *accl)
 {
 	struct tcs_group *tcs_grp = get_tcs_from_index(drv, tcs_id);
 	const struct tcs_request *req = get_req_from_tcs(drv, tcs_id);
-	struct tcs_cmd *cmd;
 	unsigned long cmds_enabled;
 	u32 addr, data, msgid, sts, irq_sts;
 	bool in_use = test_bit(tcs_id, drv->tcs_in_use);
@@ -658,29 +678,17 @@
 		tcs_id, sts ? "IDLE" : "BUSY", data,
 		(irq_sts & BIT(tcs_id)) ? "COMPLETED" : "PENDING");
 
-	for (i = 0; i < req->num_cmds; i++) {
-		cmd = &req->cmds[i];
-		pr_warn("\tREQ=%d [addr=0x%x data=0x%x wait=0x%x]\n",
-			i, cmd->addr, cmd->data, cmd->wait);
-
-		if (i < MAX_CMDS_PER_TCS) {
-			addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, i);
-			data = read_tcs_reg(drv, RSC_DRV_CMD_DATA, tcs_id, i);
-			msgid = read_tcs_reg(drv, RSC_DRV_CMD_MSGID, tcs_id, i);
-			sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, tcs_id, i);
-			pr_warn("\tCMD=%d [addr=0x%x data=0x%x hdr=0x%x sts=0x%x enabled=%ld]\n",
-				i, addr, data, msgid, sts,
-				(cmds_enabled & BIT(i)));
-		}
-	}
-
-	for_each_set_bit_from(i, &cmds_enabled, MAX_CMDS_PER_TCS) {
+	for_each_set_bit(i, &cmds_enabled, MAX_CMDS_PER_TCS) {
 		addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, i);
 		data = read_tcs_reg(drv, RSC_DRV_CMD_DATA, tcs_id, i);
 		msgid = read_tcs_reg(drv, RSC_DRV_CMD_MSGID, tcs_id, i);
 		sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, tcs_id, i);
 		pr_warn("\tCMD=%d [addr=0x%x data=0x%x hdr=0x%x sts=0x%x enabled=1]\n",
 			i, addr, data, msgid, sts);
+		if (!(sts & CMD_STATUS_ISSUED))
+			continue;
+		if (!(sts & CMD_STATUS_COMPL))
+			*accl |= BIT(ACCL_TYPE(addr));
 	}
 }
 
@@ -690,6 +698,8 @@
 	bool irq_sts;
 	int i;
 	int busy = 0;
+	unsigned long accl = 0;
+	char str[20] = "";
 
 	pr_warn("RSC:%s\n", drv->name);
 
@@ -697,7 +707,7 @@
 		if (!test_bit(i, drv->tcs_in_use))
 			continue;
 		busy++;
-		print_tcs_info(drv, i);
+		print_tcs_info(drv, i, &accl);
 	}
 
 	if (!rsc_irq_data) {
@@ -709,6 +719,17 @@
 	pr_warn("HW IRQ %lu is %s at GIC\n", rsc_irq_data->hwirq,
 		irq_sts ? "PENDING" : "NOT PENDING");
 
+	for_each_set_bit(i, &accl, ARRAY_SIZE(accl_str)) {
+		strlcat(str, accl_str[i], sizeof(str));
+		strlcat(str, " ", sizeof(str));
+	}
+
+	if (busy && !irq_sts)
+		pr_warn("ERROR: Accelerator(s) { %s } at AOSS did not respond\n",
+			str);
+	else if (irq_sts)
+		pr_warn("ERROR: Possible lockup in Linux\n");
+
 	/*
 	 * The TCS(s) are busy waiting, we have no way to recover from this.
 	 * If this debug function is called, we assume it's because timeout
@@ -870,6 +891,9 @@
 	INIT_LIST_HEAD(&drv->client.cache);
 	INIT_LIST_HEAD(&drv->client.batch_cache);
 
+	drv->ipc_log_ctx = ipc_log_context_create(RSC_DRV_IPC_LOG_SIZE,
+						  drv->name, 0);
+
 	dev_set_drvdata(&pdev->dev, drv);
 	__rsc_drv[__rsc_count++] = drv;
 
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 3f1bcf2..4443e277 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -117,6 +117,7 @@
 	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
 						    msg);
 	struct completion *compl = rpm_msg->completion;
+	bool free = rpm_msg->needs_free;
 
 	rpm_msg->err = r;
 
@@ -131,7 +132,7 @@
 	complete(compl);
 
 exit:
-	if (rpm_msg->needs_free)
+	if (free)
 		kfree(rpm_msg);
 }
 
@@ -400,11 +401,12 @@
 {
 	struct batch_cache_req *req;
 	struct rpmh_request *rpm_msgs;
-	DECLARE_COMPLETION_ONSTACK(compl);
+	struct completion *compls;
 	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
 	unsigned long time_left;
 	int count = 0;
-	int ret, i, j;
+	int ret, i;
+	void *ptr;
 
 	if (!cmd || !n)
 		return -EINVAL;
@@ -421,10 +423,15 @@
 	if (!count)
 		return -EINVAL;
 
-	req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+	ptr = kzalloc(sizeof(*req) +
+		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
 		      GFP_ATOMIC);
-	if (!req)
+	if (!ptr)
 		return -ENOMEM;
+
+	req = ptr;
+	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
+
 	req->count = count;
 	rpm_msgs = req->rpm_msgs;
 
@@ -439,25 +446,26 @@
 	}
 
 	for (i = 0; i < count; i++) {
-		rpm_msgs[i].completion = &compl;
+		struct completion *compl = &compls[i];
+
+		init_completion(compl);
+		rpm_msgs[i].completion = compl;
 		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
 		if (ret) {
 			pr_err("Error(%d) sending RPMH message addr=%#x\n",
 			       ret, rpm_msgs[i].msg.cmds[0].addr);
-			for (j = i; j < count; j++)
-				rpmh_tx_done(&rpm_msgs[j].msg, ret);
 			break;
 		}
 	}
 
 	time_left = RPMH_TIMEOUT_MS;
-	for (i = 0; i < count; i++) {
-		time_left = wait_for_completion_timeout(&compl, time_left);
+	while (i--) {
+		time_left = wait_for_completion_timeout(&compls[i], time_left);
 		if (!time_left) {
 			/*
 			 * Better hope they never finish because they'll signal
-			 * the completion on our stack and that's bad once
-			 * we've returned from the function.
+			 * the completion that we're going to free once
+			 * we've returned from this function.
 			 */
 			rpmh_rsc_debug(ctrlr_to_drv(ctrlr));
 			ret = -ETIMEDOUT;
@@ -466,7 +474,7 @@
 	}
 
 exit:
-	kfree(req);
+	kfree(ptr);
 
 	return ret;
 }
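
The rpmh batch fix replaces the single on-stack completion, which a late responder could signal after the function had already returned, with one heap-allocated completion per message. Everything lives in one allocation, so a single kfree(ptr) cleans up no matter how far sending got. The layout, condensed from the hunk above:

	/* One block: | batch_cache_req | rpm_msgs[count] | compls[count] | */
	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	req    = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

The wait loop then walks backwards over only the messages that were actually sent (while (i--)), and the residual time_left from each wait_for_completion_timeout() carries into the next iteration, so the whole batch shares a single RPMH_TIMEOUT_MS budget.
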
diff --git a/drivers/soc/qcom/rq_stats.c b/drivers/soc/qcom/rq_stats.c
new file mode 100644
index 0000000..4906d97
--- /dev/null
+++ b/drivers/soc/qcom/rq_stats.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010-2015, 2017, 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/rq_stats.h>
+
+#define MAX_LONG_SIZE 24
+#define DEFAULT_DEF_TIMER_JIFFIES 5
+
+static void def_work_fn(struct work_struct *work)
+{
+	/* Notify polling threads on change of value */
+	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
+}
+
+static ssize_t show_def_timer_ms(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int64_t diff;
+	unsigned int udiff;
+
+	diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
+	do_div(diff, 1000 * 1000);
+	udiff = (unsigned int) diff;
+
+	return snprintf(buf, MAX_LONG_SIZE, "%u\n", udiff);
+}
+
+static ssize_t store_def_timer_ms(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int val = 0;
+
+	if (kstrtouint(buf, 0, &val))
+		return -EINVAL;
+
+	rq_info.def_timer_jiffies = msecs_to_jiffies(val);
+
+	rq_info.def_start_time = ktime_to_ns(ktime_get());
+	return count;
+}
+
+static struct kobj_attribute def_timer_ms_attr =
+	__ATTR(def_timer_ms, 0600, show_def_timer_ms,
+			store_def_timer_ms);
+
+static struct attribute *rq_attrs[] = {
+	&def_timer_ms_attr.attr,
+	NULL,
+};
+
+static struct attribute_group rq_attr_group = {
+	.attrs = rq_attrs,
+};
+
+static int init_rq_attribs(void)
+{
+	int err;
+
+	rq_info.attr_group = &rq_attr_group;
+
+	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
+	rq_info.kobj = kobject_create_and_add("rq-stats",
+			&get_cpu_device(0)->kobj);
+	if (!rq_info.kobj)
+		return -ENOMEM;
+
+	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
+	if (err)
+		kobject_put(rq_info.kobj);
+	else
+		kobject_uevent(rq_info.kobj, KOBJ_ADD);
+
+	return err;
+}
+
+static int __init msm_rq_stats_init(void)
+{
+	int ret;
+
+#ifndef CONFIG_SMP
+	/* Bail out if this is not an SMP Target */
+	rq_info.init = 0;
+	return -EPERM;
+#endif
+
+	rq_wq = create_singlethread_workqueue("rq_stats");
+	WARN_ON(!rq_wq);
+	INIT_WORK(&rq_info.def_timer_work, def_work_fn);
+	spin_lock_init(&rq_lock);
+	rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
+	rq_info.def_timer_last_jiffy = 0;
+	ret = init_rq_attribs();
+
+	rq_info.init = 1;
+
+	return ret;
+}
+late_initcall(msm_rq_stats_init);
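
def_work_fn() publishes updates through sysfs_notify(), which userspace observes by polling the attribute for POLLPRI/POLLERR. A hedged userspace sketch, with error handling omitted (the path comes from the comment in init_rq_attribs()):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[32];
		int fd = open("/sys/devices/system/cpu/cpu0/rq-stats/def_timer_ms",
			      O_RDONLY);
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

		read(fd, buf, sizeof(buf));	/* sysfs requires a read before poll */
		poll(&pfd, 1, -1);		/* blocks until sysfs_notify() fires */
		lseek(fd, 0, SEEK_SET);
		read(fd, buf, sizeof(buf));	/* re-read the updated value */
		printf("def_timer_ms: %s", buf);
		close(fd);
		return 0;
	}
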
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index d9d4b82..599a8d9 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -238,10 +238,17 @@
 #define BATCH_MAX_SIZE SZ_2M
 #define BATCH_MAX_SECTIONS 32
 
-int hyp_assign_table(struct sg_table *table,
+/*
+ *  When -EAGAIN is returned it is safe for the caller to try to call
+ *  __hyp_assign_table again.
+ *
+ *  When -EADDRNOTAVAIL is returned the memory may no longer be in
+ *  a usable state and should no longer be accessed by the HLOS.
+ */
+static int __hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
-			int dest_nelems)
+			int dest_nelems, bool try_lock)
 {
 	int ret = 0;
 	struct scm_desc desc = {0};
@@ -271,10 +278,17 @@
 					  &dest_vm_copy_size);
 	if (!dest_vm_copy) {
 		ret = -ENOMEM;
-		goto out_free;
+		goto out_free_src;
 	}
 
-	mutex_lock(&secure_buffer_mutex);
+	if (try_lock) {
+		if (!mutex_trylock(&secure_buffer_mutex)) {
+			ret = -EAGAIN;
+			goto out_free_dest;
+		}
+	} else {
+		mutex_lock(&secure_buffer_mutex);
+	}
 
 	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
 	if (!sg_table_copy) {
@@ -330,6 +344,12 @@
 		if (ret) {
 			pr_info("%s: Failed to assign memory protection, ret = %d\n",
 				__func__, ret);
+
+			/*
+			 * Make it clear to clients that the memory may no
+			 * longer be in a usable state.
+			 */
+			ret = -EADDRNOTAVAIL;
 			break;
 		}
 		batch_start = batch_end;
@@ -337,12 +357,31 @@
 
 out_unlock:
 	mutex_unlock(&secure_buffer_mutex);
+out_free_dest:
 	kfree(dest_vm_copy);
-out_free:
+out_free_src:
 	kfree(source_vm_copy);
 	return ret;
 }
 
+int hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return __hyp_assign_table(table, source_vm_list, source_nelems,
+				  dest_vmids, dest_perms, dest_nelems, false);
+}
+
+int try_hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return __hyp_assign_table(table, source_vm_list, source_nelems,
+				  dest_vmids, dest_perms, dest_nelems, true);
+}
+
 int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
 			int source_nelems, int *dest_vmids,
 			int *dest_perms, int dest_nelems)
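
The internal helper keeps one body for both locking policies; only the try path can return -EAGAIN, so the retry contract documented in the comment above holds by construction. The shape of the pattern, with hypothetical names:

	static int __do_assign(struct ctx *c, bool try_lock)
	{
		if (try_lock) {
			if (!mutex_trylock(&c->lock))
				return -EAGAIN;	/* caller may simply retry */
		} else {
			mutex_lock(&c->lock);
		}
		/* ... critical section ... */
		mutex_unlock(&c->lock);
		return 0;
	}

	int do_assign(struct ctx *c)     { return __do_assign(c, false); }
	int try_do_assign(struct ctx *c) { return __do_assign(c, true);  }
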
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index b8379f1..b8585d1 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -26,6 +26,7 @@
 #include <linux/soc/qcom/smem.h>
 #include <linux/soc/qcom/smem_state.h>
 #include <linux/spinlock.h>
+#include <linux/pm_wakeup.h>
 
 #include <linux/ipc_logging.h>
 
@@ -160,6 +161,7 @@
 	struct regmap *ipc_regmap;
 	int ipc_offset;
 	int ipc_bit;
+	struct wakeup_source ws;
 
 	struct mbox_client mbox_client;
 	struct mbox_chan *mbox_chan;
@@ -297,6 +299,14 @@
 	}
 }
 
+static irqreturn_t qcom_smp2p_isr(int irq, void *data)
+{
+	struct qcom_smp2p *smp2p = data;
+
+	__pm_stay_awake(&smp2p->ws);
+	return IRQ_WAKE_THREAD;
+}
+
 /**
  * qcom_smp2p_intr() - interrupt handler for incoming notifications
  * @irq:	unused
@@ -321,7 +331,7 @@
 		if (IS_ERR(in)) {
 			dev_err(smp2p->dev,
 				"Unable to acquire remote smp2p item\n");
-			return IRQ_HANDLED;
+			goto out;
 		}
 
 		smp2p->in = in;
@@ -340,6 +350,8 @@
 			qcom_smp2p_do_ssr_ack(smp2p);
 	}
 
+out:
+	__pm_relax(&smp2p->ws);
 	return IRQ_HANDLED;
 }
 
@@ -636,12 +648,13 @@
 			list_add(&entry->node, &smp2p->outbound);
 		}
 	}
+	wakeup_source_init(&smp2p->ws, "smp2p");
 
 	/* Kick the outgoing edge after allocating entries */
 	qcom_smp2p_kick(smp2p);
 
 	ret = devm_request_threaded_irq(&pdev->dev, smp2p->irq,
-					NULL, qcom_smp2p_intr,
+					qcom_smp2p_isr, qcom_smp2p_intr,
 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
 					"smp2p", (void *)smp2p);
 	if (ret) {
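
The two-stage smp2p handler guarantees the system cannot suspend between the hard IRQ and the threaded work: the primary handler grabs the wakeup source and defers, and every exit path of the thread, including the new goto out error path, releases it. The pattern in isolation (illustrative names; registration happens in probe):

	static irqreturn_t foo_isr(int irq, void *data)
	{
		struct foo *f = data;

		__pm_stay_awake(&f->ws);	/* hold off suspend until the thread runs */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t foo_thread_fn(int irq, void *data)
	{
		struct foo *f = data;

		/* ... sleepable processing ... */
		__pm_relax(&f->ws);		/* must happen on every exit path */
		return IRQ_HANDLED;
	}

	/* in probe(): */
	ret = devm_request_threaded_irq(dev, irq, foo_isr, foo_thread_fn,
					IRQF_NO_SUSPEND | IRQF_ONESHOT, "foo", f);
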
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 7a4edd7..595f0de 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -400,7 +400,7 @@
 			goto exit_err;
 		}
 	} else {
-		pr_debug("pending data size [%zu], requested size [%zu], ch->txn_id %d\n",
+		pr_debug("pending data size [%zu], requested size [%u], ch->txn_id %d\n",
 			 ch->actual_rx_size, size, ch->txn_id);
 	}
 	if (!ch->rpmsg_rx_buf) {
@@ -895,7 +895,7 @@
 	}
 
 	if (cmd->arg > (unsigned int)INT_MAX) {
-		pr_err("int overflow [%ld]\n", cmd->arg);
+		pr_err("int overflow [%u]\n", cmd->arg);
 		return -EINVAL;
 	}
 	fd = cmd->arg;
@@ -926,7 +926,7 @@
 		if (ch->dmabuf_handle_table[i] == NULL) {
 			ch->dmabuf_handle_table[i] = dma_buf;
 			ch->dmabuf_fd_table[i] = fd;
-			pr_debug("ch [%s] locked ion buf #%d fd [%d] dma_buf=0x%x\n",
+			pr_debug("ch [%s] locked ion buf #%d fd [%d] dma_buf=0x%pK\n",
 				ch->name, i,
 				ch->dmabuf_fd_table[i],
 				ch->dmabuf_handle_table[i]);
@@ -964,7 +964,7 @@
 		return -EINVAL;
 	}
 	if (cmd->arg > (unsigned int)INT_MAX) {
-		pr_err("int overflow [%ld]\n", cmd->arg);
+		pr_err("int overflow [%u]\n", cmd->arg);
 		return -EINVAL;
 	}
 	fd = cmd->arg;
@@ -1000,7 +1000,7 @@
 			if (!ch->dmabuf_handle_table[i])
 				continue;
 			if (ch->dmabuf_handle_table[i] == dma_buf) {
-				pr_debug("ch [%s] unlocked ion buf #%d fd [%d] dma_buf=0x%x\n",
+				pr_debug("ch [%s] unlocked ion buf #%d fd [%d] dma_buf=0x%pK\n",
 					ch->name, i,
 					ch->dmabuf_fd_table[i],
 					ch->dmabuf_handle_table[i]);
@@ -1369,7 +1369,7 @@
 	ch->is_busy = false;
 	ch->pid = 0;
 	if (ch->rpmsg_rx_buf) {
-		pr_debug("ch [%s] discarting unconsumed rx packet actual_rx_size=%d\n",
+		pr_debug("ch [%s] discarding unconsumed rx packet actual_rx_size=%zd\n",
 		       name, ch->actual_rx_size);
 		kfree(ch->rpmsg_rx_buf);
 		ch->rpmsg_rx_buf = NULL;
@@ -1884,7 +1884,7 @@
 
 		if (ch->rpmsg_abort) {
 			if (ch->rpmsg_rx_buf) {
-				pr_debug("ch [%s] rx aborted free %d bytes\n",
+				pr_debug("ch [%s] rx aborted free %zd bytes\n",
 					ch->name, ch->actual_rx_size);
 				kfree(ch->rpmsg_rx_buf);
 				ch->actual_rx_size = 0;
@@ -1892,7 +1892,7 @@
 			goto rx_aborted;
 		}
 		if (ch->rpmsg_rx_buf) {
-			pr_err("ch [%s] previous buffer not consumed %d bytes\n",
+			pr_err("ch [%s] previous buffer not consumed %zd bytes\n",
 			       ch->name, ch->actual_rx_size);
 			kfree(ch->rpmsg_rx_buf);
 			ch->rpmsg_rx_buf = NULL;
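
All of the spcom changes are printk-specifier corrections: size_t/ssize_t values take %zu/%zd, the u32 ioctl argument takes %u, and kernel pointers in logs go through %pK so they are censored for unprivileged readers. A one-line illustration:

	ssize_t rx = 0; u32 arg = 0; void *buf = NULL;

	pr_debug("rx=%zd arg=%u buf=0x%pK\n", rx, arg, buf);	/* not %d / %ld / %x */
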
diff --git a/drivers/soc/qcom/spss_utils.c b/drivers/soc/qcom/spss_utils.c
index fe5498c..0f33a44 100644
--- a/drivers/soc/qcom/spss_utils.c
+++ b/drivers/soc/qcom/spss_utils.c
@@ -345,6 +345,8 @@
 		firmware_name = none_firmware_name;
 		break;
 	default:
+		pr_err("invalid firmware type %d, sysfs entry not created\n",
+			firmware_type);
 		return -EINVAL;
 	}
 
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index dcdf907..f9ac0a7 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__
@@ -1342,6 +1342,15 @@
 		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_UNVOTE, NULL);
 }
 
+void notify_before_auth_and_reset(struct device *device)
+{
+	struct subsys_device *dev = desc_to_subsys(device);
+
+	if (dev)
+		notify_each_subsys_device(&dev, 1,
+			SUBSYS_BEFORE_AUTH_AND_RESET, NULL);
+}
+
 static int subsys_device_open(struct inode *inode, struct file *file)
 {
 	struct subsys_device *device, *subsys_dev = 0;
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
index 5d350e9..b1ab461 100644
--- a/drivers/soc/qcom/sysmon-qmi.c
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "sysmon-qmi: %s: " fmt, __func__
@@ -48,7 +48,7 @@
 	.data_type = QMI_EOTI,	\
 	.elem_len  = 0,		\
 	.elem_size = 0,		\
-	.is_array  = NO_ARRAY,	\
+	.array_type  = NO_ARRAY,	\
 	.tlv_type  = 0x00,	\
 	.offset    = 0,		\
 	.ei_array  = NULL,	\
@@ -162,7 +162,7 @@
 		.data_type = QMI_DATA_LEN,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint8_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x01,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
 				      subsys_name_len),
@@ -172,7 +172,7 @@
 		.data_type = QMI_UNSIGNED_1_BYTE,
 		.elem_len  = QMI_SSCTL_SUBSYS_NAME_LENGTH,
 		.elem_size = sizeof(char),
-		.is_array  = VAR_LEN_ARRAY,
+		.array_type  = VAR_LEN_ARRAY,
 		.tlv_type  = 0x01,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
 				      subsys_name),
@@ -182,7 +182,7 @@
 		.data_type = QMI_SIGNED_4_BYTE_ENUM,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint32_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x02,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
 				      event),
@@ -192,7 +192,7 @@
 		.data_type = QMI_OPT_FLAG,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint8_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x10,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
 				      evt_driven_valid),
@@ -202,7 +202,7 @@
 		.data_type = QMI_SIGNED_4_BYTE_ENUM,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint32_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x10,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
 				      evt_driven),
@@ -216,7 +216,7 @@
 		.data_type = QMI_STRUCT,
 		.elem_len  = 1,
 		.elem_size = sizeof(struct qmi_response_type_v01),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x02,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_resp_msg,
 				      resp),
@@ -343,7 +343,7 @@
 		.data_type = QMI_STRUCT,
 		.elem_len  = 1,
 		.elem_size = sizeof(struct qmi_response_type_v01),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x02,
 		.offset    = offsetof(struct qmi_ssctl_shutdown_resp_msg,
 				      resp),
@@ -471,7 +471,7 @@
 		.data_type = QMI_STRUCT,
 		.elem_len  = 1,
 		.elem_size = sizeof(struct qmi_response_type_v01),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x02,
 		.offset    = offsetof(
 			struct qmi_ssctl_get_failure_reason_resp_msg,
@@ -482,7 +482,7 @@
 		.data_type = QMI_OPT_FLAG,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint8_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x10,
 		.offset    = offsetof(
 			struct qmi_ssctl_get_failure_reason_resp_msg,
@@ -493,7 +493,7 @@
 		.data_type = QMI_DATA_LEN,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint8_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x10,
 		.offset    = offsetof(
 			struct qmi_ssctl_get_failure_reason_resp_msg,
@@ -504,7 +504,7 @@
 		.data_type = QMI_UNSIGNED_1_BYTE,
 		.elem_len  = QMI_SSCTL_ERROR_MSG_LENGTH,
 		.elem_size = sizeof(char),
-		.is_array  = VAR_LEN_ARRAY,
+		.array_type  = VAR_LEN_ARRAY,
 		.tlv_type  = 0x10,
 		.offset    = offsetof(
 			struct qmi_ssctl_get_failure_reason_resp_msg,
diff --git a/drivers/soc/qcom/wda_qmi.c b/drivers/soc/qcom/wda_qmi.c
index 2c15cb8..4fc5c3e 100644
--- a/drivers/soc/qcom/wda_qmi.c
+++ b/drivers/soc/qcom/wda_qmi.c
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/rtnetlink.h>
 #include <linux/soc/qcom/qmi.h>
 #include <soc/qcom/rmnet_qmi.h>
 #define CREATE_TRACE_POINTS
@@ -15,13 +16,14 @@
 	struct work_struct svc_arrive;
 	struct qmi_handle handle;
 	struct sockaddr_qrtr ssctl;
+	struct svc_info svc;
 };
 
 static void wda_svc_config(struct work_struct *work);
 /* **************************************************** */
 #define WDA_SERVICE_ID_V01 0x1A
 #define WDA_SERVICE_VERS_V01 0x01
-#define WDA_TIMEOUT_MS  20
+#define WDA_TIMEOUT_JF  msecs_to_jiffies(1000)
 
 #define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01 0x002D
 #define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01 0x002D
@@ -231,7 +233,7 @@
 		goto out;
 	}
 
-	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_MS);
+	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
 	if (ret < 0) {
 		pr_err("%s() Response waiting failed, err: %d\n",
 			__func__, ret);
@@ -247,8 +249,7 @@
 	return ret;
 }
 
-static int wda_set_powersave_config_req(struct qmi_handle *wda_handle,
-					struct qmi_info *qmi)
+static int wda_set_powersave_config_req(struct qmi_handle *wda_handle)
 {
 	struct wda_qmi_data *data = container_of(wda_handle,
 						 struct wda_qmi_data, handle);
@@ -275,8 +276,8 @@
 		goto out;
 	}
 
-	req->ep_id.ep_type = qmi->fc_info[0].svc.ep_type;
-	req->ep_id.iface_id = qmi->fc_info[0].svc.iface_id;
+	req->ep_id.ep_type = data->svc.ep_type;
+	req->ep_id.iface_id = data->svc.iface_id;
 	req->req_data_cfg_valid = 1;
 	req->req_data_cfg = WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01;
 	ret = qmi_send_request(wda_handle, &data->ssctl, &txn,
@@ -289,7 +290,7 @@
 		goto out;
 	}
 
-	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_MS);
+	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
 	if (ret < 0) {
 		pr_err("%s() Response waiting failed, err: %d\n",
 			__func__, ret);
@@ -310,28 +311,30 @@
 	struct wda_qmi_data *data = container_of(work, struct wda_qmi_data,
 						 svc_arrive);
 	struct qmi_info *qmi;
+	int rc;
 
-	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
-	if (!qmi)
-		goto clean_out;
-
-	if (wda_set_powersave_config_req(&data->handle, qmi) < 0) {
-		pr_err("%s() failed, qmi handle pt: %p\n",
-			__func__, &data->handle);
-		goto clean_out;
+	rc = wda_set_powersave_config_req(&data->handle);
+	if (rc < 0) {
+		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
+		return;
 	}
 
-	trace_wda_client_state_up(qmi->fc_info[0].svc.instance,
-				  qmi->fc_info[0].svc.ep_type,
-				  qmi->fc_info[0].svc.iface_id);
-	qmi->wda_client = (void *)data;
-	pr_info("Connection established with the WDA Service\n");
-	return;
+	rtnl_lock();
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
+	if (!qmi) {
+		rtnl_unlock();
+		return;
+	}
 
-clean_out:
-	qmi_handle_release(&data->handle);
-	destroy_workqueue(data->wda_wq);
-	kfree(data);
+	qmi->wda_pending = NULL;
+	qmi->wda_client = (void *)data;
+	trace_wda_client_state_up(data->svc.instance,
+				  data->svc.ep_type,
+				  data->svc.iface_id);
+
+	rtnl_unlock();
+
+	pr_info("Connection established with the WDA Service\n");
 }
 
 static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
@@ -362,16 +365,15 @@
 	.del_server = wda_svc_exit,
 };
 
-int wda_qmi_client_init(void *port, uint32_t instance)
+int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
 {
 	struct wda_qmi_data *data;
-	int rc = 0;
+	int rc = -ENOMEM;
 
-	if (!port)
+	if (!port || !qmi)
 		return -EINVAL;
 
-	pr_info("%s\n", __func__);
-
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -379,11 +381,11 @@
 	data->wda_wq = create_singlethread_workqueue("wda_wq");
 	if (!data->wda_wq) {
 		pr_err("%s Could not create workqueue\n", __func__);
-		kfree(data);
-		return -ENOMEM;
+		goto err0;
 	}
 
 	data->rmnet_port = port;
+	memcpy(&data->svc, psvc, sizeof(data->svc));
 	INIT_WORK(&data->svc_arrive, wda_svc_config);
 
 	rc = qmi_handle_init(&data->handle,
@@ -391,19 +393,25 @@
 			     &server_ops, NULL);
 	if (rc < 0) {
 		pr_err("%s: Failed qmi_handle_init, err: %d\n", __func__, rc);
-		kfree(data);
-		return rc;
+		goto err1;
 	}
 
 	rc = qmi_add_lookup(&data->handle, WDA_SERVICE_ID_V01,
-			    WDA_SERVICE_VERS_V01, instance);
+			    WDA_SERVICE_VERS_V01, psvc->instance);
 	if (rc < 0) {
 		pr_err("%s(): Failed qmi_add_lookup, err: %d\n", __func__, rc);
-		qmi_handle_release(&data->handle);
-		destroy_workqueue(data->wda_wq);
-		kfree(data);
+		goto err2;
 	}
 
+	qmi->wda_pending = (void *)data;
+	return 0;
+
+err2:
+	qmi_handle_release(&data->handle);
+err1:
+	destroy_workqueue(data->wda_wq);
+err0:
+	kfree(data);
 	return rc;
 }
 
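
Two things stand out in this file. First, qmi_txn_wait() expects a timeout in jiffies, so the old raw 20 meant 20 jiffies (anywhere from 20 ms to 200 ms depending on HZ) rather than 20 ms:

	#define WDA_TIMEOUT_JF	msecs_to_jiffies(1000)	/* convert once, at the define */

	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);	/* takes jiffies, not milliseconds */

Second, the err0/err1/err2 ladder replaces the duplicated cleanup calls so each failure point unwinds exactly the resources acquired so far, and the rtnl lock now protects the qmi pointer while the pending/client handoff happens.
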
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index cd8f413..7bfb154 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -22,11 +22,15 @@
 
 bool soc_is_tegra(void)
 {
+	const struct of_device_id *match;
 	struct device_node *root;
 
 	root = of_find_node_by_path("/");
 	if (!root)
 		return false;
 
-	return of_match_node(tegra_machine_match, root) != NULL;
+	match = of_match_node(tegra_machine_match, root);
+	of_node_put(root);
+
+	return match != NULL;
 }
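
of_find_node_by_path() returns the node with its refcount elevated; the old code returned straight out of of_match_node() and leaked that reference on every call. Hoisting the match result into a local makes room for the balancing put:

	struct device_node *root = of_find_node_by_path("/");	/* +1 refcount */
	const struct of_device_id *match;

	if (!root)
		return false;
	match = of_match_node(tegra_machine_match, root);
	of_node_put(root);				/* drop it before returning */
	return match != NULL;
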
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index f35cc10..25abf2d 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -88,7 +88,7 @@
 	u8 *rx_buf;
 	int tx_len;
 	int rx_len;
-	bool dma_pending;
+	unsigned int dma_pending;
 };
 
 static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
@@ -155,8 +155,7 @@
 	/* Write as many bytes as possible to FIFO */
 	bcm2835_wr_fifo(bs);
 
-	/* based on flags decide if we can finish the transfer */
-	if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+	if (!bs->rx_len) {
 		/* Transfer complete - reset SPI HW */
 		bcm2835_spi_reset_hw(master);
 		/* wake up the framework */
@@ -233,10 +232,9 @@
 	 * is called the tx-dma must have finished - can't get to this
 	 * situation otherwise...
 	 */
-	dmaengine_terminate_all(master->dma_tx);
-
-	/* mark as no longer pending */
-	bs->dma_pending = 0;
+	if (cmpxchg(&bs->dma_pending, true, false)) {
+		dmaengine_terminate_all(master->dma_tx);
+	}
 
 	/* and mark as completed */;
 	complete(&master->xfer_completion);
@@ -342,6 +340,7 @@
 	if (ret) {
 		/* need to reset on errors */
 		dmaengine_terminate_all(master->dma_tx);
+		bs->dma_pending = false;
 		bcm2835_spi_reset_hw(master);
 		return ret;
 	}
@@ -617,10 +616,9 @@
 	struct bcm2835_spi *bs = spi_master_get_devdata(master);
 
 	/* if an error occurred and we have an active dma, then terminate */
-	if (bs->dma_pending) {
+	if (cmpxchg(&bs->dma_pending, true, false)) {
 		dmaengine_terminate_all(master->dma_tx);
 		dmaengine_terminate_all(master->dma_rx);
-		bs->dma_pending = 0;
 	}
 	/* and reset */
 	bcm2835_spi_reset_hw(master);
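
dma_pending becomes an unsigned int precisely so that cmpxchg() can claim it atomically: the DMA completion callback and the error/abort path may race, and only the caller that flips 1 to 0 runs dmaengine_terminate_all(). The idiom:

	/* Whoever wins the 1 -> 0 transition owns the cleanup; the loser does nothing. */
	if (cmpxchg(&bs->dma_pending, 1, 0)) {
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
	}
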
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 424354f..ffe21e8 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -188,7 +188,7 @@
 
 	res_freq = (sclk_freq / (*clk_div));
 
-	dev_dbg(mas->dev, "%s: req %u resultant %u sclk %lu, idx %d, div %d\n",
+	dev_dbg(mas->dev, "%s: req %u resultant %lu sclk %lu, idx %d, div %d\n",
 		__func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div);
 
 	ret = clk_set_rate(rsc->se_clk, sclk_freq);
@@ -1378,6 +1378,7 @@
 		goto spi_geni_probe_err;
 	}
 
+	geni_mas->spi_rsc.ctrl_dev = geni_mas->dev;
 	rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
 	if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
 		dev_err(&pdev->dev, "No pinctrl config specified!\n");
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index fdcf307..4b77fa1 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -15,6 +15,7 @@
 
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index a880b5c..be81533 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -75,6 +75,9 @@
 /* LRU list of unpinned pages, protected by ashmem_mutex */
 static LIST_HEAD(ashmem_lru_list);
 
+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
 /*
  * long lru_count - The count of pages on our LRU list.
  *
@@ -168,19 +171,15 @@
  * @end:	   The ending page (inclusive)
  *
  * This function is protected by ashmem_mutex.
- *
- * Return: 0 if successful, or -ENOMEM if there is an error
  */
-static int range_alloc(struct ashmem_area *asma,
-		       struct ashmem_range *prev_range, unsigned int purged,
-		       size_t start, size_t end)
+static void range_alloc(struct ashmem_area *asma,
+			struct ashmem_range *prev_range, unsigned int purged,
+			size_t start, size_t end,
+			struct ashmem_range **new_range)
 {
-	struct ashmem_range *range;
+	struct ashmem_range *range = *new_range;
 
-	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
-	if (!range)
-		return -ENOMEM;
-
+	*new_range = NULL;
 	range->asma = asma;
 	range->pgstart = start;
 	range->pgend = end;
@@ -190,8 +189,6 @@
 
 	if (range_on_lru(range))
 		lru_add(range);
-
-	return 0;
 }
 
 /**
@@ -438,7 +435,6 @@
 static unsigned long
 ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	struct ashmem_range *range, *next;
 	unsigned long freed = 0;
 
 	/* We might recurse into filesystem code, so bail out if necessary */
@@ -448,21 +444,33 @@
 	if (!mutex_trylock(&ashmem_mutex))
 		return -1;
 
-	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+	while (!list_empty(&ashmem_lru_list)) {
+		struct ashmem_range *range =
+			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
 		loff_t start = range->pgstart * PAGE_SIZE;
 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
+		struct file *f = range->asma->file;
 
-		range->asma->file->f_op->fallocate(range->asma->file,
-				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-				start, end - start);
+		get_file(f);
+		atomic_inc(&ashmem_shrink_inflight);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
 
 		freed += range_size(range);
+		mutex_unlock(&ashmem_mutex);
+		f->f_op->fallocate(f,
+				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				   start, end - start);
+		fput(f);
+		if (atomic_dec_and_test(&ashmem_shrink_inflight))
+			wake_up_all(&ashmem_shrink_wait);
+		if (!mutex_trylock(&ashmem_mutex))
+			goto out;
 		if (--sc->nr_to_scan <= 0)
 			break;
 	}
 	mutex_unlock(&ashmem_mutex);
+out:
 	return freed;
 }
 
@@ -582,7 +590,8 @@
  *
  * Caller must hold ashmem_mutex.
  */
-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+		      struct ashmem_range **new_range)
 {
 	struct ashmem_range *range, *next;
 	int ret = ASHMEM_NOT_PURGED;
@@ -635,7 +644,7 @@
 			 * second half and adjust the first chunk's endpoint.
 			 */
 			range_alloc(asma, range, range->purged,
-				    pgend + 1, range->pgend);
+				    pgend + 1, range->pgend, new_range);
 			range_shrink(range, range->pgstart, pgstart - 1);
 			break;
 		}
@@ -649,7 +658,8 @@
  *
  * Caller must hold ashmem_mutex.
  */
-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+			struct ashmem_range **new_range)
 {
 	struct ashmem_range *range, *next;
 	unsigned int purged = ASHMEM_NOT_PURGED;
@@ -675,7 +685,8 @@
 		}
 	}
 
-	return range_alloc(asma, range, purged, pgstart, pgend);
+	range_alloc(asma, range, purged, pgstart, pgend, new_range);
+	return 0;
 }
 
 /*
@@ -708,11 +719,19 @@
 	struct ashmem_pin pin;
 	size_t pgstart, pgend;
 	int ret = -EINVAL;
+	struct ashmem_range *range = NULL;
 
 	if (copy_from_user(&pin, p, sizeof(pin)))
 		return -EFAULT;
 
+	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
+		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+		if (!range)
+			return -ENOMEM;
+	}
+
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	if (!asma->file)
 		goto out_unlock;
@@ -735,10 +754,10 @@
 
 	switch (cmd) {
 	case ASHMEM_PIN:
-		ret = ashmem_pin(asma, pgstart, pgend);
+		ret = ashmem_pin(asma, pgstart, pgend, &range);
 		break;
 	case ASHMEM_UNPIN:
-		ret = ashmem_unpin(asma, pgstart, pgend);
+		ret = ashmem_unpin(asma, pgstart, pgend, &range);
 		break;
 	case ASHMEM_GET_PIN_STATUS:
 		ret = ashmem_get_pin_status(asma, pgstart, pgend);
@@ -747,6 +766,8 @@
 
 out_unlock:
 	mutex_unlock(&ashmem_mutex);
+	if (range)
+		kmem_cache_free(ashmem_range_cachep, range);
 
 	return ret;
 }
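
The ashmem rework moves the only GFP_KERNEL allocation out from under ashmem_mutex (which the shrinker can now drop and re-take mid-scan), so range_alloc() itself can no longer fail. The caller pre-allocates, the callee consumes through the double pointer, and an unused preallocation is returned to the cache on exit:

	struct ashmem_range *range = NULL;

	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
		if (!range)
			return -ENOMEM;		/* fail before taking the mutex */
	}

	mutex_lock(&ashmem_mutex);
	/* ... range_alloc() takes ownership and NULLs *new_range if it uses it ... */
	mutex_unlock(&ashmem_mutex);

	if (range)				/* not consumed: give it back */
		kmem_cache_free(ashmem_range_cachep, range);
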
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index f116a64..5a53ae0 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -172,8 +172,7 @@
 void ion_buffer_destroy(struct ion_buffer *buffer)
 {
 	if (buffer->kmap_cnt > 0) {
-		pr_warn_once("%s: buffer still mapped in the kernel\n",
-			     __func__);
+		pr_warn_ratelimited("ION client likely missing a call to dma_buf_kunmap or dma_buf_vunmap\n");
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
 	}
 	buffer->heap->ops->free(buffer);
@@ -220,7 +219,7 @@
 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
 {
 	if (buffer->kmap_cnt == 0) {
-		pr_warn_ratelimited("Call dma_buf_begin_cpu_access before dma_buf_end_cpu_access, pid:%d\n",
+		pr_warn_ratelimited("ION client likely missing a call to dma_buf_kmap or dma_buf_vmap, pid:%d\n",
 				    current->pid);
 		return;
 	}
@@ -310,9 +309,9 @@
 	struct ion_buffer *buffer = dmabuf->priv;
 
 	mutex_lock(&buffer->lock);
-	free_duped_table(a->table);
 	list_del(&a->list);
 	mutex_unlock(&buffer->lock);
+	free_duped_table(a->table);
 
 	kfree(a);
 }
@@ -495,31 +494,59 @@
 	struct ion_buffer *buffer = dmabuf->priv;
 
 	_ion_buffer_destroy(buffer);
-}
-
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
-	struct ion_buffer *buffer = dmabuf->priv;
-
-	WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_kmap\n");
-	return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
-			       void *ptr)
-{
+	kfree(dmabuf->exp_name);
 }
 
 static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr = ERR_PTR(-EINVAL);
 
-	WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_vmap\n");
-	return buffer->vaddr;
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		vaddr = ion_buffer_kmap_get(buffer);
+		mutex_unlock(&buffer->lock);
+	} else {
+		pr_warn_ratelimited("heap %s doesn't support map_kernel\n",
+				    buffer->heap->name);
+	}
+
+	return vaddr;
 }
 
 static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 {
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		ion_buffer_kmap_put(buffer);
+		mutex_unlock(&buffer->lock);
+	}
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+	/*
+	 * TODO: Once clients remove their hacks where they assume kmap(ed)
+	 * addresses are virtually contiguous, implement this properly
+	 */
+	void *vaddr = ion_dma_buf_vmap(dmabuf);
+
+	if (IS_ERR(vaddr))
+		return vaddr;
+
+	return vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+			       void *ptr)
+{
+	/*
+	 * TODO: Once clients remove their hacks where they assume kmap(ed)
+	 * addresses are virtually contiguous, implement this properly
+	 */
+	ion_dma_buf_vunmap(dmabuf, ptr);
 }
 
 static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
@@ -604,7 +631,6 @@
 					  bool sync_only_mapped)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	void *vaddr;
 	struct ion_dma_buf_attachment *a;
 	int ret = 0;
 
@@ -617,19 +643,6 @@
 		goto out;
 	}
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		if (IS_ERR(vaddr)) {
-			ret = PTR_ERR(vaddr);
-			goto unlock;
-		}
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						    true, direction,
@@ -701,8 +714,6 @@
 		}
 
 	}
-
-unlock:
 	mutex_unlock(&buffer->lock);
 out:
 	return ret;
@@ -725,12 +736,6 @@
 		goto out;
 	}
 
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						  true, direction,
@@ -833,7 +838,6 @@
 						unsigned int len)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	void *vaddr;
 	struct ion_dma_buf_attachment *a;
 	int ret = 0;
 
@@ -846,15 +850,6 @@
 		goto out;
 	}
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						    true, dir,
@@ -934,12 +929,6 @@
 		goto out;
 	}
 
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						  true, direction,
@@ -1038,6 +1027,7 @@
 	struct ion_heap *heap;
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct dma_buf *dmabuf;
+	char task_comm[TASK_COMM_LEN];
 
 	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
 		 len, heap_id_mask, flags);
@@ -1069,14 +1059,20 @@
 	if (IS_ERR(buffer))
 		return ERR_CAST(buffer);
 
+	get_task_comm(task_comm, current->group_leader);
+
 	exp_info.ops = &dma_buf_ops;
 	exp_info.size = buffer->size;
 	exp_info.flags = O_RDWR;
 	exp_info.priv = buffer;
+	exp_info.exp_name = kasprintf(GFP_KERNEL, "%s-%s-%d-%s", KBUILD_MODNAME,
+				      heap->name, current->tgid, task_comm);
 
 	dmabuf = dma_buf_export(&exp_info);
-	if (IS_ERR(dmabuf))
+	if (IS_ERR(dmabuf)) {
 		_ion_buffer_destroy(buffer);
+		kfree(exp_info.exp_name);
+	}
 
 	return dmabuf;
 }
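
Two lifetime fixes ride along in the ion core: kmap/kunmap are now thin wrappers routed through the refcounted vmap/vunmap path (so a stale buffer->vaddr can never be handed out), and the kasprintf()ed exp_name is freed both when dma_buf_export() fails and in the release handler. The kmap forwarding, condensed:

	static void *kmap_sketch(struct dma_buf *dmabuf, unsigned long offset)
	{
		void *vaddr = ion_dma_buf_vmap(dmabuf);	/* takes the kmap refcount */

		if (IS_ERR(vaddr))
			return vaddr;
		return vaddr + offset * PAGE_SIZE;	/* relies on a contiguous vmap */
	}
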
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index a9aed00..8b29a76 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -3,7 +3,7 @@
  * drivers/staging/android/ion/ion.h
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  */
 
@@ -30,6 +30,7 @@
 #define ION_MM_HEAP_NAME	"mm"
 #define ION_SPSS_HEAP_NAME	"spss"
 #define ION_SECURE_CARVEOUT_HEAP_NAME	"secure_carveout"
+#define ION_USER_CONTIG_HEAP_NAME	"user_contig"
 #define ION_QSECOM_HEAP_NAME	"qsecom"
 #define ION_QSECOM_TA_HEAP_NAME	"qsecom_ta"
 #define ION_SECURE_HEAP_NAME	"secure_heap"
diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c
index 5a18b27..8e28ba0 100644
--- a/drivers/staging/android/ion/ion_cma_secure_heap.c
+++ b/drivers/staging/android/ion/ion_cma_secure_heap.c
@@ -343,8 +343,8 @@
 	kfree(chunk);
 }
 
-static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
-					 int max_nr)
+static unsigned long
+__ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
 {
 	struct list_head *entry, *_n;
 	unsigned long drained_size = 0, skipped_size = 0;
@@ -368,6 +368,7 @@
 	}
 
 	trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+	return drained_size;
 }
 
 int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
@@ -385,6 +386,7 @@
 static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
 					     struct shrink_control *sc)
 {
+	unsigned long freed;
 	struct ion_cma_secure_heap *sheap = container_of(shrinker,
 					struct ion_cma_secure_heap, shrinker);
 	int nr_to_scan = sc->nr_to_scan;
@@ -397,11 +399,11 @@
 	if (!mutex_trylock(&sheap->chunk_lock))
 		return -EAGAIN;
 
-	__ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+	freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
 
 	mutex_unlock(&sheap->chunk_lock);
 
-	return atomic_read(&sheap->total_pool_size);
+	return freed;
 }
 
 static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,
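
A scan callback's return value feeds the shrinker core's progress accounting, so it must be the number of pages freed by this invocation; returning the remaining pool size, as the old code did, misreports progress. The corrected shape, with the driver's existing trylock bail-out kept as-is (sheap comes from container_of() in the real driver):

	static unsigned long shrink_scan_sketch(struct shrinker *shrinker,
						struct shrink_control *sc)
	{
		unsigned long freed;

		if (!mutex_trylock(&sheap->chunk_lock))
			return -EAGAIN;		/* cannot make progress right now */
		freed = __ion_secure_cma_shrink_pool(sheap, sc->nr_to_scan);
		mutex_unlock(&sheap->chunk_lock);
		return freed;			/* pages freed now, not pool total */
	}
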
diff --git a/drivers/staging/android/ion/ion_secure_util.c b/drivers/staging/android/ion/ion_secure_util.c
index df88427..1c1d4dd 100644
--- a/drivers/staging/android/ion/ion_secure_util.c
+++ b/drivers/staging/android/ion/ion_secure_util.c
@@ -89,7 +89,8 @@
 }
 
 int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
-			int source_nelems, bool clear_page_private)
+			int source_nelems, bool clear_page_private,
+			bool try_lock)
 {
 	u32 dest_vmid = VMID_HLOS;
 	u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
@@ -103,11 +104,16 @@
 		goto out;
 	}
 
-	ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
-			       &dest_vmid, &dest_perms, 1);
+	if (try_lock)
+		ret = try_hyp_assign_table(sgt, source_vm_list, source_nelems,
+					   &dest_vmid, &dest_perms, 1);
+	else
+		ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
+				       &dest_vmid, &dest_perms, 1);
 	if (ret) {
-		pr_err("%s: Unassign call failed.\n",
-		       __func__);
+		if (!try_lock)
+			pr_err("%s: Unassign call failed.\n",
+			       __func__);
 		goto out;
 	}
 	if (clear_page_private)
@@ -183,7 +189,7 @@
 	}
 
 	ret = ion_hyp_unassign_sg(sgt, source_vm_list, source_nelems,
-				  set_page_private);
+				  set_page_private, false);
 
 out_free_source:
 	kfree(source_vm_list);
diff --git a/drivers/staging/android/ion/ion_secure_util.h b/drivers/staging/android/ion/ion_secure_util.h
index 6267342..bd525e5 100644
--- a/drivers/staging/android/ion/ion_secure_util.h
+++ b/drivers/staging/android/ion/ion_secure_util.h
@@ -13,7 +13,8 @@
 int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
 		      int dest_nelems, bool set_page_private);
 int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
-			int source_nelems, bool clear_page_private);
+			int source_nelems, bool clear_page_private,
+			bool try_lock);
 int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
 				   bool set_page_private);
 int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index aa781f5..35355e5 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -3,7 +3,7 @@
  * drivers/staging/android/ion/ion_system_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  */
 
@@ -158,6 +158,9 @@
 	struct page_info *info;
 	int i;
 
+	if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
+		goto force_alloc;
+
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return ERR_PTR(-ENOMEM);
@@ -189,6 +192,7 @@
 	}
 
 	kfree(info);
+force_alloc:
 	return alloc_largest_available(heap, buffer, size, max_order);
 }
 
@@ -325,8 +329,10 @@
 		goto err;
 
 	table = kzalloc(sizeof(*table), GFP_KERNEL);
-	if (!table)
+	if (!table) {
+		ret = -ENOMEM;
 		goto err_free_data_pages;
+	}
 
 	ret = sg_alloc_table(table, i, GFP_KERNEL);
 	if (ret)
@@ -388,7 +394,7 @@
 	buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
 
 	if (vmid > 0)
-		ion_hyp_unassign_sg(table, &vmid, 1, true);
+		ion_hyp_unassign_sg(table, &vmid, 1, true, false);
 
 	for_each_sg(table->sgl, sg, table->nents, i)
 		free_buffer_page(sys_heap, buffer, sg_page(sg),
@@ -429,7 +435,7 @@
 		if (vmid < 0)
 			ion_heap_buffer_zero(buffer);
 	} else if (vmid > 0) {
-		if (ion_hyp_unassign_sg(table, &vmid, 1, true))
+		if (ion_hyp_unassign_sg(table, &vmid, 1, true, false))
 			return;
 	}
 
@@ -613,6 +619,7 @@
 					bool cached)
 {
 	int i;
+
 	for (i = 0; i < NUM_ORDERS; i++) {
 		struct ion_page_pool *pool;
 		gfp_t gfp_flags = low_order_gfp_flags;
diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c
index 53fcd55..f0d8d72 100644
--- a/drivers/staging/android/ion/ion_system_secure_heap.c
+++ b/drivers/staging/android/ion/ion_system_secure_heap.c
@@ -149,7 +149,8 @@
 	return total << PAGE_SHIFT;
 }
 
-static void process_one_shrink(struct ion_heap *sys_heap,
+static void process_one_shrink(struct ion_system_secure_heap *secure_heap,
+			       struct ion_heap *sys_heap,
 			       struct prefetch_info *info)
 {
 	struct ion_buffer buffer;
@@ -157,7 +158,7 @@
 	int ret;
 
 	memset(&buffer, 0, sizeof(struct ion_buffer));
-	buffer.heap = sys_heap;
+	buffer.heap = &secure_heap->heap;
 	buffer.flags = info->vmid;
 
 	pool_size = ion_system_secure_heap_page_pool_total(sys_heap,
@@ -171,6 +172,7 @@
 	}
 
 	buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE;
+	buffer.heap = sys_heap;
 	sys_heap->ops->free(&buffer);
 }
 
@@ -190,7 +192,7 @@
 		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
 
 		if (info->shrink)
-			process_one_shrink(sys_heap, info);
+			process_one_shrink(secure_heap, sys_heap, info);
 		else
 			process_one_prefetch(sys_heap, info);
 
@@ -205,7 +207,7 @@
 			       struct list_head *items)
 {
 	struct prefetch_info *info;
-	u64 __user *user_sizes;
+	u64 user_sizes;
 	int err;
 	unsigned int nr_sizes, vmid, i;
 
@@ -226,7 +228,7 @@
 		if (!info)
 			return -ENOMEM;
 
-		err = get_user(info->size, &user_sizes[i]);
+		err = get_user(info->size, ((u64 __user *)user_sizes + i));
 		if (err)
 			goto out_free;
 
@@ -260,7 +262,10 @@
 		return -EINVAL;
 
 	for (i = 0; i < data->nr_regions; i++) {
-		ret = alloc_prefetch_info(&data->regions[i], shrink, &items);
+		struct ion_prefetch_regions *r;
+
+		r = (struct ion_prefetch_regions *)data->regions + i;
+		ret = alloc_prefetch_info(r, shrink, &items);
 		if (ret)
 			goto out_free;
 	}
@@ -270,9 +275,9 @@
 		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
 		goto out_free;
 	}
-	list_splice_init(&items, &secure_heap->prefetch_list);
-	schedule_delayed_work(&secure_heap->prefetch_work,
-			      shrink ? msecs_to_jiffies(SHRINK_DELAY) : 0);
+	list_splice_tail_init(&items, &secure_heap->prefetch_list);
+	queue_delayed_work(system_unbound_wq, &secure_heap->prefetch_work,
+			   shrink ?  msecs_to_jiffies(SHRINK_DELAY) : 0);
 	spin_unlock_irqrestore(&secure_heap->work_lock, flags);
 
 	return 0;
@@ -449,7 +454,10 @@
 		sg = sg_next(sg);
 	}
 
-	if (ion_hyp_unassign_sg(&sgt, &vmid, 1, true))
+	ret = ion_hyp_unassign_sg(&sgt, &vmid, 1, true, true);
+	if (ret == -EADDRNOTAVAIL)
+		goto out3;
+	else if (ret < 0)
 		goto out2;
 
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -460,6 +468,8 @@
 	sg_free_table(&sgt);
 	return freed;
 
+out2:
+	sg_free_table(&sgt);
 out1:
 	/* Restore pages to secure pool */
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -467,7 +477,7 @@
 		ion_page_pool_free(pool, page);
 	}
 	return 0;
-out2:
+out3:
 	/*
 	 * The security state of the pages is unknown after a failure;
 	 * They can neither be added back to the secure pool nor buddy system.
diff --git a/drivers/staging/android/ion/msm/msm_ion_of.c b/drivers/staging/android/ion/msm/msm_ion_of.c
index 4c313b9..a1dc3f8 100644
--- a/drivers/staging/android/ion/msm/msm_ion_of.c
+++ b/drivers/staging/android/ion/msm/msm_ion_of.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/err.h>
@@ -41,6 +41,10 @@
 		.name	= ION_MM_HEAP_NAME,
 	},
 	{
+		.id	= ION_USER_CONTIG_HEAP_ID,
+		.name	= ION_USER_CONTIG_HEAP_NAME,
+	},
+	{
 		.id	= ION_QSECOM_HEAP_ID,
 		.name	= ION_QSECOM_HEAP_NAME,
 	},
@@ -161,6 +165,10 @@
 				base = cma_get_base(dev->cma_area);
 				size = cma_get_size(dev->cma_area);
 				ret = 0;
+			} else if (dev->dma_mem) {
+				base = dma_get_device_base(dev, dev->dma_mem);
+				size = dma_get_size(dev->dma_mem);
+				ret = 0;
 			}
 		} else {
 			base = of_translate_address(pnode, basep);
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index f6016c7..860ec69 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -53,6 +53,7 @@
 #define ION_QSECOM_TA_HEAP_ID		19
 #define ION_AUDIO_HEAP_ID		28
 #define ION_CAMERA_HEAP_ID		20
+#define ION_USER_CONTIG_HEAP_ID		26
 /**
  * Flags to be used when allocating from the secure heap for
  * content protection
@@ -100,15 +101,15 @@
 #define ION_IOC_MSM_MAGIC 'M'
 
 struct ion_prefetch_regions {
+	__u64 sizes;
 	__u32 vmid;
-	__u64 __user *sizes;
 	__u32 nr_sizes;
 };
 
 struct ion_prefetch_data {
-	__u32 heap_id;
 	__u64 len;
-	struct ion_prefetch_regions __user *regions;
+	__u64 regions;
+	__u32 heap_id;
 	__u32 nr_regions;
 };
 
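
The uapi change replaces user pointers with fixed-width __u64 carriers: a pointer field is 4 bytes for 32-bit userspace but 8 for 64-bit, so the old structs had two incompatible layouts under a 64-bit kernel, and vmid's padding shifted with them. Leading with the __u64 keeps alignment identical everywhere, and the kernel rebuilds the pointer with an explicit cast, as the alloc_prefetch_info() hunk above shows:

	struct ion_prefetch_regions {
		__u64 sizes;		/* user pointer carried as a fixed-width int */
		__u32 vmid;
		__u32 nr_sizes;
	};

	/* kernel side: rebuild the __user pointer, then index element i */
	err = get_user(info->size, (u64 __user *)user_sizes + i);
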
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index e521ed9..35bd4d2 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -602,6 +602,7 @@
 	case NI_660X_PFI_OUTPUT_DIO:
 		if (chan > 31)
 			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index ac263a1..894e60e 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -25,7 +25,7 @@
 		struct page *page = bvec->bv_page;
 
 		/* page is already locked */
-		BUG_ON(PageUptodate(page));
+		DBG_BUGON(PageUptodate(page));
 
 		if (unlikely(err))
 			SetPageError(page);
@@ -91,12 +91,12 @@
 	struct erofs_map_blocks *map,
 	int flags)
 {
+	int err = 0;
 	erofs_blk_t nblocks, lastblk;
 	u64 offset = map->m_la;
 	struct erofs_vnode *vi = EROFS_V(inode);
 
 	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
-	BUG_ON(is_inode_layout_compression(inode));
 
 	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
 	lastblk = nblocks - is_inode_layout_inline(inode);
@@ -123,18 +123,27 @@
 		map->m_plen = inode->i_size - offset;
 
 		/* inline data should locate in one meta block */
-		BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
+		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
+			DBG_BUGON(1);
+			err = -EIO;
+			goto err_out;
+		}
+
 		map->m_flags |= EROFS_MAP_META;
 	} else {
 		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
 			vi->nid, inode->i_size, map->m_la);
-		BUG();
+		DBG_BUGON(1);
+		err = -EIO;
+		goto err_out;
 	}
 
 out:
 	map->m_llen = map->m_plen;
+
+err_out:
 	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
-	return 0;
+	return err;
 }
 
 #ifdef CONFIG_EROFS_FS_ZIP
@@ -190,7 +199,7 @@
 	erofs_off_t current_block = (erofs_off_t)page->index;
 	int err;
 
-	BUG_ON(!nblocks);
+	DBG_BUGON(!nblocks);
 
 	if (PageUptodate(page)) {
 		err = 0;
@@ -233,7 +242,7 @@
 		}
 
 		/* for RAW access mode, m_plen must be equal to m_llen */
-		BUG_ON(map.m_plen != map.m_llen);
+		DBG_BUGON(map.m_plen != map.m_llen);
 
 		blknr = erofs_blknr(map.m_pa);
 		blkoff = erofs_blkoff(map.m_pa);
@@ -243,7 +252,7 @@
 			void *vsrc, *vto;
 			struct page *ipage;
 
-			BUG_ON(map.m_plen > PAGE_SIZE);
+			DBG_BUGON(map.m_plen > PAGE_SIZE);
 
 			ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
 
@@ -270,7 +279,7 @@
 		}
 
 		/* pa must be block-aligned for raw reading */
-		BUG_ON(erofs_blkoff(map.m_pa) != 0);
+		DBG_BUGON(erofs_blkoff(map.m_pa));
 
 		/* max # of continuous pages */
 		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
@@ -331,7 +340,7 @@
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	BUG_ON(bio != NULL);	/* since we have only one bio -- must be NULL */
+	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
 	return 0;
 }
 
@@ -369,7 +378,7 @@
 		/* pages could still be locked */
 		put_page(page);
 	}
-	BUG_ON(!list_empty(pages));
+	DBG_BUGON(!list_empty(pages));
 
 	/* the rare case (end in gaps) */
 	if (unlikely(bio != NULL))
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index be6ae3b..04b84ff 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -53,8 +53,11 @@
 			strnlen(de_name, maxsize - nameoff) :
 			le16_to_cpu(de[1].nameoff) - nameoff;
 
-		/* the corrupted directory found */
-		BUG_ON(de_namelen < 0);
+		/* a corrupted entry is found */
+		if (unlikely(de_namelen < 0)) {
+			DBG_BUGON(1);
+			return -EIO;
+		}
 
 #ifdef CONFIG_EROFS_FS_DEBUG
 		dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
index fbf6ff2..7448744 100644
--- a/drivers/staging/erofs/inode.c
+++ b/drivers/staging/erofs/inode.c
@@ -132,7 +132,13 @@
 			return -ENOMEM;
 
 		m_pofs += vi->inode_isize + vi->xattr_isize;
-		BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
+
+		/* inline symlink data shouldn't cross a page boundary either */
+		if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
+			DBG_BUGON(1);
+			kfree(lnk);
+			return -EIO;
+		}
 
 		/* get in-page inline data */
 		memcpy(lnk, data + m_pofs, inode->i_size);
@@ -170,7 +176,7 @@
 		return PTR_ERR(page);
 	}
 
-	BUG_ON(!PageUptodate(page));
+	DBG_BUGON(!PageUptodate(page));
 	data = page_address(page);
 
 	err = read_inode(inode, data + ofs);
@@ -178,16 +184,16 @@
 		/* setup the new inode */
 		if (S_ISREG(inode->i_mode)) {
 #ifdef CONFIG_EROFS_FS_XATTR
-			if (vi->xattr_isize)
-				inode->i_op = &erofs_generic_xattr_iops;
+			inode->i_op = &erofs_generic_xattr_iops;
 #endif
 			inode->i_fop = &generic_ro_fops;
 		} else if (S_ISDIR(inode->i_mode)) {
 			inode->i_op =
 #ifdef CONFIG_EROFS_FS_XATTR
-				vi->xattr_isize ? &erofs_dir_xattr_iops :
-#endif
+				&erofs_dir_xattr_iops;
+#else
 				&erofs_dir_iops;
+#endif
 			inode->i_fop = &erofs_dir_fops;
 		} else if (S_ISLNK(inode->i_mode)) {
 			/* by default, page_get_link is used for symlink */
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 367b39f..58d8cbc 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -39,7 +39,7 @@
 #define debugln(x, ...)         ((void)0)
 
 #define dbg_might_sleep()       ((void)0)
-#define DBG_BUGON(...)          ((void)0)
+#define DBG_BUGON(x)            ((void)(x))
 #endif
 
 #ifdef CONFIG_EROFS_FAULT_INJECTION
@@ -184,50 +184,70 @@
 
 #define EROFS_LOCKED_MAGIC     (INT_MIN | 0xE0F510CCL)
 
-static inline bool erofs_workgroup_try_to_freeze(
-	struct erofs_workgroup *grp, int v)
+#if defined(CONFIG_SMP)
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
 {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	if (v != atomic_cmpxchg(&grp->refcount,
-		v, EROFS_LOCKED_MAGIC))
-		return false;
 	preempt_disable();
-#else
-	preempt_disable();
-	if (atomic_read(&grp->refcount) != v) {
+	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
 		preempt_enable();
 		return false;
 	}
-#endif
 	return true;
 }
 
-static inline void erofs_workgroup_unfreeze(
-	struct erofs_workgroup *grp, int v)
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
 {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	atomic_set(&grp->refcount, v);
-#endif
+	/*
+	 * other observers should notice all modifications
+	 * in the freezing period.
+	 */
+	smp_mb();
+	atomic_set(&grp->refcount, orig_val);
 	preempt_enable();
 }
 
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+	return atomic_cond_read_relaxed(&grp->refcount,
+					VAL != EROFS_LOCKED_MAGIC);
+}
+#else
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
+{
+	preempt_disable();
+	/* no need to spin on UP platforms, let's just disable preemption. */
+	if (val != atomic_read(&grp->refcount)) {
+		preempt_enable();
+		return false;
+	}
+	return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
+{
+	preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+	int v = atomic_read(&grp->refcount);
+
+	/* a workgroup is never frozen on uniprocessor systems */
+	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
+	return v;
+}
+#endif
+
 static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
 {
-	const int locked = (int)EROFS_LOCKED_MAGIC;
 	int o;
 
 repeat:
-	o = atomic_read(&grp->refcount);
-
-	/* spin if it is temporarily locked at the reclaim path */
-	if (unlikely(o == locked)) {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-		do
-			cpu_relax();
-		while (atomic_read(&grp->refcount) == locked);
-#endif
-		goto repeat;
-	}
+	o = erofs_wait_on_workgroup_freezed(grp);
 
 	if (unlikely(o <= 0))
 		return -1;
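The SMP freeze path above is a tiny compare-and-swap lock: the refcount is swapped for a reserved magic value, and readers spin (via atomic_cond_read_relaxed) until it is restored. A standalone sketch of the same protocol in C11 atomics, purely for illustration, with the magic simplified to INT_MIN:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <limits.h>

    #define LOCKED_MAGIC INT_MIN    /* any reserved negative sentinel works */

    /* freeze succeeds only if the count still equals the expected value;
     * the magic value tells every other reader the object is locked.
     */
    static bool try_to_freeze(atomic_int *refcount, int expected)
    {
            return atomic_compare_exchange_strong(refcount, &expected,
                                                  LOCKED_MAGIC);
    }

    static void unfreeze(atomic_int *refcount, int orig)
    {
            /* release ordering publishes all modifications made while
             * frozen, mirroring the smp_mb() before atomic_set() above.
             */
            atomic_store_explicit(refcount, orig, memory_order_release);
    }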
@@ -240,6 +260,7 @@
 }
 
 #define __erofs_workgroup_get(grp)	atomic_inc(&(grp)->refcount)
+#define __erofs_workgroup_put(grp)	atomic_dec(&(grp)->refcount)
 
 extern int erofs_workgroup_put(struct erofs_workgroup *grp);
 
@@ -307,12 +328,17 @@
 	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
 }
 
-#define inode_set_inited_xattr(inode)   (EROFS_V(inode)->flags |= 1)
-#define inode_has_inited_xattr(inode)   (EROFS_V(inode)->flags & 1)
+/* atomic flag definitions */
+#define EROFS_V_EA_INITED_BIT	0
+
+/* bitlock definitions (arranged in reverse order) */
+#define EROFS_V_BL_XATTR_BIT	(BITS_PER_LONG - 1)
 
 struct erofs_vnode {
 	erofs_nid_t nid;
-	unsigned int flags;
+
+	/* atomic flags (including bitlocks) */
+	unsigned long flags;
 
 	unsigned char data_mapping_mode;
 	/* inline size in bytes */
@@ -465,8 +491,9 @@
 };
 
 
-static inline struct page *erofs_get_inline_page(struct inode *inode,
-	erofs_blk_t blkaddr)
+static inline struct page *
+erofs_get_inline_page(struct inode *inode,
+		      erofs_blk_t blkaddr)
 {
 	return erofs_get_meta_page(inode->i_sb,
 		blkaddr, S_ISDIR(inode->i_mode));
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
index 546a471..023f64f 100644
--- a/drivers/staging/erofs/namei.c
+++ b/drivers/staging/erofs/namei.c
@@ -15,74 +15,77 @@
 
 #include <trace/events/erofs.h>
 
-/* based on the value of qn->len is accurate */
-static inline int dirnamecmp(struct qstr *qn,
-	struct qstr *qd, unsigned *matched)
+struct erofs_qstr {
+	const unsigned char *name;
+	const unsigned char *end;
+};
+
+/* this relies on qn->end being accurate and qn having a trailing '\0' */
+static inline int dirnamecmp(const struct erofs_qstr *qn,
+			     const struct erofs_qstr *qd,
+			     unsigned int *matched)
 {
-	unsigned i = *matched, len = min(qn->len, qd->len);
-loop:
-	if (unlikely(i >= len)) {
-		*matched = i;
-		if (qn->len < qd->len) {
-			/*
-			 * actually (qn->len == qd->len)
-			 * when qd->name[i] == '\0'
-			 */
-			return qd->name[i] == '\0' ? 0 : -1;
+	unsigned int i = *matched;
+
+	/*
+	 * on-disk error: only BUG_ON in debugging mode.
+	 * Otherwise, return 1 to just skip the invalid name
+	 * and go on (in consideration of the lookup performance).
+	 */
+	DBG_BUGON(qd->name > qd->end);
+
+	/* qd may lack the trailing '\0' */
+	/* however, accesses below qd->end are absolutely safe */
+	while (qd->name + i < qd->end && qd->name[i] != '\0') {
+		if (qn->name[i] != qd->name[i]) {
+			*matched = i;
+			return qn->name[i] > qd->name[i] ? 1 : -1;
 		}
-		return (qn->len > qd->len);
+		++i;
 	}
-
-	if (qn->name[i] != qd->name[i]) {
-		*matched = i;
-		return qn->name[i] > qd->name[i] ? 1 : -1;
-	}
-
-	++i;
-	goto loop;
+	*matched = i;
+	/* See comments in __d_alloc on the terminating NUL character */
+	return qn->name[i] == '\0' ? 0 : 1;
 }
 
-static struct erofs_dirent *find_target_dirent(
-	struct qstr *name,
-	u8 *data, int maxsize)
+#define nameoff_from_disk(off, sz)	(le16_to_cpu(off) & ((sz) - 1))
+
+static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
+					       u8 *data,
+					       unsigned int dirblksize,
+					       const int ndirents)
 {
-	unsigned ndirents, head, back;
-	unsigned startprfx, endprfx;
+	int head, back;
+	unsigned int startprfx, endprfx;
 	struct erofs_dirent *const de = (struct erofs_dirent *)data;
 
-	/* make sure that maxsize is valid */
-	BUG_ON(maxsize < sizeof(struct erofs_dirent));
-
-	ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
-
-	/* corrupted dir (may be unnecessary...) */
-	BUG_ON(!ndirents);
-
-	head = 0;
+	/* since the 1st dirent has been evaluated previously */
+	head = 1;
 	back = ndirents - 1;
 	startprfx = endprfx = 0;
 
 	while (head <= back) {
-		unsigned mid = head + (back - head) / 2;
-		unsigned nameoff = le16_to_cpu(de[mid].nameoff);
-		unsigned matched = min(startprfx, endprfx);
-
-		struct qstr dname = QSTR_INIT(data + nameoff,
-			unlikely(mid >= ndirents - 1) ?
-				maxsize - nameoff :
-				le16_to_cpu(de[mid + 1].nameoff) - nameoff);
+		const int mid = head + (back - head) / 2;
+		const int nameoff = nameoff_from_disk(de[mid].nameoff,
+						      dirblksize);
+		unsigned int matched = min(startprfx, endprfx);
+		struct erofs_qstr dname = {
+			.name = data + nameoff,
+			.end = unlikely(mid >= ndirents - 1) ?
+				data + dirblksize :
+				data + nameoff_from_disk(de[mid + 1].nameoff,
+							 dirblksize)
+		};
 
 		/* string comparison without already matched prefix */
 		int ret = dirnamecmp(name, &dname, &matched);
 
-		if (unlikely(!ret))
+		if (unlikely(!ret)) {
 			return de + mid;
-		else if (ret > 0) {
+		} else if (ret > 0) {
 			head = mid + 1;
 			startprfx = matched;
-		} else if (unlikely(mid < 1))	/* fix "mid" overflow */
-			break;
-		else {
+		} else {
 			back = mid - 1;
 			endprfx = matched;
 		}
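The rewritten search keeps one matched-prefix length per boundary and skips min(startprfx, endprfx) bytes on every probe. This is a classic trick for binary search over sorted strings: any element lying between two boundaries shares at least their shorter common prefix with the key. A minimal standalone sketch of the idea over a plain sorted string array (illustrative only, not the on-disk dirent layout):

    #include <string.h>

    /* returns the index of 'key' in the sorted array 'arr', or -1 */
    static int prefix_bsearch(const char *const *arr, int n, const char *key)
    {
            int head = 0, back = n - 1;
            unsigned int startprfx = 0, endprfx = 0;

            while (head <= back) {
                    const int mid = head + (back - head) / 2;
                    /* safe to skip: both boundaries share this prefix */
                    unsigned int matched = startprfx < endprfx ?
                                           startprfx : endprfx;
                    int diff = strcmp(arr[mid] + matched, key + matched);

                    /* extend 'matched' over the newly-compared equal bytes */
                    while (arr[mid][matched] &&
                           arr[mid][matched] == key[matched])
                            ++matched;

                    if (!diff)
                            return mid;
                    if (diff < 0) {
                            head = mid + 1;
                            startprfx = matched;
                    } else {
                            back = mid - 1;
                            endprfx = matched;
                    }
            }
            return -1;
    }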
@@ -91,12 +94,12 @@
 	return ERR_PTR(-ENOENT);
 }
 
-static struct page *find_target_block_classic(
-	struct inode *dir,
-	struct qstr *name, int *_diff)
+static struct page *find_target_block_classic(struct inode *dir,
+					      struct erofs_qstr *name,
+					      int *_ndirents)
 {
-	unsigned startprfx, endprfx;
-	unsigned head, back;
+	unsigned int startprfx, endprfx;
+	int head, back;
 	struct address_space *const mapping = dir->i_mapping;
 	struct page *candidate = ERR_PTR(-ENOENT);
 
@@ -105,41 +108,43 @@
 	back = inode_datablocks(dir) - 1;
 
 	while (head <= back) {
-		unsigned mid = head + (back - head) / 2;
+		const int mid = head + (back - head) / 2;
 		struct page *page = read_mapping_page(mapping, mid, NULL);
 
-		if (IS_ERR(page)) {
-exact_out:
-			if (!IS_ERR(candidate)) /* valid candidate */
-				put_page(candidate);
-			return page;
-		} else {
-			int diff;
-			unsigned ndirents, matched;
-			struct qstr dname;
+		if (!IS_ERR(page)) {
 			struct erofs_dirent *de = kmap_atomic(page);
-			unsigned nameoff = le16_to_cpu(de->nameoff);
+			const int nameoff = nameoff_from_disk(de->nameoff,
+							      EROFS_BLKSIZ);
+			const int ndirents = nameoff / sizeof(*de);
+			int diff;
+			unsigned int matched;
+			struct erofs_qstr dname;
 
-			ndirents = nameoff / sizeof(*de);
-
-			/* corrupted dir (should have one entry at least) */
-			BUG_ON(!ndirents || nameoff > PAGE_SIZE);
+			if (unlikely(!ndirents)) {
+				DBG_BUGON(1);
+				kunmap_atomic(de);
+				put_page(page);
+				page = ERR_PTR(-EIO);
+				goto out;
+			}
 
 			matched = min(startprfx, endprfx);
 
 			dname.name = (u8 *)de + nameoff;
-			dname.len = ndirents == 1 ?
-				/* since the rest of the last page is 0 */
-				EROFS_BLKSIZ - nameoff
-				: le16_to_cpu(de[1].nameoff) - nameoff;
+			if (ndirents == 1)
+				dname.end = (u8 *)de + EROFS_BLKSIZ;
+			else
+				dname.end = (u8 *)de +
+					nameoff_from_disk(de[1].nameoff,
+							  EROFS_BLKSIZ);
 
 			/* string comparison without already matched prefix */
 			diff = dirnamecmp(name, &dname, &matched);
 			kunmap_atomic(de);
 
 			if (unlikely(!diff)) {
-				*_diff = 0;
-				goto exact_out;
+				*_ndirents = 0;
+				goto out;
 			} else if (diff > 0) {
 				head = mid + 1;
 				startprfx = matched;
@@ -147,45 +152,51 @@
 				if (likely(!IS_ERR(candidate)))
 					put_page(candidate);
 				candidate = page;
+				*_ndirents = ndirents;
 			} else {
 				put_page(page);
 
-				if (unlikely(mid < 1))	/* fix "mid" overflow */
-					break;
-
 				back = mid - 1;
 				endprfx = matched;
 			}
+			continue;
 		}
+out:		/* free if the candidate is valid */
+		if (!IS_ERR(candidate))
+			put_page(candidate);
+		return page;
 	}
-	*_diff = 1;
 	return candidate;
 }
 
 int erofs_namei(struct inode *dir,
-	struct qstr *name,
-	erofs_nid_t *nid, unsigned *d_type)
+		struct qstr *name,
+		erofs_nid_t *nid, unsigned int *d_type)
 {
-	int diff;
+	int ndirents;
 	struct page *page;
-	u8 *data;
+	void *data;
 	struct erofs_dirent *de;
+	struct erofs_qstr qn;
 
 	if (unlikely(!dir->i_size))
 		return -ENOENT;
 
-	diff = 1;
-	page = find_target_block_classic(dir, name, &diff);
+	qn.name = name->name;
+	qn.end = name->name + name->len;
+
+	ndirents = 0;
+	page = find_target_block_classic(dir, &qn, &ndirents);
 
 	if (unlikely(IS_ERR(page)))
 		return PTR_ERR(page);
 
 	data = kmap_atomic(page);
 	/* the target page has been mapped */
-	de = likely(diff) ?
-		/* since the rest of the last page is 0 */
-		find_target_dirent(name, data, EROFS_BLKSIZ) :
-		(struct erofs_dirent *)data;
+	if (ndirents)
+		de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
+	else
+		de = (struct erofs_dirent *)data;
 
 	if (likely(!IS_ERR(de))) {
 		*nid = le64_to_cpu(de->nid);
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 2df9768..b0583cd 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -40,7 +40,6 @@
 
 static void erofs_exit_inode_cache(void)
 {
-	BUG_ON(erofs_inode_cachep == NULL);
 	kmem_cache_destroy(erofs_inode_cachep);
 }
 
@@ -265,8 +264,8 @@
 	int ret = 1;	/* 0 - busy */
 	struct address_space *const mapping = page->mapping;
 
-	BUG_ON(!PageLocked(page));
-	BUG_ON(mapping->a_ops != &managed_cache_aops);
+	DBG_BUGON(!PageLocked(page));
+	DBG_BUGON(mapping->a_ops != &managed_cache_aops);
 
 	if (PagePrivate(page))
 		ret = erofs_try_to_free_cached_page(mapping, page);
@@ -279,10 +278,10 @@
 {
 	const unsigned int stop = length + offset;
 
-	BUG_ON(!PageLocked(page));
+	DBG_BUGON(!PageLocked(page));
 
-	/* Check for overflow */
-	BUG_ON(stop > PAGE_SIZE || stop < length);
+	/* Check for potential overflow in debug mode */
+	DBG_BUGON(stop > PAGE_SIZE || stop < length);
 
 	if (offset == 0 && stop == PAGE_SIZE)
 		while (!managed_cache_releasepage(page, GFP_NOFS))
@@ -404,12 +403,6 @@
 
 	erofs_register_super(sb);
 
-	/*
-	 * We already have a positive dentry, which was instantiated
-	 * by d_make_root. Just need to d_rehash it.
-	 */
-	d_rehash(sb->s_root);
-
 	if (!silent)
 		infoln("mounted on %s with opts: %s.", dev_name,
 			(char *)data);
@@ -625,7 +618,7 @@
 
 static int erofs_remount(struct super_block *sb, int *flags, char *data)
 {
-	BUG_ON(!sb_rdonly(sb));
+	DBG_BUGON(!sb_rdonly(sb));
 
 	*flags |= SB_RDONLY;
 	return 0;
diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
index 0956615..23856ba 100644
--- a/drivers/staging/erofs/unzip_pagevec.h
+++ b/drivers/staging/erofs/unzip_pagevec.h
@@ -150,7 +150,7 @@
 	erofs_vtptr_t t;
 
 	if (unlikely(ctor->index >= ctor->nr)) {
-		BUG_ON(ctor->next == NULL);
+		DBG_BUGON(!ctor->next);
 		z_erofs_pagevec_ctor_pagedown(ctor, true);
 	}
 
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 14da8cc..f44662d 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -18,9 +18,6 @@
 
 void z_erofs_exit_zip_subsystem(void)
 {
-	BUG_ON(z_erofs_workqueue == NULL);
-	BUG_ON(z_erofs_workgroup_cachep == NULL);
-
 	destroy_workqueue(z_erofs_workqueue);
 	kmem_cache_destroy(z_erofs_workgroup_cachep);
 }
@@ -60,15 +57,30 @@
 	Z_EROFS_VLE_WORK_SECONDARY,
 	Z_EROFS_VLE_WORK_PRIMARY,
 	/*
-	 * The current work has at least been linked with the following
-	 * processed chained works, which means if the processing page
-	 * is the tail partial page of the work, the current work can
-	 * safely use the whole page, as illustrated below:
-	 * +--------------+-------------------------------------------+
-	 * |  tail page   |      head page (of the previous work)     |
-	 * +--------------+-------------------------------------------+
-	 *   /\  which belongs to the current work
-	 * [  (*) this page can be used for the current work itself.  ]
+	 * The current work was the tail of an existing chain, and all the
+	 * previously processed chained works have been decided to hook up to
+	 * it. A new chain should be created for the remaining unprocessed
+	 * works; therefore, unlike Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED, the
+	 * next work cannot reuse the whole page in the following scenario:
+	 *  ________________________________________________________________
+	 * |      tail (partial) page     |       head (partial) page       |
+	 * |  (belongs to the next work)  |  (belongs to the current work)  |
+	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
+	 */
+	Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
+	/*
+	 * The current work has been linked with the processed chained works,
+	 * and could also be linked with the potential remaining works, which
+	 * means if the processing page is the tail partial page of the work,
+	 * the current work can safely use the whole page (since the next work
+	 * is under control) for in-place decompression, as illustrated below:
+	 *  ________________________________________________________________
+	 * |  tail (partial) page  |          head (partial) page           |
+	 * | (of the current work) |         (of the previous work)         |
+	 * |  PRIMARY_FOLLOWED or  |                                        |
+	 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
+	 *
+	 * [  (*) the above page can be used for the current work itself.  ]
 	 */
 	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
 	Z_EROFS_VLE_WORK_MAX
@@ -237,10 +249,10 @@
 	return ret ? 0 : -EAGAIN;
 }
 
-static inline bool try_to_claim_workgroup(
-	struct z_erofs_vle_workgroup *grp,
-	z_erofs_vle_owned_workgrp_t *owned_head,
-	bool *hosted)
+static enum z_erofs_vle_work_role
+try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
+		       z_erofs_vle_owned_workgrp_t *owned_head,
+		       bool *hosted)
 {
 	DBG_BUGON(*hosted == true);
 
@@ -254,6 +266,9 @@
 
 		*owned_head = grp;
 		*hosted = true;
+		/* lucky, I am the followee :) */
+		return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+
 	} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
 		/*
 		 * type 2, link to the end of a existing open chain,
@@ -263,12 +278,11 @@
 		if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
 			Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
 			goto retry;
-
 		*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
-	} else
-		return false;	/* :( better luck next time */
+		return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
+	}
 
-	return true;	/* lucky, I am the followee :) */
+	return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
 }
 
 static struct z_erofs_vle_work *
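try_to_claim_workgroup() now reports which role the caller obtained instead of a bare bool: an unchained workgroup is taken over entirely (the caller hosts it, PRIMARY_FOLLOWED), a workgroup at the open end of another chain is hooked onto (PRIMARY_HOOKED), and anything else stays a plain PRIMARY. A sketch of the same decision in C11 atomics, assuming, as in the upstream driver, that the unshown first branch claims a workgroup whose next pointer is still nil (modeled as NULL here):

    #include <stdatomic.h>
    #include <stddef.h>

    struct wg { _Atomic(struct wg *) next; };

    #define TAIL ((struct wg *)-1)  /* open-chain sentinel, illustrative */

    enum role { PRIMARY, PRIMARY_HOOKED, PRIMARY_FOLLOWED };

    static enum role try_to_claim(struct wg *grp, struct wg **owned_head)
    {
            struct wg *exp;
    retry:
            exp = NULL;
            /* type 1: no chain attached yet -- take it over and host it */
            if (atomic_compare_exchange_strong(&grp->next, &exp, *owned_head)) {
                    *owned_head = grp;
                    return PRIMARY_FOLLOWED;
            }
            if (exp == TAIL) {
                    /* type 2: hook our chain onto the end of an open chain */
                    if (!atomic_compare_exchange_strong(&grp->next, &exp,
                                                        *owned_head))
                            goto retry;
                    *owned_head = TAIL;
                    return PRIMARY_HOOKED;
            }
            return PRIMARY;         /* owned elsewhere: better luck next time */
    }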
@@ -293,12 +307,9 @@
 	*grp_ret = grp = container_of(egrp,
 		struct z_erofs_vle_workgroup, obj);
 
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
 	work = z_erofs_vle_grab_work(grp, pageofs);
+	/* if multiref is disabled, `primary' is always true */
 	primary = true;
-#else
-	BUG();
-#endif
 
 	DBG_BUGON(work->pageofs != pageofs);
 
@@ -343,12 +354,8 @@
 	*hosted = false;
 	if (!primary)
 		*role = Z_EROFS_VLE_WORK_SECONDARY;
-	/* claim the workgroup if possible */
-	else if (try_to_claim_workgroup(grp, owned_head, hosted))
-		*role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
-	else
-		*role = Z_EROFS_VLE_WORK_PRIMARY;
-
+	else	/* claim the workgroup if possible */
+		*role = try_to_claim_workgroup(grp, owned_head, hosted);
 	return work;
 }
 
@@ -365,12 +372,12 @@
 	struct z_erofs_vle_workgroup *grp = *grp_ret;
 	struct z_erofs_vle_work *work;
 
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
-	BUG_ON(grp != NULL);
-#else
-	if (grp != NULL)
-		goto skip;
-#endif
+	/* if multiref is disabled, grp should never be nullptr */
+	if (unlikely(grp)) {
+		DBG_BUGON(1);
+		return ERR_PTR(-EINVAL);
+	}
+
 	/* no available workgroup, let's allocate one */
 	grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
 	if (unlikely(grp == NULL))
@@ -393,13 +400,7 @@
 	*hosted = true;
 
 	newgrp = true;
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
-skip:
-	/* currently unimplemented */
-	BUG();
-#else
 	work = z_erofs_vle_grab_primary_work(grp);
-#endif
 	work->pageofs = pageofs;
 
 	mutex_init(&work->lock);
@@ -431,6 +432,9 @@
 	}
 }
 
+#define builder_is_hooked(builder) \
+	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
+
 #define builder_is_followed(builder) \
 	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
 
@@ -595,7 +599,7 @@
 	struct z_erofs_vle_work_builder *const builder = &fe->builder;
 	const loff_t offset = page_offset(page);
 
-	bool tight = builder_is_followed(builder);
+	bool tight = builder_is_hooked(builder);
 	struct z_erofs_vle_work *work = builder->work;
 
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
@@ -606,7 +610,7 @@
 
 	enum z_erofs_page_type page_type;
 	unsigned cur, end, spiltted, index;
-	int err;
+	int err = 0;
 
 	/* register locked file pages as online pages in pack */
 	z_erofs_onlinepage_init(page);
@@ -618,13 +622,17 @@
 
 	/* lucky, within the range of the current map_blocks */
 	if (offset + cur >= map->m_la &&
-		offset + cur < map->m_la + map->m_llen)
+		offset + cur < map->m_la + map->m_llen) {
+		/* didn't get a valid unzip work previously (very rare) */
+		if (!builder->work)
+			goto restart_now;
 		goto hitted;
+	}
 
 	/* go ahead the next map_blocks */
 	debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
 
-	if (!z_erofs_vle_work_iter_end(builder))
+	if (z_erofs_vle_work_iter_end(builder))
 		fe->initial = false;
 
 	map->m_la = offset + cur;
@@ -633,12 +641,12 @@
 	if (unlikely(err))
 		goto err_out;
 
-	/* deal with hole (FIXME! broken now) */
+restart_now:
 	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
 		goto hitted;
 
 	DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
-	BUG_ON(erofs_blkoff(map->m_pa));
+	DBG_BUGON(erofs_blkoff(map->m_pa));
 
 	err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
 	if (unlikely(err))
@@ -659,7 +667,7 @@
 		builder->role = Z_EROFS_VLE_WORK_PRIMARY;
 #endif
 
-	tight &= builder_is_followed(builder);
+	tight &= builder_is_hooked(builder);
 	work = builder->work;
 hitted:
 	cur = end - min_t(unsigned, offset + end - map->m_la, end);
@@ -674,6 +682,9 @@
 			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
 				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
 
+	if (cur)
+		tight &= builder_is_followed(builder);
+
 retry:
 	err = z_erofs_vle_work_add_page(builder, page, page_type);
 	/* should allocate an additional staging page for pagevec */
@@ -683,7 +694,7 @@
 
 		err = z_erofs_vle_work_add_page(builder,
 			newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
-		if (!err)
+		if (likely(!err))
 			goto retry;
 	}
 
@@ -694,9 +705,10 @@
 
 	/* FIXME! avoid the last relundant fixup & endio */
 	z_erofs_onlinepage_fixup(page, index, true);
-	++spiltted;
 
-	/* also update nr_pages and increase queued_pages */
+	/* bump up the number of split parts of a page */
+	++spiltted;
+	/* also update nr_pages */
 	work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
 next_part:
 	/* can be used for verification */
@@ -706,16 +718,18 @@
 	if (end > 0)
 		goto repeat;
 
+out:
 	/* FIXME! avoid the last relundant fixup & endio */
 	z_erofs_onlinepage_endio(page);
 
 	debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
 		__func__, page, spiltted, map->m_llen);
-	return 0;
-
-err_out:
-	/* TODO: the missing error handing cases */
 	return err;
+
+	/* if some error occurred while processing this page */
+err_out:
+	SetPageError(page);
+	goto out;
 }
 
 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
@@ -724,13 +738,18 @@
 	struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
 	bool background = tagptr_unfold_tags(t);
 
-	if (atomic_add_return(bios, &io->pending_bios))
-		return;
+	if (!background) {
+		unsigned long flags;
 
-	if (background)
+		spin_lock_irqsave(&io->u.wait.lock, flags);
+		if (!atomic_add_return(bios, &io->pending_bios))
+			wake_up_locked(&io->u.wait);
+		spin_unlock_irqrestore(&io->u.wait.lock, flags);
+		return;
+	}
+
+	if (!atomic_add_return(bios, &io->pending_bios))
 		queue_work(z_erofs_workqueue, &io->u.work);
-	else
-		wake_up(&io->u.wait);
 }
 
 static inline void z_erofs_vle_read_endio(struct bio *bio)
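For the synchronous (foreground) case, the counter decrement and the wakeup now happen together under the waitqueue lock. The wait queue head here lives on the submitter's stack, so the completion side must not touch it after the sleeper has observed pending_bios == 0 and returned; holding io->u.wait.lock across the final atomic_add_return() and wake_up_locked() serializes the two against the sleeper, which is assumed to block along these lines:

    /* assumed waiter side (sketch): sleeps until all bios have completed */
    wait_event(io->u.wait, !atomic_read(&io->pending_bios));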
@@ -747,7 +766,7 @@
 		bool cachemngd = false;
 
 		DBG_BUGON(PageUptodate(page));
-		BUG_ON(page->mapping == NULL);
+		DBG_BUGON(!page->mapping);
 
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
 		if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
@@ -791,10 +810,8 @@
 	const unsigned clusterpages = erofs_clusterpages(sbi);
 
 	struct z_erofs_pagevec_ctor ctor;
-	unsigned nr_pages;
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
-	unsigned sparsemem_pages = 0;
-#endif
+	unsigned int nr_pages;
+	unsigned int sparsemem_pages = 0;
 	struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
 	struct page **pages, **compressed_pages, *page;
 	unsigned i, llen;
@@ -806,12 +823,8 @@
 	int err;
 
 	might_sleep();
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
 	work = z_erofs_vle_grab_primary_work(grp);
-#else
-	BUG();
-#endif
-	BUG_ON(!READ_ONCE(work->nr_pages));
+	DBG_BUGON(!READ_ONCE(work->nr_pages));
 
 	mutex_lock(&work->lock);
 	nr_pages = work->nr_pages;
@@ -860,14 +873,12 @@
 		else
 			pagenr = z_erofs_onlinepage_index(page);
 
-		BUG_ON(pagenr >= nr_pages);
+		DBG_BUGON(pagenr >= nr_pages);
+		DBG_BUGON(pages[pagenr]);
 
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
-		BUG_ON(pages[pagenr] != NULL);
-		++sparsemem_pages;
-#endif
 		pages[pagenr] = page;
 	}
+	sparsemem_pages = i;
 
 	z_erofs_pagevec_ctor_exit(&ctor, true);
 
@@ -886,9 +897,8 @@
 		if (z_erofs_is_stagingpage(page))
 			continue;
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
-		else if (page->mapping == mngda) {
-			BUG_ON(PageLocked(page));
-			BUG_ON(!PageUptodate(page));
+		if (page->mapping == mngda) {
+			DBG_BUGON(!PageUptodate(page));
 			continue;
 		}
 #endif
@@ -896,11 +906,9 @@
 		/* only non-head page could be reused as a compressed page */
 		pagenr = z_erofs_onlinepage_index(page);
 
-		BUG_ON(pagenr >= nr_pages);
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
-		BUG_ON(pages[pagenr] != NULL);
+		DBG_BUGON(pagenr >= nr_pages);
+		DBG_BUGON(pages[pagenr]);
 		++sparsemem_pages;
-#endif
 		pages[pagenr] = page;
 
 		overlapped = true;
@@ -909,9 +917,6 @@
 	llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
 
 	if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
-		/* FIXME! this should be fixed in the future */
-		BUG_ON(grp->llen != llen);
-
 		err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
 			pages, nr_pages, work->pageofs);
 		goto out;
@@ -920,18 +925,13 @@
 	if (llen > grp->llen)
 		llen = grp->llen;
 
-	err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
-		clusterpages, pages, llen, work->pageofs,
-		z_erofs_onlinepage_endio);
+	err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
+					    pages, llen, work->pageofs);
 	if (err != -ENOTSUPP)
-		goto out_percpu;
+		goto out;
 
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
-	if (sparsemem_pages >= nr_pages) {
-		BUG_ON(sparsemem_pages > nr_pages);
+	if (sparsemem_pages >= nr_pages)
 		goto skip_allocpage;
-	}
-#endif
 
 	for (i = 0; i < nr_pages; ++i) {
 		if (pages[i] != NULL)
@@ -940,9 +940,7 @@
 		pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
 	}
 
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
 skip_allocpage:
-#endif
 	vout = erofs_vmap(pages, nr_pages);
 
 	err = z_erofs_vle_unzip_vmap(compressed_pages,
@@ -951,21 +949,7 @@
 	erofs_vunmap(vout, nr_pages);
 
 out:
-	for (i = 0; i < nr_pages; ++i) {
-		page = pages[i];
-		DBG_BUGON(page->mapping == NULL);
-
-		/* recycle all individual staging pages */
-		if (z_erofs_gather_if_stagingpage(page_pool, page))
-			continue;
-
-		if (unlikely(err < 0))
-			SetPageError(page);
-
-		z_erofs_onlinepage_endio(page);
-	}
-
-out_percpu:
+	/* must handle all compressed pages before ending pages */
 	for (i = 0; i < clusterpages; ++i) {
 		page = compressed_pages[i];
 
@@ -979,6 +963,23 @@
 		WRITE_ONCE(compressed_pages[i], NULL);
 	}
 
+	for (i = 0; i < nr_pages; ++i) {
+		page = pages[i];
+		if (!page)
+			continue;
+
+		DBG_BUGON(page->mapping == NULL);
+
+		/* recycle all individual staging pages */
+		if (z_erofs_gather_if_stagingpage(page_pool, page))
+			continue;
+
+		if (unlikely(err < 0))
+			SetPageError(page);
+
+		z_erofs_onlinepage_endio(page);
+	}
+
 	if (pages == z_pagemap_global)
 		mutex_unlock(&z_pagemap_global_lock);
 	else if (unlikely(pages != pages_onstack))
@@ -1026,7 +1027,7 @@
 		struct z_erofs_vle_unzip_io_sb, io.u.work);
 	LIST_HEAD(page_pool);
 
-	BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+	DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
 	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
 
 	put_pages_list(&page_pool);
@@ -1355,7 +1356,6 @@
 			continue;
 		}
 
-		BUG_ON(PagePrivate(page));
 		set_page_private(page, (unsigned long)head);
 		head = page;
 	}
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
index 3939985..684ff06 100644
--- a/drivers/staging/erofs/unzip_vle.h
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -47,13 +47,6 @@
 #define Z_EROFS_VLE_INLINE_PAGEVECS     3
 
 struct z_erofs_vle_work {
-	/* struct z_erofs_vle_work *left, *right; */
-
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
-	struct list_head list;
-
-	atomic_t refcount;
-#endif
 	struct mutex lock;
 
 	/* I: decompression offset in page */
@@ -107,10 +100,8 @@
 	grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
 }
 
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
-#error multiref decompression is unimplemented yet
-#else
 
+/* definitions if multiref is disabled */
 #define z_erofs_vle_grab_primary_work(grp)	(&(grp)->work)
 #define z_erofs_vle_grab_work(grp, pageofs)	(&(grp)->work)
 #define z_erofs_vle_work_workgroup(wrk, primary)	\
@@ -118,7 +109,6 @@
 		struct z_erofs_vle_workgroup, work) : \
 		({ BUG(); (void *)NULL; }))
 
-#endif
 
 #define Z_EROFS_WORKGROUP_SIZE       sizeof(struct z_erofs_vle_workgroup)
 
@@ -228,8 +218,7 @@
 
 extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
 	unsigned clusterpages, struct page **pages,
-	unsigned outlen, unsigned short pageofs,
-	void (*endio)(struct page *));
+	unsigned int outlen, unsigned short pageofs);
 
 extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
 	unsigned clusterpages, void *vaddr, unsigned llen,
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index f5b665f..055420e 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -57,7 +57,7 @@
 			if (compressed_pages[j] != page)
 				continue;
 
-			BUG_ON(mirrored[j]);
+			DBG_BUGON(mirrored[j]);
 			memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
 			mirrored[j] = true;
 			break;
@@ -105,8 +105,7 @@
 				  unsigned clusterpages,
 				  struct page **pages,
 				  unsigned outlen,
-				  unsigned short pageofs,
-				  void (*endio)(struct page *))
+				  unsigned short pageofs)
 {
 	void *vin, *vout;
 	unsigned nr_pages, i, j;
@@ -128,31 +127,30 @@
 	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
 		clusterpages * PAGE_SIZE, outlen);
 
-	if (ret >= 0) {
-		outlen = ret;
-		ret = 0;
-	}
+	if (ret < 0)
+		goto out;
+	ret = 0;
 
 	for (i = 0; i < nr_pages; ++i) {
 		j = min((unsigned)PAGE_SIZE - pageofs, outlen);
 
 		if (pages[i] != NULL) {
-			if (ret < 0)
-				SetPageError(pages[i]);
-			else if (clusterpages == 1 && pages[i] == compressed_pages[0])
+			if (clusterpages == 1 &&
+			    pages[i] == compressed_pages[0]) {
 				memcpy(vin + pageofs, vout + pageofs, j);
-			else {
+			} else {
 				void *dst = kmap_atomic(pages[i]);
 
 				memcpy(dst + pageofs, vout + pageofs, j);
 				kunmap_atomic(dst);
 			}
-			endio(pages[i]);
 		}
 		vout += PAGE_SIZE;
 		outlen -= j;
 		pageofs = 0;
 	}
+
+out:
 	preempt_enable();
 
 	if (clusterpages == 1)
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index 595cf90..2d96820 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -23,9 +23,6 @@
 		list_del(&page->lru);
 	} else {
 		page = alloc_pages(gfp | __GFP_NOFAIL, 0);
-
-		BUG_ON(page == NULL);
-		BUG_ON(page->mapping != NULL);
 	}
 	return page;
 }
@@ -60,7 +57,7 @@
 		/* decrease refcount added by erofs_workgroup_put */
 		if (unlikely(oldcount == 1))
 			atomic_long_dec(&erofs_global_shrink_cnt);
-		BUG_ON(index != grp->index);
+		DBG_BUGON(index != grp->index);
 	}
 	rcu_read_unlock();
 	return grp;
@@ -73,8 +70,11 @@
 	struct erofs_sb_info *sbi;
 	int err;
 
-	/* grp->refcount should not < 1 */
-	BUG_ON(!atomic_read(&grp->refcount));
+	/* grp shouldn't be broken or used before */
+	if (unlikely(atomic_read(&grp->refcount) != 1)) {
+		DBG_BUGON(1);
+		return -EINVAL;
+	}
 
 	err = radix_tree_preload(GFP_NOFS);
 	if (err)
@@ -87,12 +87,21 @@
 		grp = (void *)((unsigned long)grp |
 			1UL << RADIX_TREE_EXCEPTIONAL_SHIFT);
 
-	err = radix_tree_insert(&sbi->workstn_tree,
-		grp->index, grp);
+	/*
+	 * Bump up reference count before making this workgroup
+	 * visible to other users in order to avoid potential UAF
+	 * when not serialized by erofs_workstn_lock.
+	 */
+	__erofs_workgroup_get(grp);
 
-	if (!err) {
-		__erofs_workgroup_get(grp);
-	}
+	err = radix_tree_insert(&sbi->workstn_tree,
+				grp->index, grp);
+	if (unlikely(err))
+		/*
+		 * it's safe to decrease since the workgroup isn't visible
+		 * and refcount >= 2 (cannot be frozen).
+		 */
+		__erofs_workgroup_put(grp);
 
 	erofs_workstn_unlock(sbi);
 	radix_tree_preload_end();
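The reordering above is the standard publish pattern for refcounted objects: take the reference while the object is still private, insert, and drop it again only on a failed insert (when nobody else can have seen it). The same shape in miniature, with illustrative names:

    /* sketch: safely publish a refcounted object (names are illustrative) */
    static int publish(struct tree *t, struct obj *o)
    {
            int err;

            obj_get(o);                     /* count the tree's reference first */
            err = tree_insert(t, o->index, o);
            if (err)
                    obj_put(o);             /* still private, safe to undo */
            return err;
    }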
@@ -101,19 +110,99 @@
 
 extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
 
+static void __erofs_workgroup_free(struct erofs_workgroup *grp)
+{
+	atomic_long_dec(&erofs_global_shrink_cnt);
+	erofs_workgroup_free_rcu(grp);
+}
+
 int erofs_workgroup_put(struct erofs_workgroup *grp)
 {
 	int count = atomic_dec_return(&grp->refcount);
 
 	if (count == 1)
 		atomic_long_inc(&erofs_global_shrink_cnt);
-	else if (!count) {
-		atomic_long_dec(&erofs_global_shrink_cnt);
-		erofs_workgroup_free_rcu(grp);
-	}
+	else if (!count)
+		__erofs_workgroup_free(grp);
 	return count;
 }
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+/* for the cache-managed case, customized reclaim paths exist */
+static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
+{
+	erofs_workgroup_unfreeze(grp, 0);
+	__erofs_workgroup_free(grp);
+}
+
+bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+				    struct erofs_workgroup *grp,
+				    bool cleanup)
+{
+	void *entry;
+
+	/*
+	 * with managed cache enabled, the refcount of a workgroup
+	 * itself could be < 0 (frozen). So there is no guarantee
+	 * that all refcounts are > 0 if managed cache is enabled.
+	 */
+	if (!erofs_workgroup_try_to_freeze(grp, 1))
+		return false;
+
+	/*
+	 * note that all cached pages should be unlinked
+	 * before the workgroup is deleted from the radix tree.
+	 * Otherwise some cached pages of an orphaned old workgroup
+	 * could still be linked after the new one becomes available.
+	 */
+	if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
+		erofs_workgroup_unfreeze(grp, 1);
+		return false;
+	}
+
+	/*
+	 * it is impossible to fail after the workgroup is frozen;
+	 * however, in order to avoid some race conditions, add a
+	 * DBG_BUGON to observe this in advance.
+	 */
+	entry = radix_tree_delete(&sbi->workstn_tree, grp->index);
+	DBG_BUGON((void *)((unsigned long)entry &
+			   ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp);
+
+	/*
+	 * if managed cache is enabled, the last refcount
+	 * should indicate the related workstation.
+	 */
+	erofs_workgroup_unfreeze_final(grp);
+	return true;
+}
+
+#else
+/* for the nocache case, there is no customized reclaim path at all */
+bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+				    struct erofs_workgroup *grp,
+				    bool cleanup)
+{
+	int cnt = atomic_read(&grp->refcount);
+	void *entry;
+
+	DBG_BUGON(cnt <= 0);
+	DBG_BUGON(cleanup && cnt != 1);
+
+	if (cnt > 1)
+		return false;
+
+	entry = radix_tree_delete(&sbi->workstn_tree, grp->index);
+	DBG_BUGON((void *)((unsigned long)entry &
+			   ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp);
+
+	/* (rarely) could be grabbed again when freeing */
+	erofs_workgroup_put(grp);
+	return true;
+}
+
+#endif
+
 unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
 				       unsigned long nr_shrink,
 				       bool cleanup)
@@ -130,44 +219,16 @@
 		batch, first_index, PAGEVEC_SIZE);
 
 	for (i = 0; i < found; ++i) {
-		int cnt;
 		struct erofs_workgroup *grp = (void *)
 			((unsigned long)batch[i] &
 				~RADIX_TREE_EXCEPTIONAL_ENTRY);
 
 		first_index = grp->index + 1;
 
-		cnt = atomic_read(&grp->refcount);
-		BUG_ON(cnt <= 0);
-
-		if (cleanup)
-			BUG_ON(cnt != 1);
-
-#ifndef EROFS_FS_HAS_MANAGED_CACHE
-		else if (cnt > 1)
-#else
-		if (!erofs_workgroup_try_to_freeze(grp, 1))
-#endif
+		/* try to shrink each valid workgroup */
+		if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
 			continue;
 
-		if (radix_tree_delete(&sbi->workstn_tree,
-			grp->index) != grp) {
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-skip:
-			erofs_workgroup_unfreeze(grp, 1);
-#endif
-			continue;
-		}
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-		if (erofs_try_to_free_all_cached_pages(sbi, grp))
-			goto skip;
-
-		erofs_workgroup_unfreeze(grp, 1);
-#endif
-		/* (rarely) grabbed again when freeing */
-		erofs_workgroup_put(grp);
-
 		++freed;
 		if (unlikely(!--nr_shrink))
 			break;
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 0e9cfec..2db99cf 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -24,36 +24,77 @@
 
 static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
 {
-	/* only init_inode_xattrs use non-atomic once */
+	/* the only user of kunmap() is 'init_inode_xattrs' */
 	if (unlikely(!atomic))
 		kunmap(it->page);
 	else
 		kunmap_atomic(it->kaddr);
+
 	unlock_page(it->page);
 	put_page(it->page);
 }
 
-static void init_inode_xattrs(struct inode *inode)
+static inline void xattr_iter_end_final(struct xattr_iter *it)
 {
+	if (!it->page)
+		return;
+
+	xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
+{
+	struct erofs_vnode *const vi = EROFS_V(inode);
 	struct xattr_iter it;
 	unsigned i;
 	struct erofs_xattr_ibody_header *ih;
 	struct erofs_sb_info *sbi;
-	struct erofs_vnode *vi;
 	bool atomic_map;
+	int ret = 0;
 
-	if (likely(inode_has_inited_xattr(inode)))
-		return;
+	/* in most cases, the xattrs of this inode have already been initialized */
+	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+		return 0;
 
-	vi = EROFS_V(inode);
-	BUG_ON(!vi->xattr_isize);
+	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
+		return -ERESTARTSYS;
+
+	/* someone has initialized xattrs for us? */
+	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+		goto out_unlock;
+
+	/*
+	 * bypass all xattr operations if ->xattr_isize is not greater than
+	 * sizeof(struct erofs_xattr_ibody_header), in detail:
+	 * 1) if it is not large enough to contain erofs_xattr_ibody_header,
+	 *    ->xattr_isize should be 0 (it means no xattr);
+	 * 2) if it just fits erofs_xattr_ibody_header, that layout is
+	 *    undefined on-disk right now (maybe used later by a new sb feature).
+	 */
+	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
+		errln("xattr_isize %d of nid %llu is not supported yet",
+		      vi->xattr_isize, vi->nid);
+		ret = -ENOTSUPP;
+		goto out_unlock;
+	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
+		if (unlikely(vi->xattr_isize)) {
+			DBG_BUGON(1);
+			ret = -EIO;
+			goto out_unlock;	/* xattr ondisk layout error */
+		}
+		ret = -ENOATTR;
+		goto out_unlock;
+	}
 
 	sbi = EROFS_I_SB(inode);
 	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
 	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
 
 	it.page = erofs_get_inline_page(inode, it.blkaddr);
-	BUG_ON(IS_ERR(it.page));
+	if (IS_ERR(it.page)) {
+		ret = PTR_ERR(it.page);
+		goto out_unlock;
+	}
 
 	/* read in shared xattr array (non-atomic, see kmalloc below) */
 	it.kaddr = kmap(it.page);
@@ -62,9 +103,13 @@
 	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
 
 	vi->xattr_shared_count = ih->h_shared_count;
-	vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
-		vi->xattr_shared_count, sizeof(unsigned),
-		GFP_KERNEL | __GFP_NOFAIL);
+	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+						sizeof(uint), GFP_KERNEL);
+	if (!vi->xattr_shared_xattrs) {
+		xattr_iter_end(&it, atomic_map);
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
 	/* let's skip ibody header */
 	it.ofs += sizeof(struct erofs_xattr_ibody_header);
@@ -77,7 +122,12 @@
 
 			it.page = erofs_get_meta_page(inode->i_sb,
 				++it.blkaddr, S_ISDIR(inode->i_mode));
-			BUG_ON(IS_ERR(it.page));
+			if (IS_ERR(it.page)) {
+				kfree(vi->xattr_shared_xattrs);
+				vi->xattr_shared_xattrs = NULL;
+				ret = PTR_ERR(it.page);
+				goto out_unlock;
+			}
 
 			it.kaddr = kmap_atomic(it.page);
 			atomic_map = true;
@@ -89,7 +139,11 @@
 	}
 	xattr_iter_end(&it, atomic_map);
 
-	inode_set_inited_xattr(inode);
+	set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
+
+out_unlock:
+	clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
+	return ret;
 }
 
 struct xattr_iter_handlers {
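init_inode_xattrs() above now follows the common once-only initialization idiom: a "done" bit checked locklessly on the fast path, plus a bit lock serializing the slow path. Stripped to its skeleton (names taken from this patch):

    if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
            return 0;                       /* fast path: already initialized */

    if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
            return -ERESTARTSYS;            /* killed while waiting for the lock */

    if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
            goto out_unlock;                /* raced: someone else finished it */

    /* ... one-time initialization; failures just unlock and return ... */
    set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
    out_unlock:
    clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);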
@@ -99,18 +153,25 @@
 	void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
 };
 
-static void xattr_iter_fixup(struct xattr_iter *it)
+static inline int xattr_iter_fixup(struct xattr_iter *it)
 {
-	if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
-		xattr_iter_end(it, true);
+	if (it->ofs < EROFS_BLKSIZ)
+		return 0;
 
-		it->blkaddr += erofs_blknr(it->ofs);
-		it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
-		BUG_ON(IS_ERR(it->page));
+	xattr_iter_end(it, true);
 
-		it->kaddr = kmap_atomic(it->page);
-		it->ofs = erofs_blkoff(it->ofs);
+	it->blkaddr += erofs_blknr(it->ofs);
+	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
+	if (IS_ERR(it->page)) {
+		int err = PTR_ERR(it->page);
+
+		it->page = NULL;
+		return err;
 	}
+
+	it->kaddr = kmap_atomic(it->page);
+	it->ofs = erofs_blkoff(it->ofs);
+	return 0;
 }
 
 static int inline_xattr_iter_begin(struct xattr_iter *it,
@@ -132,21 +193,24 @@
 	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
 
 	it->page = erofs_get_inline_page(inode, it->blkaddr);
-	BUG_ON(IS_ERR(it->page));
-	it->kaddr = kmap_atomic(it->page);
+	if (IS_ERR(it->page))
+		return PTR_ERR(it->page);
 
+	it->kaddr = kmap_atomic(it->page);
 	return vi->xattr_isize - xattr_header_sz;
 }
 
 static int xattr_foreach(struct xattr_iter *it,
-	struct xattr_iter_handlers *op, unsigned *tlimit)
+	const struct xattr_iter_handlers *op, unsigned int *tlimit)
 {
 	struct erofs_xattr_entry entry;
 	unsigned value_sz, processed, slice;
 	int err;
 
 	/* 0. fixup blkaddr, ofs, ipage */
-	xattr_iter_fixup(it);
+	err = xattr_iter_fixup(it);
+	if (err)
+		return err;
 
 	/*
 	 * 1. read xattr entry to the memory,
@@ -178,7 +242,9 @@
 		if (it->ofs >= EROFS_BLKSIZ) {
 			BUG_ON(it->ofs > EROFS_BLKSIZ);
 
-			xattr_iter_fixup(it);
+			err = xattr_iter_fixup(it);
+			if (err)
+				goto out;
 			it->ofs = 0;
 		}
 
@@ -210,7 +276,10 @@
 	while (processed < value_sz) {
 		if (it->ofs >= EROFS_BLKSIZ) {
 			BUG_ON(it->ofs > EROFS_BLKSIZ);
-			xattr_iter_fixup(it);
+
+			err = xattr_iter_fixup(it);
+			if (err)
+				goto out;
 			it->ofs = 0;
 		}
 
@@ -270,7 +339,7 @@
 	memcpy(it->buffer + processed, buf, len);
 }
 
-static struct xattr_iter_handlers find_xattr_handlers = {
+static const struct xattr_iter_handlers find_xattr_handlers = {
 	.entry = xattr_entrymatch,
 	.name = xattr_namematch,
 	.alloc_buffer = xattr_checkbuffer,
@@ -291,8 +360,11 @@
 		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
 		if (ret >= 0)
 			break;
+
+		if (ret != -ENOATTR)	/* -ENOMEM, -EIO, etc. */
+			break;
 	}
-	xattr_iter_end(&it->it, true);
+	xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_size;
 }
@@ -315,8 +387,10 @@
 				xattr_iter_end(&it->it, true);
 
 			it->it.page = erofs_get_meta_page(inode->i_sb,
-				blkaddr, false);
-			BUG_ON(IS_ERR(it->it.page));
+							  blkaddr, false);
+			if (IS_ERR(it->it.page))
+				return PTR_ERR(it->it.page);
+
 			it->it.kaddr = kmap_atomic(it->it.page);
 			it->it.blkaddr = blkaddr;
 		}
@@ -324,9 +398,12 @@
 		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
 		if (ret >= 0)
 			break;
+
+		if (ret != -ENOATTR)	/* -ENOMEM, -EIO, etc. */
+			break;
 	}
 	if (vi->xattr_shared_count)
-		xattr_iter_end(&it->it, true);
+		xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_size;
 }
@@ -351,7 +428,9 @@
 	if (unlikely(name == NULL))
 		return -EINVAL;
 
-	init_inode_xattrs(inode);
+	ret = init_inode_xattrs(inode);
+	if (ret)
+		return ret;
 
 	it.index = index;
 
@@ -374,7 +453,6 @@
 		struct dentry *unused, struct inode *inode,
 		const char *name, void *buffer, size_t size)
 {
-	struct erofs_vnode *const vi = EROFS_V(inode);
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 
 	switch (handler->flags) {
@@ -392,9 +470,6 @@
 		return -EINVAL;
 	}
 
-	if (!vi->xattr_isize)
-		return -ENOATTR;
-
 	return erofs_getxattr(inode, handler->flags, name, buffer, size);
 }
 
@@ -494,7 +569,7 @@
 	return 1;
 }
 
-static struct xattr_iter_handlers list_xattr_handlers = {
+static const struct xattr_iter_handlers list_xattr_handlers = {
 	.entry = xattr_entrylist,
 	.name = xattr_namelist,
 	.alloc_buffer = xattr_skipvalue,
@@ -516,7 +591,7 @@
 		if (ret < 0)
 			break;
 	}
-	xattr_iter_end(&it->it, true);
+	xattr_iter_end_final(&it->it);
 	return ret < 0 ? ret : it->buffer_ofs;
 }
 
@@ -538,8 +613,10 @@
 				xattr_iter_end(&it->it, true);
 
 			it->it.page = erofs_get_meta_page(inode->i_sb,
-				blkaddr, false);
-			BUG_ON(IS_ERR(it->it.page));
+							  blkaddr, false);
+			if (IS_ERR(it->it.page))
+				return PTR_ERR(it->it.page);
+
 			it->it.kaddr = kmap_atomic(it->it.page);
 			it->it.blkaddr = blkaddr;
 		}
@@ -549,7 +626,7 @@
 			break;
 	}
 	if (vi->xattr_shared_count)
-		xattr_iter_end(&it->it, true);
+		xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_ofs;
 }
@@ -560,7 +637,9 @@
 	int ret;
 	struct listxattr_iter it;
 
-	init_inode_xattrs(d_inode(dentry));
+	ret = init_inode_xattrs(d_inode(dentry));
+	if (ret)
+		return ret;
 
 	it.dentry = dentry;
 	it.buffer = buffer;
diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/staging/fsl-dpaa2/rtc/rtc.c
index 0d52cb8..318a33c 100644
--- a/drivers/staging/fsl-dpaa2/rtc/rtc.c
+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
@@ -142,7 +142,10 @@
 
 	err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
 	if (err) {
-		dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+		if (err == -ENXIO)
+			err = -EPROBE_DEFER;
+		else
+			dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
 		goto err_exit;
 	}
 
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index b736275..6a48ad0 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -256,7 +256,9 @@
 	if (ret)
 		return ret;
 
-	__ad7280_read32(st, &tmp);
+	ret = __ad7280_read32(st, &tmp);
+	if (ret)
+		return ret;
 
 	if (ad7280_check_crc(st, tmp))
 		return -EIO;
@@ -294,7 +296,9 @@
 
 	ad7280_delay(st);
 
-	__ad7280_read32(st, &tmp);
+	ret = __ad7280_read32(st, &tmp);
+	if (ret)
+		return ret;
 
 	if (ad7280_check_crc(st, tmp))
 		return -EIO;
@@ -327,7 +331,9 @@
 	ad7280_delay(st);
 
 	for (i = 0; i < cnt; i++) {
-		__ad7280_read32(st, &tmp);
+		ret = __ad7280_read32(st, &tmp);
+		if (ret)
+			return ret;
 
 		if (ad7280_check_crc(st, tmp))
 			return -EIO;
@@ -370,7 +376,10 @@
 		return ret;
 
 	for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
-		__ad7280_read32(st, &val);
+		ret = __ad7280_read32(st, &val);
+		if (ret)
+			return ret;
+
 		if (val == 0)
 			return n - 1;
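Each call site now repeats the same read-then-CRC pattern; if this grows further, a small checked-read helper could fold the two failure modes together. A hypothetical sketch (ad7280_read32_checked() is not part of this patch):

    /* hypothetical helper, not in this patch: read one 32-bit word and
     * fail uniformly on either a transfer error or a CRC mismatch.
     */
    static int ad7280_read32_checked(struct ad7280_state *st, unsigned int *val)
    {
            int ret = __ad7280_read32(st, val);

            if (ret)
                    return ret;
            return ad7280_check_crc(st, *val) ? -EIO : 0;
    }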
 
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 16d7207..8bcb5d5 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -87,12 +87,16 @@
 			   long m)
 {
 	struct ad7780_state *st = iio_priv(indio_dev);
+	int voltage_uv;
 
 	switch (m) {
 	case IIO_CHAN_INFO_RAW:
 		return ad_sigma_delta_single_conversion(indio_dev, chan, val);
 	case IIO_CHAN_INFO_SCALE:
-		*val = st->int_vref_mv * st->gain;
+		voltage_uv = regulator_get_voltage(st->reg);
+		if (voltage_uv < 0)
+			return voltage_uv;
+		*val = (voltage_uv / 1000) * st->gain;
 		*val2 = chan->scan_type.realbits - 1;
 		return IIO_VAL_FRACTIONAL_LOG2;
 	case IIO_CHAN_INFO_OFFSET:
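With IIO_VAL_FRACTIONAL_LOG2 the reported scale is val / 2^val2, so the channel scale becomes (vref_mV * gain) / 2^(realbits - 1) and now tracks whatever voltage the regulator actually supplies instead of a cached value. As a worked example with assumed numbers, say a 3.3 V reference, gain 1 and a 24-bit channel: 3300 / 2^23 ≈ 0.000393 mV per LSB.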
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index 5958694..51cda91 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -85,7 +85,12 @@
 	/* need 600ns between CS and the first falling edge of SCLK */
 	spi->max_speed_hz = 830000;
 	spi->mode = SPI_MODE_3;
-	spi_setup(spi);
+	ret = spi_setup(spi);
+
+	if (ret < 0) {
+		dev_err(&spi->dev, "spi_setup failed!\n");
+		return ret;
+	}
 
 	return 0;
 }
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index c85a805..a497ec1 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1255,6 +1255,10 @@
 
 	/* create cdev */
 	device->cdev = cdev_alloc();
+	if (!device->cdev) {
+		dev_dbg(device->dev, "allocation of cdev failed");
+		goto cdev_failed;
+	}
 	device->cdev->owner = THIS_MODULE;
 	cdev_init(device->cdev, &pi433_fops);
 	retval = cdev_add(device->cdev, device->devt, 1);
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 2a48b09..470ea2c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -154,7 +154,7 @@
 
 	pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
 
-	crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
+	crypto_ops = lib80211_get_crypto_ops("WEP");
 
 	if (!crypto_ops)
 		return;
@@ -210,7 +210,7 @@
 		void *crypto_private = NULL;
 		int status = _SUCCESS;
 		const int keyindex = prxattrib->key_index;
-		struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
+		struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP");
 		char iv[4], icv[4];
 
 		if (!crypto_ops) {
@@ -1292,7 +1292,7 @@
 			struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
 			void *crypto_private = NULL;
 			u8 *key, *pframe = skb->data;
-			struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp");
+			struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP");
 			struct security_priv *psecuritypriv = &padapter->securitypriv;
 			char iv[8], icv[8];
 
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 28cbd6b..dfee698 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -35,6 +35,7 @@
 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
 	{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
 	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+	{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
 	{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
 	{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index bcc8dfa..9efb4dc 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -850,18 +850,18 @@
 #define IP_FMT "%pI4"
 #define IP_ARG(x) (x)
 
-extern __inline int is_multicast_mac_addr(const u8 *addr)
+static inline int is_multicast_mac_addr(const u8 *addr)
 {
         return ((addr[0] != 0xff) && (0x01 & addr[0]));
 }
 
-extern __inline int is_broadcast_mac_addr(const u8 *addr)
+static inline int is_broadcast_mac_addr(const u8 *addr)
 {
 	return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) &&   \
 		(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
 }
 
-extern __inline int is_zero_mac_addr(const u8 *addr)
+static inline int is_zero_mac_addr(const u8 *addr)
 {
 	return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) &&   \
 		(addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index eac63aa..93742db 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -265,7 +265,8 @@
 		return;
 	}
 
-	speakup_tty->ops->send_xchar(speakup_tty, ch);
+	if (speakup_tty->ops->send_xchar)
+		speakup_tty->ops->send_xchar(speakup_tty, ch);
 	mutex_unlock(&speakup_tty_mutex);
 }
 
@@ -277,7 +278,8 @@
 		return;
 	}
 
-	speakup_tty->ops->tiocmset(speakup_tty, set, clear);
+	if (speakup_tty->ops->tiocmset)
+		speakup_tty->ops->tiocmset(speakup_tty, set, clear);
 	mutex_unlock(&speakup_tty_mutex);
 }
 
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 3b8d237..649caae 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1090,8 +1090,8 @@
 		vif->wilc = *wilc;
 		vif->ndev = ndev;
 		wl->vif[i] = vif;
-		wl->vif_num = i;
-		vif->idx = wl->vif_num;
+		wl->vif_num = i + 1;
+		vif->idx = i;
 
 		ndev->netdev_ops = &wilc_netdev_ops;
 
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index b2080d8..e52c3bd 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -831,6 +831,7 @@
 	if (!g_sdio.irq_gpio) {
 		int i;
 
+		cmd.read_write = 0;
 		cmd.function = 1;
 		cmd.address = 0x04;
 		cmd.data = 0;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 8de1601..b19c960 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -598,9 +598,12 @@
 	mutex_unlock(&cdev_list_lock);
 }
 
+static void __cxgbit_free_conn(struct cxgbit_sock *csk);
+
 void cxgbit_free_np(struct iscsi_np *np)
 {
 	struct cxgbit_np *cnp = np->np_context;
+	struct cxgbit_sock *csk, *tmp;
 
 	cnp->com.state = CSK_STATE_DEAD;
 	if (cnp->com.cdev)
@@ -608,6 +611,13 @@
 	else
 		cxgbit_free_all_np(cnp);
 
+	spin_lock_bh(&cnp->np_accept_lock);
+	list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
+		list_del_init(&csk->accept_node);
+		__cxgbit_free_conn(csk);
+	}
+	spin_unlock_bh(&cnp->np_accept_lock);
+
 	np->np_context = NULL;
 	cxgbit_put_cnp(cnp);
 }
@@ -631,8 +641,11 @@
 
 static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
 {
+	struct cxgbit_sock *csk = handle;
+
 	pr_debug("%s cxgbit_device %p\n", __func__, handle);
 	kfree_skb(skb);
+	cxgbit_put_csk(csk);
 }
 
 static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
@@ -705,9 +718,9 @@
 			      csk->tid, 600, __func__);
 }
 
-void cxgbit_free_conn(struct iscsi_conn *conn)
+static void __cxgbit_free_conn(struct cxgbit_sock *csk)
 {
-	struct cxgbit_sock *csk = conn->context;
+	struct iscsi_conn *conn = csk->conn;
 	bool release = false;
 
 	pr_debug("%s: state %d\n",
@@ -716,7 +729,7 @@
 	spin_lock_bh(&csk->lock);
 	switch (csk->com.state) {
 	case CSK_STATE_ESTABLISHED:
-		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+		if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
 			csk->com.state = CSK_STATE_CLOSING;
 			cxgbit_send_halfclose(csk);
 		} else {
@@ -741,6 +754,11 @@
 		cxgbit_put_csk(csk);
 }
 
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+	__cxgbit_free_conn(conn->context);
+}
+
 static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
 {
 	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
@@ -803,6 +821,7 @@
 	spin_unlock_bh(&cdev->cskq.lock);
 
 	cxgbit_free_skb(csk);
+	cxgbit_put_cnp(csk->cnp);
 	cxgbit_put_cdev(cdev);
 
 	kfree(csk);
@@ -1190,7 +1209,7 @@
 	rpl5->opt0 = cpu_to_be64(opt0);
 	rpl5->opt2 = cpu_to_be32(opt2);
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
-	t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
+	t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
 	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
 }
 
@@ -1351,6 +1370,7 @@
 		goto rel_skb;
 	}
 
+	cxgbit_get_cnp(cnp);
 	cxgbit_get_cdev(cdev);
 
 	spin_lock(&cdev->cskq.lock);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index f3f8856..c011c82 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -58,6 +58,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&cdev->kref);
+	spin_lock_init(&cdev->np_lock);
 
 	cdev->lldi = *lldi;
 
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index cb0461a1..93424db 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -108,12 +108,17 @@
 
 	buf[7] = 0x2; /* CmdQue=1 */
 
-	memcpy(&buf[8], "LIO-ORG ", 8);
-	memset(&buf[16], 0x20, 16);
+	/*
+	 * ASCII data fields described as being left-aligned shall have any
+	 * unused bytes at the end of the field (i.e., highest offset) and the
+	 * unused bytes shall be filled with ASCII space characters (20h).
+	 */
+	memset(&buf[8], 0x20, 8 + 16 + 4);
+	memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
 	memcpy(&buf[16], dev->t10_wwn.model,
-	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
+	       strnlen(dev->t10_wwn.model, 16));
 	memcpy(&buf[32], dev->t10_wwn.revision,
-	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
+	       strnlen(dev->t10_wwn.revision, 4));
 	buf[4] = 31; /* Set additional length to 31 */
 
 	return 0;
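For reference, these offsets correspond to the standard INQUIRY data fields that the single memset now covers (per SPC, all left-aligned ASCII padded with 20h):

    /* region cleared by memset(&buf[8], 0x20, 8 + 16 + 4) above:
     *   buf[8..15]   T10 vendor identification  ("LIO-ORG", space padded)
     *   buf[16..31]  product identification     (t10_wwn.model)
     *   buf[32..35]  product revision level     (t10_wwn.revision)
     */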
@@ -251,7 +256,9 @@
 	buf[off] = 0x2; /* ASCII */
 	buf[off+1] = 0x1; /* T10 Vendor ID */
 	buf[off+2] = 0x0;
-	memcpy(&buf[off+4], "LIO-ORG", 8);
+	/* left align Vendor ID and pad with spaces */
+	memset(&buf[off+4], 0x20, 8);
+	memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
 	/* Extra Byte for NULL Terminator */
 	id_len++;
 	/* Identifier Length */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index fc3093d2..f1b730b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -224,19 +224,28 @@
 	sub_api_initialized = 1;
 }
 
+static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
+{
+	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
+
+	wake_up(&sess->cmd_list_wq);
+}
+
 /**
  * transport_init_session - initialize a session object
  * @se_sess: Session object pointer.
  *
  * The caller must have zero-initialized @se_sess before calling this function.
  */
-void transport_init_session(struct se_session *se_sess)
+int transport_init_session(struct se_session *se_sess)
 {
 	INIT_LIST_HEAD(&se_sess->sess_list);
 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
 	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
 	spin_lock_init(&se_sess->sess_cmd_lock);
 	init_waitqueue_head(&se_sess->cmd_list_wq);
+	return percpu_ref_init(&se_sess->cmd_count,
+			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
 }
 EXPORT_SYMBOL(transport_init_session);
 
@@ -247,6 +256,7 @@
 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
+	int ret;
 
 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
 	if (!se_sess) {
@@ -254,7 +264,11 @@
 				" se_sess_cache\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	transport_init_session(se_sess);
+	ret = transport_init_session(se_sess);
+	if (ret < 0) {
+		kmem_cache_free(se_sess_cache, se_sess);
+		return ERR_PTR(ret);
+	}
 	se_sess->sup_prot_ops = sup_prot_ops;
 
 	return se_sess;
@@ -581,6 +595,7 @@
 		sbitmap_queue_free(&se_sess->sess_tag_pool);
 		kvfree(se_sess->sess_cmd_map);
 	}
+	percpu_ref_exit(&se_sess->cmd_count);
 	kmem_cache_free(se_sess_cache, se_sess);
 }
 EXPORT_SYMBOL(transport_free_session);
@@ -2724,6 +2739,7 @@
 	}
 	se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
+	percpu_ref_get(&se_sess->cmd_count);
 out:
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
@@ -2754,8 +2770,6 @@
 	if (se_sess) {
 		spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 		list_del_init(&se_cmd->se_cmd_list);
-		if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
-			wake_up(&se_sess->cmd_list_wq);
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 	}
 
@@ -2763,6 +2777,8 @@
 	se_cmd->se_tfo->release_cmd(se_cmd);
 	if (compl)
 		complete(compl);
+
+	percpu_ref_put(&se_sess->cmd_count);
 }
 
 /**
@@ -2891,6 +2907,8 @@
 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	se_sess->sess_tearing_down = 1;
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+	percpu_ref_kill(&se_sess->cmd_count);
 }
 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
 
@@ -2905,17 +2923,14 @@
 
 	WARN_ON_ONCE(!se_sess->sess_tearing_down);
 
-	spin_lock_irq(&se_sess->sess_cmd_lock);
 	do {
-		ret = wait_event_lock_irq_timeout(
-				se_sess->cmd_list_wq,
-				list_empty(&se_sess->sess_cmd_list),
-				se_sess->sess_cmd_lock, 180 * HZ);
+		ret = wait_event_timeout(se_sess->cmd_list_wq,
+				percpu_ref_is_zero(&se_sess->cmd_count),
+				180 * HZ);
 		list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
 			target_show_cmd("session shutdown: still waiting for ",
 					cmd);
 	} while (ret <= 0);
-	spin_unlock_irq(&se_sess->sess_cmd_lock);
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9cd404a..ac76201 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -148,7 +148,7 @@
 	size_t ring_size;
 
 	struct mutex cmdr_lock;
-	struct list_head cmdr_queue;
+	struct list_head qfull_queue;
 
 	uint32_t dbi_max;
 	uint32_t dbi_thresh;
@@ -159,6 +159,7 @@
 
 	struct timer_list cmd_timer;
 	unsigned int cmd_time_out;
+	struct list_head inflight_queue;
 
 	struct timer_list qfull_timer;
 	int qfull_time_out;
@@ -179,7 +180,7 @@
 struct tcmu_cmd {
 	struct se_cmd *se_cmd;
 	struct tcmu_dev *tcmu_dev;
-	struct list_head cmdr_queue_entry;
+	struct list_head queue_entry;
 
 	uint16_t cmd_id;
 
@@ -192,6 +193,7 @@
 	unsigned long deadline;
 
 #define TCMU_CMD_BIT_EXPIRED 0
+#define TCMU_CMD_BIT_INFLIGHT 1
 	unsigned long flags;
 };
 /*
@@ -586,7 +588,7 @@
 	if (!tcmu_cmd)
 		return NULL;
 
-	INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
+	INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
 	tcmu_cmd->se_cmd = se_cmd;
 	tcmu_cmd->tcmu_dev = udev;
 
@@ -915,11 +917,13 @@
 		return 0;
 
 	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
-	mod_timer(timer, tcmu_cmd->deadline);
+	if (!timer_pending(timer))
+		mod_timer(timer, tcmu_cmd->deadline);
+
 	return 0;
 }
 
-static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
+static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
 {
 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
 	unsigned int tmo;
@@ -942,7 +946,7 @@
 	if (ret)
 		return ret;
 
-	list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
+	list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
 	pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
 		 tcmu_cmd->cmd_id, udev->name);
 	return 0;
@@ -999,7 +1003,7 @@
 	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
 	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
 
-	if (!list_empty(&udev->cmdr_queue))
+	if (!list_empty(&udev->qfull_queue))
 		goto queue;
 
 	mb = udev->mb_addr;
@@ -1096,13 +1100,16 @@
 	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
 	tcmu_flush_dcache_range(mb, sizeof(*mb));
 
+	list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
+	set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
+
 	/* TODO: only if FLUSH and FUA? */
 	uio_event_notify(&udev->uio_info);
 
 	return 0;
 
 queue:
-	if (add_to_cmdr_queue(tcmu_cmd)) {
+	if (add_to_qfull_queue(tcmu_cmd)) {
 		*scsi_err = TCM_OUT_OF_RESOURCES;
 		return -1;
 	}
@@ -1145,6 +1152,8 @@
 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
 		goto out;
 
+	list_del_init(&cmd->queue_entry);
+
 	tcmu_cmd_reset_dbi_cur(cmd);
 
 	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
@@ -1194,9 +1203,29 @@
 	tcmu_free_cmd(cmd);
 }
 
+static void tcmu_set_next_deadline(struct list_head *queue,
+				   struct timer_list *timer)
+{
+	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
+	unsigned long deadline = 0;
+
+	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
+		if (!time_after(jiffies, tcmu_cmd->deadline)) {
+			deadline = tcmu_cmd->deadline;
+			break;
+		}
+	}
+
+	if (deadline)
+		mod_timer(timer, deadline);
+	else
+		del_timer(timer);
+}
+
 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
 {
 	struct tcmu_mailbox *mb;
+	struct tcmu_cmd *cmd;
 	int handled = 0;
 
 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
@@ -1210,7 +1239,6 @@
 	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
 
 		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
-		struct tcmu_cmd *cmd;
 
 		tcmu_flush_dcache_range(entry, sizeof(*entry));
 
@@ -1243,7 +1271,7 @@
 		/* no more pending commands */
 		del_timer(&udev->cmd_timer);
 
-		if (list_empty(&udev->cmdr_queue)) {
+		if (list_empty(&udev->qfull_queue)) {
 			/*
 			 * no more pending or waiting commands so try to
 			 * reclaim blocks if needed.
@@ -1252,6 +1280,8 @@
 			    tcmu_global_max_blocks)
 				schedule_delayed_work(&tcmu_unmap_work, 0);
 		}
+	} else if (udev->cmd_time_out) {
+		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
 	}
 
 	return handled;
@@ -1271,7 +1301,7 @@
 	if (!time_after(jiffies, cmd->deadline))
 		return 0;
 
-	is_running = list_empty(&cmd->cmdr_queue_entry);
+	is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
 	se_cmd = cmd->se_cmd;
 
 	if (is_running) {
@@ -1288,12 +1318,11 @@
 		 */
 		scsi_status = SAM_STAT_CHECK_CONDITION;
 	} else {
-		list_del_init(&cmd->cmdr_queue_entry);
-
 		idr_remove(&udev->commands, id);
 		tcmu_free_cmd(cmd);
 		scsi_status = SAM_STAT_TASK_SET_FULL;
 	}
+	list_del_init(&cmd->queue_entry);
 
 	pr_debug("Timing out cmd %u on dev %s that is %s.\n",
 		 id, udev->name, is_running ? "inflight" : "queued");
@@ -1372,7 +1401,8 @@
 
 	INIT_LIST_HEAD(&udev->node);
 	INIT_LIST_HEAD(&udev->timedout_entry);
-	INIT_LIST_HEAD(&udev->cmdr_queue);
+	INIT_LIST_HEAD(&udev->qfull_queue);
+	INIT_LIST_HEAD(&udev->inflight_queue);
 	idr_init(&udev->commands);
 
 	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
@@ -1383,7 +1413,7 @@
 	return &udev->se_dev;
 }
 
-static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
+static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
 {
 	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
 	LIST_HEAD(cmds);
@@ -1391,15 +1421,15 @@
 	sense_reason_t scsi_ret;
 	int ret;
 
-	if (list_empty(&udev->cmdr_queue))
+	if (list_empty(&udev->qfull_queue))
 		return true;
 
 	pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
 
-	list_splice_init(&udev->cmdr_queue, &cmds);
+	list_splice_init(&udev->qfull_queue, &cmds);
 
-	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
-		list_del_init(&tcmu_cmd->cmdr_queue_entry);
+	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
+		list_del_init(&tcmu_cmd->queue_entry);
 
 	        pr_debug("removing cmd %u on dev %s from queue\n",
 		         tcmu_cmd->cmd_id, udev->name);
@@ -1437,14 +1467,13 @@
 			 * cmd was requeued, so just put all cmds back in
 			 * the queue
 			 */
-			list_splice_tail(&cmds, &udev->cmdr_queue);
+			list_splice_tail(&cmds, &udev->qfull_queue);
 			drained = false;
-			goto done;
+			break;
 		}
 	}
-	if (list_empty(&udev->cmdr_queue))
-		del_timer(&udev->qfull_timer);
-done:
+
+	tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
 	return drained;
 }
 
@@ -1454,7 +1483,7 @@
 
 	mutex_lock(&udev->cmdr_lock);
 	tcmu_handle_completions(udev);
-	run_cmdr_queue(udev, false);
+	run_qfull_queue(udev, false);
 	mutex_unlock(&udev->cmdr_lock);
 
 	return 0;
@@ -1982,7 +2011,7 @@
 	/* complete IO that has executed successfully */
 	tcmu_handle_completions(udev);
 	/* fail IO waiting to be queued */
-	run_cmdr_queue(udev, true);
+	run_qfull_queue(udev, true);
 
 unlock:
 	mutex_unlock(&udev->cmdr_lock);
@@ -1997,7 +2026,7 @@
 	mutex_lock(&udev->cmdr_lock);
 
 	idr_for_each_entry(&udev->commands, cmd, i) {
-		if (!list_empty(&cmd->cmdr_queue_entry))
+		if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
 			continue;
 
 		pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
@@ -2006,6 +2035,7 @@
 
 		idr_remove(&udev->commands, i);
 		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+			list_del_init(&cmd->queue_entry);
 			if (err_level == 1) {
 				/*
 				 * Userspace was not able to start the
@@ -2666,6 +2696,10 @@
 
 		mutex_lock(&udev->cmdr_lock);
 		idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
+
+		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+
 		mutex_unlock(&udev->cmdr_lock);
 
 		spin_lock_bh(&timed_out_udevs_lock);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 2718a93..7cdb5d7 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -480,6 +480,8 @@
 
 int target_xcopy_setup_pt(void)
 {
+	int ret;
+
 	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
 	if (!xcopy_wq) {
 		pr_err("Unable to allocate xcopy_wq\n");
@@ -497,7 +499,9 @@
 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
 	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
-	transport_init_session(&xcopy_pt_sess);
+	ret = transport_init_session(&xcopy_pt_sess);
+	if (ret < 0)
+		return ret;
 
 	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
 	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index df35fc0..43626e1 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -19,7 +19,7 @@
 struct optee_supp_req {
 	struct list_head link;
 
-	bool busy;
+	bool in_queue;
 	u32 func;
 	u32 ret;
 	size_t num_params;
@@ -54,7 +54,6 @@
 
 	/* Abort all request retrieved by supplicant */
 	idr_for_each_entry(&supp->idr, req, id) {
-		req->busy = false;
 		idr_remove(&supp->idr, id);
 		req->ret = TEEC_ERROR_COMMUNICATION;
 		complete(&req->c);
@@ -63,6 +62,7 @@
 	/* Abort all queued requests */
 	list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
 		list_del(&req->link);
+		req->in_queue = false;
 		req->ret = TEEC_ERROR_COMMUNICATION;
 		complete(&req->c);
 	}
@@ -103,6 +103,7 @@
 	/* Insert the request in the request list */
 	mutex_lock(&supp->mutex);
 	list_add_tail(&req->link, &supp->reqs);
+	req->in_queue = true;
 	mutex_unlock(&supp->mutex);
 
 	/* Tell an eventual waiter there's a new request */
@@ -130,9 +131,10 @@
 			 * will serve all requests in a timely manner and
 			 * interrupting then wouldn't make sense.
 			 */
-			interruptable = !req->busy;
-			if (!req->busy)
+			if (req->in_queue) {
 				list_del(&req->link);
+				req->in_queue = false;
+			}
 		}
 		mutex_unlock(&supp->mutex);
 
@@ -176,7 +178,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	list_del(&req->link);
-	req->busy = true;
+	req->in_queue = false;
 
 	return req;
 }
@@ -318,7 +320,6 @@
 	if ((num_params - nm) != req->num_params)
 		return ERR_PTR(-EINVAL);
 
-	req->busy = false;
 	idr_remove(&supp->idr, id);
 	supp->req_id = -1;
 	*num_meta = nm;
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 23ad4f9..24b006a 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -27,6 +27,8 @@
 #include <linux/platform_device.h>
 #include <linux/thermal.h>
 
+#include "../thermal_hwmon.h"
+
 #define BCM2835_TS_TSENSCTL			0x00
 #define BCM2835_TS_TSENSSTAT			0x04
 
@@ -275,6 +277,15 @@
 
 	platform_set_drvdata(pdev, tz);
 
+	/*
+	 * The thermal zone core does not enable hwmon by default,
+	 * so enable it here.
+	 */
+	tz->tzp->no_hwmon = false;
+	err = thermal_add_hwmon_sysfs(tz);
+	if (err)
+		goto err_tz;
+
 	bcm2835_thermal_debugfs(pdev);
 
 	return 0;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 3037b9d..27d178b 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
+#include <linux/energy_model.h>
 #include <linux/of_device.h>
 
 #include <trace/events/thermal.h>
@@ -49,19 +50,6 @@
  */
 
 /**
- * struct freq_table - frequency table along with power entries
- * @frequency:	frequency in KHz
- * @power:	power in mW
- *
- * This structure is built when the cooling device registers and helps
- * in translating frequency to power and vice versa.
- */
-struct freq_table {
-	u32 frequency;
-	u32 power;
-};
-
-/**
  * struct time_in_idle - Idle time stats
  * @time: previous reading of the absolute time that this cpu was idle
  * @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
@@ -86,12 +74,13 @@
  *	frequency.
  * @max_level: maximum cooling level. One less than total number of valid
  *	cpufreq frequencies.
- * @freq_table: Freq table in descending order of frequencies
+ * @em: Reference on the Energy Model of the device
  * @cdev: thermal_cooling_device pointer to keep track of the
  *	registered cooling device.
  * @policy: cpufreq policy.
  * @node: list_head to link all cpufreq_cooling_device together.
  * @idle_time: idle time stats
+ * @plat_ops: platform mitigation ops.
  *
  * This structure is required for keeping information of each registered
  * cpufreq_cooling_device.
@@ -104,7 +93,7 @@
 	unsigned int cpufreq_floor_state;
 	unsigned int floor_freq;
 	unsigned int max_level;
-	struct freq_table *freq_table;	/* In descending order */
+	struct em_perf_domain *em;
 	struct thermal_cooling_device *cdev;
 	struct cpufreq_policy *policy;
 	struct list_head node;
@@ -118,26 +107,6 @@
 /* Below code defines functions to be used for cpufreq as cooling device */
 
 /**
- * get_level: Find the level for a particular frequency
- * @cpufreq_cdev: cpufreq_cdev for which the property is required
- * @freq: Frequency
- *
- * Return: level corresponding to the frequency.
- */
-static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
-			       unsigned int freq)
-{
-	struct freq_table *freq_table = cpufreq_cdev->freq_table;
-	unsigned long level;
-
-	for (level = 1; level <= cpufreq_cdev->max_level; level++)
-		if (freq > freq_table[level].frequency)
-			break;
-
-	return level - 1;
-}
-
-/**
  * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
  * @nb:	struct notifier_block * with callback info.
  * @event: value showing cpufreq event for which this function invoked.
@@ -195,105 +164,52 @@
 	return NOTIFY_OK;
 }
 
+#ifdef CONFIG_ENERGY_MODEL
 /**
- * update_freq_table() - Update the freq table with power numbers
- * @cpufreq_cdev:	the cpufreq cooling device in which to update the table
- * @capacitance: dynamic power coefficient for these cpus
+ * get_level: Find the level for a particular frequency
+ * @cpufreq_cdev: cpufreq_cdev for which the property is required
+ * @freq: Frequency
  *
- * Update the freq table with power numbers.  This table will be used in
- * cpu_power_to_freq() and cpu_freq_to_power() to convert between power and
- * frequency efficiently.  Power is stored in mW, frequency in KHz.  The
- * resulting table is in descending order.
- *
- * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
- * or -ENOMEM if we run out of memory.
+ * Return: level corresponding to the frequency.
  */
-static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
-			     u32 capacitance)
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
+			       unsigned int freq)
 {
-	struct freq_table *freq_table = cpufreq_cdev->freq_table;
-	struct dev_pm_opp *opp;
-	struct device *dev = NULL;
-	int num_opps = 0, cpu = cpufreq_cdev->policy->cpu, i;
+	int i;
 
-	dev = get_cpu_device(cpu);
-	if (unlikely(!dev)) {
-		dev_warn(&cpufreq_cdev->cdev->device,
-			 "No cpu device for cpu %d\n", cpu);
-		return -ENODEV;
+	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
+		if (freq > cpufreq_cdev->em->table[i].frequency)
+			break;
 	}
 
-	num_opps = dev_pm_opp_get_opp_count(dev);
-	if (num_opps < 0)
-		return num_opps;
-
-	/*
-	 * The cpufreq table is also built from the OPP table and so the count
-	 * should match.
-	 */
-	if (num_opps != cpufreq_cdev->max_level + 1) {
-		dev_warn(dev, "Number of OPPs not matching with max_levels\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i <= cpufreq_cdev->max_level; i++) {
-		unsigned long freq = freq_table[i].frequency * 1000;
-		u32 freq_mhz = freq_table[i].frequency / 1000;
-		u64 power;
-		u32 voltage_mv;
-
-		/*
-		 * Find ceil frequency as 'freq' may be slightly lower than OPP
-		 * freq due to truncation while converting to kHz.
-		 */
-		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-		if (IS_ERR(opp)) {
-			dev_err(dev, "failed to get opp for %lu frequency\n",
-				freq);
-			return -EINVAL;
-		}
-
-		voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
-		dev_pm_opp_put(opp);
-
-		/*
-		 * Do the multiplication with MHz and millivolt so as
-		 * to not overflow.
-		 */
-		power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
-		do_div(power, 1000000000);
-
-		/* power is stored in mW */
-		freq_table[i].power = power;
-	}
-
-	return 0;
+	return cpufreq_cdev->max_level - i - 1;
 }
 
+
 static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
 			     u32 freq)
 {
 	int i;
-	struct freq_table *freq_table = cpufreq_cdev->freq_table;
 
-	for (i = 1; i <= cpufreq_cdev->max_level; i++)
-		if (freq > freq_table[i].frequency)
+	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
+		if (freq > cpufreq_cdev->em->table[i].frequency)
 			break;
+	}
 
-	return freq_table[i - 1].power;
+	return cpufreq_cdev->em->table[i + 1].power;
 }
 
 static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
 			     u32 power)
 {
 	int i;
-	struct freq_table *freq_table = cpufreq_cdev->freq_table;
 
-	for (i = 1; i <= cpufreq_cdev->max_level; i++)
-		if (power > freq_table[i].power)
+	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
+		if (power > cpufreq_cdev->em->table[i].power)
 			break;
+	}
 
-	return freq_table[i - 1].frequency;
+	return cpufreq_cdev->em->table[i + 1].frequency;
 }
 
 /**
@@ -343,6 +259,7 @@
 	raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
 	return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
 }
+#endif
 
 /* cpufreq cooling device callback functions are defined below */
 
@@ -386,6 +303,50 @@
 }
 
 /**
+ * cpufreq_get_cur_state - callback function to get the current cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the current cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
+				 unsigned long *state)
+{
+	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+
+	*state = cpufreq_cdev->cpufreq_state;
+
+	return 0;
+}
+
+static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
+			      unsigned long state)
+{
+	struct cpufreq_policy *policy;
+	unsigned long idx;
+
+#ifdef CONFIG_ENERGY_MODEL
+	/* Use the Energy Model table if available */
+	if (cpufreq_cdev->em) {
+		idx = cpufreq_cdev->max_level - state;
+		return cpufreq_cdev->em->table[idx].frequency;
+	}
+#endif
+
+	/* Otherwise, fallback on the CPUFreq table */
+	policy = cpufreq_cdev->policy;
+	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+		idx = cpufreq_cdev->max_level - state;
+	else
+		idx = state;
+
+	return policy->freq_table[idx].frequency;
+}
+
+/**
  * cpufreq_set_min_state - callback function to set the device floor state.
  * @cdev: thermal cooling device pointer.
  * @state: set this variable to the current cooling state.
@@ -409,7 +370,7 @@
 		return 0;
 
 	cpufreq_cdev->cpufreq_floor_state = state;
-	floor_freq = cpufreq_cdev->freq_table[state].frequency;
+	floor_freq = get_state_freq(cpufreq_cdev, state);
 	cpufreq_cdev->floor_freq = floor_freq;
 
 	/*
@@ -427,26 +388,6 @@
 }
 
 /**
- * cpufreq_get_cur_state - callback function to get the current cooling state.
- * @cdev: thermal cooling device pointer.
- * @state: fill this variable with the current cooling state.
- *
- * Callback for the thermal cooling device to return the cpufreq
- * current cooling state.
- *
- * Return: 0 on success, an error code otherwise.
- */
-static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
-				 unsigned long *state)
-{
-	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
-
-	*state = cpufreq_cdev->cpufreq_state;
-
-	return 0;
-}
-
-/**
  * cpufreq_set_cur_state - callback function to set the current cooling state.
  * @cdev: thermal cooling device pointer.
  * @state: set this variable to the current cooling state.
@@ -470,7 +411,7 @@
 	if (cpufreq_cdev->cpufreq_state == state)
 		return 0;
 
-	clip_freq = cpufreq_cdev->freq_table[state].frequency;
+	clip_freq = get_state_freq(cpufreq_cdev, state);
 	cpufreq_cdev->cpufreq_state = state;
 	cpufreq_cdev->clipped_freq = clip_freq;
 
@@ -488,6 +429,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_ENERGY_MODEL
 /**
  * cpufreq_get_requested_power() - get the current power
  * @cdev:	&thermal_cooling_device pointer
@@ -578,7 +520,7 @@
 			       struct thermal_zone_device *tz,
 			       unsigned long state, u32 *power)
 {
-	unsigned int freq, num_cpus;
+	unsigned int freq, num_cpus, idx;
 	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
 
 	/* Request state should be less than max_level */
@@ -587,7 +529,8 @@
 
 	num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
 
-	freq = cpufreq_cdev->freq_table[state].frequency;
+	idx = cpufreq_cdev->max_level - state;
+	freq = cpufreq_cdev->em->table[idx].frequency;
 	*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
 
 	return 0;
@@ -634,6 +577,18 @@
 	return 0;
 }
 
+static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
+	.get_max_state		= cpufreq_get_max_state,
+	.get_cur_state		= cpufreq_get_cur_state,
+	.set_cur_state		= cpufreq_set_cur_state,
+	.set_min_state		= cpufreq_set_min_state,
+	.get_min_state		= cpufreq_get_min_state,
+	.get_requested_power	= cpufreq_get_requested_power,
+	.state2power		= cpufreq_state2power,
+	.power2state		= cpufreq_power2state,
+};
+#endif
+
 /* Bind cpufreq callbacks to thermal cooling device ops */
 
 static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
@@ -644,40 +599,17 @@
 	.get_min_state = cpufreq_get_min_state,
 };
 
-static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
-	.get_max_state		= cpufreq_get_max_state,
-	.get_cur_state		= cpufreq_get_cur_state,
-	.set_cur_state		= cpufreq_set_cur_state,
-	.get_requested_power	= cpufreq_get_requested_power,
-	.state2power		= cpufreq_state2power,
-	.power2state		= cpufreq_power2state,
-};
-
 /* Notifier for cpufreq policy change */
 static struct notifier_block thermal_cpufreq_notifier_block = {
 	.notifier_call = cpufreq_thermal_notifier,
 };
 
-static unsigned int find_next_max(struct cpufreq_frequency_table *table,
-				  unsigned int prev_max)
-{
-	struct cpufreq_frequency_table *pos;
-	unsigned int max = 0;
-
-	cpufreq_for_each_valid_entry(pos, table) {
-		if (pos->frequency > max && pos->frequency < prev_max)
-			max = pos->frequency;
-	}
-
-	return max;
-}
-
 /**
  * __cpufreq_cooling_register - helper function to create cpufreq cooling device
  * @np: a valid struct device_node to the cooling device device tree node
  * @policy: cpufreq policy
  * Normally this should be same as cpufreq policy->related_cpus.
- * @capacitance: dynamic power coefficient for these cpus
+ * @try_model: true if a power model should be used
  * @plat_ops: function that does the mitigation by changing the
  *                   frequencies (Optional). By default, cpufreq framework will
  *                   be notified of the new limits.
@@ -692,14 +624,13 @@
  */
 static struct thermal_cooling_device *
 __cpufreq_cooling_register(struct device_node *np,
-			struct cpufreq_policy *policy, u32 capacitance,
+			struct cpufreq_policy *policy, bool try_model,
 			struct cpu_cooling_ops *plat_ops)
 {
 	struct thermal_cooling_device *cdev;
 	struct cpufreq_cooling_device *cpufreq_cdev;
 	char dev_name[THERMAL_NAME_LENGTH];
-	unsigned int freq, i, num_cpus;
-	int ret;
+	unsigned int i, num_cpus;
 	struct thermal_cooling_device_ops *cooling_ops;
 	bool first;
 
@@ -732,53 +663,35 @@
 	/* max_level is an index, not a counter */
 	cpufreq_cdev->max_level = i - 1;
 
-	cpufreq_cdev->freq_table = kmalloc_array(i,
-					sizeof(*cpufreq_cdev->freq_table),
-					GFP_KERNEL);
-	if (!cpufreq_cdev->freq_table) {
-		cdev = ERR_PTR(-ENOMEM);
-		goto free_idle_time;
-	}
+#ifdef CONFIG_ENERGY_MODEL
+	if (try_model) {
+		struct em_perf_domain *em = em_cpu_get(policy->cpu);
+
+		if (!em || !cpumask_equal(policy->related_cpus,
+					  to_cpumask(em->cpus))) {
+			cdev = ERR_PTR(-EINVAL);
+			goto free_idle_time;
+		}
+		cpufreq_cdev->em = em;
+		cooling_ops = &cpufreq_power_cooling_ops;
+	} else
+#endif
+		cooling_ops = &cpufreq_cooling_ops;
 
 	cpufreq_cdev->id = policy->cpu;
 
 	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
 		 cpufreq_cdev->id);
-
-	/* Fill freq-table in descending order of frequencies */
-	for (i = 0, freq = -1; i <= cpufreq_cdev->max_level; i++) {
-		freq = find_next_max(policy->freq_table, freq);
-		cpufreq_cdev->freq_table[i].frequency = freq;
-
-		/* Warn for duplicate entries */
-		if (!freq)
-			pr_warn("%s: table has duplicate entries\n", __func__);
-		else
-			pr_debug("%s: freq:%u KHz\n", __func__, freq);
-	}
-
-	if (capacitance) {
-		ret = update_freq_table(cpufreq_cdev, capacitance);
-		if (ret) {
-			cdev = ERR_PTR(ret);
-			goto free_table;
-		}
-
-		cooling_ops = &cpufreq_power_cooling_ops;
-	} else {
-		cooling_ops = &cpufreq_cooling_ops;
-	}
-
 	cpufreq_cdev->plat_ops = plat_ops;
 
 	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
 						  cooling_ops);
 	if (IS_ERR(cdev))
-		goto free_table;
+		goto free_idle_time;
 
-	cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
-	cpufreq_cdev->floor_freq =
-		cpufreq_cdev->freq_table[cpufreq_cdev->max_level].frequency;
+	cpufreq_cdev->clipped_freq = get_state_freq(cpufreq_cdev, 0);
+	cpufreq_cdev->floor_freq = get_state_freq(cpufreq_cdev,
+					cpufreq_cdev->max_level);
 	cpufreq_cdev->cpufreq_floor_state = cpufreq_cdev->max_level;
 	cpufreq_cdev->cdev = cdev;
 
@@ -794,8 +707,6 @@
 
 	return cdev;
 
-free_table:
-	kfree(cpufreq_cdev->freq_table);
 free_idle_time:
 	kfree(cpufreq_cdev->idle_time);
 free_cdev:
@@ -817,7 +728,7 @@
 struct thermal_cooling_device *
 cpufreq_cooling_register(struct cpufreq_policy *policy)
 {
-	return __cpufreq_cooling_register(NULL, policy, 0, NULL);
+	return __cpufreq_cooling_register(NULL, policy, false, NULL);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
 
@@ -845,7 +756,6 @@
 {
 	struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
 	struct thermal_cooling_device *cdev = NULL;
-	u32 capacitance = 0;
 
 	if (!np) {
 		pr_err("cpu_cooling: OF node not available for cpu%d\n",
@@ -854,11 +764,7 @@
 	}
 
 	if (of_find_property(np, "#cooling-cells", NULL)) {
-		of_property_read_u32(np, "dynamic-power-coefficient",
-				     &capacitance);
-
-		cdev = __cpufreq_cooling_register(np, policy, capacitance,
-						NULL);
+		cdev = __cpufreq_cooling_register(np, policy, true, NULL);
 		if (IS_ERR(cdev)) {
 			pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
 			       policy->cpu, PTR_ERR(cdev));
@@ -943,7 +849,6 @@
 
 	thermal_cooling_device_unregister(cpufreq_cdev->cdev);
 	kfree(cpufreq_cdev->idle_time);
-	kfree(cpufreq_cdev->freq_table);
 	kfree(cpufreq_cdev);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 284cf2c..8e1cf4d 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -84,7 +84,12 @@
 	struct pci_dev *pci_dev; \
 	struct platform_device *pdev; \
 	struct proc_thermal_device *proc_dev; \
-\
+	\
+	if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
+		dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
+		return 0; \
+	} \
+	\
 	if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
 		pdev = to_platform_device(dev); \
 		proc_dev = platform_get_drvdata(pdev); \
@@ -298,11 +303,6 @@
 	*priv = proc_priv;
 
 	ret = proc_thermal_read_ppcc(proc_priv);
-	if (!ret) {
-		ret = sysfs_create_group(&dev->kobj,
-					 &power_limit_attribute_group);
-
-	}
 	if (ret)
 		return ret;
 
@@ -316,8 +316,7 @@
 
 	proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
 	if (IS_ERR(proc_priv->int340x_zone)) {
-		ret = PTR_ERR(proc_priv->int340x_zone);
-		goto remove_group;
+		return PTR_ERR(proc_priv->int340x_zone);
 	} else
 		ret = 0;
 
@@ -331,9 +330,6 @@
 
 remove_zone:
 	int340x_thermal_zone_remove(proc_priv->int340x_zone);
-remove_group:
-	sysfs_remove_group(&proc_priv->dev->kobj,
-			   &power_limit_attribute_group);
 
 	return ret;
 }
@@ -364,7 +360,10 @@
 	platform_set_drvdata(pdev, proc_priv);
 	proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
 
-	return 0;
+	dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
+
+	return sysfs_create_group(&pdev->dev.kobj,
+					 &power_limit_attribute_group);
 }
 
 static int int3401_remove(struct platform_device *pdev)
@@ -423,7 +422,7 @@
 		proc_priv->soc_dts = intel_soc_dts_iosf_init(
 					INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
 
-		if (proc_priv->soc_dts && pdev->irq) {
+		if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
 			ret = pci_enable_msi(pdev);
 			if (!ret) {
 				ret = request_threaded_irq(pdev->irq, NULL,
@@ -441,7 +440,10 @@
 			dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
 	}
 
-	return 0;
+	dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
+
+	return sysfs_create_group(&pdev->dev.kobj,
+					 &power_limit_attribute_group);
 }
 
 static void  proc_thermal_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/thermal/qcom/qmi_sensors.c b/drivers/thermal/qcom/qmi_sensors.c
index 25e9850..98259a8 100644
--- a/drivers/thermal/qcom/qmi_sensors.c
+++ b/drivers/thermal/qcom/qmi_sensors.c
@@ -32,6 +32,7 @@
 enum qmi_ts_sensor {
 	QMI_TS_PA,
 	QMI_TS_PA_1,
+	QMI_TS_PA_2,
 	QMI_TS_QFE_PA_0,
 	QMI_TS_QFE_WTR_0,
 	QMI_TS_MODEM_MODEM,
@@ -75,6 +76,7 @@
 static char sensor_clients[QMI_TS_MAX_NR][QMI_CLIENT_NAME_LENGTH] = {
 	{"pa"},
 	{"pa_1"},
+	{"pa_2"},
 	{"qfe_pa0"},
 	{"qfe_wtr0"},
 	{"modem_tsens"},
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index bf1c628..e22fc60 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -26,7 +26,7 @@
 
 static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
 {
-	int temp, adc_hi, adc_lo;
+	int temp, temp_hi, temp_lo, adc_hi, adc_lo;
 	int i;
 
 	for (i = 0; i < gti->nlookup_table; i++) {
@@ -36,13 +36,17 @@
 
 	if (i == 0) {
 		temp = gti->lookup_table[0];
-	} else if (i >= (gti->nlookup_table - 1)) {
+	} else if (i >= gti->nlookup_table) {
 		temp = gti->lookup_table[2 * (gti->nlookup_table - 1)];
 	} else {
 		adc_hi = gti->lookup_table[2 * i - 1];
 		adc_lo = gti->lookup_table[2 * i + 1];
-		temp = gti->lookup_table[2 * i];
-		temp -= ((val - adc_lo) * 1000) / (adc_hi - adc_lo);
+
+		temp_hi = gti->lookup_table[2 * i - 2];
+		temp_lo = gti->lookup_table[2 * i];
+
+		temp = temp_hi + mult_frac(temp_lo - temp_hi, val - adc_hi,
+					   adc_lo - adc_hi);
 	}
 
 	return temp;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6120a82..de12162 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -470,16 +470,20 @@
 	store_temperature(tz, temp);
 }
 
-static void thermal_zone_device_reset(struct thermal_zone_device *tz)
+static void thermal_zone_device_init(struct thermal_zone_device *tz)
 {
 	struct thermal_instance *pos;
-
 	tz->temperature = THERMAL_TEMP_INVALID;
-	tz->passive = 0;
 	list_for_each_entry(pos, &tz->thermal_instances, tz_node)
 		pos->initialized = false;
 }
 
+static void thermal_zone_device_reset(struct thermal_zone_device *tz)
+{
+	tz->passive = 0;
+	thermal_zone_device_init(tz);
+}
+
 void thermal_zone_device_update_temp(struct thermal_zone_device *tz,
 				enum thermal_notify_event event, int temp)
 {
@@ -1596,7 +1600,7 @@
 	case PM_POST_SUSPEND:
 		atomic_set(&in_suspend, 0);
 		list_for_each_entry(tz, &thermal_tz_list, node) {
-			thermal_zone_device_reset(tz);
+			thermal_zone_device_init(tz);
 			thermal_zone_device_update(tz,
 						   THERMAL_EVENT_UNSPECIFIED);
 		}
diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h
index 019f6f8..a160b9d 100644
--- a/drivers/thermal/thermal_hwmon.h
+++ b/drivers/thermal/thermal_hwmon.h
@@ -19,13 +19,13 @@
 int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
 void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
 #else
-static int
+static inline int
 thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 {
 	return 0;
 }
 
-static void
+static inline void
 thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 {
 }
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index dabb391..bb63519 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -597,6 +597,7 @@
 				/* too large for caller's buffer */
 				ret = -EOVERFLOW;
 			} else {
+				__set_current_state(TASK_RUNNING);
 				if (copy_to_user(buf, rbuf->buf, rbuf->count))
 					ret = -EFAULT;
 				else
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index f80a300..48bd694 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3420,6 +3420,11 @@
 serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
 {
 	int num_iomem, num_port, first_port = -1, i;
+	int rc;
+
+	rc = serial_pci_is_class_communication(dev);
+	if (rc)
+		return rc;
 
 	/*
 	 * Should we try to make guesses for multiport serial devices later?
@@ -3647,10 +3652,6 @@
 
 	board = &pci_boards[ent->driver_data];
 
-	rc = serial_pci_is_class_communication(dev);
-	if (rc)
-		return rc;
-
 	rc = serial_pci_is_blacklisted(dev);
 	if (rc)
 		return rc;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index ebd33c0..89ade21 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2780,6 +2780,7 @@
 		.name	= "sbsa-uart",
 		.of_match_table = of_match_ptr(sbsa_uart_of_match),
 		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
+		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
 	},
 };
 
@@ -2808,6 +2809,7 @@
 	.drv = {
 		.name	= "uart-pl011",
 		.pm	= &pl011_dev_pm_ops,
+		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
 	},
 	.id_table	= pl011_ids,
 	.probe		= pl011_probe,
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 3f8d127..50b6746 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1477,6 +1477,8 @@
 			else
 				cr1 &= ~UARTCR1_PT;
 		}
+	} else {
+		cr1 &= ~UARTCR1_PE;
 	}
 
 	/* ask the core to calculate the divisor */
@@ -1688,10 +1690,12 @@
 			else
 				ctrl &= ~UARTCTRL_PT;
 		}
+	} else {
+		ctrl &= ~UARTCTRL_PE;
 	}
 
 	/* ask the core to calculate the divisor */
-	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
 
 	spin_lock_irqsave(&sport->port.lock, flags);
 
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 04fec08..0593b4f 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1931,6 +1931,13 @@
 	geni_serial_write_term_regs(uport, port->loopback, tx_trans_cfg,
 		tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
 		stop_bit_len, ser_clk_cfg);
+
+	if (termios->c_cflag & CRTSCTS) {
+		geni_write_reg_nolog(0x0, uport->membase, SE_UART_MANUAL_RFR);
+		IPC_LOG_MSG(port->ipc_log_misc, "%s: Manual flow off\n",
+				__func__);
+	}
+
 	IPC_LOG_MSG(port->ipc_log_misc, "%s: baud %d\n", __func__, baud);
 	IPC_LOG_MSG(port->ipc_log_misc, "Tx: trans_cfg%d parity %d\n",
 						tx_trans_cfg, tx_parity_cfg);
@@ -2407,6 +2414,8 @@
 	if (ret)
 		goto exit_geni_serial_probe;
 
+	dev_port->serial_rsc.ctrl_dev = &pdev->dev;
+
 	if (of_property_read_u32(pdev->dev.of_node, "qcom,wakeup-byte",
 					&wake_char)) {
 		dev_dbg(&pdev->dev, "No Wakeup byte specified\n");
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index fd80d99..0bdf168 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -919,6 +919,7 @@
 	.driver		= {
 		.name	= PIC32_DEV_NAME,
 		.of_match_table	= of_match_ptr(pic32_serial_dt_ids),
+		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_PIC32),
 	},
 };
 
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 1515074..35d1f6fa 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -221,7 +221,7 @@
 	unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
 	u32 geni_ios;
 
-	if (uart_console(uport) || !uart_cts_enabled(uport)) {
+	if (uart_console(uport)) {
 		mctrl |= TIOCM_CTS;
 	} else {
 		geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
@@ -237,7 +237,7 @@
 {
 	u32 uart_manual_rfr = 0;
 
-	if (uart_console(uport) || !uart_cts_enabled(uport))
+	if (uart_console(uport))
 		return;
 
 	if (!(mctrl & TIOCM_RTS))
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 2f8fa18..c6058b5 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1365,11 +1365,14 @@
 	wr_regl(port, S3C2410_ULCON, ulcon);
 	wr_regl(port, S3C2410_UBRDIV, quot);
 
+	port->status &= ~UPSTAT_AUTOCTS;
+
 	umcon = rd_regl(port, S3C2410_UMCON);
 	if (termios->c_cflag & CRTSCTS) {
 		umcon |= S3C2410_UMCOM_AFC;
 		/* Disable RTS when RX FIFO contains 63 bytes */
 		umcon &= ~S3C2412_UMCON_AFC_8;
+		port->status = UPSTAT_AUTOCTS;
 	} else {
 		umcon &= ~S3C2410_UMCOM_AFC;
 	}
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 80bb56f..f0b354b 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -130,6 +130,9 @@
 	struct uart_port *port;
 	unsigned long flags;
 
+	if (!state)
+		return;
+
 	port = uart_port_lock(state, flags);
 	__uart_start(tty);
 	uart_port_unlock(port, flags);
@@ -205,10 +208,15 @@
 	if (!state->xmit.buf) {
 		state->xmit.buf = (unsigned char *) page;
 		uart_circ_clear(&state->xmit);
+		uart_port_unlock(uport, flags);
 	} else {
+		uart_port_unlock(uport, flags);
+		/*
+		 * Do not free() the page under the port lock, see
+		 * uart_shutdown().
+		 */
 		free_page(page);
 	}
-	uart_port_unlock(uport, flags);
 
 	retval = uport->ops->startup(uport);
 	if (retval == 0) {
@@ -268,6 +276,7 @@
 	struct uart_port *uport = uart_port_check(state);
 	struct tty_port *port = &state->port;
 	unsigned long flags = 0;
+	char *xmit_buf = NULL;
 
 	/*
 	 * Set the TTY IO error marker
@@ -298,14 +307,18 @@
 	tty_port_set_suspended(port, 0);
 
 	/*
-	 * Free the transmit buffer page.
+	 * Do not free() the transmit buffer page under the port lock since
+	 * this can create various circular locking scenarios. For instance,
+	 * the console driver may need to allocate/free a debug object,
+	 * which can end up in printk() recursion.
 	 */
 	uart_port_lock(state, flags);
-	if (state->xmit.buf) {
-		free_page((unsigned long)state->xmit.buf);
-		state->xmit.buf = NULL;
-	}
+	xmit_buf = state->xmit.buf;
+	state->xmit.buf = NULL;
 	uart_port_unlock(uport, flags);
+
+	if (xmit_buf)
+		free_page((unsigned long)xmit_buf);
 }
 
 /**
@@ -540,10 +553,12 @@
 	int ret = 0;
 
 	circ = &state->xmit;
-	if (!circ->buf)
-		return 0;
-
 	port = uart_port_lock(state, flags);
+	if (!circ->buf) {
+		uart_port_unlock(port, flags);
+		return 0;
+	}
+
 	if (port && uart_circ_chars_free(circ) != 0) {
 		circ->buf[circ->head] = c;
 		circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
@@ -576,11 +591,13 @@
 		return -EL3HLT;
 	}
 
-	circ = &state->xmit;
-	if (!circ->buf)
-		return 0;
-
 	port = uart_port_lock(state, flags);
+	circ = &state->xmit;
+	if (!circ->buf) {
+		uart_port_unlock(port, flags);
+		return 0;
+	}
+
 	while (port) {
 		c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
 		if (count < c)
@@ -713,6 +730,9 @@
 	upstat_t mask = UPSTAT_SYNC_FIFO;
 	struct uart_port *port;
 
+	if (!state)
+		return;
+
 	port = uart_port_ref(state);
 	if (!port)
 		return;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index effba6c..859b173 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1331,7 +1331,7 @@
 	dma_release_channel(chan);
 }
 
-static void sci_submit_rx(struct sci_port *s)
+static int sci_submit_rx(struct sci_port *s, bool port_lock_held)
 {
 	struct dma_chan *chan = s->chan_rx;
 	struct uart_port *port = &s->port;
@@ -1359,19 +1359,22 @@
 	s->active_rx = s->cookie_rx[0];
 
 	dma_async_issue_pending(chan);
-	return;
+	return 0;
 
 fail:
+	/* Switch to PIO */
+	if (!port_lock_held)
+		spin_lock_irqsave(&port->lock, flags);
 	if (i)
 		dmaengine_terminate_async(chan);
 	for (i = 0; i < 2; i++)
 		s->cookie_rx[i] = -EINVAL;
 	s->active_rx = -EINVAL;
-	/* Switch to PIO */
-	spin_lock_irqsave(&port->lock, flags);
 	s->chan_rx = NULL;
 	sci_start_rx(port);
-	spin_unlock_irqrestore(&port->lock, flags);
+	if (!port_lock_held)
+		spin_unlock_irqrestore(&port->lock, flags);
+	return -EAGAIN;
 }
 
 static void work_fn_tx(struct work_struct *work)
@@ -1491,7 +1494,7 @@
 	}
 
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
-		sci_submit_rx(s);
+		sci_submit_rx(s, true);
 
 	/* Direct new serial port interrupts back to CPU */
 	scr = serial_port_in(port, SCSCR);
@@ -1617,7 +1620,7 @@
 		s->chan_rx_saved = s->chan_rx = chan;
 
 		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
-			sci_submit_rx(s);
+			sci_submit_rx(s, false);
 	}
 }
 
@@ -1666,8 +1669,10 @@
 			disable_irq_nosync(irq);
 			scr |= SCSCR_RDRQE;
 		} else {
+			if (sci_submit_rx(s, false) < 0)
+				goto handle_pio;
+
 			scr &= ~SCSCR_RIE;
-			sci_submit_rx(s);
 		}
 		serial_port_out(port, SCSCR, scr);
 		/* Clear current interrupt */
@@ -1679,6 +1684,8 @@
 
 		return IRQ_HANDLED;
 	}
+
+handle_pio:
 #endif
 
 	if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) {
@@ -1914,7 +1921,7 @@
 
 static void sci_free_irq(struct sci_port *port)
 {
-	int i;
+	int i, j;
 
 	/*
 	 * Intentionally in reverse order so we iterate over the muxed
@@ -1930,6 +1937,13 @@
 		if (unlikely(irq < 0))
 			continue;
 
+		/* Check if already freed (irq was muxed) */
+		for (j = 0; j < i; j++)
+			if (port->irqs[j] == irq)
+				j = i + 1;
+		if (j > i)
+			continue;
+
 		free_irq(port->irqs[i], port);
 		kfree(port->irqstr[i]);
 
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 6cf3e9b..3e774756 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1394,22 +1394,43 @@
 static enum su_type su_get_type(struct device_node *dp)
 {
 	struct device_node *ap = of_find_node_by_path("/aliases");
+	enum su_type rc = SU_PORT_PORT;
 
 	if (ap) {
 		const char *keyb = of_get_property(ap, "keyboard", NULL);
 		const char *ms = of_get_property(ap, "mouse", NULL);
+		struct device_node *match;
 
 		if (keyb) {
-			if (dp == of_find_node_by_path(keyb))
-				return SU_PORT_KBD;
+			match = of_find_node_by_path(keyb);
+
+			/*
+			 * The pointer is used as an identifier, not
+			 * as a pointer, so we can drop the refcount on
+			 * the of_node immediately after getting it.
+			 */
+			of_node_put(match);
+
+			if (dp == match) {
+				rc = SU_PORT_KBD;
+				goto out;
+			}
 		}
 		if (ms) {
-			if (dp == of_find_node_by_path(ms))
-				return SU_PORT_MS;
+			match = of_find_node_by_path(ms);
+
+			of_node_put(match);
+
+			if (dp == match) {
+				rc = SU_PORT_MS;
+				goto out;
+			}
 		}
 	}
 
-	return SU_PORT_PORT;
+out:
+	of_node_put(ap);
+	return rc;
 }
 
 static int su_probe(struct platform_device *op)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index a48f19b..87d8dd9 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -125,7 +125,7 @@
 #define CDNS_UART_IXR_RXTRIG	0x00000001 /* RX FIFO trigger interrupt */
 #define CDNS_UART_IXR_RXFULL	0x00000004 /* RX FIFO full interrupt. */
 #define CDNS_UART_IXR_RXEMPTY	0x00000002 /* RX FIFO empty interrupt. */
-#define CDNS_UART_IXR_MASK	0x00001FFF /* Valid bit mask */
+#define CDNS_UART_IXR_RXMASK	0x000021e7 /* Valid RX bit mask */
 
 	/*
 	 * Do not enable parity error interrupt for the following
@@ -362,7 +362,7 @@
 		cdns_uart_handle_tx(dev_id);
 		isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
 	}
-	if (isrstatus & CDNS_UART_IXR_MASK)
+	if (isrstatus & CDNS_UART_IXR_RXMASK)
 		cdns_uart_handle_rx(dev_id, isrstatus);
 
 	spin_unlock(&port->lock);
@@ -1608,6 +1608,7 @@
 		.name = CDNS_UART_NAME,
 		.of_match_table = cdns_uart_of_match,
 		.pm = &cdns_uart_dev_pm_ops,
+		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_XILINX_PS_UART),
 		},
 };
 
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index d6f42b5..e7d192e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1255,7 +1255,8 @@
 static int tty_reopen(struct tty_struct *tty)
 {
 	struct tty_driver *driver = tty->driver;
-	int retval;
+	struct tty_ldisc *ld;
+	int retval = 0;
 
 	if (driver->type == TTY_DRIVER_TYPE_PTY &&
 	    driver->subtype == PTY_TYPE_MASTER)
@@ -1267,14 +1268,21 @@
 	if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
 		return -EBUSY;
 
-	tty->count++;
+	ld = tty_ldisc_ref_wait(tty);
+	if (ld) {
+		tty_ldisc_deref(ld);
+	} else {
+		retval = tty_ldisc_lock(tty, 5 * HZ);
+		if (retval)
+			return retval;
 
-	if (tty->ldisc)
-		return 0;
+		if (!tty->ldisc)
+			retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+		tty_ldisc_unlock(tty);
+	}
 
-	retval = tty_ldisc_reinit(tty, tty->termios.c_line);
-	if (retval)
-		tty->count--;
+	if (retval == 0)
+		tty->count++;
 
 	return retval;
 }
@@ -2180,7 +2188,8 @@
 	ld = tty_ldisc_ref_wait(tty);
 	if (!ld)
 		return -EIO;
-	ld->ops->receive_buf(tty, &ch, &mbz, 1);
+	if (ld->ops->receive_buf)
+		ld->ops->receive_buf(tty, &ch, &mbz, 1);
 	tty_ldisc_deref(ld);
 	return 0;
 }
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 0c98d88..b989ca2 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -293,6 +293,16 @@
 	if (!locked)
 		atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
 	list_del(&waiter.list);
+
+	/*
+	 * In case of timeout, wake up every reader that gave the right of
+	 * way to the writer. This prevents the readers from separating into
+	 * two groups: one that holds the semaphore and another that sleeps
+	 * (in case of no contention with a writer).
+	 */
+	if (!locked && list_empty(&sem->write_wait))
+		__ldsem_wake_readers(sem);
+
 	raw_spin_unlock_irq(&sem->wait_lock);
 
 	__set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 476ec4b1..da33589 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1275,6 +1275,7 @@
 	if (con_is_visible(vc))
 		update_screen(vc);
 	vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
+	notify_update(vc);
 	return err;
 }
 
@@ -2767,8 +2768,8 @@
 	con_flush(vc, draw_from, draw_to, &draw_x);
 	vc_uniscr_debug_check(vc);
 	console_conditional_schedule();
-	console_unlock();
 	notify_update(vc);
+	console_unlock();
 	return n;
 }
 
@@ -2887,8 +2888,7 @@
 	unsigned char c;
 	static DEFINE_SPINLOCK(printing_lock);
 	const ushort *start;
-	ushort cnt = 0;
-	ushort myx;
+	ushort start_x, cnt;
 	int kmsg_console;
 
 	/* console busy or not yet initialized */
@@ -2901,10 +2901,6 @@
 	if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
 		vc = vc_cons[kmsg_console - 1].d;
 
-	/* read `x' only after setting currcons properly (otherwise
-	   the `x' macro will read the x of the foreground console). */
-	myx = vc->vc_x;
-
 	if (!vc_cons_allocated(fg_console)) {
 		/* impossible */
 		/* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
@@ -2919,53 +2915,41 @@
 		hide_cursor(vc);
 
 	start = (ushort *)vc->vc_pos;
-
-	/* Contrived structure to try to emulate original need_wrap behaviour
-	 * Problems caused when we have need_wrap set on '\n' character */
+	start_x = vc->vc_x;
+	cnt = 0;
 	while (count--) {
 		c = *b++;
 		if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
-			if (cnt > 0) {
-				if (con_is_visible(vc))
-					vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
-				vc->vc_x += cnt;
-				if (vc->vc_need_wrap)
-					vc->vc_x--;
-				cnt = 0;
-			}
+			if (cnt && con_is_visible(vc))
+				vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
+			cnt = 0;
 			if (c == 8) {		/* backspace */
 				bs(vc);
 				start = (ushort *)vc->vc_pos;
-				myx = vc->vc_x;
+				start_x = vc->vc_x;
 				continue;
 			}
 			if (c != 13)
 				lf(vc);
 			cr(vc);
 			start = (ushort *)vc->vc_pos;
-			myx = vc->vc_x;
+			start_x = vc->vc_x;
 			if (c == 10 || c == 13)
 				continue;
 		}
+		vc_uniscr_putc(vc, c);
 		scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
 		notify_write(vc, c);
 		cnt++;
-		if (myx == vc->vc_cols - 1) {
+		if (vc->vc_x == vc->vc_cols - 1) {
 			vc->vc_need_wrap = 1;
-			continue;
-		}
-		vc->vc_pos += 2;
-		myx++;
-	}
-	if (cnt > 0) {
-		if (con_is_visible(vc))
-			vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
-		vc->vc_x += cnt;
-		if (vc->vc_x == vc->vc_cols) {
-			vc->vc_x--;
-			vc->vc_need_wrap = 1;
+		} else {
+			vc->vc_pos += 2;
+			vc->vc_x++;
 		}
 	}
+	if (cnt && con_is_visible(vc))
+		vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
 	set_cursor(vc);
 	notify_update(vc);
 
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 953dff4..3f2aa75 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -207,8 +207,4 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called ulpi.
 
-config USB_ROLE_SWITCH
-	tristate
-	select USB_COMMON
-
 endif # USB_SUPPORT
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 40c64c7..08b8aa5 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -581,6 +581,13 @@
 	if (retval)
 		goto error_init_termios;
 
+	/*
+	 * Suppress initial echoing for some devices which might send data
+	 * immediately after the acm driver has been installed.
+	 */
+	if (acm->quirks & DISABLE_ECHO)
+		tty->termios.c_lflag &= ~ECHO;
+
 	tty->driver_data = acm;
 
 	return 0;
@@ -1672,6 +1679,9 @@
 	{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
 	},
+	{ USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
+	.driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
+	},
 	{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
 	},
@@ -1870,6 +1880,13 @@
 	.driver_info = IGNORE_DEVICE,
 	},
 
+	{ USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
+	.driver_info = SEND_ZERO_PACKET,
+	},
+	{ USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
+	.driver_info = SEND_ZERO_PACKET,
+	},
+
 	/* control interfaces without any protocol set */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
 		USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ca06b20..515aad0 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -140,3 +140,4 @@
 #define QUIRK_CONTROL_LINE_STATE	BIT(6)
 #define CLEAR_HALT_CONDITIONS		BIT(7)
 #define SEND_ZERO_PACKET		BIT(8)
+#define DISABLE_ECHO			BIT(9)
diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile
index fb4d5ef..0a7c45e 100644
--- a/drivers/usb/common/Makefile
+++ b/drivers/usb/common/Makefile
@@ -9,4 +9,3 @@
 
 obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o
 obj-$(CONFIG_USB_ULPI_BUS)	+= ulpi.o
-obj-$(CONFIG_USB_ROLE_SWITCH)	+= roles.o
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 2ee1111..0bd1fc3 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1122,6 +1122,16 @@
 						   USB_PORT_FEAT_ENABLE);
 		}
 
+		/*
+		 * Add a debounce delay if the USB3 link is in the polling/link
+		 * training state. The link will automatically transition to
+		 * the Enabled state after link training completes.
+		 */
+		if (hub_is_superspeed(hdev) &&
+		    ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+						USB_SS_PORT_LS_POLLING))
+			need_debounce_delay = true;
+
 		/* Clear status-change flags; we'll debounce later */
 		if (portchange & USB_PORT_STAT_C_CONNECTION) {
 			need_debounce_delay = true;
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index dc7f7fd..c12ac56 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -119,11 +119,6 @@
 	.attrs = ports_attrs,
 };
 
-static const struct attribute_group *ports_groups[] = {
-	&ports_group,
-	NULL
-};
-
 /***************************************
  * Adding & removing ports
  ***************************************/
@@ -307,6 +302,7 @@
 static int usbport_trig_activate(struct led_classdev *led_cdev)
 {
 	struct usbport_trig_data *usbport_data;
+	int err;
 
 	usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
 	if (!usbport_data)
@@ -315,6 +311,9 @@
 
 	/* List of ports */
 	INIT_LIST_HEAD(&usbport_data->ports);
+	err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
+	if (err)
+		goto err_free;
 	usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
 	usbport_trig_update_count(usbport_data);
 
@@ -322,8 +321,11 @@
 	usbport_data->nb.notifier_call = usbport_trig_notify;
 	led_set_trigger_data(led_cdev, usbport_data);
 	usb_register_notify(&usbport_data->nb);
-
 	return 0;
+
+err_free:
+	kfree(usbport_data);
+	return err;
 }
 
 static void usbport_trig_deactivate(struct led_classdev *led_cdev)
@@ -335,6 +337,8 @@
 		usbport_trig_remove_port(usbport_data, port);
 	}
 
+	sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
+
 	usb_unregister_notify(&usbport_data->nb);
 
 	kfree(usbport_data);
@@ -344,7 +348,6 @@
 	.name     = "usbport",
 	.activate = usbport_trig_activate,
 	.deactivate = usbport_trig_deactivate,
-	.groups = ports_groups,
 };
 
 static int __init usbport_trig_init(void)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 514c521..8bc35d5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -394,7 +394,8 @@
 	{ USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
 
 	/* Corsair K70 RGB */
-	{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+	{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
+	  USB_QUIRK_DELAY_CTRL_MSG },
 
 	/* Corsair Strafe */
 	{ USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 3f9bccc..c089ffa 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -366,7 +366,7 @@
 	u32 desc_list_sz;
 	u32 *n_bytes;
 	struct timer_list unreserve_timer;
-	struct timer_list wait_timer;
+	struct hrtimer wait_timer;
 	struct dwc2_tt *dwc_tt;
 	int ttport;
 	unsigned tt_buffer_dirty:1;
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 40839591..ea3aa64 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -59,7 +59,7 @@
 #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
 
 /* If we get a NAK, wait this long before retrying */
-#define DWC2_RETRY_WAIT_DELAY (msecs_to_jiffies(1))
+#define DWC2_RETRY_WAIT_DELAY (1 * 1E6L)	/* 1 ms, expressed in ns */
 
 /**
  * dwc2_periodic_channel_available() - Checks that a channel is available for a
@@ -1464,10 +1464,12 @@
  * qh back to the "inactive" list, then queues transactions.
  *
  * @t: Pointer to wait_timer in a qh.
+ *
+ * Return: HRTIMER_NORESTART to not automatically restart this timer.
  */
-static void dwc2_wait_timer_fn(struct timer_list *t)
+static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
 {
-	struct dwc2_qh *qh = from_timer(qh, t, wait_timer);
+	struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
 	struct dwc2_hsotg *hsotg = qh->hsotg;
 	unsigned long flags;
 
@@ -1491,6 +1493,7 @@
 	}
 
 	spin_unlock_irqrestore(&hsotg->lock, flags);
+	return HRTIMER_NORESTART;
 }
 
 /**
@@ -1521,7 +1524,8 @@
 	/* Initialize QH */
 	qh->hsotg = hsotg;
 	timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
-	timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0);
+	hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	qh->wait_timer.function = &dwc2_wait_timer_fn;
 	qh->ep_type = ep_type;
 	qh->ep_is_in = ep_is_in;
 
@@ -1690,7 +1694,7 @@
 	 * won't do anything anyway, but we want it to finish before we free
 	 * memory.
 	 */
-	del_timer_sync(&qh->wait_timer);
+	hrtimer_cancel(&qh->wait_timer);
 
 	dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
 
@@ -1716,6 +1720,7 @@
 {
 	int status;
 	u32 intr_mask;
+	ktime_t delay;
 
 	if (dbg_qh(qh))
 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
@@ -1734,8 +1739,8 @@
 			list_add_tail(&qh->qh_list_entry,
 				      &hsotg->non_periodic_sched_waiting);
 			qh->wait_timer_cancel = false;
-			mod_timer(&qh->wait_timer,
-				  jiffies + DWC2_RETRY_WAIT_DELAY + 1);
+			delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
+			hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
 		} else {
 			list_add_tail(&qh->qh_list_entry,
 				      &hsotg->non_periodic_sched_inactive);
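
The timer_list to hrtimer conversion above follows the standard kernel pattern: the callback returns an hrtimer_restart value instead of void, expiry is given as a ktime_t in nanoseconds rather than jiffies, and teardown uses hrtimer_cancel(). A self-contained sketch under assumed names (struct foo, foo_timer_fn):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct foo {
	struct hrtimer wait_timer;
};

static enum hrtimer_restart foo_timer_fn(struct hrtimer *t)
{
	struct foo *f = container_of(t, struct foo, wait_timer);

	/* ... handle the timeout for f ... */
	return HRTIMER_NORESTART;	/* one-shot: re-armed manually if needed */
}

static void foo_init_and_arm(struct foo *f)
{
	hrtimer_init(&f->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	f->wait_timer.function = &foo_timer_fn;
	/* expiry is a ktime_t in ns; arm for 1 ms relative to now */
	hrtimer_start(&f->wait_timer, ktime_set(0, NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}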
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index bf7052e..dff2c6e 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -71,6 +71,13 @@
 	p->power_down = false;
 }
 
+static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
+{
+	struct dwc2_core_params *p = &hsotg->params;
+
+	p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+}
+
 static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
 {
 	struct dwc2_core_params *p = &hsotg->params;
@@ -110,6 +117,7 @@
 	p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
 		GAHBCFG_HBSTLEN_SHIFT;
+	p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
 }
 
 static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
@@ -150,7 +158,8 @@
 	{ .compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params },
 	{ .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
 	{ .compatible = "snps,dwc2" },
-	{ .compatible = "samsung,s3c6400-hsotg" },
+	{ .compatible = "samsung,s3c6400-hsotg",
+	  .data = dwc2_set_s3c6400_params },
 	{ .compatible = "amlogic,meson8-usb",
 	  .data = dwc2_set_amlogic_params },
 	{ .compatible = "amlogic,meson8b-usb",
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index b2f36af..92e80bc 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -188,18 +188,41 @@
 	BUS_VOTE_MAX
 };
 
-struct usb_irq {
-	char *name;
-	int irq;
-	bool enable;
+struct usb_irq_info {
+	const char	*name;
+	unsigned long	irq_type;
+	bool		required;
 };
 
-static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = {
-	{"hs_phy_irq", 0},
-	{"pwr_event_irq", 0},
-	{"dp_hs_phy_irq", 0},
-	{"dm_hs_phy_irq", 0},
-	{"ss_phy_irq", 0},
+static const struct usb_irq_info usb_irq_info[USB_MAX_IRQ] = {
+	{ "hs_phy_irq",
+	  IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQ_TYPE_LEVEL_HIGH |
+		 IRQF_EARLY_RESUME,
+	  false,
+	},
+	{ "pwr_event_irq",
+	  IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQ_TYPE_LEVEL_HIGH |
+		 IRQF_EARLY_RESUME,
+	  true,
+	},
+	{ "dp_hs_phy_irq",
+	  IRQF_TRIGGER_RISING | IRQF_ONESHOT | IRQF_EARLY_RESUME,
+	  false,
+	},
+	{ "dm_hs_phy_irq",
+	  IRQF_TRIGGER_RISING | IRQF_ONESHOT | IRQF_EARLY_RESUME,
+	  false,
+	},
+	{ "ss_phy_irq",
+	  IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQ_TYPE_LEVEL_HIGH |
+		 IRQF_EARLY_RESUME,
+	  false,
+	},
+};
+
+struct usb_irq {
+	int irq;
+	bool enable;
 };
 
 static const char * const gsi_op_strings[] = {
@@ -2441,11 +2464,6 @@
 		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
 		dwc3_msm_config_gdsc(mdwc, 0);
 		clk_disable_unprepare(mdwc->sleep_clk);
-
-		if (mdwc->iommu_map) {
-			__depr_arm_iommu_detach_device(mdwc->dev);
-			dev_dbg(mdwc->dev, "IOMMU detached\n");
-		}
 	}
 
 	dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_NONE);
@@ -2602,16 +2620,6 @@
 
 	/* Recover from controller power collapse */
 	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
-		if (mdwc->iommu_map) {
-			ret = __depr_arm_iommu_attach_device(mdwc->dev,
-					mdwc->iommu_map);
-			if (ret)
-				dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
-						ret);
-			else
-				dev_dbg(mdwc->dev, "attached to IOMMU\n");
-		}
-
 		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
 
 		dwc3_msm_power_collapse_por(mdwc);
@@ -3157,60 +3165,6 @@
 	return 0;
 }
 
-#define SMMU_BASE	0x90000000 /* Device address range base */
-#define SMMU_SIZE	0x60000000 /* Device address range size */
-
-static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
-{
-	struct device_node *node = mdwc->dev->of_node;
-	int atomic_ctx = 1, s1_bypass;
-	int ret;
-
-	if (!of_property_read_bool(node, "iommus"))
-		return 0;
-
-	mdwc->iommu_map = __depr_arm_iommu_create_mapping(&platform_bus_type,
-			SMMU_BASE, SMMU_SIZE);
-	if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
-		ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
-		dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
-				ret);
-		return ret;
-	}
-	dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
-
-	ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
-			&atomic_ctx);
-	if (ret) {
-		dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
-			ret);
-		goto release_mapping;
-	}
-
-	s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
-	ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
-			DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
-	if (ret) {
-		dev_err(mdwc->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
-			s1_bypass, ret);
-		goto release_mapping;
-	}
-
-	ret = __depr_arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
-	if (ret) {
-		dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
-		goto release_mapping;
-	}
-	dev_dbg(mdwc->dev, "attached to IOMMU\n");
-
-	return 0;
-
-release_mapping:
-	__depr_arm_iommu_release_mapping(mdwc->iommu_map);
-	mdwc->iommu_map = NULL;
-	return ret;
-}
-
 static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
@@ -3416,7 +3370,6 @@
 	struct resource *res;
 	int ret = 0, size = 0, i;
 	u32 val;
-	unsigned long irq_type;
 
 	mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
 	if (!mdwc)
@@ -3471,18 +3424,14 @@
 		mdwc->lpm_to_suspend_delay = 0;
 	}
 
-	memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
 	for (i = 0; i < USB_MAX_IRQ; i++) {
-		irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
-			IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
 		mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
-					mdwc->wakeup_irq[i].name);
+					usb_irq_info[i].name);
 		if (mdwc->wakeup_irq[i].irq < 0) {
 			/* pwr_evnt_irq is only mandatory irq */
-			if (!strcmp(mdwc->wakeup_irq[i].name,
-						"pwr_event_irq")) {
+			if (usb_irq_info[i].required) {
 				dev_err(&pdev->dev, "get_irq for %s failed\n\n",
-						mdwc->wakeup_irq[i].name);
+						usb_irq_info[i].name);
 				ret = -EINVAL;
 				goto err;
 			}
@@ -3490,15 +3439,16 @@
 		} else {
 			irq_set_status_flags(mdwc->wakeup_irq[i].irq,
 						IRQ_NOAUTOEN);
+
 			ret = devm_request_threaded_irq(&pdev->dev,
 					mdwc->wakeup_irq[i].irq,
 					msm_dwc3_pwr_irq,
 					msm_dwc3_pwr_irq_thread,
-					irq_type,
-					mdwc->wakeup_irq[i].name, mdwc);
+					usb_irq_info[i].irq_type,
+					usb_irq_info[i].name, mdwc);
 			if (ret) {
 				dev_err(&pdev->dev, "irq req %s failed: %d\n\n",
-						mdwc->wakeup_irq[i].name, ret);
+						usb_irq_info[i].name, ret);
 				goto err;
 			}
 		}
@@ -3601,16 +3551,12 @@
 				"qcom,use-pdc-interrupts");
 	dwc3_set_notifier(&dwc3_msm_notify_event);
 
-	ret = dwc3_msm_init_iommu(mdwc);
-	if (ret)
-		goto err;
-
 	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
 		dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
 		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
 			dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
 			ret = -EOPNOTSUPP;
-			goto uninit_iommu;
+			goto err;
 		}
 	}
 
@@ -3619,7 +3565,7 @@
 	if (!dwc3_node) {
 		dev_err(&pdev->dev, "failed to find dwc3 child\n");
 		ret = -ENODEV;
-		goto uninit_iommu;
+		goto err;
 	}
 
 	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
@@ -3627,7 +3573,7 @@
 		dev_err(&pdev->dev,
 				"failed to add create dwc3 core\n");
 		of_node_put(dwc3_node);
-		goto uninit_iommu;
+		goto err;
 	}
 
 	mdwc->dwc3 = of_find_device_by_node(dwc3_node);
@@ -3671,11 +3617,6 @@
 	mdwc->use_pwr_event_for_wakeup = dwc->maximum_speed >= USB_SPEED_SUPER
 					&& !mdwc->wakeup_irq[SS_PHY_IRQ].irq;
 
-
-	/* IOMMU will be reattached upon each resume/connect */
-	if (mdwc->iommu_map)
-		__depr_arm_iommu_detach_device(mdwc->dev);
-
 	/*
 	 * Clocks and regulators will not be turned on until the first time
 	 * runtime PM resume is called. This is to allow for booting up with
@@ -3760,11 +3701,6 @@
 	if (mdwc->bus_perf_client)
 		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
 
-uninit_iommu:
-	if (mdwc->iommu_map) {
-		__depr_arm_iommu_detach_device(mdwc->dev);
-		__depr_arm_iommu_release_mapping(mdwc->iommu_map);
-	}
 	of_platform_depopulate(&pdev->dev);
 err:
 	destroy_workqueue(mdwc->sm_usb_wq);
@@ -3849,12 +3785,6 @@
 
 	dwc3_msm_config_gdsc(mdwc, 0);
 
-	if (mdwc->iommu_map) {
-		if (!atomic_read(&dwc->in_lpm))
-			__depr_arm_iommu_detach_device(mdwc->dev);
-		__depr_arm_iommu_release_mapping(mdwc->iommu_map);
-	}
-
 	destroy_workqueue(mdwc->sm_usb_wq);
 	destroy_workqueue(mdwc->dwc3_wq);
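
The dwc3-msm probe rework above keeps each wakeup interrupt masked until it is deliberately armed; IRQ_NOAUTOEN is what prevents the request from enabling the line immediately. A minimal sketch of the pattern, with assumed handler names:

	irq_set_status_flags(irq, IRQ_NOAUTOEN);	/* stay masked after request */
	ret = devm_request_threaded_irq(dev, irq, my_hard_handler,
					my_thread_handler,
					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					"wakeup_irq", priv);
	if (ret)
		return ret;

	/* ... later, when arming the wakeup path: */
	enable_irq(irq);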
 
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 8427958..fdc6e4e 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -170,20 +170,20 @@
 			 * put the gpio descriptors again here because the phy driver
 			 * might want to grab them, too.
 			 */
-			gpio = devm_gpiod_get_optional(&pdev->dev, "cs",
-						       GPIOD_OUT_LOW);
+			gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
 			if (IS_ERR(gpio))
 				return PTR_ERR(gpio);
 
 			gpiod_set_value_cansleep(gpio, 1);
+			gpiod_put(gpio);
 
-			gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
-						       GPIOD_OUT_LOW);
+			gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
 			if (IS_ERR(gpio))
 				return PTR_ERR(gpio);
 
 			if (gpio) {
 				gpiod_set_value_cansleep(gpio, 1);
+				gpiod_put(gpio);
 				usleep_range(10000, 11000);
 			}
 		}
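
Dropping the devm_ variants in dwc3-pci is deliberate: each descriptor is acquired, driven, and released on the spot, so the reference is not pinned to this device and the PHY driver can claim the same GPIO later. The resulting pattern, sketched:

	struct gpio_desc *gpio;

	gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	if (gpio) {
		gpiod_set_value_cansleep(gpio, 1);
		gpiod_put(gpio);	/* release so another driver can grab it */
	}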
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e12e187..b57591c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1029,6 +1029,9 @@
 {
 	u8 tmp = index;
 
+	if (!dep->trb_pool)
+		return NULL;
+
 	if (!tmp)
 		tmp = DWC3_TRB_NUM - 1;
 
@@ -1072,8 +1075,6 @@
 	struct usb_gadget	*gadget = &dwc->gadget;
 	enum usb_device_speed	speed = gadget->speed;
 
-	dwc3_ep_inc_enq(dep);
-
 	trb->size = DWC3_TRB_SIZE_LENGTH(length);
 	trb->bpl = lower_32_bits(dma);
 	trb->bph = upper_32_bits(dma);
@@ -1143,16 +1144,20 @@
 				usb_endpoint_type(dep->endpoint.desc));
 	}
 
-	/* always enable Continue on Short Packet */
+	/*
+	 * Enable Continue on Short Packet
+	 * when the endpoint is not stream capable
+	 */
 	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
-		trb->ctrl |= DWC3_TRB_CTRL_CSP;
+		if (!dep->stream_capable)
+			trb->ctrl |= DWC3_TRB_CTRL_CSP;
 
 		if (short_not_ok)
 			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
 	}
 
 	if ((!no_interrupt && !chain) ||
-			(dwc3_calc_trbs_left(dep) == 0))
+			(dwc3_calc_trbs_left(dep) == 1))
 		trb->ctrl |= DWC3_TRB_CTRL_IOC;
 
 	if (chain)
@@ -1163,6 +1168,8 @@
 
 	trb->ctrl |= DWC3_TRB_CTRL_HWO;
 
+	dwc3_ep_inc_enq(dep);
+
 	trace_dwc3_prepare_trb(dep, trb);
 }
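
Note on the IOC condition change above: dwc3_ep_inc_enq() now runs only after the TRB is fully prepared, so at the point of the check the slot being written has not yet been counted. A ring that is about to be exhausted therefore shows up as dwc3_calc_trbs_left() == 1 rather than == 0, which is why the comparison changes along with the move.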
 
@@ -1266,7 +1273,7 @@
 	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
 	unsigned int rem = length % maxp;
 
-	if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) {
+	if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
 		struct dwc3	*dwc = dep->dwc;
 		struct dwc3_trb	*trb;
 
@@ -1696,7 +1703,11 @@
 		else
 			trb = &dwc->ep0_trb[dep->trb_enqueue];
 
-		transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
+		if (trb)
+			transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
+		else
+			transfer_in_flight = false;
+
 		started = !list_empty(&dep->started_list);
 
 		if (!protocol && ((dep->direction && transfer_in_flight) ||
@@ -2199,10 +2210,8 @@
 
 		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
 				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
-		if (ret == 0) {
+		if (ret == 0)
 			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
-			return -ETIMEDOUT;
-		}
 	}
 
 	spin_lock_irqsave(&dwc->lock, flags);
@@ -2412,6 +2421,7 @@
 
 	/* begin to receive SETUP packets */
 	dwc->ep0state = EP0_SETUP_PHASE;
+	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
 	dwc3_ep0_out_start(dwc);
 
 	dwc3_gadget_enable_irq(dwc);
@@ -3952,6 +3962,8 @@
 	dwc3_disconnect_gadget(dwc);
 	__dwc3_gadget_stop(dwc);
 
+	synchronize_irq(dwc->irq_gadget);
+
 	return 0;
 }
 
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index f22714c..f27c5cb 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -251,9 +251,11 @@
 				s = "2x ";
 				break;
 			case 3:
+			default:
 				s = "3x ";
 				break;
 			}
+			break;
 		default:
 			s = "";
 		} s; }),
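
The trace.h hunk fixes an unintended fallthrough: without the added break after the inner switch, control fell into the outer default: label and overwrote s with the empty string, clobbering the value just chosen. The added default: inside the inner switch also gives out-of-range values the "3x " fallback.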
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 06ebb88..d3d4c98 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -241,6 +241,12 @@
 config USB_F_GSI
 	tristate
 
+config USB_F_MTP
+	tristate
+
+config USB_F_PTP
+	tristate
+
 # this first set of drivers all depend on bulk-capable hardware.
 
 config USB_CONFIGFS
@@ -588,6 +594,27 @@
 	  related functionalities using GSI hardware accelerated data
 	  path and control path.
 
+config USB_CONFIGFS_F_MTP
+	bool "MTP gadget"
+	select USB_F_MTP
+	depends on USB_CONFIGFS
+	help
+	  The Media Transfer Protocol (MTP) function presents the USB
+	  gadget as a media device. Unlike the Mass Storage Gadget, MTP
+	  operates at the file level, exposing only the relevant content
+	  and hiding system/restricted files.
+
+config USB_CONFIGFS_F_PTP
+	bool "PTP gadget"
+	select USB_F_PTP
+	depends on USB_CONFIGFS && USB_CONFIGFS_F_MTP
+	help
+	  The Picture Transfer Protocol (PTP) function driver is a wrapper
+	  around the MTP function driver. It presents the USB gadget as a
+	  media device, but unlike the Mass Storage Gadget, PTP operates
+	  at the file level, exposing only the relevant content and
+	  hiding system/restricted files.
+
 choice
 	tristate "USB Gadget precomposed configurations"
 	default USB_ETH
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 382dd21..730ba2b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -146,6 +146,7 @@
 			struct usb_function *f,
 			struct usb_ep *_ep)
 {
+	struct usb_composite_dev *cdev;
 	struct usb_endpoint_descriptor *chosen_desc = NULL;
 	struct usb_descriptor_header **speed_desc = NULL;
 
@@ -157,6 +158,8 @@
 	if (!g || !f || !_ep)
 		return -EIO;
 
+	cdev = get_gadget_data(g);
+
 	/* select desired speed */
 	switch (g->speed) {
 	case USB_SPEED_SUPER_PLUS:
@@ -182,6 +185,13 @@
 	default:
 		speed_desc = f->fs_descriptors;
 	}
+
+	if (!speed_desc) {
+		DBG(cdev, "%s desc not present for function %s\n",
+			usb_speed_string(g->speed), f->name);
+		return -EIO;
+	}
+
 	/* find descriptors */
 	for_each_ep_desc(speed_desc, d_spd) {
 		chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
@@ -225,12 +235,9 @@
 			_ep->maxburst = comp_desc->bMaxBurst + 1;
 			break;
 		default:
-			if (comp_desc->bMaxBurst != 0) {
-				struct usb_composite_dev *cdev;
-
-				cdev = get_gadget_data(g);
+			if (comp_desc->bMaxBurst != 0)
 				ERROR(cdev, "ep0 bMaxBurst must be 0\n");
-			}
+
 			_ep->maxburst = 1;
 			break;
 		}
@@ -954,6 +961,9 @@
 
 	/* when we return, be sure our power usage is valid */
 	power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
+	if (gadget->speed < USB_SPEED_SUPER)
+		power = min(power, 500U);
+
 done:
 	usb_gadget_vbus_draw(gadget, power);
 	if (result >= 0 && cdev->delayed_status)
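
The 500 mA clamp above encodes the bus-power budget: USB 2.0 configurations may draw at most 500 mA from VBUS, while only SuperSpeed and above allows up to 900 mA. The resulting logic on the success path, with units spelled out:

	power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
	if (gadget->speed < USB_SPEED_SUPER)
		power = min(power, 500U);	/* cap at the USB 2.0 budget */
	usb_gadget_vbus_draw(gadget, power);	/* power is in mA */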
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index d1b0725..3309c1f 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -15,11 +15,16 @@
 #include <linux/kdev_t.h>
 #include <linux/usb/ch9.h>
 
+#ifdef CONFIG_USB_F_NCM
+#include <function/u_ncm.h>
+#endif
+
 #ifdef CONFIG_USB_CONFIGFS_F_ACC
 extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
 				const struct usb_ctrlrequest *ctrl);
 void acc_disconnect(void);
 #endif
+
 static struct class *android_class;
 static struct device *android_device;
 static int index;
@@ -1508,6 +1513,18 @@
 		}
 	}
 
+#ifdef CONFIG_USB_F_NCM
+	if (value < 0)
+		value = ncm_ctrlrequest(cdev, c);
+
+	/*
+	 * For the mirror link command case: if the request has already
+	 * been handled, do not pass it on to composite_setup.
+	 */
+	if (value == 0)
+		return value;
+#endif
+
 #ifdef CONFIG_USB_CONFIGFS_F_ACC
 	if (value < 0)
 		value = acc_ctrlrequest(cdev, c);
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 26283a9..7d81abb 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -65,3 +65,7 @@
 obj-$(CONFIG_USB_F_QDSS)	+= usb_f_qdss.o
 usb_f_gsi-y			:= f_gsi.o
 obj-$(CONFIG_USB_F_GSI)		+= usb_f_gsi.o
+usb_f_mtp-y			:= f_mtp.o
+obj-$(CONFIG_USB_F_MTP)		+= usb_f_mtp.o
+usb_f_ptp-y			:= f_ptp.o
+obj-$(CONFIG_USB_F_PTP)		+= usb_f_ptp.o
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 6be5525..586f79e 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -658,16 +658,16 @@
 	}
 
 	while (count > 0) {
-		if (!dev->online) {
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+		if (!dev->online || dev->disconnected) {
 			pr_debug("acc_write dev->error\n");
 			r = -EIO;
 			break;
 		}
 
-		/* get an idle tx request to use */
-		req = 0;
-		ret = wait_event_interruptible(dev->write_wq,
-			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
 		if (!req) {
 			r = ret;
 			break;
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 3c9747b..6060761 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -29,6 +29,7 @@
 #include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/debugfs.h>
 #include <linux/cdev.h>
 #include <linux/spinlock.h>
 #include <linux/usb/gadget.h>
@@ -126,6 +127,11 @@
 	unsigned long		nbytes_to_host;
 	unsigned long           nbytes_to_port_bridge;
 	unsigned long		nbytes_from_port_bridge;
+
+	struct dentry		*debugfs_root;
+
+	/* To test remote wakeup using debugfs */
+	u8 debugfs_rw_enable;
 };
 
 struct f_cdev_opts {
@@ -147,6 +153,7 @@
 static void usb_cser_disconnect(struct f_cdev *port);
 static struct f_cdev *f_cdev_alloc(char *func_name, int portno);
 static void usb_cser_free_req(struct usb_ep *ep, struct usb_request *req);
+static void usb_cser_debugfs_exit(struct f_cdev *port);
 
 static struct usb_interface_descriptor cser_interface_desc = {
 	.bLength =		USB_DT_INTERFACE_SIZE,
@@ -505,6 +512,32 @@
 	return rc;
 }
 
+static int usb_cser_func_suspend(struct usb_function *f, u8 options)
+{
+	bool func_wakeup_allowed;
+
+	func_wakeup_allowed =
+		((options & FUNC_SUSPEND_OPT_RW_EN_MASK) != 0);
+
+	f->func_wakeup_allowed = func_wakeup_allowed;
+	if (options & FUNC_SUSPEND_OPT_SUSP_MASK) {
+		if (!f->func_is_suspended)
+			f->func_is_suspended = true;
+	} else {
+		if (f->func_is_suspended)
+			f->func_is_suspended = false;
+	}
+	return 0;
+}
+
+static int usb_cser_get_status(struct usb_function *f)
+{
+	bool remote_wakeup_en_status = f->func_wakeup_allowed ? 1 : 0;
+
+	return (remote_wakeup_en_status << FUNC_WAKEUP_ENABLE_SHIFT) |
+		(1 << FUNC_WAKEUP_CAPABLE_SHIFT);
+}
+
 static void usb_cser_disable(struct usb_function *f)
 {
 	struct f_cdev	*port = func_to_port(f);
@@ -821,6 +854,7 @@
 	if (opts->port) {
 		device_destroy(fcdev_classp, MKDEV(major, opts->port->minor));
 		cdev_del(&opts->port->fcdev_cdev);
+		usb_cser_debugfs_exit(opts->port);
 	}
 	usb_cser_chardev_deinit();
 	kfree(opts->func_name);
@@ -1500,6 +1534,119 @@
 	.compat_ioctl = f_cdev_ioctl,
 };
 
+static ssize_t cser_rw_write(struct file *file, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct f_cdev *port = s->private;
+	u8 input;
+	struct cserial *cser;
+	struct usb_function *func;
+	struct usb_gadget *gadget;
+	int ret;
+
+	cser = &port->port_usb;
+	if (!cser) {
+		pr_err("cser is NULL\n");
+		return -EINVAL;
+	}
+
+	if (!port->is_connected) {
+		pr_debug("port disconnected\n");
+		return -ENODEV;
+	}
+
+	func = &cser->func;
+	if (!func) {
+		pr_err("func is NULL\n");
+		return -EINVAL;
+	}
+
+	if (ubuf == NULL) {
+		pr_debug("buffer is Null.\n");
+		goto err;
+	}
+
+	ret = kstrtou8_from_user(ubuf, count, 0, &input);
+	if (ret) {
+		pr_err("Invalid value. err:%d\n", ret);
+		goto err;
+	}
+
+	if (port->debugfs_rw_enable == !!input) {
+		if (!!input)
+			pr_debug("RW already enabled\n");
+		else
+			pr_debug("RW already disabled\n");
+		goto err;
+	}
+
+	port->debugfs_rw_enable = !!input;
+	if (port->debugfs_rw_enable) {
+		gadget = cser->func.config->cdev->gadget;
+		if (gadget->speed == USB_SPEED_SUPER &&
+			func->func_is_suspended) {
+			pr_debug("Calling usb_func_wakeup\n");
+			ret = usb_func_wakeup(func);
+		} else {
+			pr_debug("Calling usb_gadget_wakeup\n");
+			ret = usb_gadget_wakeup(gadget);
+		}
+
+		if ((ret == -EBUSY) || (ret == -EAGAIN))
+			pr_debug("RW delayed due to LPM exit.\n");
+		else if (ret)
+			pr_err("wakeup failed. ret=%d.\n", ret);
+	} else {
+		pr_debug("RW disabled.\n");
+	}
+err:
+	return count;
+}
+
+static int usb_cser_rw_show(struct seq_file *s, void *unused)
+{
+	struct f_cdev *port = s->private;
+
+	if (!port) {
+		pr_err("port is null\n");
+		return 0;
+	}
+
+	seq_printf(s, "%d\n", port->debugfs_rw_enable);
+
+	return 0;
+}
+
+static int debug_cdev_rw_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, usb_cser_rw_show, inode->i_private);
+}
+
+static const struct file_operations cser_rem_wakeup_fops = {
+	.open = debug_cdev_rw_open,
+	.read = seq_read,
+	.write = cser_rw_write,
+	.owner = THIS_MODULE,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static void usb_cser_debugfs_init(struct f_cdev *port)
+{
+	port->debugfs_root = debugfs_create_dir(port->name, NULL);
+	if (IS_ERR(port->debugfs_root))
+		return;
+
+	debugfs_create_file("remote_wakeup", 0600,
+			port->debugfs_root, port, &cser_rem_wakeup_fops);
+}
+
+static void usb_cser_debugfs_exit(struct f_cdev *port)
+{
+	debugfs_remove_recursive(port->debugfs_root);
+}
+
 static struct f_cdev *f_cdev_alloc(char *func_name, int portno)
 {
 	int ret;
@@ -1567,6 +1714,8 @@
 		goto err_create_dev;
 	}
 
+	usb_cser_debugfs_init(port);
+
 	pr_info("port_name:%s (%pK) portno:(%d)\n",
 			port->name, port, port->port_num);
 	return port;
@@ -1829,6 +1978,8 @@
 	port->port_usb.func.set_alt = usb_cser_set_alt;
 	port->port_usb.func.disable = usb_cser_disable;
 	port->port_usb.func.setup = usb_cser_setup;
+	port->port_usb.func.func_suspend = usb_cser_func_suspend;
+	port->port_usb.func.get_status = usb_cser_get_status;
 	port->port_usb.func.free_func = usb_cser_free_func;
 
 	return &port->port_usb.func;
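
The new debugfs knob in f_cdev allows remote wakeup to be exercised without host involvement. The directory name comes from port->name, so the exact path depends on the instance; an illustrative usage sketch:

/*
 * From userspace (paths illustrative):
 *   echo 1 > /sys/kernel/debug/<port-name>/remote_wakeup    trigger wakeup
 *   cat /sys/kernel/debug/<port-name>/remote_wakeup         read current state
 */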
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 3137125..9b1224c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -38,6 +38,18 @@
 
 #define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by an honest dice roll ;) */
 
+#define NUM_PAGES	10 /* # of pages for ipc logging */
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define ffs_log(fmt, ...) do { \
+	ipc_log_string(ffs->ipc_log, "%s: " fmt,  __func__, ##__VA_ARGS__); \
+	dynamic_pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+#else
+#define ffs_log(fmt, ...) \
+	ipc_log_string(ffs->ipc_log, "%s: " fmt,  __func__, ##__VA_ARGS__)
+#endif
+
 /* Reference counter handling */
 static void ffs_data_get(struct ffs_data *ffs);
 static void ffs_data_put(struct ffs_data *ffs);
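
Note that ffs_log() expands to a reference to ffs->ipc_log and therefore relies on a local variable named ffs being in scope, which is why so many hunks below add a struct ffs_data *ffs = ... line before the first log call. A minimal sketch of a conforming caller:

static void example_caller(struct ffs_epfile *epfile)
{
	struct ffs_data *ffs = epfile->ffs;	/* the macro references `ffs` */

	ffs_log("state %d flags %lu", ffs->state, ffs->flags);
}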
@@ -275,6 +287,9 @@
 
 	spin_unlock_irq(&ffs->ev.waitq.lock);
 
+	ffs_log("enter: state %d setup_state %d flags %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	req->buf      = data;
 	req->length   = len;
 
@@ -299,11 +314,18 @@
 	}
 
 	ffs->setup_state = FFS_NO_SETUP;
+
+	ffs_log("exit: state %d setup_state %d flags %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	return req->status ? req->status : req->actual;
 }
 
 static int __ffs_ep0_stall(struct ffs_data *ffs)
 {
+	ffs_log("state %d setup_state %d flags %lu can_stall %d", ffs->state,
+		ffs->setup_state, ffs->flags, ffs->ev.can_stall);
+
 	if (ffs->ev.can_stall) {
 		pr_vdebug("ep0 stall\n");
 		usb_ep_set_halt(ffs->gadget->ep0);
@@ -324,6 +346,9 @@
 
 	ENTER();
 
+	ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	/* Fast check if setup was canceled */
 	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
 		return -EIDRM;
@@ -452,6 +477,9 @@
 		break;
 	}
 
+	ffs_log("exit:ret %zd state %d setup_state %d flags %lu", ret,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	mutex_unlock(&ffs->mutex);
 	return ret;
 }
@@ -486,6 +514,10 @@
 			ffs->ev.count * sizeof *ffs->ev.types);
 
 	spin_unlock_irq(&ffs->ev.waitq.lock);
+
+	ffs_log("state %d setup_state %d flags %lu #evt %zu", ffs->state,
+		ffs->setup_state, ffs->flags, n);
+
 	mutex_unlock(&ffs->mutex);
 
 	return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
@@ -501,6 +533,9 @@
 
 	ENTER();
 
+	ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	/* Fast check if setup was canceled */
 	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
 		return -EIDRM;
@@ -590,8 +625,12 @@
 
 	spin_unlock_irq(&ffs->ev.waitq.lock);
 done_mutex:
+	ffs_log("exit:ret %d state %d setup_state %d flags %lu", ret,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	mutex_unlock(&ffs->mutex);
 	kfree(data);
+
 	return ret;
 }
 
@@ -601,6 +640,9 @@
 
 	ENTER();
 
+	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	if (unlikely(ffs->state == FFS_CLOSING))
 		return -EBUSY;
 
@@ -616,6 +658,9 @@
 
 	ENTER();
 
+	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	ffs_data_closed(ffs);
 
 	return 0;
@@ -629,6 +674,9 @@
 
 	ENTER();
 
+	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
 		struct ffs_function *func = ffs->func;
 		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
@@ -647,6 +695,9 @@
 	__poll_t mask = EPOLLWRNORM;
 	int ret;
 
+	ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	poll_wait(file, &ffs->ev.waitq, wait);
 
 	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
@@ -677,6 +728,8 @@
 		break;
 	}
 
+	ffs_log("exit: mask %u", mask);
+
 	mutex_unlock(&ffs->mutex);
 
 	return mask;
@@ -753,10 +806,13 @@
 {
 	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
 						   work);
+	struct ffs_data *ffs = io_data->ffs;
 	int ret = io_data->req->status ? io_data->req->status :
 					 io_data->req->actual;
 	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
 
+	ffs_log("enter: ret %d for %s", ret, io_data->read ? "read" : "write");
+
 	if (io_data->read && ret > 0) {
 		mm_segment_t oldfs = get_fs();
 
@@ -778,6 +834,8 @@
 		kfree(io_data->to_free);
 	kfree(io_data->buf);
 	kfree(io_data);
+
+	ffs_log("exit");
 }
 
 static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
@@ -788,6 +846,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	INIT_WORK(&io_data->work, ffs_user_copy_worker);
 	queue_work(ffs->io_completion_wq, &io_data->work);
 }
@@ -877,12 +937,15 @@
 static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 {
 	struct ffs_epfile *epfile = file->private_data;
+	struct ffs_data *ffs = epfile->ffs;
 	struct usb_request *req;
 	struct ffs_ep *ep;
 	char *data = NULL;
 	ssize_t ret, data_len = -EINVAL;
 	int halt;
 
+	ffs_log("enter: %s", epfile->name);
+
 	/* Are we still active? */
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
@@ -1000,6 +1063,8 @@
 
 		spin_unlock_irq(&epfile->ffs->eps_lock);
 
+		ffs_log("queued %d bytes on %s", data_len, epfile->name);
+
 		if (unlikely(wait_for_completion_interruptible(&done))) {
 			/*
 			 * To avoid race condition with ffs_epfile_io_complete,
@@ -1011,6 +1076,9 @@
 			interrupted = ep->status < 0;
 		}
 
+		ffs_log("%s:ep status %d for req %pK", epfile->name, ep->status,
+				req);
+
 		if (interrupted)
 			ret = -EINTR;
 		else if (io_data->read && ep->status > 0)
@@ -1039,6 +1107,8 @@
 			goto error_lock;
 		}
 
+		ffs_log("queued %d bytes on %s", data_len, epfile->name);
+
 		ret = -EIOCBQUEUED;
 		/*
 		 * Do not kfree the buffer in this function.  It will be freed
@@ -1053,6 +1123,9 @@
 	mutex_unlock(&epfile->mutex);
 error:
 	kfree(data);
+
+	ffs_log("exit: %s ret %zd", epfile->name, ret);
+
 	return ret;
 }
 
@@ -1060,9 +1133,14 @@
 ffs_epfile_open(struct inode *inode, struct file *file)
 {
 	struct ffs_epfile *epfile = inode->i_private;
+	struct ffs_data *ffs = epfile->ffs;
 
 	ENTER();
 
+	ffs_log("%s: state %d setup_state %d flag %lu", epfile->name,
+		epfile->ffs->state, epfile->ffs->setup_state,
+		epfile->ffs->flags);
+
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
 
@@ -1076,10 +1154,14 @@
 {
 	struct ffs_io_data *io_data = kiocb->private;
 	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+	struct ffs_data *ffs = epfile->ffs;
 	int value;
 
 	ENTER();
 
+	ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+		epfile->ffs->setup_state, epfile->ffs->flags);
+
 	spin_lock_irq(&epfile->ffs->eps_lock);
 
 	if (likely(io_data && io_data->ep && io_data->req))
@@ -1089,16 +1171,22 @@
 
 	spin_unlock_irq(&epfile->ffs->eps_lock);
 
+	ffs_log("exit: value %d", value);
+
 	return value;
 }
 
 static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
 {
+	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+	struct ffs_data *ffs = epfile->ffs;
 	struct ffs_io_data io_data, *p = &io_data;
 	ssize_t res;
 
 	ENTER();
 
+	ffs_log("enter");
+
 	if (!is_sync_kiocb(kiocb)) {
 		p = kmalloc(sizeof(io_data), GFP_KERNEL);
 		if (unlikely(!p))
@@ -1125,16 +1213,23 @@
 		kfree(p);
 	else
 		*from = p->data;
+
+	ffs_log("exit: ret %zd", res);
+
 	return res;
 }
 
 static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 {
+	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+	struct ffs_data *ffs = epfile->ffs;
 	struct ffs_io_data io_data, *p = &io_data;
 	ssize_t res;
 
 	ENTER();
 
+	ffs_log("enter");
+
 	if (!is_sync_kiocb(kiocb)) {
 		p = kmalloc(sizeof(io_data), GFP_KERNEL);
 		if (unlikely(!p))
@@ -1173,6 +1268,9 @@
 	} else {
 		*to = p->data;
 	}
+
+	ffs_log("exit: ret %zd", res);
+
 	return res;
 }
 
@@ -1180,10 +1278,15 @@
 ffs_epfile_release(struct inode *inode, struct file *file)
 {
 	struct ffs_epfile *epfile = inode->i_private;
+	struct ffs_data *ffs = epfile->ffs;
 
 	ENTER();
 
 	__ffs_epfile_read_buffer_free(epfile);
+	ffs_log("%s: state %d setup_state %d flag %lu", epfile->name,
+			epfile->ffs->state, epfile->ffs->setup_state,
+			epfile->ffs->flags);
+
 	ffs_data_closed(epfile->ffs);
 
 	return 0;
@@ -1193,11 +1296,16 @@
 			     unsigned long value)
 {
 	struct ffs_epfile *epfile = file->private_data;
+	struct ffs_data *ffs = epfile->ffs;
 	struct ffs_ep *ep;
 	int ret;
 
 	ENTER();
 
+	ffs_log("%s: code 0x%08x value %#lx state %d setup_state %d flag %lu",
+		epfile->name, code, value, epfile->ffs->state,
+		epfile->ffs->setup_state, epfile->ffs->flags);
+
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
 
@@ -1263,6 +1371,8 @@
 	}
 	spin_unlock_irq(&epfile->ffs->eps_lock);
 
+	ffs_log("exit: %s: ret %d\n", epfile->name, ret);
+
 	return ret;
 }
 
@@ -1301,10 +1411,13 @@
 		  const struct inode_operations *iops,
 		  struct ffs_file_perms *perms)
 {
+	struct ffs_data	*ffs = sb->s_fs_info;
 	struct inode *inode;
 
 	ENTER();
 
+	ffs_log("enter");
+
 	inode = new_inode(sb);
 
 	if (likely(inode)) {
@@ -1338,6 +1451,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	dentry = d_alloc_name(sb->s_root, name);
 	if (unlikely(!dentry))
 		return NULL;
@@ -1349,6 +1464,7 @@
 	}
 
 	d_add(dentry, inode);
+
 	return dentry;
 }
 
@@ -1374,6 +1490,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	ffs->sb              = sb;
 	data->ffs_data       = NULL;
 	sb->s_fs_info        = ffs;
@@ -1541,6 +1659,7 @@
 		ffs_release_dev(data.ffs_data);
 		ffs_data_put(data.ffs_data);
 	}
+
 	return rv;
 }
 
@@ -1600,6 +1719,8 @@
 {
 	ENTER();
 
+	ffs_log("ref %u", refcount_read(&ffs->ref));
+
 	refcount_inc(&ffs->ref);
 }
 
@@ -1607,6 +1728,10 @@
 {
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu opened %d ref %d",
+		ffs->state, ffs->setup_state, ffs->flags,
+		atomic_read(&ffs->opened), refcount_read(&ffs->ref));
+
 	refcount_inc(&ffs->ref);
 	if (atomic_add_return(1, &ffs->opened) == 1 &&
 			ffs->state == FFS_DEACTIVATED) {
@@ -1619,6 +1744,8 @@
 {
 	ENTER();
 
+	ffs_log("ref %u", refcount_read(&ffs->ref));
+
 	if (unlikely(refcount_dec_and_test(&ffs->ref))) {
 		pr_info("%s(): freeing\n", __func__);
 		ffs_data_clear(ffs);
@@ -1626,6 +1753,7 @@
 		       waitqueue_active(&ffs->ep0req_completion.wait) ||
 		       waitqueue_active(&ffs->wait));
 		destroy_workqueue(ffs->io_completion_wq);
+		ipc_log_context_destroy(ffs->ipc_log);
 		kfree(ffs->dev_name);
 		kfree(ffs);
 	}
@@ -1635,6 +1763,9 @@
 {
 	ENTER();
 
+	ffs_log("state %d setup_state %d flag %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	if (atomic_dec_and_test(&ffs->opened)) {
 		if (ffs->no_disconnect) {
 			ffs->state = FFS_DEACTIVATED;
@@ -1660,6 +1791,7 @@
 
 static struct ffs_data *ffs_data_new(const char *dev_name)
 {
+	char ipcname[24] = "usb_ffs_";
 	struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
 	if (unlikely(!ffs))
 		return NULL;
@@ -1684,6 +1816,11 @@
 	/* XXX REVISIT need to update it in some places, or do we? */
 	ffs->ev.can_stall = 1;
 
+	strlcat(ipcname, dev_name, sizeof(ipcname));
+	ffs->ipc_log = ipc_log_context_create(NUM_PAGES, ipcname, 0);
+	if (IS_ERR_OR_NULL(ffs->ipc_log))
+		ffs->ipc_log = NULL;
+
 	return ffs;
 }
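
A sizing note on the hunk above: ipcname holds "usb_ffs_" (8 characters plus the terminator) in a 24-byte buffer, so strlcat() silently truncates dev_name values longer than 15 characters; the ipc log context is still created, just under a shortened name.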
 
@@ -1691,6 +1828,11 @@
 {
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
+	pr_debug("%s: ffs->gadget= %pK, ffs->flags= %lu\n",
+				__func__, ffs->gadget, ffs->flags);
 	ffs_closed(ffs);
 
 	BUG_ON(ffs->gadget);
@@ -1710,6 +1852,9 @@
 {
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	ffs_data_clear(ffs);
 
 	ffs->epfiles = NULL;
@@ -1742,6 +1887,9 @@
 
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	if (WARN_ON(ffs->state != FFS_ACTIVE
 		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
 		return -EBADFD;
@@ -1767,6 +1915,7 @@
 	}
 
 	ffs->gadget = cdev->gadget;
+
 	ffs_data_get(ffs);
 	return 0;
 }
@@ -1780,6 +1929,8 @@
 		ffs->ep0req = NULL;
 		ffs->gadget = NULL;
 		clear_bit(FFS_FL_BOUND, &ffs->flags);
+		ffs_log("state %d setup_state %d flag %lu gadget %pK\n",
+			ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
 		ffs_data_put(ffs);
 	}
 }
@@ -1791,6 +1942,9 @@
 
 	ENTER();
 
+	ffs_log("enter: eps_count %u state %d setup_state %d flag %lu",
+		ffs->eps_count, ffs->state, ffs->setup_state, ffs->flags);
+
 	count = ffs->eps_count;
 	epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
 	if (!epfiles)
@@ -1814,15 +1968,19 @@
 	}
 
 	ffs->epfiles = epfiles;
+
 	return 0;
 }
 
 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
 {
 	struct ffs_epfile *epfile = epfiles;
+	struct ffs_data *ffs = epfiles->ffs;
 
 	ENTER();
 
+	ffs_log("enter: count %u", count);
+
 	for (; count; --count, ++epfile) {
 		BUG_ON(mutex_is_locked(&epfile->mutex));
 		if (epfile->dentry) {
@@ -1838,10 +1996,14 @@
 static void ffs_func_eps_disable(struct ffs_function *func)
 {
 	struct ffs_ep *ep         = func->eps;
+	struct ffs_data *ffs      = func->ffs;
 	struct ffs_epfile *epfile = func->ffs->epfiles;
 	unsigned count            = func->ffs->eps_count;
 	unsigned long flags;
 
+	ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+		func->ffs->setup_state, func->ffs->flags);
+
 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
 	while (count--) {
 		/* pending requests get nuked */
@@ -1867,6 +2029,9 @@
 	unsigned long flags;
 	int ret = 0;
 
+	ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+		func->ffs->setup_state, func->ffs->flags);
+
 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
 	while(count--) {
 		ep->ep->driver_data = ep;
@@ -1883,7 +2048,9 @@
 			epfile->ep = ep;
 			epfile->in = usb_endpoint_dir_in(ep->ep->desc);
 			epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
+			ffs_log("usb_ep_enable %s", ep->ep->name);
 		} else {
+			ffs_log("usb_ep_enable %s ret %d", ep->ep->name, ret);
 			break;
 		}
 
@@ -1924,7 +2091,8 @@
 				    struct usb_os_desc_header *h, void *data,
 				    unsigned len, void *priv);
 
-static int __must_check ffs_do_single_desc(char *data, unsigned len,
+static int __must_check ffs_do_single_desc(struct ffs_data *ffs,
+					   char *data, unsigned int len,
 					   ffs_entity_callback entity,
 					   void *priv)
 {
@@ -1934,6 +2102,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u", len);
+
 	/* At least two bytes are required: length and type */
 	if (len < 2) {
 		pr_vdebug("descriptor too short\n");
@@ -2050,10 +2220,13 @@
 #undef __entity_check_STRING
 #undef __entity_check_ENDPOINT
 
+	ffs_log("exit: desc type %d length %d", _ds->bDescriptorType, length);
+
 	return length;
 }
 
-static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
+static int __must_check ffs_do_descs(struct ffs_data *ffs, unsigned int count,
+				     char *data, unsigned int len,
 				     ffs_entity_callback entity, void *priv)
 {
 	const unsigned _len = len;
@@ -2061,6 +2234,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u", len);
+
 	for (;;) {
 		int ret;
 
@@ -2078,7 +2253,7 @@
 		if (!data)
 			return _len - len;
 
-		ret = ffs_do_single_desc(data, len, entity, priv);
+		ret = ffs_do_single_desc(ffs, data, len, entity, priv);
 		if (unlikely(ret < 0)) {
 			pr_debug("%s returns %d\n", __func__, ret);
 			return ret;
@@ -2095,10 +2270,13 @@
 				void *priv)
 {
 	struct ffs_desc_helper *helper = priv;
+	struct ffs_data *ffs = helper->ffs;
 	struct usb_endpoint_descriptor *d;
 
 	ENTER();
 
+	ffs_log("enter: type %u", type);
+
 	switch (type) {
 	case FFS_DESCRIPTOR:
 		break;
@@ -2140,12 +2318,15 @@
 	return 0;
 }
 
-static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
+static int __ffs_do_os_desc_header(struct ffs_data *ffs,
+				   enum ffs_os_desc_type *next_type,
 				   struct usb_os_desc_header *desc)
 {
 	u16 bcd_version = le16_to_cpu(desc->bcdVersion);
 	u16 w_index = le16_to_cpu(desc->wIndex);
 
+	ffs_log("enter: bcd:%x w_index:%d", bcd_version, w_index);
+
 	if (bcd_version != 1) {
 		pr_vdebug("unsupported os descriptors version: %d",
 			  bcd_version);
@@ -2170,7 +2351,8 @@
  * Process all extended compatibility/extended property descriptors
  * of a feature descriptor
  */
-static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
+static int __must_check ffs_do_single_os_desc(struct ffs_data *ffs,
+					      char *data, unsigned int len,
 					      enum ffs_os_desc_type type,
 					      u16 feature_count,
 					      ffs_os_desc_callback entity,
@@ -2182,22 +2364,27 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u os desc type %d", len, type);
+
 	/* loop over all ext compat/ext prop descriptors */
 	while (feature_count--) {
 		ret = entity(type, h, data, len, priv);
 		if (unlikely(ret < 0)) {
-			pr_debug("bad OS descriptor, type: %d\n", type);
+			ffs_log("bad OS descriptor, type: %d\n", type);
 			return ret;
 		}
 		data += ret;
 		len -= ret;
 	}
+
 	return _len - len;
 }
 
 /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
-static int __must_check ffs_do_os_descs(unsigned count,
-					char *data, unsigned len,
+static int __must_check ffs_do_os_descs(struct ffs_data *ffs,
+					unsigned int count, char *data,
+					unsigned int len,
 					ffs_os_desc_callback entity, void *priv)
 {
 	const unsigned _len = len;
@@ -2205,6 +2392,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u", len);
+
 	for (num = 0; num < count; ++num) {
 		int ret;
 		enum ffs_os_desc_type type;
@@ -2224,9 +2413,9 @@
 		if (le32_to_cpu(desc->dwLength) > len)
 			return -EINVAL;
 
-		ret = __ffs_do_os_desc_header(&type, desc);
+		ret = __ffs_do_os_desc_header(ffs, &type, desc);
 		if (unlikely(ret < 0)) {
-			pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
+			ffs_log("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
 				 num, ret);
 			return ret;
 		}
@@ -2244,16 +2433,17 @@
 		 * Process all function/property descriptors
 		 * of this Feature Descriptor
 		 */
-		ret = ffs_do_single_os_desc(data, len, type,
+		ret = ffs_do_single_os_desc(ffs, data, len, type,
 					    feature_count, entity, priv, desc);
 		if (unlikely(ret < 0)) {
-			pr_debug("%s returns %d\n", __func__, ret);
+			ffs_log("%s returns %d\n", __func__, ret);
 			return ret;
 		}
 
 		len -= ret;
 		data += ret;
 	}
+
 	return _len - len;
 }
 
@@ -2269,6 +2459,8 @@
 
 	ENTER();
 
+	ffs_log("enter: type %d len %u", type, len);
+
 	switch (type) {
 	case FFS_OS_DESC_EXT_COMPAT: {
 		struct usb_ext_compat_desc *d = data;
@@ -2333,6 +2525,7 @@
 		pr_vdebug("unknown descriptor: %d\n", type);
 		return -EINVAL;
 	}
+
 	return length;
 }
 
@@ -2346,6 +2539,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %zu", len);
+
 	if (get_unaligned_le32(data + 4) != len)
 		goto error;
 
@@ -2419,7 +2614,7 @@
 			continue;
 		helper.interfaces_count = 0;
 		helper.eps_count = 0;
-		ret = ffs_do_descs(counts[i], data, len,
+		ret = ffs_do_descs(ffs, counts[i], data, len,
 				   __ffs_data_do_entity, &helper);
 		if (ret < 0)
 			goto error;
@@ -2440,7 +2635,7 @@
 		len  -= ret;
 	}
 	if (os_descs_count) {
-		ret = ffs_do_os_descs(os_descs_count, data, len,
+		ret = ffs_do_os_descs(ffs, os_descs_count, data, len,
 				      __ffs_data_do_os_desc, ffs);
 		if (ret < 0)
 			goto error;
@@ -2478,6 +2673,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %zu", len);
+
 	if (unlikely(len < 16 ||
 		     get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
 		     get_unaligned_le32(data + 4) != len))
@@ -2610,6 +2807,9 @@
 	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
 	int neg = 0;
 
+	ffs_log("enter: type %d state %d setup_state %d flag %lu", type,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	/*
 	 * Abort any unhandled setup
 	 *
@@ -2698,11 +2898,14 @@
 {
 	struct usb_endpoint_descriptor *ds = (void *)desc;
 	struct ffs_function *func = priv;
+	struct ffs_data *ffs = func->ffs;
 	struct ffs_ep *ffs_ep;
 	unsigned ep_desc_id;
 	int idx;
 	static const char *speed_names[] = { "full", "high", "super" };
 
+	ffs_log("enter");
+
 	if (type != FFS_DESCRIPTOR)
 		return 0;
 
@@ -2786,9 +2989,12 @@
 				   void *priv)
 {
 	struct ffs_function *func = priv;
+	struct ffs_data *ffs = func->ffs;
 	unsigned idx;
 	u8 newValue;
 
+	ffs_log("enter: type %d", type);
+
 	switch (type) {
 	default:
 	case FFS_DESCRIPTOR:
@@ -2833,6 +3039,9 @@
 
 	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
 	*valuep = newValue;
+
+	ffs_log("exit: newValue %d", newValue);
+
 	return 0;
 }
 
@@ -2841,8 +3050,11 @@
 				      unsigned len, void *priv)
 {
 	struct ffs_function *func = priv;
+	struct ffs_data *ffs = func->ffs;
 	u8 length = 0;
 
+	ffs_log("enter: type %d", type);
+
 	switch (type) {
 	case FFS_OS_DESC_EXT_COMPAT: {
 		struct usb_ext_compat_desc *desc = data;
@@ -2921,6 +3133,7 @@
 	struct ffs_function *func = ffs_func_from_usb(f);
 	struct f_fs_opts *ffs_opts =
 		container_of(f->fi, struct f_fs_opts, func_inst);
+	struct ffs_data *ffs = ffs_opts->dev->ffs_data;
 	int ret;
 
 	ENTER();
@@ -2953,8 +3166,10 @@
 	 */
 	if (!ffs_opts->refcnt) {
 		ret = functionfs_bind(func->ffs, c->cdev);
-		if (ret)
+		if (ret) {
+			ffs_log("functionfs_bind returned %d", ret);
 			return ERR_PTR(ret);
+		}
 	}
 	ffs_opts->refcnt++;
 	func->function.strings = func->ffs->stringtabs;
@@ -3002,6 +3217,9 @@
 
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	/* Has descriptors only for speeds gadget does not support */
 	if (unlikely(!(full | high | super)))
 		return -ENOTSUPP;
@@ -3039,7 +3257,7 @@
 	 */
 	if (likely(full)) {
 		func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
-		fs_len = ffs_do_descs(ffs->fs_descs_count,
+		fs_len = ffs_do_descs(ffs, ffs->fs_descs_count,
 				      vla_ptr(vlabuf, d, raw_descs),
 				      d_raw_descs__sz,
 				      __ffs_func_bind_do_descs, func);
@@ -3053,7 +3271,7 @@
 
 	if (likely(high)) {
 		func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
-		hs_len = ffs_do_descs(ffs->hs_descs_count,
+		hs_len = ffs_do_descs(ffs, ffs->hs_descs_count,
 				      vla_ptr(vlabuf, d, raw_descs) + fs_len,
 				      d_raw_descs__sz - fs_len,
 				      __ffs_func_bind_do_descs, func);
@@ -3067,7 +3285,7 @@
 
 	if (likely(super)) {
 		func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
-		ss_len = ffs_do_descs(ffs->ss_descs_count,
+		ss_len = ffs_do_descs(ffs, ffs->ss_descs_count,
 				vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
 				d_raw_descs__sz - fs_len - hs_len,
 				__ffs_func_bind_do_descs, func);
@@ -3085,7 +3303,7 @@
 	 * endpoint numbers rewriting.  We can do that in one go
 	 * now.
 	 */
-	ret = ffs_do_descs(ffs->fs_descs_count +
+	ret = ffs_do_descs(ffs, ffs->fs_descs_count +
 			   (high ? ffs->hs_descs_count : 0) +
 			   (super ? ffs->ss_descs_count : 0),
 			   vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
@@ -3105,7 +3323,7 @@
 				vla_ptr(vlabuf, d, ext_compat) + i * 16;
 			INIT_LIST_HEAD(&desc->ext_prop);
 		}
-		ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+		ret = ffs_do_os_descs(ffs, ffs->ms_os_descs_count,
 				      vla_ptr(vlabuf, d, raw_descs) +
 				      fs_len + hs_len + ss_len,
 				      d_raw_descs__sz - fs_len - hs_len -
@@ -3119,10 +3337,12 @@
 
 	/* And we're done */
 	ffs_event_add(ffs, FUNCTIONFS_BIND);
+
 	return 0;
 
 error:
 	/* XXX Do we need to release all claimed endpoints here? */
+	ffs_log("exit: ret %d", ret);
 	return ret;
 }
 
@@ -3131,11 +3351,14 @@
 {
 	struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
 	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
 	int ret;
 
 	if (IS_ERR(ffs_opts))
 		return PTR_ERR(ffs_opts);
 
+	ffs_log("enter");
+
 	ret = _ffs_func_bind(c, f);
 	if (ret && !--ffs_opts->refcnt)
 		functionfs_unbind(func->ffs);
@@ -3150,6 +3373,9 @@
 {
 	struct ffs_data *ffs = container_of(work,
 		struct ffs_data, reset_work);
+
+	ffs_log("enter");
+
 	ffs_data_reset(ffs);
 }
 
@@ -3160,6 +3386,8 @@
 	struct ffs_data *ffs = func->ffs;
 	int ret = 0, intf;
 
+	ffs_log("enter: alt %d", (int)alt);
+
 	if (alt != (unsigned)-1) {
 		intf = ffs_func_revmap_intf(func, interface);
 		if (unlikely(intf < 0))
@@ -3189,11 +3417,16 @@
 	ret = ffs_func_eps_enable(func);
 	if (likely(ret >= 0))
 		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+
 	return ret;
 }
 
 static void ffs_func_disable(struct usb_function *f)
 {
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+
+	ffs_log("enter");
 	ffs_func_set_alt(f, 0, (unsigned)-1);
 }
 
@@ -3213,6 +3446,11 @@
 	pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
 	pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));
 
+	ffs_log("enter: state %d reqtype=%02x req=%02x wv=%04x wi=%04x wl=%04x",
+			ffs->state, creq->bRequestType, creq->bRequest,
+			le16_to_cpu(creq->wValue), le16_to_cpu(creq->wIndex),
+			le16_to_cpu(creq->wLength));
+
 	/*
 	 * Most requests directed to interface go through here
 	 * (notable exceptions are set/get interface) so we need to
@@ -3281,13 +3519,23 @@
 
 static void ffs_func_suspend(struct usb_function *f)
 {
+	struct ffs_data *ffs = ffs_func_from_usb(f)->ffs;
+
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
 }
 
 static void ffs_func_resume(struct usb_function *f)
 {
+	struct ffs_data *ffs = ffs_func_from_usb(f)->ffs;
+
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
 }
 
@@ -3360,7 +3608,9 @@
 	if (dev)
 		return dev;
 
-	return _ffs_do_find_dev(name);
+	dev = _ffs_do_find_dev(name);
+
+	return dev;
 }
 
 /* Configfs support *********************************************************/
@@ -3451,6 +3701,10 @@
 	unsigned long flags;
 
 	ENTER();
+
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	if (ffs->func == func) {
 		ffs_func_eps_disable(func);
 		ffs->func = NULL;
@@ -3481,6 +3735,9 @@
 	func->interfaces_nums = NULL;
 
 	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+	ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
 }
 
 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
@@ -3568,6 +3825,7 @@
 		dev->single = true;
 
 	ffs_dev_unlock();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ffs_single_dev);
@@ -3593,6 +3851,7 @@
 	struct ffs_dev *ffs_dev;
 
 	ENTER();
+
 	ffs_dev_lock();
 
 	ffs_dev = _ffs_find_dev(dev_name);
@@ -3607,6 +3866,7 @@
 		ffs_dev->mounted = true;
 
 	ffs_dev_unlock();
+
 	return ffs_dev;
 }
 
@@ -3615,6 +3875,7 @@
 	struct ffs_dev *ffs_dev;
 
 	ENTER();
+
 	ffs_dev_lock();
 
 	ffs_dev = ffs_data->private_data;
@@ -3634,6 +3895,9 @@
 	int ret = 0;
 
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_dev_lock();
 
 	ffs_obj = ffs->private_data;
@@ -3658,6 +3922,9 @@
 	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
 done:
 	ffs_dev_unlock();
+
+	ffs_log("exit: ret %d", ret);
+
 	return ret;
 }
 
@@ -3668,6 +3935,9 @@
 	struct config_item *ci;
 
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_dev_lock();
 
 	ffs_obj = ffs->private_data;
@@ -3693,11 +3963,16 @@
 	ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
 	ffs_dev_unlock();
 
-	if (test_bit(FFS_FL_BOUND, &ffs->flags))
+	if (test_bit(FFS_FL_BOUND, &ffs->flags)) {
 		unregister_gadget_item(ci);
+		ffs_log("unreg gadget done");
+	}
+
 	return;
 done:
 	ffs_dev_unlock();
+
+	ffs_log("exit error");
 }
 
 /* Misc helper functions ****************************************************/
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index b9bf791..3708033 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -23,7 +23,7 @@
 #define GSI_MBIM_CTRL_NAME "android_mbim"
 #define GSI_DPL_CTRL_NAME "dpl_ctrl"
 #define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
-#define GSI_MAX_CTRL_PKT_SIZE 4096
+#define GSI_MAX_CTRL_PKT_SIZE 8192
 #define GSI_CTRL_DTR (1 << 0)
 
 #define GSI_NUM_IN_RNDIS_BUFFERS 50
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 4713a1c..9b86d55 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1050,6 +1050,12 @@
 			goto fail_f_midi;
 	}
 
+	if (gadget_is_superspeed_plus(c->cdev->gadget)) {
+		f->ssp_descriptors = usb_copy_descriptors(midi_function);
+		if (!f->ssp_descriptors)
+			goto fail_f_midi;
+	}
+
 	kfree(midi_function);
 
 	return 0;
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
new file mode 100644
index 0000000..0e3a582
--- /dev/null
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -0,0 +1,1921 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#include "configfs.h"
+
+#define MTP_RX_BUFFER_INIT_SIZE    1048576
+#define MTP_TX_BUFFER_INIT_SIZE    1048576
+#define MTP_BULK_BUFFER_SIZE       16384
+#define INTR_BUFFER_SIZE           28
+#define MAX_INST_NAME_LEN          40
+#define MTP_MAX_FILE_SIZE          0xFFFFFFFFL
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE               0   /* initial state, disconnected */
+#define STATE_READY                 1   /* ready for userspace calls */
+#define STATE_BUSY                  2   /* processing userspace calls */
+#define STATE_CANCELED              3   /* transaction canceled by host */
+#define STATE_ERROR                 4   /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define MTP_TX_REQ_MAX 8
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID   0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL              0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
+#define MTP_REQ_RESET               0x66
+#define MTP_REQ_GET_DEVICE_STATUS   0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK             0x2001
+#define MTP_RESPONSE_DEVICE_BUSY    0x2019
+#define DRIVER_NAME "mtp"
+
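+/* number of VFS transfer samples retained for the debugfs performance stats */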
+#define MAX_ITERATION		100
+
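+/* transfer buffer sizes and tx request count, tunable as module parameters */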
+unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
+module_param(mtp_rx_req_len, uint, 0644);
+
+unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;
+module_param(mtp_tx_req_len, uint, 0644);
+
+unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
+module_param(mtp_tx_reqs, uint, 0644);
+
+static const char mtp_shortname[] = DRIVER_NAME "_usb";
+
+struct mtp_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+	struct usb_ep *ep_intr;
+
+	int state;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+	/* to enforce only one ioctl at a time */
+	atomic_t ioctl_excl;
+
+	struct list_head tx_idle;
+	struct list_head intr_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	wait_queue_head_t intr_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+
+	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+	 */
+	struct workqueue_struct *wq;
+	struct work_struct send_file_work;
+	struct work_struct receive_file_work;
+	struct file *xfer_file;
+	loff_t xfer_file_offset;
+	int64_t xfer_file_length;
+	unsigned int xfer_send_header;
+	uint16_t xfer_command;
+	uint32_t xfer_transaction_id;
+	int xfer_result;
+	struct {
+		unsigned long vfs_rbytes;
+		unsigned long vfs_wbytes;
+		unsigned int vfs_rtime;
+		unsigned int vfs_wtime;
+	} perf[MAX_ITERATION];
+	unsigned int dbg_read_index;
+	unsigned int dbg_write_index;
+	struct mutex  read_mutex;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_interface_descriptor ptp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
+	.bInterfaceSubClass     = 1,
+	.bInterfaceProtocol     = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_ss_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
+	.bLength                = sizeof(mtp_ss_in_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+	/* .bMaxBurst           = DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor mtp_ss_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
+	.bLength                = sizeof(mtp_ss_out_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+	/* .bMaxBurst           = DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+	.bInterval              = 6,
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
+	.bLength                = sizeof(mtp_intr_ss_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+	.wBytesPerInterval      = cpu_to_le16(INTR_BUFFER_SIZE),
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_ss_in_desc,
+	(struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &mtp_ss_out_desc,
+	(struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	(struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_ss_in_desc,
+	(struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &mtp_ss_out_desc,
+	(struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	(struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
+	NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+	/* Naming interface "MTP" so libmtp will recognize us */
+	[INTERFACE_STRING_INDEX].s	= "MTP",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+	&mtp_string_table,
+	NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+	18, /* sizeof(mtp_os_string) */
+	USB_DT_STRING,
+	/* Signature field: "MSFT100" */
+	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+	/* vendor code */
+	1,
+	/* padding */
+	0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+	__le32	dwLength;
+	__u16	bcdVersion;
+	__le16	wIndex;
+	__u8	bCount;
+	__u8	reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+	__u8	bFirstInterfaceNumber;
+	__u8	bInterfaceCount;
+	__u8	compatibleID[8];
+	__u8	subCompatibleID[8];
+	__u8	reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct mtp_ext_config_desc {
+	struct mtp_ext_config_desc_header	header;
+	struct mtp_ext_config_desc_function    function;
+};
+
+static struct mtp_ext_config_desc mtp_ext_config_desc = {
+	.header = {
+		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+		.bcdVersion = __constant_cpu_to_le16(0x0100),
+		.wIndex = __constant_cpu_to_le16(4),
+		.bCount = 1,
+	},
+	.function = {
+		.bFirstInterfaceNumber = 0,
+		.bInterfaceCount = 1,
+		.compatibleID = { 'M', 'T', 'P' },
+	},
+};
+
+struct mtp_device_status {
+	__le16	wLength;
+	__le16	wCode;
+};
+
+struct mtp_data_header {
+	/* length of packet, including this header */
+	__le32	length;
+	/* container type (2 for data packet) */
+	__le16	type;
+	/* MTP command code */
+	__le16	command;
+	/* MTP transaction ID */
+	__le32	transaction_id;
+};
+
+struct mtp_instance {
+	struct usb_function_instance func_inst;
+	const char *name;
+	struct mtp_dev *dev;
+	char mtp_ext_compat_id[16];
+	struct usb_os_desc mtp_os_desc;
+};
+
+/* temporary variable used between mtp_open() and mtp_function_bind() */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+	return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
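+/*
+ * Atomic trylock: returns 0 and takes the lock when it is free, otherwise
+ * backs the counter out and returns -1 without blocking.
+ */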
+static inline int mtp_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (req->status != 0 && dev->state != STATE_OFFLINE)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	dev->rx_done = 1;
+	if (req->status != 0 && dev->state != STATE_OFFLINE)
+		dev->state = STATE_ERROR;
+
+	wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (req->status != 0 && dev->state != STATE_OFFLINE)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->intr_idle, req);
+
+	wake_up(&dev->intr_wq);
+}
+
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc,
+				struct usb_endpoint_descriptor *intr_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_intr = ep;
+
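+
+	/*
+	 * Allocate with the (large) module-parameter sizes first; on failure,
+	 * free whatever was allocated, fall back to MTP_BULK_BUFFER_SIZE and
+	 * the default request count, and retry.
+	 */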
+retry_tx_alloc:
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < mtp_tx_reqs; i++) {
+		req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
+		if (!req) {
+			if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
+				goto fail;
+			while ((req = mtp_req_get(dev, &dev->tx_idle)))
+				mtp_request_free(req, dev->ep_in);
+			mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
+			mtp_tx_reqs = MTP_TX_REQ_MAX;
+			goto retry_tx_alloc;
+		}
+		req->complete = mtp_complete_in;
+		mtp_req_put(dev, &dev->tx_idle, req);
+	}
+
+	/*
+	 * The RX buffer should be aligned to EP max packet for
+	 * some controllers.  At bind time, we don't know the
+	 * operational speed.  Hence assuming super speed max
+	 * packet size.
+	 */
+	if (mtp_rx_req_len % 1024)
+		mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+
+retry_rx_alloc:
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
+		if (!req) {
+			if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
+				goto fail;
+			for (--i; i >= 0; i--)
+				mtp_request_free(dev->rx_req[i], dev->ep_out);
+			mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+			goto retry_rx_alloc;
+		}
+		req->complete = mtp_complete_out;
+		dev->rx_req[i] = req;
+	}
+	for (i = 0; i < INTR_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_intr;
+		mtp_req_put(dev, &dev->intr_idle, req);
+	}
+
+	return 0;
+
+fail:
+	pr_err("%s: could not allocate requests\n", __func__);
+	return -ENOMEM;
+}
+
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	ssize_t r = count, xfer, len;
+	int ret = 0;
+
+	DBG(cdev, "%s(%zu) state:%d\n", __func__, count, dev->state);
+
+	/* we will block until we're online */
+	DBG(cdev, "mtp_read: waiting for online state\n");
+	ret = wait_event_interruptible(dev->read_wq,
+		dev->state != STATE_OFFLINE);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+	len = ALIGN(count, dev->ep_out->maxpacket);
+	if (len > mtp_rx_req_len)
+		return -EINVAL;
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		return -ENODEV;
+	}
+
+	if (dev->ep_out->desc) {
+		if (!cdev) {
+			spin_unlock_irq(&dev->lock);
+			return -ENODEV;
+		}
+
+		len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
+		if (len > MTP_BULK_BUFFER_SIZE) {
+			spin_unlock_irq(&dev->lock);
+			return -EINVAL;
+		}
+	}
+
+	if (dev->state == STATE_CANCELED) {
+		/* report cancellation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+	mutex_lock(&dev->read_mutex);
+	if (dev->state == STATE_OFFLINE) {
+		r = -EIO;
+		mutex_unlock(&dev->read_mutex);
+		goto done;
+	}
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = len;
+	dev->rx_done = 0;
+	mutex_unlock(&dev->read_mutex);
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		DBG(cdev, "rx %pK queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq,
+				dev->rx_done || dev->state != STATE_BUSY);
+	if (dev->state == STATE_CANCELED) {
+		r = -ECANCELED;
+		if (!dev->rx_done)
+			usb_ep_dequeue(dev->ep_out, req);
+		spin_lock_irq(&dev->lock);
+		dev->state = STATE_CANCELED;
+		spin_unlock_irq(&dev->lock);
+		goto done;
+	}
+	if (ret < 0) {
+		r = ret;
+		usb_ep_dequeue(dev->ep_out, req);
+		goto done;
+	}
+	mutex_lock(&dev->read_mutex);
+	if (dev->state == STATE_BUSY) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		DBG(cdev, "rx %pK %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+	mutex_unlock(&dev->read_mutex);
+done:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "%s returning %zd state:%d\n", __func__, r, dev->state);
+	return r;
+}
+
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = NULL;
+	ssize_t r = count;
+	unsigned int xfer;
+	int sendZLP = 0;
+	int ret;
+
+	DBG(cdev, "%s(%zu) state:%d\n", __func__, count, dev->state);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancellation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		return -ENODEV;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+		sendZLP = 1;
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		if (dev->state != STATE_BUSY) {
+			DBG(cdev, "mtp_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = mtp_req_get(dev, &dev->tx_idle))
+				|| dev->state != STATE_BUSY));
+		if (!req) {
+			DBG(cdev, "%s request NULL ret:%d state:%d\n",
+				__func__, ret, dev->state);
+			r = ret;
+			break;
+		}
+
+		if (count > mtp_tx_req_len)
+			xfer = mtp_tx_req_len;
+		else
+			xfer = count;
+		if (xfer && copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "mtp_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = NULL;
+	}
+
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "%s returning %zd state:%d\n", __func__, r, dev->state);
+	return r;
+}
+
+/* read from a local file and write to USB */
+static void send_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						send_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = NULL;
+	struct mtp_data_header *header;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int xfer, ret, hdr_size;
+	int r = 0;
+	int sendZLP = 0;
+	ktime_t start_time;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+	if (dev->xfer_send_header) {
+		hdr_size = sizeof(struct mtp_data_header);
+		count += hdr_size;
+	} else {
+		hdr_size = 0;
+	}
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+		sendZLP = 1;
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+			(req = mtp_req_get(dev, &dev->tx_idle))
+			|| dev->state != STATE_BUSY);
+		if (dev->state == STATE_CANCELED) {
+			r = -ECANCELED;
+			break;
+		}
+		if (!req) {
+			DBG(cdev,
+				"%s request NULL ret:%d state:%d\n", __func__,
+				ret, dev->state);
+			r = ret;
+			break;
+		}
+
+		if (count > mtp_tx_req_len)
+			xfer = mtp_tx_req_len;
+		else
+			xfer = count;
+
+		if (hdr_size) {
+			/* prepend MTP data header */
+			header = (struct mtp_data_header *)req->buf;
+			/*
+			 * set file size with header according to
+			 * MTP Specification v1.0
+			 */
+			header->length = (count > MTP_MAX_FILE_SIZE) ?
+				MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
+			header->type = __cpu_to_le16(2); /* data packet */
+			header->command = __cpu_to_le16(dev->xfer_command);
+			header->transaction_id =
+					__cpu_to_le32(dev->xfer_transaction_id);
+		}
+		start_time = ktime_get();
+		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
+								&offset);
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+
+		xfer = ret + hdr_size;
+		dev->perf[dev->dbg_read_index].vfs_rtime =
+			ktime_to_us(ktime_sub(ktime_get(), start_time));
+		dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
+		dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
+		hdr_size = 0;
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "send_file_work: xfer error %d\n", ret);
+			if (dev->state != STATE_OFFLINE)
+				dev->state = STATE_ERROR;
+			r = -EIO;
+			break;
+		}
+
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = NULL;
+	}
+
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	DBG(cdev, "%s returning %d state:%d\n", __func__, r, dev->state);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
+/* read from USB and write to a local file */
+static void receive_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						receive_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *read_req = NULL, *write_req = NULL;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int ret, cur_buf = 0;
+	int r = 0;
+	ktime_t start_time;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "receive_file_work(%lld)\n", count);
+	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
+		DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
+						count, dev->ep_out->maxpacket);
+
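+
+	/*
+	 * Pipeline the transfer: while one rx request is out on the wire,
+	 * the previously completed buffer is written to the file, cycling
+	 * through the rx_req ring.
+	 */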
+	while (count > 0 || write_req) {
+		if (count > 0) {
+			mutex_lock(&dev->read_mutex);
+			if (dev->state == STATE_OFFLINE) {
+				r = -EIO;
+				mutex_unlock(&dev->read_mutex);
+				break;
+			}
+			/* queue a request */
+			read_req = dev->rx_req[cur_buf];
+			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+			/* some h/w expects size to be aligned to ep's MTU */
+			read_req->length = mtp_rx_req_len;
+
+			dev->rx_done = 0;
+			mutex_unlock(&dev->read_mutex);
+			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+			if (ret < 0) {
+				r = -EIO;
+				if (dev->state != STATE_OFFLINE)
+					dev->state = STATE_ERROR;
+				break;
+			}
+		}
+
+		if (write_req) {
+			DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
+			start_time = ktime_get();
+			mutex_lock(&dev->read_mutex);
+			if (dev->state == STATE_OFFLINE) {
+				r = -EIO;
+				mutex_unlock(&dev->read_mutex);
+				break;
+			}
+			ret = vfs_write(filp, write_req->buf, write_req->actual,
+				&offset);
+			DBG(cdev, "vfs_write %d\n", ret);
+			if (ret != write_req->actual) {
+				r = -EIO;
+				mutex_unlock(&dev->read_mutex);
+				if (dev->state != STATE_OFFLINE)
+					dev->state = STATE_ERROR;
+				if (read_req && !dev->rx_done)
+					usb_ep_dequeue(dev->ep_out, read_req);
+				break;
+			}
+			mutex_unlock(&dev->read_mutex);
+			dev->perf[dev->dbg_write_index].vfs_wtime =
+				ktime_to_us(ktime_sub(ktime_get(), start_time));
+			dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
+			dev->dbg_write_index =
+				(dev->dbg_write_index + 1) % MAX_ITERATION;
+			write_req = NULL;
+		}
+
+		if (read_req) {
+			/* wait for our last read to complete */
+			ret = wait_event_interruptible(dev->read_wq,
+				dev->rx_done || dev->state != STATE_BUSY);
+			if (dev->state == STATE_CANCELED
+					|| dev->state == STATE_OFFLINE) {
+				if (dev->state == STATE_OFFLINE)
+					r = -EIO;
+				else
+					r = -ECANCELED;
+				if (!dev->rx_done)
+					usb_ep_dequeue(dev->ep_out, read_req);
+				break;
+			}
+			if (read_req->status) {
+				r = read_req->status;
+				break;
+			}
+
+			mutex_lock(&dev->read_mutex);
+			if (dev->state == STATE_OFFLINE) {
+				r = -EIO;
+				mutex_unlock(&dev->read_mutex);
+				break;
+			}
+			/* Check if we aligned the size due to MTU constraint */
+			if (count < read_req->length)
+				read_req->actual = (read_req->actual > count ?
+						count : read_req->actual);
+			/* if xfer_file_length is 0xFFFFFFFF, then we read until
+			 * we get a zero length packet
+			 */
+			if (count != 0xFFFFFFFF)
+				count -= read_req->actual;
+			if (read_req->actual < read_req->length) {
+				/*
+				 * short packet is used to signal EOF for
+				 * sizes > 4 gig
+				 */
+				DBG(cdev, "got short packet\n");
+				count = 0;
+			}
+
+			write_req = read_req;
+			read_req = NULL;
+			mutex_unlock(&dev->read_mutex);
+		}
+	}
+
+	DBG(cdev, "receive_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+	struct usb_request *req = NULL;
+	int ret;
+	int length = event->length;
+
+	DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
+
+	if (length < 0 || length > INTR_BUFFER_SIZE)
+		return -EINVAL;
+	if (dev->state == STATE_OFFLINE)
+		return -ENODEV;
+
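+	/* wait up to a second for an idle interrupt request to free up */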
+	ret = wait_event_interruptible_timeout(dev->intr_wq,
+			(req = mtp_req_get(dev, &dev->intr_idle)),
+			msecs_to_jiffies(1000));
+	if (!req)
+		return -ETIME;
+
+	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+		mtp_req_put(dev, &dev->intr_idle, req);
+		return -EFAULT;
+	}
+	req->length = length;
+	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+	if (ret)
+		mtp_req_put(dev, &dev->intr_idle, req);
+
+	return ret;
+}
+
+static long mtp_send_receive_ioctl(struct file *fp, unsigned int code,
+	struct mtp_file_range *mfr)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct file *filp = NULL;
+	struct work_struct *work;
+	int ret = -EINVAL;
+
+	if (mtp_lock(&dev->ioctl_excl)) {
+		DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
+		return -EBUSY;
+	}
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancellation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		ret = -ECANCELED;
+		goto out;
+	}
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		ret = -ENODEV;
+		goto out;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+	/* hold a reference to the file while we are working with it */
+	filp = fget(mfr->fd);
+	if (!filp) {
+		ret = -EBADF;
+		goto fail;
+	}
+
+	/* write the parameters */
+	dev->xfer_file = filp;
+	dev->xfer_file_offset = mfr->offset;
+	dev->xfer_file_length = mfr->length;
+	/* make sure write is done before parameters are read */
+	smp_wmb();
+
+	if (code == MTP_SEND_FILE_WITH_HEADER) {
+		work = &dev->send_file_work;
+		dev->xfer_send_header = 1;
+		dev->xfer_command = mfr->command;
+		dev->xfer_transaction_id = mfr->transaction_id;
+	} else if (code == MTP_SEND_FILE) {
+		work = &dev->send_file_work;
+		dev->xfer_send_header = 0;
+	} else {
+		work = &dev->receive_file_work;
+	}
+
+	/* We do the file transfer on a work queue so it will run
+	 * in kernel context, which is necessary for vfs_read and
+	 * vfs_write to use our buffers in the kernel address space.
+	 */
+	queue_work(dev->wq, work);
+	/* wait for operation to complete */
+	flush_workqueue(dev->wq);
+	fput(filp);
+
+	/* read the result */
+	smp_rmb();
+	ret = dev->xfer_result;
+
+fail:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		ret = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+out:
+	mtp_unlock(&dev->ioctl_excl);
+	DBG(dev->cdev, "ioctl returning %d\n", ret);
+	return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned int code, unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct mtp_file_range	mfr;
+	struct mtp_event	event;
+	int ret = -EINVAL;
+
+	switch (code) {
+	case MTP_SEND_FILE:
+	case MTP_RECEIVE_FILE:
+	case MTP_SEND_FILE_WITH_HEADER:
+		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		ret = mtp_send_receive_ioctl(fp, code, &mfr);
+	break;
+	case MTP_SEND_EVENT:
+		if (mtp_lock(&dev->ioctl_excl))
+			return -EBUSY;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+			ret = -EFAULT;
+		else
+			ret = mtp_send_event(dev, &event);
+		mtp_unlock(&dev->ioctl_excl);
+	break;
+	default:
+		DBG(dev->cdev, "unknown ioctl code: %d\n", code);
+	}
+fail:
+	return ret;
+}
+
+/*
+ * 32 bit userspace calling into 64 bit kernel. handle ioctl code
+ * and userspace pointer
+ */
+#ifdef CONFIG_COMPAT
+static long compat_mtp_ioctl(struct file *fp, unsigned int code,
+	unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct mtp_file_range	mfr;
+	struct __compat_mtp_file_range	cmfr;
+	struct mtp_event	event;
+	struct __compat_mtp_event cevent;
+	unsigned int cmd;
+	bool send_file = false;
+	int ret = -EINVAL;
+
+	switch (code) {
+	case COMPAT_MTP_SEND_FILE:
+		cmd = MTP_SEND_FILE;
+		send_file = true;
+		break;
+	case COMPAT_MTP_RECEIVE_FILE:
+		cmd = MTP_RECEIVE_FILE;
+		send_file = true;
+		break;
+	case COMPAT_MTP_SEND_FILE_WITH_HEADER:
+		cmd = MTP_SEND_FILE_WITH_HEADER;
+		send_file = true;
+		break;
+	case COMPAT_MTP_SEND_EVENT:
+		cmd = MTP_SEND_EVENT;
+		break;
+	default:
+		DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
+		ret = -ENOIOCTLCMD;
+		goto fail;
+	}
+
+	if (send_file) {
+		if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		mfr.fd = cmfr.fd;
+		mfr.offset = cmfr.offset;
+		mfr.length = cmfr.length;
+		mfr.command = cmfr.command;
+		mfr.transaction_id = cmfr.transaction_id;
+		ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
+	} else {
+		if (mtp_lock(&dev->ioctl_excl))
+			return -EBUSY;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&cevent, (void __user *)value,
+			sizeof(cevent))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		event.length = cevent.length;
+		event.data = compat_ptr(cevent.data);
+		ret = mtp_send_event(dev, &event);
+		mtp_unlock(&dev->ioctl_excl);
+	}
+fail:
+	return ret;
+}
+#endif
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+	pr_info("mtp_open\n");
+	if (mtp_lock(&_mtp_dev->open_excl)) {
+		pr_err("%s mtp_release not called returning EBUSY\n", __func__);
+		return -EBUSY;
+	}
+
+	/* clear any error condition */
+	if (_mtp_dev->state != STATE_OFFLINE)
+		_mtp_dev->state = STATE_READY;
+
+	fp->private_data = _mtp_dev;
+	return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+	pr_info("mtp_release\n");
+
+	mtp_unlock(&_mtp_dev->open_excl);
+	return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+	.owner = THIS_MODULE,
+	.read = mtp_read,
+	.write = mtp_write,
+	.unlocked_ioctl = mtp_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_mtp_ioctl,
+#endif
+	.open = mtp_open,
+	.release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = mtp_shortname,
+	.fops = &mtp_fops,
+};
+
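+/*
+ * Handle MTP-specific control requests: the Microsoft OS string and
+ * extended configuration descriptor queries, plus the MTP class requests
+ * (cancel and get-device-status).
+ */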
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int	value = -EOPNOTSUPP;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long	flags;
+
+	VDBG(cdev, "mtp_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+	/* Handle MTP OS string */
+	if (ctrl->bRequestType ==
+			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+			&& (w_value >> 8) == USB_DT_STRING
+			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
+		value = (w_length < sizeof(mtp_os_string)
+				? w_length : sizeof(mtp_os_string));
+		memcpy(cdev->req->buf, mtp_os_string, value);
+	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+		/* Handle MTP OS descriptor */
+		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == 1
+				&& (ctrl->bRequestType & USB_DIR_IN)
+				&& (w_index == 4 || w_index == 5)) {
+			value = (w_length < sizeof(mtp_ext_config_desc) ?
+					w_length : sizeof(mtp_ext_config_desc));
+			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+
+			/* update compatibleID if PTP */
+			if (dev->function.fs_descriptors == fs_ptp_descs) {
+				struct mtp_ext_config_desc *d = cdev->req->buf;
+
+				d->function.compatibleID[0] = 'P';
+			}
+		}
+	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+				&& w_value == 0) {
+			DBG(cdev, "MTP_REQ_CANCEL\n");
+
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->state == STATE_BUSY) {
+				dev->state = STATE_CANCELED;
+				wake_up(&dev->read_wq);
+				wake_up(&dev->write_wq);
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			/* We need to queue a request to read the remaining
+			 * bytes, but we don't actually need to look at
+			 * the contents.
+			 */
+			value = w_length;
+		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+				&& w_index == 0 && w_value == 0) {
+			struct mtp_device_status *status = cdev->req->buf;
+
+			status->wLength =
+				__constant_cpu_to_le16(sizeof(*status));
+
+			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			/* device status is "busy" until we report
+			 * the cancellation to userspace
+			 */
+			if (dev->state == STATE_CANCELED)
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+			else
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_OK);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			value = sizeof(*status);
+		}
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		int rc;
+
+		cdev->req->zero = value < w_length;
+		cdev->req->length = value;
+		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (rc < 0)
+			ERROR(cdev, "%s: response queue error\n", __func__);
+	}
+	return value;
+}
+
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct mtp_dev	*dev = func_to_mtp(f);
+	int			id;
+	int			ret;
+	struct mtp_instance *fi_mtp;
+
+	dev->cdev = cdev;
+	DBG(cdev, "%s dev: %pK\n", __func__, dev);
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	mtp_interface_desc.bInterfaceNumber = id;
+
+	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+		ret = usb_string_id(c->cdev);
+		if (ret < 0)
+			return ret;
+		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+		mtp_interface_desc.iInterface = ret;
+	}
+
+	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
+
+	if (cdev->use_os_string) {
+		f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+					GFP_KERNEL);
+		if (!f->os_desc_table)
+			return -ENOMEM;
+		f->os_desc_n = 1;
+		f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
+	}
+
+	/* allocate endpoints */
+	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+			&mtp_fullspeed_out_desc, &mtp_intr_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		mtp_highspeed_in_desc.bEndpointAddress =
+			mtp_fullspeed_in_desc.bEndpointAddress;
+		mtp_highspeed_out_desc.bEndpointAddress =
+			mtp_fullspeed_out_desc.bEndpointAddress;
+	}
+	/* support super speed hardware */
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		unsigned int max_burst;
+
+		/* Calculate bMaxBurst, we know packet size is 1024 */
+		max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
+		mtp_ss_in_desc.bEndpointAddress =
+			mtp_fullspeed_in_desc.bEndpointAddress;
+		mtp_ss_in_comp_desc.bMaxBurst = max_burst;
+		mtp_ss_out_desc.bEndpointAddress =
+			mtp_fullspeed_out_desc.bEndpointAddress;
+		mtp_ss_out_comp_desc.bMaxBurst = max_burst;
+	}
+
+	fi_mtp->func_inst.f = &dev->function;
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+		gadget_is_superspeed(c->cdev->gadget) ? "super" :
+		(gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
+		f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct mtp_instance *fi_mtp;
+	struct usb_request *req;
+	int i;
+	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
+	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
+	mutex_lock(&dev->read_mutex);
+	while ((req = mtp_req_get(dev, &dev->tx_idle)))
+		mtp_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		mtp_request_free(dev->rx_req[i], dev->ep_out);
+	while ((req = mtp_req_get(dev, &dev->intr_idle)))
+		mtp_request_free(req, dev->ep_intr);
+	mutex_unlock(&dev->read_mutex);
+	spin_lock_irq(&dev->lock);
+	dev->state = STATE_OFFLINE;
+	dev->cdev = NULL;
+	spin_unlock_irq(&dev->lock);
+	kfree(f->os_desc_table);
+	f->os_desc_n = 0;
+	fi_mtp->func_inst.f = NULL;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+		unsigned int intf, unsigned int alt)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
+	if (ret) {
+		usb_ep_disable(dev->ep_out);
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	ret = usb_ep_enable(dev->ep_intr);
+	if (ret) {
+		usb_ep_disable(dev->ep_out);
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+	dev->state = STATE_READY;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "mtp_function_disable\n");
+	spin_lock_irq(&dev->lock);
+	dev->state = STATE_OFFLINE;
+	spin_unlock_irq(&dev->lock);
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+	usb_ep_disable(dev->ep_intr);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int debug_mtp_read_stats(struct seq_file *s, void *unused)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int i;
+	unsigned long flags;
+	unsigned int min, max = 0, sum = 0, iteration = 0;
+
+	seq_puts(s, "\n=======================\n");
+	seq_puts(s, "MTP Write Stats:\n");
+	seq_puts(s, "\n=======================\n");
+	spin_lock_irqsave(&dev->lock, flags);
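+	/* only samples that filled a complete request buffer are averaged */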
+	min = dev->perf[0].vfs_wtime;
+	for (i = 0; i < MAX_ITERATION; i++) {
+		seq_printf(s, "vfs write: bytes:%lu\t\t time:%d\n",
+				dev->perf[i].vfs_wbytes,
+				dev->perf[i].vfs_wtime);
+		if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
+			sum += dev->perf[i].vfs_wtime;
+			if (min > dev->perf[i].vfs_wtime)
+				min = dev->perf[i].vfs_wtime;
+			if (max < dev->perf[i].vfs_wtime)
+				max = dev->perf[i].vfs_wtime;
+			iteration++;
+		}
+	}
+
+	seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
+				min, max, iteration ? sum / iteration : 0);
+	min = max = sum = iteration = 0;
+	seq_puts(s, "\n=======================\n");
+	seq_puts(s, "MTP Read Stats:\n");
+	seq_puts(s, "\n=======================\n");
+
+	min = dev->perf[0].vfs_rtime;
+	for (i = 0; i < MAX_ITERATION; i++) {
+		seq_printf(s, "vfs read: bytes:%lu\t\t time:%d\n",
+				dev->perf[i].vfs_rbytes,
+				dev->perf[i].vfs_rtime);
+		if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
+			sum += dev->perf[i].vfs_rtime;
+			if (min > dev->perf[i].vfs_rtime)
+				min = dev->perf[i].vfs_rtime;
+			if (max < dev->perf[i].vfs_rtime)
+				max = dev->perf[i].vfs_rtime;
+			iteration++;
+		}
+	}
+
+	seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
+				min, max, iteration ? sum / iteration : 0);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return 0;
+}
+
+static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	int clear_stats;
+	unsigned long flags;
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (buf == NULL) {
+		pr_err("[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (kstrtoint_from_user(buf, count, 0, &clear_stats) ||
+	    clear_stats != 0) {
+		pr_err("Wrong value. To clear stats, enter value as 0.\n");
+		goto done;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
+	dev->dbg_read_index = 0;
+	dev->dbg_write_index = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+done:
+	return count;
+}
+
+static int debug_mtp_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debug_mtp_read_stats, inode->i_private);
+}
+
+static const struct file_operations debug_mtp_ops = {
+	.open = debug_mtp_open,
+	.read = seq_read,
+	.write = debug_mtp_reset_stats,
+};
+
+static struct dentry *dent_mtp;
+static void mtp_debugfs_init(void)
+{
+	struct dentry *dent_mtp_status;
+
+	dent_mtp = debugfs_create_dir("usb_mtp", NULL);
+	if (!dent_mtp || IS_ERR(dent_mtp))
+		return;
+
+	dent_mtp_status = debugfs_create_file("status", 0644,
+					dent_mtp, NULL, &debug_mtp_ops);
+	if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
+		debugfs_remove(dent_mtp);
+		dent_mtp = NULL;
+		return;
+	}
+}
+
+static void mtp_debugfs_remove(void)
+{
+	debugfs_remove_recursive(dent_mtp);
+}
+
+static int __mtp_setup(struct mtp_instance *fi_mtp)
+{
+	struct mtp_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+	if (fi_mtp != NULL)
+		fi_mtp->dev = dev;
+
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	init_waitqueue_head(&dev->intr_wq);
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->ioctl_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->intr_idle);
+
+	dev->wq = create_singlethread_workqueue("f_mtp");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+	INIT_WORK(&dev->send_file_work, send_file_work);
+	INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+	_mtp_dev = dev;
+
+	ret = misc_register(&mtp_device);
+	if (ret)
+		goto err2;
+
+	mtp_debugfs_init();
+	return 0;
+
+err2:
+	destroy_workqueue(dev->wq);
+err1:
+	_mtp_dev = NULL;
+	kfree(dev);
+	pr_err("mtp gadget driver failed to initialize\n");
+	return ret;
+}
+
+static int mtp_setup_configfs(struct mtp_instance *fi_mtp)
+{
+	return __mtp_setup(fi_mtp);
+}
+
+
+static void mtp_cleanup(void)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (!dev)
+		return;
+
+	mtp_debugfs_remove();
+	misc_deregister(&mtp_device);
+	destroy_workqueue(dev->wq);
+	_mtp_dev = NULL;
+	kfree(dev);
+}
+
+static struct mtp_instance *to_mtp_instance(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct mtp_instance,
+		func_inst.group);
+}
+
+static void mtp_attr_release(struct config_item *item)
+{
+	struct mtp_instance *fi_mtp = to_mtp_instance(item);
+
+	usb_put_function_instance(&fi_mtp->func_inst);
+}
+
+static struct configfs_item_operations mtp_item_ops = {
+	.release        = mtp_attr_release,
+};
+
+static struct config_item_type mtp_func_type = {
+	.ct_item_ops    = &mtp_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+
+static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi)
+{
+	return container_of(fi, struct mtp_instance, func_inst);
+}
+
+static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name)
+{
+	struct mtp_instance *fi_mtp;
+	char *ptr;
+	int name_len;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ptr = kstrndup(name, name_len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	fi_mtp = to_fi_mtp(fi);
+	fi_mtp->name = ptr;
+
+	return 0;
+}
+
+static void mtp_free_inst(struct usb_function_instance *fi)
+{
+	struct mtp_instance *fi_mtp;
+
+	fi_mtp = to_fi_mtp(fi);
+	kfree(fi_mtp->name);
+	mtp_cleanup();
+	kfree(fi_mtp);
+}
+
+struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
+{
+	struct mtp_instance *fi_mtp;
+	int ret = 0;
+	struct usb_os_desc *descs[1];
+	char *names[1];
+
+	fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
+	if (!fi_mtp)
+		return ERR_PTR(-ENOMEM);
+	fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
+	fi_mtp->func_inst.free_func_inst = mtp_free_inst;
+
+	fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
+	INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
+	descs[0] = &fi_mtp->mtp_os_desc;
+	names[0] = "MTP";
+
+	if (mtp_config) {
+		ret = mtp_setup_configfs(fi_mtp);
+		if (ret) {
+			kfree(fi_mtp);
+			pr_err("Error setting up MTP\n");
+			return ERR_PTR(ret);
+		}
+	} else
+		fi_mtp->dev = _mtp_dev;
+
+	config_group_init_type_name(&fi_mtp->func_inst.group,
+					"", &mtp_func_type);
+	usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
+					descs, names, THIS_MODULE);
+
+	mutex_init(&fi_mtp->dev->read_mutex);
+
+	return &fi_mtp->func_inst;
+}
+EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
+
+static struct usb_function_instance *mtp_alloc_inst(void)
+{
+	return alloc_inst_mtp_ptp(true);
+}
+
+static int mtp_ctrlreq_configfs(struct usb_function *f,
+				const struct usb_ctrlrequest *ctrl)
+{
+	return mtp_ctrlrequest(f->config->cdev, ctrl);
+}
+
+static void mtp_free(struct usb_function *f)
+{
+	/* NO-OP: no function-specific resource allocation in mtp_alloc */
+}
+
+struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
+					bool mtp_config)
+{
+	struct mtp_instance *fi_mtp = to_fi_mtp(fi);
+	struct mtp_dev *dev;
+
+	/*
+	 * PTP piggybacks on MTP function so make sure we have
+	 * created MTP function before we associate this PTP
+	 * function with a gadget configuration.
+	 */
+	if (fi_mtp->dev == NULL) {
+		pr_err("Error: Create MTP function before linking"
+				" PTP function with a gadget configuration\n");
+		pr_err("\t1: Delete existing PTP function if any\n");
+		pr_err("\t2: Create MTP function\n");
+		pr_err("\t3: Create and symlink PTP function"
+				" with a gadget configuration\n");
+		return ERR_PTR(-EINVAL); /* Invalid Configuration */
+	}
+
+	dev = fi_mtp->dev;
+	dev->function.name = DRIVER_NAME;
+	dev->function.strings = mtp_strings;
+	if (mtp_config) {
+		dev->function.fs_descriptors = fs_mtp_descs;
+		dev->function.hs_descriptors = hs_mtp_descs;
+		dev->function.ss_descriptors = ss_mtp_descs;
+		dev->function.ssp_descriptors = ss_mtp_descs;
+	} else {
+		dev->function.fs_descriptors = fs_ptp_descs;
+		dev->function.hs_descriptors = hs_ptp_descs;
+		dev->function.ss_descriptors = ss_ptp_descs;
+		dev->function.ssp_descriptors = ss_ptp_descs;
+	}
+	dev->function.bind = mtp_function_bind;
+	dev->function.unbind = mtp_function_unbind;
+	dev->function.set_alt = mtp_function_set_alt;
+	dev->function.disable = mtp_function_disable;
+	dev->function.setup = mtp_ctrlreq_configfs;
+	dev->function.free_func = mtp_free;
+	fi->f = &dev->function;
+
+	return &dev->function;
+}
+EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
+
+static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
+{
+	return function_alloc_mtp_ptp(fi, true);
+}
+
+DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/f_mtp.h b/drivers/usb/gadget/function/f_mtp.h
new file mode 100644
index 0000000..7adb1ff
--- /dev/null
+++ b/drivers/usb/gadget/function/f_mtp.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Badhri Jagan Sridharan <badhri@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+extern struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config);
+extern struct usb_function *function_alloc_mtp_ptp(
+			struct usb_function_instance *fi, bool mtp_config);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 19556f0..97cce3b 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1580,10 +1580,58 @@
 	.ct_owner	= THIS_MODULE,
 };
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+
+struct ncm_setup_desc {
+	struct work_struct work;
+	struct device *device;
+	uint8_t major; /* MirrorLink major version */
+	uint8_t minor; /* MirrorLink minor version */
+};
+
+static struct ncm_setup_desc *_ncm_setup_desc;
+
+#define MIRROR_LINK_STRING_LENGTH_MAX 32
+static void ncm_setup_work(struct work_struct *data)
+{
+	char mirror_link_string[MIRROR_LINK_STRING_LENGTH_MAX];
+	char *envp[2] = { mirror_link_string, NULL };
+
+	snprintf(mirror_link_string, MIRROR_LINK_STRING_LENGTH_MAX,
+		"MirrorLink=V%d.%d",
+		_ncm_setup_desc->major, _ncm_setup_desc->minor);
+	kobject_uevent_env(&_ncm_setup_desc->device->kobj, KOBJ_CHANGE, envp);
+}
+
+int ncm_ctrlrequest(struct usb_composite_dev *cdev,
+			const struct usb_ctrlrequest *ctrl)
+{
+	int value = -EOPNOTSUPP;
+
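+	/*
+	 * MirrorLink vendor request: the negotiated version arrives in
+	 * wValue (major in the low byte, minor in the high byte) and is
+	 * reported to userspace via a KOBJ_CHANGE uevent.
+	 */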
+	if (ctrl->bRequestType == 0x40 && ctrl->bRequest == 0xF0
+			&& _ncm_setup_desc) {
+		u16 w_value = le16_to_cpu(ctrl->wValue);
+
+		_ncm_setup_desc->minor = (uint8_t)(w_value >> 8);
+		_ncm_setup_desc->major = (uint8_t)(w_value & 0xFF);
+		schedule_work(&_ncm_setup_desc->work);
+		value = 0;
+	}
+
+	return value;
+}
+#endif
+
 static void ncm_free_inst(struct usb_function_instance *f)
 {
 	struct f_ncm_opts *opts;
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	cancel_work_sync(&_ncm_setup_desc->work);
+	/* release _ncm_setup_desc related resource */
+	device_destroy(_ncm_setup_desc->device->class,
+		_ncm_setup_desc->device->devt);
+	kfree(_ncm_setup_desc);
+#endif
+
 	opts = container_of(f, struct f_ncm_opts, func_inst);
 	if (opts->bound)
 		gether_cleanup(netdev_priv(opts->net));
@@ -1602,6 +1650,14 @@
 
 	config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	_ncm_setup_desc = kzalloc(sizeof(*_ncm_setup_desc), GFP_KERNEL);
+	if (!_ncm_setup_desc)
+		return ERR_PTR(-ENOMEM);
+	INIT_WORK(&_ncm_setup_desc->work, ncm_setup_work);
+	_ncm_setup_desc->device = create_function_device("f_ncm");
+#endif
+
 	return &opts->func_inst;
 }
 
@@ -1626,6 +1682,8 @@
 
 	DBG(c->cdev, "ncm unbind\n");
 
+	opts->bound = false;
+
 	hrtimer_cancel(&ncm->task_timer);
 
 	ncm_string_defs[0].id = 0;
@@ -1635,7 +1693,6 @@
 	usb_ep_free_request(ncm->notify, ncm->notify_req);
 
 	gether_cleanup(netdev_priv(opts->net));
-	opts->bound = false;
 }
 
 static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
diff --git a/drivers/usb/gadget/function/f_ptp.c b/drivers/usb/gadget/function/f_ptp.c
new file mode 100644
index 0000000..da3e4d5
--- /dev/null
+++ b/drivers/usb/gadget/function/f_ptp.c
@@ -0,0 +1,38 @@
+/*
+ * Gadget Function Driver for PTP
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Badhri Jagan Sridharan <badhri@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#include "f_mtp.h"
+
+static struct usb_function_instance *ptp_alloc_inst(void)
+{
+	return alloc_inst_mtp_ptp(false);
+}
+
+static struct usb_function *ptp_alloc(struct usb_function_instance *fi)
+{
+	return function_alloc_mtp_ptp(fi, false);
+}
+
+DECLARE_USB_FUNCTION_INIT(ptp, ptp_alloc_inst, ptp_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Badhri Jagan Sridharan");
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 8a8998b..cbb0959 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -2,7 +2,7 @@
 /*
  * f_qdss.c -- QDSS function Driver
  *
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/init.h>
@@ -415,11 +415,13 @@
 	qdss_data_intf_desc.bInterfaceNumber = iface;
 	qdss->data_iface_id = iface;
 
-	id = usb_string_id(c->cdev);
-	if (id < 0)
-		return id;
-	qdss_string_defs[QDSS_DATA_IDX].id = id;
-	qdss_data_intf_desc.iInterface = id;
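+	/* allocate the string ID only once; it is reused across rebinds */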
+	if (!qdss_string_defs[QDSS_DATA_IDX].id) {
+		id = usb_string_id(c->cdev);
+		if (id < 0)
+			return id;
+		qdss_string_defs[QDSS_DATA_IDX].id = id;
+		qdss_data_intf_desc.iInterface = id;
+	}
 
 	if (qdss->debug_inface_enabled) {
 		/* Allocate ctrl I/F */
@@ -430,11 +432,14 @@
 		}
 		qdss_ctrl_intf_desc.bInterfaceNumber = iface;
 		qdss->ctrl_iface_id = iface;
-		id = usb_string_id(c->cdev);
-		if (id < 0)
-			return id;
-		qdss_string_defs[QDSS_CTRL_IDX].id = id;
-		qdss_ctrl_intf_desc.iInterface = id;
+
+		if (!qdss_string_defs[QDSS_CTRL_IDX].id) {
+			id = usb_string_id(c->cdev);
+			if (id < 0)
+				return id;
+			qdss_string_defs[QDSS_CTRL_IDX].id = id;
+			qdss_ctrl_intf_desc.iInterface = id;
+		}
 	}
 
 	/* for non-accelerated path keep tx fifo size 1k */
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 9cdef10..ed68a48 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -838,7 +838,7 @@
 
 	ss = kzalloc(sizeof(*ss), GFP_KERNEL);
 	if (!ss)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	ss_opts =  container_of(fi, struct f_ss_opts, func_inst);
 
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index b0cf25c..959f666 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -32,6 +32,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		int result;						\
 									\
+		if (!opts->bound) {					\
+			pr_err("Gadget function not bound yet.\n");	\
+			return -ENODEV;					\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
 		mutex_unlock(&opts->lock);				\
@@ -45,6 +50,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		int ret;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function is not bound yet\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		if (opts->refcnt) {					\
 			mutex_unlock(&opts->lock);			\
@@ -67,6 +77,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		int result;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function is not bound yet\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
 		mutex_unlock(&opts->lock);				\
@@ -80,6 +95,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		int ret;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function is not bound yet\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		if (opts->refcnt) {					\
 			mutex_unlock(&opts->lock);			\
@@ -102,6 +122,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		unsigned qmult;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function is not bound yet\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		qmult = gether_get_qmult(opts->net);			\
 		mutex_unlock(&opts->lock);				\
@@ -115,6 +140,11 @@
 		u8 val;							\
 		int ret;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function is not bound yet\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		if (opts->refcnt) {					\
 			ret = -EBUSY;					\
@@ -141,6 +171,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		int ret;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function is not bound yet\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		ret = gether_get_ifname(opts->net, page, PAGE_SIZE);	\
 		mutex_unlock(&opts->lock);				\
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index c3aba4d..0856ca3 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -18,6 +18,7 @@
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
 #include <linux/refcount.h>
+#include <linux/ipc_logging.h>
 
 #ifdef VERBOSE_DEBUG
 #ifndef pr_vdebug
@@ -285,6 +286,8 @@
 	 * destroyed by ffs_epfiles_destroy().
 	 */
 	struct ffs_epfile		*epfiles;
+
+	void				*ipc_log;
 };
 
 
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index 67324f9..785bda0 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -30,4 +30,8 @@
 	int				refcnt;
 };
 
+extern struct device *create_function_device(char *name);
+int ncm_ctrlrequest(struct usb_composite_dev *cdev,
+		const struct usb_ctrlrequest *ctrl);
+
 #endif /* U_NCM_H */
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 660878a..b77f312 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2083,7 +2083,7 @@
 #if defined(PLX_PCI_RDK2)
 	/* see if PCI int for us by checking irqstat */
 	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
-	if (!intcsr & (1 << NET2272_PCI_IRQ)) {
+	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
 		spin_unlock(&dev->lock);
 		return IRQ_NONE;
 	}
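
Note: the net2272 change above fixes a C operator-precedence bug: `!` binds tighter than `&`, so `!intcsr & (1 << NET2272_PCI_IRQ)` negates the whole register first and only then applies the mask, meaning the "not our IRQ" exit is never taken once any status bit is set. A minimal standalone illustration (made-up values, not driver code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int intcsr = 0x04;   /* some other status bit set, IRQ bit (bit 3) clear */
        unsigned int mask = 1u << 3;

        /* Buggy form: !intcsr evaluates to 0 for any non-zero register,
         * so the masked result is always 0 and the bit test is useless. */
        printf("!intcsr & mask   = %u\n", !intcsr & mask);   /* prints 0 */

        /* Fixed form: mask first, then negate -- correctly reports bit clear. */
        printf("!(intcsr & mask) = %u\n", !(intcsr & mask)); /* prints 1 */
        return 0;
    }
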
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 67d8a50..fea02c7 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -358,6 +358,7 @@
 	bool extcon_host;		/* check id and set EXTCON_USB_HOST */
 	bool extcon_usb;		/* check vbus and set EXTCON_USB */
 	bool forced_b_device;
+	bool start_to_connect;
 };
 
 #define gadget_to_renesas_usb3(_gadget)	\
@@ -476,7 +477,8 @@
 static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
 {
 	usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
-	usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
+	if (!usb3->workaround_for_vbus)
+		usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
 }
 
 static bool usb3_wakeup_usb2_phy(struct renesas_usb3 *usb3)
@@ -700,8 +702,7 @@
 	usb3_set_mode_by_role_sw(usb3, host);
 	usb3_vbus_out(usb3, a_dev);
 	/* for A-Peripheral or forced B-device mode */
-	if ((!host && a_dev) ||
-	    (usb3->workaround_for_vbus && usb3->forced_b_device))
+	if ((!host && a_dev) || usb3->start_to_connect)
 		usb3_connect(usb3);
 	spin_unlock_irqrestore(&usb3->lock, flags);
 }
@@ -2432,7 +2433,11 @@
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
 
-	if (!strncmp(buf, "1", 1))
+	usb3->start_to_connect = false;
+	if (usb3->workaround_for_vbus && usb3->forced_b_device &&
+	    !strncmp(buf, "2", 1))
+		usb3->start_to_connect = true;
+	else if (!strncmp(buf, "1", 1))
 		usb3->forced_b_device = true;
 	else
 		usb3->forced_b_device = false;
@@ -2440,7 +2445,7 @@
 	if (usb3->workaround_for_vbus)
 		usb3_disconnect(usb3);
 
-	/* Let this driver call usb3_connect() anyway */
+	/* Let this driver call usb3_connect() if needed */
 	usb3_check_id(usb3);
 
 	return count;
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 984892d..42668ae 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1979,6 +1979,8 @@
 
 static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
 				      struct usb_host_endpoint *hep)
+__acquires(r8a66597->lock)
+__releases(r8a66597->lock)
 {
 	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
 	struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
@@ -1991,13 +1993,14 @@
 		return;
 	pipenum = pipe->info.pipenum;
 
+	spin_lock_irqsave(&r8a66597->lock, flags);
 	if (pipenum == 0) {
 		kfree(hep->hcpriv);
 		hep->hcpriv = NULL;
+		spin_unlock_irqrestore(&r8a66597->lock, flags);
 		return;
 	}
 
-	spin_lock_irqsave(&r8a66597->lock, flags);
 	pipe_stop(r8a66597, pipe);
 	pipe_irq_disable(r8a66597, pipenum);
 	disable_irq_empty(r8a66597, pipenum);
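
Note: the `__acquires()`/`__releases()` lines added to r8a66597_endpoint_disable are annotations for the sparse static checker, not runtime code; the pair declares the lock-context change so sparse does not warn about r8a66597->lock being taken and dropped on different paths inside the function. A simplified sketch of how the kernel defines them (lightly trimmed from include/linux/compiler_types.h):

    #ifdef __CHECKER__
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)  /* no effect in a normal build */
    # define __releases(x)
    #endif
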
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 61b5e41..53184b6 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1659,7 +1659,8 @@
 		portsc_buf[port_index] = 0;
 
 		/* Bail out if a USB3 port has a new device in link training */
-		if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+		if ((hcd->speed >= HCD_USB3) &&
+		    (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
 			bus_state->bus_suspended = 0;
 			spin_unlock_irqrestore(&xhci->lock, flags);
 			xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 366fd63d..9b8d910 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2471,7 +2471,7 @@
 	if (!*er)
 		return -ENOMEM;
 
-	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
+	ret = xhci_alloc_erst(xhci, *er, erst, flags);
 	if (ret)
 		return ret;
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 09bf6b4..1493d0f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -187,6 +187,7 @@
 		xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
 		xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3c556ae..3a0a47f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1869,6 +1869,8 @@
 	unsigned		sw_lpm_support:1;
 	/* support xHCI 1.0 spec USB2 hardware LPM */
 	unsigned		hw_lpm_support:1;
+	/* Broken Suspend flag for SNPS Suspend resume issue */
+	unsigned		broken_suspend:1;
 	/* cached usb2 extened protocol capabilites */
 	u32                     *ext_caps;
 	unsigned int            num_ext_caps;
@@ -1886,8 +1888,6 @@
 	void			*dbc;
 	/* platform-specific data -- must come last */
 	unsigned long		priv[0] __aligned(sizeof(s64));
-	/* Broken Suspend flag for SNPS Suspend resume issue */
-	u8			broken_suspend;
 };
 
 /* Platform specific overrides to generic XHCI hc_driver ops */
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index d045d84..48d10a6 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -578,8 +578,10 @@
 	if (mtu->is_u3_ip) {
 		/* disable LGO_U1/U2 by default */
 		mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL,
-				SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE |
 				SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE);
+		/* enable accept LGO_U1/U2 link command from host */
+		mtu3_setbits(mbase, U3D_LINK_POWER_CONTROL,
+				SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE);
 		/* device responses to u3_exit from host automatically */
 		mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
 		/* automatically build U2 link when U3 detect fail */
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 25216e7..3c464d8 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -336,9 +336,9 @@
 
 		lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
 		if (set)
-			lpc |= SW_U1_ACCEPT_ENABLE;
+			lpc |= SW_U1_REQUEST_ENABLE;
 		else
-			lpc &= ~SW_U1_ACCEPT_ENABLE;
+			lpc &= ~SW_U1_REQUEST_ENABLE;
 		mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
 
 		mtu->u1_enable = !!set;
@@ -351,9 +351,9 @@
 
 		lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
 		if (set)
-			lpc |= SW_U2_ACCEPT_ENABLE;
+			lpc |= SW_U2_REQUEST_ENABLE;
 		else
-			lpc &= ~SW_U2_ACCEPT_ENABLE;
+			lpc &= ~SW_U2_REQUEST_ENABLE;
 		mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
 
 		mtu->u2_enable = !!set;
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 23a0df7..403eb9791 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -181,9 +181,11 @@
 
 	musb_writel(reg_base, wrp->epintr_set, epmask);
 	musb_writel(reg_base, wrp->coreintr_set, coremask);
-	/* start polling for ID change in dual-role idle mode */
-	if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
-			musb->port_mode == MUSB_OTG)
+	/*
+	 * start polling for runtime PM active and idle,
+	 * and for ID change in dual-role idle mode.
+	 */
+	if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
 		dsps_mod_timer(glue, -1);
 }
 
@@ -227,8 +229,13 @@
 
 	switch (musb->xceiv->otg->state) {
 	case OTG_STATE_A_WAIT_VRISE:
-		dsps_mod_timer_optional(glue);
-		break;
+		if (musb->port_mode == MUSB_HOST) {
+			musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
+			dsps_mod_timer_optional(glue);
+			break;
+		}
+		/* fall through */
+
 	case OTG_STATE_A_WAIT_BCON:
 		/* keep VBUS on for host-only mode */
 		if (musb->port_mode == MUSB_HOST) {
@@ -249,6 +256,10 @@
 				musb->xceiv->otg->state = OTG_STATE_A_IDLE;
 				MUSB_HST_MODE(musb);
 			}
+
+			if (musb->port_mode == MUSB_PERIPHERAL)
+				skip_session = 1;
+
 			if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
 				musb_writeb(mregs, MUSB_DEVCTL,
 					    MUSB_DEVCTL_SESSION);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index eae8b1b..ffe462a 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -452,13 +452,10 @@
 	}
 
 	if (request) {
-		u8	is_dma = 0;
-		bool	short_packet = false;
 
 		trace_musb_req_tx(req);
 
 		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
-			is_dma = 1;
 			csr |= MUSB_TXCSR_P_WZC_BITS;
 			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
 				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
@@ -476,16 +473,8 @@
 		 */
 		if ((request->zero && request->length)
 			&& (request->length % musb_ep->packet_sz == 0)
-			&& (request->actual == request->length))
-				short_packet = true;
+			&& (request->actual == request->length)) {
 
-		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
-			(is_dma && (!dma->desired_mode ||
-				(request->actual &
-					(musb_ep->packet_sz - 1)))))
-				short_packet = true;
-
-		if (short_packet) {
 			/*
 			 * On DMA completion, FIFO may not be
 			 * available yet...
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a688f7f..5fc6825 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -346,12 +346,10 @@
 				channel->status = MUSB_DMA_STATUS_FREE;
 
 				/* completed */
-				if ((devctl & MUSB_DEVCTL_HM)
-					&& (musb_channel->transmit)
-					&& ((channel->desired_mode == 0)
-					    || (channel->actual_len &
-					    (musb_channel->max_packet_sz - 1)))
-				    ) {
+				if (musb_channel->transmit &&
+					(!channel->desired_mode ||
+					(channel->actual_len %
+					    musb_channel->max_packet_sz))) {
 					u8  epnum  = musb_channel->epnum;
 					int offset = musb->io.ep_offset(epnum,
 								    MUSB_TXCSR);
@@ -363,11 +361,14 @@
 					 */
 					musb_ep_select(mbase, epnum);
 					txcsr = musb_readw(mbase, offset);
-					txcsr &= ~(MUSB_TXCSR_DMAENAB
+					if (channel->desired_mode == 1) {
+						txcsr &= ~(MUSB_TXCSR_DMAENAB
 							| MUSB_TXCSR_AUTOSET);
-					musb_writew(mbase, offset, txcsr);
-					/* Send out the packet */
-					txcsr &= ~MUSB_TXCSR_DMAMODE;
+						musb_writew(mbase, offset, txcsr);
+						/* Send out the packet */
+						txcsr &= ~MUSB_TXCSR_DMAMODE;
+						txcsr |= MUSB_TXCSR_DMAENAB;
+					}
 					txcsr |=  MUSB_TXCSR_TXPKTRDY;
 					musb_writew(mbase, offset, txcsr);
 				}
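
Note: the musbhsdma change also swaps a power-of-two bitmask for a true modulo. `actual_len & (max_packet_sz - 1)` only equals `actual_len % max_packet_sz` when the packet size is a power of two, and USB endpoints can legitimately use other sizes (for example 1023-byte full-speed isochronous packets). A standalone comparison:

    #include <stdio.h>

    int main(void)
    {
        unsigned int len  = 2046;
        unsigned int bulk = 512;   /* power of two: mask == modulo */
        unsigned int iso  = 1023;  /* not a power of two: they diverge */

        printf("bulk: mask=%u mod=%u\n", len & (bulk - 1), len % bulk); /* 510 510 */
        printf("iso:  mask=%u mod=%u\n", len & (iso - 1), len % iso);   /* 1022 0 */
        return 0;
    }
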
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index b9f75960..30ab415 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -457,6 +457,7 @@
 	struct mutex		svid_handler_lock;
 	struct list_head	svid_handlers;
 	ktime_t			svdm_start_time;
+	bool			vdm_in_suspend;
 
 	struct list_head	instance;
 
@@ -659,15 +660,21 @@
 {
 	struct usbpd_svid_handler *handler;
 
-	mutex_lock(&pd->svid_handler_lock);
+	/* in_interrupt() == true when handling VDM RX during suspend */
+	if (!in_interrupt())
+		mutex_lock(&pd->svid_handler_lock);
+
 	list_for_each_entry(handler, &pd->svid_handlers, entry) {
 		if (svid == handler->svid) {
-			mutex_unlock(&pd->svid_handler_lock);
+			if (!in_interrupt())
+				mutex_unlock(&pd->svid_handler_lock);
 			return handler;
 		}
 	}
 
-	mutex_unlock(&pd->svid_handler_lock);
+	if (!in_interrupt())
+		mutex_unlock(&pd->svid_handler_lock);
+
 	return NULL;
 }
 
@@ -1074,6 +1081,8 @@
 	return rx_msg;	/* queue it for usbpd_sm */
 }
 
+static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg);
+
 static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
 		u8 *buf, size_t len)
 {
@@ -1146,6 +1155,13 @@
 			return;
 	}
 
+	if (pd->vdm_in_suspend && msg_type == MSG_VDM) {
+		usbpd_dbg(&pd->dev, "Skip wq and handle VDM directly\n");
+		handle_vdm_rx(pd, rx_msg);
+		kfree(rx_msg);
+		return;
+	}
+
 	spin_lock_irqsave(&pd->rx_lock, flags);
 	list_add_tail(&rx_msg->entry, &pd->rx_q);
 	spin_unlock_irqrestore(&pd->rx_lock, flags);
@@ -1324,6 +1340,7 @@
 
 	/* VDM will get sent in PE_SRC/SNK_READY state handling */
 	pd->vdm_tx = vdm_tx;
+	pd->vdm_in_suspend = false;
 
 	/* slight delay before queuing to prioritize handling of incoming VDM */
 	if (pd->in_explicit_contract)
@@ -1346,6 +1363,14 @@
 }
 EXPORT_SYMBOL(usbpd_send_svdm);
 
+void usbpd_vdm_in_suspend(struct usbpd *pd, bool in_suspend)
+{
+	usbpd_dbg(&pd->dev, "VDM in_suspend:%d\n", in_suspend);
+
+	pd->vdm_in_suspend = in_suspend;
+}
+EXPORT_SYMBOL(usbpd_vdm_in_suspend);
+
 static void handle_vdm_resp_ack(struct usbpd *pd, u32 *vdos, u8 num_vdos,
 	u16 vdm_hdr)
 {
@@ -1529,6 +1554,10 @@
 		return;
 	}
 
+	if (cmd_type != SVDM_CMD_TYPE_INITIATOR &&
+			pd->current_state != PE_SRC_STARTUP_WAIT_FOR_VDM_RESP)
+		start_src_ams(pd, false);
+
 	if (handler && handler->svdm_received) {
 		handler->svdm_received(handler, cmd, cmd_type, vdos, num_vdos);
 
@@ -1685,6 +1714,7 @@
 	kfree(pd->vdm_tx);
 	pd->vdm_tx = NULL;
 	pd->ss_lane_svid = 0x0;
+	pd->vdm_in_suspend = false;
 }
 
 static void handle_get_src_cap_extended(struct usbpd *pd)
@@ -2300,7 +2330,7 @@
 
 	pd->in_explicit_contract = true;
 
-	if (pd->vdm_tx)
+	if (pd->vdm_tx && !pd->sm_queued)
 		kick_sm(pd, 0);
 	else if (pd->current_dr == DR_DFP && pd->vdm_state == VDM_NONE)
 		usbpd_send_svdm(pd, USBPD_SID,
@@ -2361,8 +2391,6 @@
 		}
 
 		vconn_swap(pd);
-		if (!pd->vdm_tx)
-			start_src_ams(pd, false);
 	} else if (IS_DATA(rx_msg, MSG_VDM)) {
 		handle_vdm_rx(pd, rx_msg);
 	} else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP_EXTENDED)) {
@@ -3746,12 +3774,6 @@
 		return -EAGAIN;
 	}
 
-	if (pd->current_state == PE_SNK_READY &&
-			!is_sink_tx_ok(pd)) {
-		usbpd_err(&pd->dev, "Rp indicates SinkTxNG\n");
-		return -EAGAIN;
-	}
-
 	mutex_lock(&pd->swap_lock);
 	reinit_completion(&pd->is_ready);
 	if (dr_swap)
@@ -4115,7 +4137,7 @@
 	mutex_lock(&pd->swap_lock);
 
 	/* Only allowed if we are already in explicit sink contract */
-	if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+	if (pd->current_state != PE_SNK_READY) {
 		usbpd_err(&pd->dev, "Cannot select new PDO yet\n");
 		ret = -EBUSY;
 		goto out;
@@ -4161,7 +4183,7 @@
 	if (pd->selected_pdo != pd->requested_pdo ||
 			pd->current_voltage != pd->requested_voltage) {
 		usbpd_err(&pd->dev, "request rejected\n");
-		ret = -EINVAL;
+		ret = -ECONNREFUSED;
 	}
 
 out:
@@ -4267,7 +4289,7 @@
 	int ret = 0;
 
 	/* Only allowed if we are already in explicit sink contract */
-	if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+	if (pd->current_state != PE_SNK_READY) {
 		usbpd_err(&pd->dev, "Cannot send msg\n");
 		ret = -EBUSY;
 		goto out;
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 1a0cf5d..f87c991 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,7 @@
 
 config FSL_USB2_OTG
 	bool "Freescale USB OTG Transceiver Driver"
-	depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
+	depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
 	depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
 	select USB_PHY
 	help
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 27bdb72..f5f0568 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -61,9 +61,6 @@
 	if (ret)
 		return ret;
 
-	ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
-	if (ret)
-		return ret;
 	am_phy->usb_phy_gen.phy.init = am335x_init;
 	am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
 
@@ -82,7 +79,7 @@
 	device_set_wakeup_enable(dev, false);
 	phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
 
-	return 0;
+	return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
 }
 
 static int am335x_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c
index 2db22943..994a884 100644
--- a/drivers/usb/phy/phy-msm-snps-hs.c
+++ b/drivers/usb/phy/phy-msm-snps-hs.c
@@ -26,11 +26,9 @@
 #define SLEEPM					BIT(0)
 
 #define USB2_PHY_USB_PHY_UTMI_CTRL5		(0x50)
-#define ATERESET				BIT(0)
 #define POR					BIT(1)
 
 #define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0	(0x54)
-#define VATESTENB_MASK				(0x3 << 0)
 #define RETENABLEN				BIT(3)
 #define FSEL_MASK				(0x7 << 4)
 #define FSEL_DEFAULT				(0x3 << 4)
@@ -49,13 +47,6 @@
 #define USB2_SUSPEND_N				BIT(2)
 #define USB2_SUSPEND_N_SEL			BIT(3)
 
-#define USB2_PHY_USB_PHY_HS_PHY_TEST0		(0x80)
-#define TESTDATAIN_MASK				(0xff << 0)
-
-#define USB2_PHY_USB_PHY_HS_PHY_TEST1		(0x84)
-#define TESTDATAOUTSEL				BIT(4)
-#define TOGGLE_2WR				BIT(6)
-
 #define USB2_PHY_USB_PHY_CFG0			(0x94)
 #define UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN	BIT(0)
 #define UTMI_PHY_CMN_CTRL_OVERRIDE_EN		BIT(1)
@@ -372,7 +363,8 @@
 	msm_hsphy_reset(phy);
 
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0,
-	UTMI_PHY_CMN_CTRL_OVERRIDE_EN, UTMI_PHY_CMN_CTRL_OVERRIDE_EN);
+				UTMI_PHY_CMN_CTRL_OVERRIDE_EN,
+				UTMI_PHY_CMN_CTRL_OVERRIDE_EN);
 
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL5,
 				POR, POR);
@@ -412,26 +404,9 @@
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2,
 				VREGBYPASS, VREGBYPASS);
 
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL5,
-				ATERESET, ATERESET);
-
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST1,
-				TESTDATAOUTSEL, TESTDATAOUTSEL);
-
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST1,
-				TOGGLE_2WR, TOGGLE_2WR);
-
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0,
-				VATESTENB_MASK, 0);
-
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST0,
-				TESTDATAIN_MASK, 0);
-
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2,
-				USB2_SUSPEND_N_SEL, USB2_SUSPEND_N_SEL);
-
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2,
-				USB2_SUSPEND_N, USB2_SUSPEND_N);
+				USB2_SUSPEND_N_SEL | USB2_SUSPEND_N,
+				USB2_SUSPEND_N_SEL | USB2_SUSPEND_N);
 
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL0,
 				SLEEPM, SLEEPM);
@@ -443,7 +418,7 @@
 				USB2_SUSPEND_N_SEL, 0);
 
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0,
-	UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0);
+				UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0);
 
 	return 0;
 }
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 4310df4..b079258 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -458,6 +458,10 @@
  */
 static const struct of_device_id usbhs_of_match[] = {
 	{
+		.compatible = "renesas,usbhs-r8a774c0",
+		.data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
+	},
+	{
 		.compatible = "renesas,usbhs-r8a7790",
 		.data = (void *)USBHS_TYPE_RCAR_GEN2,
 	},
diff --git a/drivers/usb/roles/Kconfig b/drivers/usb/roles/Kconfig
index f5a5e6f..e4194ac 100644
--- a/drivers/usb/roles/Kconfig
+++ b/drivers/usb/roles/Kconfig
@@ -1,3 +1,16 @@
+config USB_ROLE_SWITCH
+	tristate "USB Role Switch Support"
+	help
+	  USB Role Switch is a device that can select the USB role - host or
+	  device - for a USB port (connector). In most cases a dual-role
+	  capable USB controller will also represent the switch, but on some
+	  platforms a multiplexer/demultiplexer switch is used to route the
+	  data lines on the connector between separate host and device controllers.
+
+	  Say Y here if your USB connectors support both device and host roles.
+	  To compile the driver as a module, choose M here: the module will be
+	  called roles.ko.
+
 if USB_ROLE_SWITCH
 
 config USB_ROLES_INTEL_XHCI
diff --git a/drivers/usb/roles/Makefile b/drivers/usb/roles/Makefile
index e44b179..c028732 100644
--- a/drivers/usb/roles/Makefile
+++ b/drivers/usb/roles/Makefile
@@ -1 +1,3 @@
-obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
+obj-$(CONFIG_USB_ROLE_SWITCH)		+= roles.o
+roles-y					:= class.o
+obj-$(CONFIG_USB_ROLES_INTEL_XHCI)	+= intel-xhci-usb-role-switch.o
diff --git a/drivers/usb/common/roles.c b/drivers/usb/roles/class.c
similarity index 100%
rename from drivers/usb/common/roles.c
rename to drivers/usb/roles/class.c
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c0777a3..4c66edf 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,7 @@
 	{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
 	{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
 	{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+	{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
 	{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
 	{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
 	{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
@@ -1353,8 +1354,13 @@
 	if (priv->partnum == CP210X_PARTNUM_CP2105)
 		req_type = REQTYPE_INTERFACE_TO_HOST;
 
+	result = usb_autopm_get_interface(serial->interface);
+	if (result)
+		return result;
+
 	result = cp210x_read_vendor_block(serial, req_type,
 					  CP210X_READ_LATCH, &buf, sizeof(buf));
+	usb_autopm_put_interface(serial->interface);
 	if (result < 0)
 		return result;
 
@@ -1375,6 +1381,10 @@
 
 	buf.mask = BIT(gpio);
 
+	result = usb_autopm_get_interface(serial->interface);
+	if (result)
+		goto out;
+
 	if (priv->partnum == CP210X_PARTNUM_CP2105) {
 		result = cp210x_write_vendor_block(serial,
 						   REQTYPE_HOST_TO_INTERFACE,
@@ -1392,6 +1402,8 @@
 					 NULL, 0, USB_CTRL_SET_TIMEOUT);
 	}
 
+	usb_autopm_put_interface(serial->interface);
+out:
 	if (result < 0) {
 		dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n",
 				result);
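
Note: the cp210x GPIO changes bracket the vendor requests with usb_autopm_get_interface()/usb_autopm_put_interface() so the device is resumed before any I/O and the runtime-PM usage count stays balanced on every path. The shape of the pattern, as a hedged sketch (issue_vendor_request() is a hypothetical placeholder for the actual transfer):

    /* Sketch only: resume the interface, do the I/O, always balance the get.
     * A failed get means the device was not resumed, so no put is needed. */
    static int do_vendor_io(struct usb_serial *serial)
    {
        int ret;

        ret = usb_autopm_get_interface(serial->interface);
        if (ret)
            return ret;

        ret = issue_vendor_request(serial);   /* hypothetical helper */

        usb_autopm_put_interface(serial->interface);
        return ret;
    }
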
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b5cef32..1d8077e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1015,6 +1015,8 @@
 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
 	{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
+	/* EZPrototypes devices */
+	{ USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
 	{ }					/* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 975d026..b863bed 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1309,6 +1309,12 @@
 #define IONICS_PLUGCOMPUTER_PID		0x0102
 
 /*
+ * EZPrototypes (PID reseller)
+ */
+#define EZPROTOTYPES_VID		0x1c40
+#define HJELMSLUND_USB485_ISO_PID	0x0477
+
+/*
  * Dresden Elektronik Sensor Terminal Board
  */
 #define DE_VID			0x1cf1 /* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e72ad9f..faf833e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1148,6 +1148,8 @@
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
 	  .driver_info = NCTRL(0) | RSVD(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff),	/* Telit ME910 (ECM) */
+	  .driver_info = NCTRL(0) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
@@ -1164,6 +1166,10 @@
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1900),				/* Telit LN940 (QMI) */
+	  .driver_info = NCTRL(0) | RSVD(1) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),	/* Telit LN940 (MBIM) */
+	  .driver_info = NCTRL(0) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
 	  .driver_info = RSVD(1) },
@@ -1328,6 +1334,7 @@
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) },	/* GosunCn ZTE WeLink ME3630 (MBIM mode) */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
@@ -1531,6 +1538,7 @@
 	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
 	  .driver_info = RSVD(2) },
+	{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },	/* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1758,6 +1766,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
 	  .driver_info = RSVD(5) | RSVD(6) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) },	/* Simcom SIM7500/SIM7600 MBIM mode */
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
 	  .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1940,7 +1949,18 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) },	/* HP lt2523 (Novatel E371) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) },	/* HP lt4132 (Huawei ME906s-158) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 */
+	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+	{ USB_DEVICE(0x2cb7, 0x0104),						/* Fibocom NL678 series */
+	  .driver_info = RSVD(4) | RSVD(5) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),			/* Fibocom NL678 series */
+	  .driver_info = RSVD(6) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index e41f725..5a6df6e 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -46,6 +46,7 @@
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
+	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
@@ -91,9 +92,14 @@
 	{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
 	{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
 	{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
 	{ USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
 	{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 26965cc..559941c 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -8,6 +8,7 @@
 
 #define PL2303_VENDOR_ID	0x067b
 #define PL2303_PRODUCT_ID	0x2303
+#define PL2303_PRODUCT_ID_TB		0x2304
 #define PL2303_PRODUCT_ID_RSAQ2		0x04bb
 #define PL2303_PRODUCT_ID_DCU11		0x1234
 #define PL2303_PRODUCT_ID_PHAROS	0xaaa0
@@ -20,6 +21,7 @@
 #define PL2303_PRODUCT_ID_MOTOROLA	0x0307
 #define PL2303_PRODUCT_ID_ZTEK		0xe1f1
 
+
 #define ATEN_VENDOR_ID		0x0557
 #define ATEN_VENDOR_ID2		0x0547
 #define ATEN_PRODUCT_ID		0x2008
@@ -119,10 +121,15 @@
 
 /* Hewlett-Packard POS Pole Displays */
 #define HP_VENDOR_ID		0x03f0
+#define HP_LM920_PRODUCT_ID	0x026b
+#define HP_TD620_PRODUCT_ID	0x0956
 #define HP_LD960_PRODUCT_ID	0x0b39
 #define HP_LCM220_PRODUCT_ID	0x3139
 #define HP_LCM960_PRODUCT_ID	0x3239
 #define HP_LD220_PRODUCT_ID	0x3524
+#define HP_LD220TA_PRODUCT_ID	0x4349
+#define HP_LD960TA_PRODUCT_ID	0x4439
+#define HP_LM940_PRODUCT_ID	0x5039
 
 /* Cressi Edy (diving computer) PC interface */
 #define CRESSI_VENDOR_ID	0x04b8
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 4d02735..edbbb13 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -85,7 +85,8 @@
 /* Motorola Tetra driver */
 #define MOTOROLA_TETRA_IDS()			\
 	{ USB_DEVICE(0x0cad, 0x9011) },	/* Motorola Solutions TETRA PEI */ \
-	{ USB_DEVICE(0x0cad, 0x9012) }	/* MTP6550 */
+	{ USB_DEVICE(0x0cad, 0x9012) },	/* MTP6550 */ \
+	{ USB_DEVICE(0x0cad, 0x9016) }	/* TPG2200 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
 /* Novatel Wireless GPS driver */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e227bb5..101ebac 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -235,8 +235,12 @@
 		if (!(us->fflags & US_FL_NEEDS_CAP16))
 			sdev->try_rc_10_first = 1;
 
-		/* assume SPC3 or latter devices support sense size > 18 */
-		if (sdev->scsi_level > SCSI_SPC_2)
+		/*
+		 * assume SPC3 or later devices support sense size > 18
+		 * unless US_FL_BAD_SENSE quirk is specified.
+		 */
+		if (sdev->scsi_level > SCSI_SPC_2 &&
+		    !(us->fflags & US_FL_BAD_SENSE))
 			us->fflags |= US_FL_SANE_SENSE;
 
 		/*
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f7f83b21..ea0d27a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1266,6 +1266,18 @@
 		US_FL_FIX_CAPACITY ),
 
 /*
+ * Reported by Icenowy Zheng <icenowy@aosc.io>
+ * The SMI SM3350 USB-UFS bridge controller enters a bad state in which
+ * it does not process read/write commands if a long sense is requested,
+ * so force it to use 18-byte sense.
+ */
+UNUSUAL_DEV(  0x090c, 0x3350, 0x0000, 0xffff,
+		"SMI",
+		"SM3350 UFS-to-USB-Mass-Storage bridge",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BAD_SENSE ),
+
+/*
  * Reported by Paul Hartman <paul.hartman+linux@gmail.com>
  * This card reader returns "Illegal Request, Logical Block Address
  * Out of Range" for the first READ(10) after a new card is inserted.
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index c74cc9c..3457c1f 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -317,6 +317,9 @@
 	/* Deadline in jiffies to exit src_try_wait state */
 	unsigned long max_wait;
 
+	/* port belongs to a self powered device */
+	bool self_powered;
+
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dentry;
 	struct mutex logbuffer_lock;	/* log buffer access lock */
@@ -3257,7 +3260,8 @@
 	case SRC_HARD_RESET_VBUS_OFF:
 		tcpm_set_vconn(port, true);
 		tcpm_set_vbus(port, false);
-		tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
+		tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
+			       TYPEC_HOST);
 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
 		break;
 	case SRC_HARD_RESET_VBUS_ON:
@@ -3270,7 +3274,8 @@
 		memset(&port->pps_data, 0, sizeof(port->pps_data));
 		tcpm_set_vconn(port, false);
 		tcpm_set_charge(port, false);
-		tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
+		tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
+			       TYPEC_DEVICE);
 		/*
 		 * VBUS may or may not toggle, depending on the adapter.
 		 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
@@ -4415,6 +4420,8 @@
 		return -EINVAL;
 	port->operating_snk_mw = mw / 1000;
 
+	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
+
 	return 0;
 }
 
@@ -4723,6 +4730,7 @@
 	port->typec_caps.prefer_role = tcfg->default_role;
 	port->typec_caps.type = tcfg->type;
 	port->typec_caps.data = tcfg->data;
+	port->self_powered = port->tcpc->config->self_powered;
 
 	return 0;
 }
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index d9fd318..64cbc2d 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -878,7 +878,7 @@
 		return -EINVAL;
 	if (!unmap->size || unmap->size & mask)
 		return -EINVAL;
-	if (unmap->iova + unmap->size < unmap->iova ||
+	if (unmap->iova + unmap->size - 1 < unmap->iova ||
 	    unmap->size > SIZE_MAX)
 		return -EINVAL;
 
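
Note: the vfio change tightens the wrap-around test. With the old form, an unmap whose range ends exactly at the top of the address space computes `iova + size == 0`, which compares as less than `iova` and gets rejected even though the range is legal; `iova + size - 1` only wraps when the range genuinely overflows (`size == 0` is rejected separately, just above). Standalone demonstration:

    #include <stdio.h>
    #include <stdint.h>

    static int old_check(uint64_t iova, uint64_t size)
    {
        return iova + size < iova;       /* also rejects the legal top-of-space case */
    }

    static int new_check(uint64_t iova, uint64_t size)
    {
        return iova + size - 1 < iova;   /* rejects only true overflow (size > 0) */
    }

    int main(void)
    {
        uint64_t iova = UINT64_MAX - 0xfff;   /* last 4 KiB page of the space */

        printf("legal    old=%d new=%d\n",    /* old=1 (wrongly rejected), new=0 */
               old_check(iova, 0x1000), new_check(iova, 0x1000));
        printf("overflow old=%d new=%d\n",    /* both correctly reject: 1 1 */
               old_check(iova, 0x2000), new_check(iova, 0x2000));
        return 0;
    }
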
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 4e656f8..39155d7 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1024,7 +1024,8 @@
 		if (nvq->done_idx > VHOST_NET_BATCH)
 			vhost_net_signal_used(nvq);
 		if (unlikely(vq_log))
-			vhost_log_write(vq, vq_log, log, vhost_len);
+			vhost_log_write(vq, vq_log, log, vhost_len,
+					vq->iov, in);
 		total_len += vhost_len;
 		if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
 			vhost_poll_queue(&vq->poll);
@@ -1113,7 +1114,8 @@
 		n->vqs[i].rx_ring = NULL;
 		vhost_net_buf_init(&n->vqs[i].rxq);
 	}
-	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
+	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+		       UIO_MAXIOV + VHOST_NET_BATCH);
 
 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e7e3ae1..0cfa925 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1398,7 +1398,7 @@
 		vqs[i] = &vs->vqs[i].vq;
 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
 	}
-	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
 
 	vhost_scsi_init_inflight(vs, NULL);
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index eb95daa..b214a72 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -390,9 +390,9 @@
 		vq->indirect = kmalloc_array(UIO_MAXIOV,
 					     sizeof(*vq->indirect),
 					     GFP_KERNEL);
-		vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
+		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
 					GFP_KERNEL);
-		vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
+		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
 					  GFP_KERNEL);
 		if (!vq->indirect || !vq->log || !vq->heads)
 			goto err_nomem;
@@ -414,7 +414,7 @@
 }
 
 void vhost_dev_init(struct vhost_dev *dev,
-		    struct vhost_virtqueue **vqs, int nvqs)
+		    struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
 {
 	struct vhost_virtqueue *vq;
 	int i;
@@ -427,6 +427,7 @@
 	dev->iotlb = NULL;
 	dev->mm = NULL;
 	dev->worker = NULL;
+	dev->iov_limit = iov_limit;
 	init_llist_head(&dev->work_list);
 	init_waitqueue_head(&dev->wait);
 	INIT_LIST_HEAD(&dev->read_list);
@@ -1034,8 +1035,10 @@
 	int type, ret;
 
 	ret = copy_from_iter(&type, sizeof(type), from);
-	if (ret != sizeof(type))
+	if (ret != sizeof(type)) {
+		ret = -EINVAL;
 		goto done;
+	}
 
 	switch (type) {
 	case VHOST_IOTLB_MSG:
@@ -1054,8 +1057,10 @@
 
 	iov_iter_advance(from, offset);
 	ret = copy_from_iter(&msg, sizeof(msg), from);
-	if (ret != sizeof(msg))
+	if (ret != sizeof(msg)) {
+		ret = -EINVAL;
 		goto done;
+	}
 	if (vhost_process_iotlb_msg(dev, &msg)) {
 		ret = -EFAULT;
 		goto done;
@@ -1733,13 +1738,87 @@
 	return r;
 }
 
+static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
+{
+	struct vhost_umem *umem = vq->umem;
+	struct vhost_umem_node *u;
+	u64 start, end, l, min;
+	int r;
+	bool hit = false;
+
+	while (len) {
+		min = len;
+		/* More than one GPA can be mapped into a single HVA, so
+		 * iterate over all possible umems here to be safe.
+		 */
+		list_for_each_entry(u, &umem->umem_list, link) {
+			if (u->userspace_addr > hva - 1 + len ||
+			    u->userspace_addr - 1 + u->size < hva)
+				continue;
+			start = max(u->userspace_addr, hva);
+			end = min(u->userspace_addr - 1 + u->size,
+				  hva - 1 + len);
+			l = end - start + 1;
+			r = log_write(vq->log_base,
+				      u->start + start - u->userspace_addr,
+				      l);
+			if (r < 0)
+				return r;
+			hit = true;
+			min = min(l, min);
+		}
+
+		if (!hit)
+			return -EFAULT;
+
+		len -= min;
+		hva += min;
+	}
+
+	return 0;
+}
+
+static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
+{
+	struct iovec iov[64];
+	int i, ret;
+
+	if (!vq->iotlb)
+		return log_write(vq->log_base, vq->log_addr + used_offset, len);
+
+	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
+			     len, iov, 64, VHOST_ACCESS_WO);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < ret; i++) {
+		ret = log_write_hva(vq,	(uintptr_t)iov[i].iov_base,
+				    iov[i].iov_len);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
-		    unsigned int log_num, u64 len)
+		    unsigned int log_num, u64 len, struct iovec *iov, int count)
 {
 	int i, r;
 
 	/* Make sure data written is seen before log. */
 	smp_wmb();
+
+	if (vq->iotlb) {
+		for (i = 0; i < count; i++) {
+			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+					  iov[i].iov_len);
+			if (r < 0)
+				return r;
+		}
+		return 0;
+	}
+
 	for (i = 0; i < log_num; ++i) {
 		u64 l = min(log[i].len, len);
 		r = log_write(vq->log_base, log[i].addr, l);
@@ -1769,9 +1848,8 @@
 		smp_wmb();
 		/* Log used flag write. */
 		used = &vq->used->flags;
-		log_write(vq->log_base, vq->log_addr +
-			  (used - (void __user *)vq->used),
-			  sizeof vq->used->flags);
+		log_used(vq, (used - (void __user *)vq->used),
+			 sizeof vq->used->flags);
 		if (vq->log_ctx)
 			eventfd_signal(vq->log_ctx, 1);
 	}
@@ -1789,9 +1867,8 @@
 		smp_wmb();
 		/* Log avail event write */
 		used = vhost_avail_event(vq);
-		log_write(vq->log_base, vq->log_addr +
-			  (used - (void __user *)vq->used),
-			  sizeof *vhost_avail_event(vq));
+		log_used(vq, (used - (void __user *)vq->used),
+			 sizeof *vhost_avail_event(vq));
 		if (vq->log_ctx)
 			eventfd_signal(vq->log_ctx, 1);
 	}
@@ -2191,10 +2268,8 @@
 		/* Make sure data is seen before log. */
 		smp_wmb();
 		/* Log used ring entry write. */
-		log_write(vq->log_base,
-			  vq->log_addr +
-			   ((void __user *)used - (void __user *)vq->used),
-			  count * sizeof *used);
+		log_used(vq, ((void __user *)used - (void __user *)vq->used),
+			 count * sizeof *used);
 	}
 	old = vq->last_used_idx;
 	new = (vq->last_used_idx += count);
@@ -2233,10 +2308,11 @@
 		return -EFAULT;
 	}
 	if (unlikely(vq->log_used)) {
+		/* Make sure used idx is seen before log. */
+		smp_wmb();
 		/* Log used index update. */
-		log_write(vq->log_base,
-			  vq->log_addr + offsetof(struct vring_used, idx),
-			  sizeof vq->used->idx);
+		log_used(vq, offsetof(struct vring_used, idx),
+			 sizeof vq->used->idx);
 		if (vq->log_ctx)
 			eventfd_signal(vq->log_ctx, 1);
 	}
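
Note: log_write_hva() above intersects the inclusive ranges [hva, hva - 1 + len] and [userspace_addr, userspace_addr - 1 + size]; comparing last-byte addresses instead of one-past-the-end avoids the same end-of-address-space overflow discussed for the vfio change. The core interval logic, extracted as a standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Inclusive-endpoint overlap, as in log_write_hva(): returns 1 and the
     * intersection when [a, a-1+alen] and [b, b-1+blen] overlap. */
    static int intersect(uint64_t a, uint64_t alen, uint64_t b, uint64_t blen,
                         uint64_t *start, uint64_t *len)
    {
        uint64_t a_last = a - 1 + alen, b_last = b - 1 + blen;

        if (b > a_last || b_last < a)
            return 0;                               /* disjoint */
        *start = a > b ? a : b;
        *len = (a_last < b_last ? a_last : b_last) - *start + 1;
        return 1;
    }

    int main(void)
    {
        uint64_t s, l;

        if (intersect(0x1000, 0x2000, 0x2000, 0x4000, &s, &l))
            printf("overlap at 0x%llx, len 0x%llx\n",  /* 0x2000, 0x1000 */
                   (unsigned long long)s, (unsigned long long)l);
        return 0;
    }
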
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 466ef75..9490e7d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -170,9 +170,11 @@
 	struct list_head read_list;
 	struct list_head pending_list;
 	wait_queue_head_t wait;
+	int iov_limit;
 };
 
-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+		    int nvqs, int iov_limit);
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
@@ -205,7 +207,8 @@
 bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
 
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
-		    unsigned int log_num, u64 len);
+		    unsigned int log_num, u64 len,
+		    struct iovec *iov, int count);
 int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
 
 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 98ed5be..e440f87 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -531,7 +531,7 @@
 	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
 	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
 
-	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
 
 	file->private_data = vsock;
 	spin_lock_init(&vsock->send_pkt_list_lock);
@@ -642,7 +642,7 @@
 		hash_del_rcu(&vsock->hash);
 
 	vsock->guest_cid = guest_cid;
-	hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
 	spin_unlock_bh(&vhost_vsock_lock);
 
 	return 0;
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index bdfcc0a..6bde543 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -263,6 +263,16 @@
 	memset(data, 0, sizeof(*data));
 
 	/*
+	 * These values are optional and set as 0 by default; the out values
+	 * are modified only if a valid u32 value can be decoded.
+	 */
+	of_property_read_u32(node, "post-pwm-on-delay-ms",
+			     &data->post_pwm_on_delay);
+	of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
+
+	data->enable_gpio = -EINVAL;
+
+	/*
 	 * Determine the number of brightness levels, if this property is not
 	 * set a default table of brightness levels will be used.
 	 */
@@ -374,15 +384,6 @@
 		data->max_brightness--;
 	}
 
-	/*
-	 * These values are optional and set as 0 by default, the out values
-	 * are modified only if a valid u32 value can be decoded.
-	 */
-	of_property_read_u32(node, "post-pwm-on-delay-ms",
-			     &data->post_pwm_on_delay);
-	of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
-
-	data->enable_gpio = -EINVAL;
 	return 0;
 }
 
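
Note: the pwm_bl reorder matters because of_property_read_u32() leaves its output untouched when the property is absent; hoisting the optional delay reads and the enable_gpio default above the early-return path (taken when no brightness-levels table is given) ensures those fields are initialized on every exit. Toy model of the convention:

    #include <stdio.h>

    /* Like of_property_read_u32(): leaves *out untouched on failure, so
     * callers must establish defaults first, before any early return. */
    static int read_opt(int present, int val, int *out)
    {
        if (!present)
            return -1;
        *out = val;
        return 0;
    }

    int main(void)
    {
        int delay = 0;                   /* default set up front */

        read_opt(0, 50, &delay);         /* property absent: default survives */
        printf("delay=%d\n", delay);     /* prints 0 */
        return 0;
    }
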
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 09731b2..c6b3bdb 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -271,6 +271,7 @@
 
 static void vgacon_restore_screen(struct vc_data *c)
 {
+	c->vc_origin = c->vc_visible_origin;
 	vgacon_scrollback_cur->save = 0;
 
 	if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
@@ -287,8 +288,7 @@
 	int start, end, count, soff;
 
 	if (!lines) {
-		c->vc_visible_origin = c->vc_origin;
-		vga_set_mem_top(c);
+		vgacon_restore_screen(c);
 		return;
 	}
 
@@ -298,6 +298,7 @@
 	if (!vgacon_scrollback_cur->save) {
 		vgacon_cursor(c, CM_ERASE);
 		vgacon_save_screen(c);
+		c->vc_origin = (unsigned long)c->vc_screenbuf;
 		vgacon_scrollback_cur->save = 1;
 	}
 
@@ -335,7 +336,7 @@
 		int copysize;
 
 		int diff = c->vc_rows - count;
-		void *d = (void *) c->vc_origin;
+		void *d = (void *) c->vc_visible_origin;
 		void *s = (void *) c->vc_screenbuf;
 
 		count *= c->vc_size_row;
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index ff56107..42f9096 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -287,14 +287,17 @@
 	}
 
 	ret = of_get_fb_videomode(disp, &cfb->mode, OF_USE_NATIVE_MODE);
-	if (ret)
+	if (ret) {
+		of_node_put(disp);
 		goto out_fb_release;
+	}
 
 	of_property_read_u32(disp, "ac-prescale", &cfb->ac_prescale);
 	cfb->cmap_invert = of_property_read_bool(disp, "cmap-invert");
 
 	ret = of_property_read_u32(disp, "bits-per-pixel",
 				   &info->var.bits_per_pixel);
+	of_node_put(disp);
 	if (ret)
 		goto out_fb_release;
 
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 75ebbbf..5d961e3 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -3066,7 +3066,7 @@
 	for (i = first_fb_vc; i <= last_fb_vc; i++) {
 		if (con2fb_map[i] != idx &&
 		    con2fb_map[i] != -1) {
-			new_idx = i;
+			new_idx = con2fb_map[i];
 			break;
 		}
 	}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 2040542..77cee99 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -435,7 +435,9 @@
 			image->dx += image->width + 8;
 		}
 	} else if (rotate == FB_ROTATE_UD) {
-		for (x = 0; x < num; x++) {
+		u32 dx = image->dx;
+
+		for (x = 0; x < num && image->dx <= dx; x++) {
 			info->fbops->fb_imageblit(info, image);
 			image->dx -= image->width + 8;
 		}
@@ -447,7 +449,9 @@
 			image->dy += image->height + 8;
 		}
 	} else if (rotate == FB_ROTATE_CCW) {
-		for (x = 0; x < num; x++) {
+		u32 dy = image->dy;
+
+		for (x = 0; x < num && image->dy <= dy; x++) {
 			info->fbops->fb_imageblit(info, image);
 			image->dy -= image->height + 8;
 		}
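
Note: in the fbmem rotate loops above, image->dx and image->dy are u32, so the `-=` step wraps to a huge value once it would go below zero; saving the starting offset and bounding the loop with `image->dx <= dx` stops the walk at the first wrap instead of blitting at a bogus offset. Minimal illustration of the guard:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t start = 100, step = 64, dx = start;
        int i;

        /* Unguarded, dx would wrap: 100 -> 36 -> 4294967268 -> ... */
        for (i = 0; i < 8 && dx <= start; i++) {
            printf("blit at dx=%u\n", dx);
            dx -= step;                 /* wraps past zero on the second pass */
        }
        return 0;
    }
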
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index a3edb20..a846d32 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -609,6 +609,8 @@
 
 	int r = 0;
 
+	memset(&p, 0, sizeof(p));
+
 	switch (cmd) {
 	case OMAPFB_SYNC_GFX:
 		DBG("ioctl SYNC_GFX\n");
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index bbed039..d59c8a5 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2234,10 +2234,8 @@
 	if (!info)
 		return ERR_PTR(-ENOMEM);
 	ret = of_get_pxafb_mode_info(dev, info);
-	if (ret) {
-		kfree(info->modes);
+	if (ret)
 		return ERR_PTR(ret);
-	}
 
 	/*
 	 * On purpose, neither lccrX registers nor video memory size can be
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index afbd610..070026a 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -916,8 +916,6 @@
 
 	dlfb->fb_count++;
 
-	kref_get(&dlfb->kref);
-
 	if (fb_defio && (info->fbdefio == NULL)) {
 		/* enable defio at last moment if not disabled by client */
 
@@ -940,14 +938,17 @@
 	return 0;
 }
 
-/*
- * Called when all client interfaces to start transactions have been disabled,
- * and all references to our device instance (dlfb_data) are released.
- * Every transaction must have a reference, so we know are fully spun down
- */
-static void dlfb_free(struct kref *kref)
+static void dlfb_ops_destroy(struct fb_info *info)
 {
-	struct dlfb_data *dlfb = container_of(kref, struct dlfb_data, kref);
+	struct dlfb_data *dlfb = info->par;
+
+	if (info->cmap.len != 0)
+		fb_dealloc_cmap(&info->cmap);
+	if (info->monspecs.modedb)
+		fb_destroy_modedb(info->monspecs.modedb);
+	vfree(info->screen_base);
+
+	fb_destroy_modelist(&info->modelist);
 
 	while (!list_empty(&dlfb->deferred_free)) {
 		struct dlfb_deferred_free *d = list_entry(dlfb->deferred_free.next, struct dlfb_deferred_free, list);
@@ -957,40 +958,13 @@
 	}
 	vfree(dlfb->backing_buffer);
 	kfree(dlfb->edid);
+	usb_put_dev(dlfb->udev);
 	kfree(dlfb);
+
+	/* Assume info structure is freed after this point */
+	framebuffer_release(info);
 }
 
-static void dlfb_free_framebuffer(struct dlfb_data *dlfb)
-{
-	struct fb_info *info = dlfb->info;
-
-	if (info) {
-		unregister_framebuffer(info);
-
-		if (info->cmap.len != 0)
-			fb_dealloc_cmap(&info->cmap);
-		if (info->monspecs.modedb)
-			fb_destroy_modedb(info->monspecs.modedb);
-		vfree(info->screen_base);
-
-		fb_destroy_modelist(&info->modelist);
-
-		dlfb->info = NULL;
-
-		/* Assume info structure is freed after this point */
-		framebuffer_release(info);
-	}
-
-	/* ref taken in probe() as part of registering framebfufer */
-	kref_put(&dlfb->kref, dlfb_free);
-}
-
-static void dlfb_free_framebuffer_work(struct work_struct *work)
-{
-	struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
-					     free_framebuffer_work.work);
-	dlfb_free_framebuffer(dlfb);
-}
 /*
  * Assumes caller is holding info->lock mutex (for open and release at least)
  */
@@ -1000,10 +974,6 @@
 
 	dlfb->fb_count--;
 
-	/* We can't free fb_info here - fbmem will touch it when we return */
-	if (dlfb->virtualized && (dlfb->fb_count == 0))
-		schedule_delayed_work(&dlfb->free_framebuffer_work, HZ);
-
 	if ((dlfb->fb_count == 0) && (info->fbdefio)) {
 		fb_deferred_io_cleanup(info);
 		kfree(info->fbdefio);
@@ -1013,8 +983,6 @@
 
 	dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
 
-	kref_put(&dlfb->kref, dlfb_free);
-
 	return 0;
 }
 
@@ -1172,6 +1140,7 @@
 	.fb_blank = dlfb_ops_blank,
 	.fb_check_var = dlfb_ops_check_var,
 	.fb_set_par = dlfb_ops_set_par,
+	.fb_destroy = dlfb_ops_destroy,
 };
 
 
@@ -1615,12 +1584,13 @@
 	return true;
 }
 
-static void dlfb_init_framebuffer_work(struct work_struct *work);
-
 static int dlfb_usb_probe(struct usb_interface *intf,
 			  const struct usb_device_id *id)
 {
+	int i;
+	const struct device_attribute *attr;
 	struct dlfb_data *dlfb;
+	struct fb_info *info;
 	int retval = -ENOMEM;
 	struct usb_device *usbdev = interface_to_usbdev(intf);
 
@@ -1631,10 +1601,9 @@
 		goto error;
 	}
 
-	kref_init(&dlfb->kref); /* matching kref_put in usb .disconnect fn */
 	INIT_LIST_HEAD(&dlfb->deferred_free);
 
-	dlfb->udev = usbdev;
+	dlfb->udev = usb_get_dev(usbdev);
 	usb_set_intfdata(intf, dlfb);
 
 	dev_dbg(&intf->dev, "console enable=%d\n", console);
@@ -1657,42 +1626,6 @@
 	}
 
 
-	if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
-		retval = -ENOMEM;
-		dev_err(&intf->dev, "unable to allocate urb list\n");
-		goto error;
-	}
-
-	kref_get(&dlfb->kref); /* matching kref_put in free_framebuffer_work */
-
-	/* We don't register a new USB class. Our client interface is dlfbev */
-
-	/* Workitem keep things fast & simple during USB enumeration */
-	INIT_DELAYED_WORK(&dlfb->init_framebuffer_work,
-			  dlfb_init_framebuffer_work);
-	schedule_delayed_work(&dlfb->init_framebuffer_work, 0);
-
-	return 0;
-
-error:
-	if (dlfb) {
-
-		kref_put(&dlfb->kref, dlfb_free); /* last ref from kref_init */
-
-		/* dev has been deallocated. Do not dereference */
-	}
-
-	return retval;
-}
-
-static void dlfb_init_framebuffer_work(struct work_struct *work)
-{
-	int i, retval;
-	struct fb_info *info;
-	const struct device_attribute *attr;
-	struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
-					     init_framebuffer_work.work);
-
 	/* allocates framebuffer driver structure, not framebuffer memory */
 	info = framebuffer_alloc(0, &dlfb->udev->dev);
 	if (!info) {
@@ -1706,17 +1639,22 @@
 	dlfb->ops = dlfb_ops;
 	info->fbops = &dlfb->ops;
 
+	INIT_LIST_HEAD(&info->modelist);
+
+	if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+		retval = -ENOMEM;
+		dev_err(&intf->dev, "unable to allocate urb list\n");
+		goto error;
+	}
+
+	/* We don't register a new USB class. Our client interface is dlfbev */
+
 	retval = fb_alloc_cmap(&info->cmap, 256, 0);
 	if (retval < 0) {
 		dev_err(info->device, "cmap allocation failed: %d\n", retval);
 		goto error;
 	}
 
-	INIT_DELAYED_WORK(&dlfb->free_framebuffer_work,
-			  dlfb_free_framebuffer_work);
-
-	INIT_LIST_HEAD(&info->modelist);
-
 	retval = dlfb_setup_modes(dlfb, info, NULL, 0);
 	if (retval != 0) {
 		dev_err(info->device,
@@ -1760,10 +1698,16 @@
 		 dev_name(info->dev), info->var.xres, info->var.yres,
 		 ((dlfb->backing_buffer) ?
 		 info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
-	return;
+	return 0;
 
 error:
-	dlfb_free_framebuffer(dlfb);
+	if (dlfb->info) {
+		dlfb_ops_destroy(dlfb->info);
+	} else if (dlfb) {
+		usb_put_dev(dlfb->udev);
+		kfree(dlfb);
+	}
+	return retval;
 }
 
 static void dlfb_usb_disconnect(struct usb_interface *intf)
@@ -1791,20 +1735,9 @@
 		for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
 			device_remove_file(info->dev, &fb_device_attrs[i]);
 		device_remove_bin_file(info->dev, &edid_attr);
-		unlink_framebuffer(info);
 	}
 
-	usb_set_intfdata(intf, NULL);
-	dlfb->udev = NULL;
-
-	/* if clients still have us open, will be freed on last close */
-	if (dlfb->fb_count == 0)
-		schedule_delayed_work(&dlfb->free_framebuffer_work, 0);
-
-	/* release reference taken by kref_init in probe() */
-	kref_put(&dlfb->kref, dlfb_free);
-
-	/* consider dlfb_data freed */
+	unregister_framebuffer(info);
 }
 
 static struct usb_driver dlfb_driver = {
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 3093655..1475ed5 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1312,7 +1312,7 @@
 		return -EINVAL;
 	}
 
-	if (f32bit)
+	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
 		ret = vbg_hgcm_call32(gdev, client_id,
 				      call->function, call->timeout_ms,
 				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 5c4a764..81208cd 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -17,6 +17,7 @@
 #include <linux/watchdog.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
 
 #include <asm/mach-ralink/ralink_regs.h>
 
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 88d81fe..d01efd3 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -77,12 +77,17 @@
 static int rwdt_start(struct watchdog_device *wdev)
 {
 	struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+	u8 val;
 
 	pm_runtime_get_sync(wdev->parent);
 
-	rwdt_write(priv, 0, RWTCSRB);
-	rwdt_write(priv, priv->cks, RWTCSRA);
+	/* Stop the timer before we modify any register */
+	val = readb_relaxed(priv->base + RWTCSRA) & ~RWTCSRA_TME;
+	rwdt_write(priv, val, RWTCSRA);
+
 	rwdt_init_timeout(wdev);
+	rwdt_write(priv, priv->cks, RWTCSRA);
+	rwdt_write(priv, 0, RWTCSRB);
 
 	while (readb_relaxed(priv->base + RWTCSRA) & RWTCSRA_WRFLG)
 		cpu_relax();
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 98967f0..db7c57d 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -18,6 +18,7 @@
 #include <linux/watchdog.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
 
 #include <asm/mach-ralink/ralink_regs.h>
 
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index e6c1934..fe1f163 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1650,7 +1650,7 @@
 			xen_have_vector_callback = 0;
 			return;
 		}
-		pr_info("Xen HVM callback vector for event delivery is enabled\n");
+		pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
 		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
 				xen_hvm_callback_vector);
 	}
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index b1092fb..d4ea335 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -160,9 +160,10 @@
 
 	/* write the data, then modify the indexes */
 	virt_wmb();
-	if (ret < 0)
+	if (ret < 0) {
+		atomic_set(&map->read, 0);
 		intf->in_error = ret;
-	else
+	} else
 		intf->in_prod = prod + ret;
 	/* update the indexes, then notify the other end */
 	virt_wmb();
@@ -282,13 +283,11 @@
 static void pvcalls_sk_state_change(struct sock *sock)
 {
 	struct sock_mapping *map = sock->sk_user_data;
-	struct pvcalls_data_intf *intf;
 
 	if (map == NULL)
 		return;
 
-	intf = map->ring;
-	intf->in_error = -ENOTCONN;
+	atomic_inc(&map->read);
 	notify_remote_via_irq(map->irq);
 }
 
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 77224d8..91da7e4 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -31,6 +31,12 @@
 #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
 #define PVCALLS_FRONT_MAX_SPIN 5000
 
+static struct proto pvcalls_proto = {
+	.name	= "PVCalls",
+	.owner	= THIS_MODULE,
+	.obj_size = sizeof(struct sock),
+};
+
 struct pvcalls_bedata {
 	struct xen_pvcalls_front_ring ring;
 	grant_ref_t ref;
@@ -335,6 +341,42 @@
 	return ret;
 }
 
+static void free_active_ring(struct sock_mapping *map)
+{
+	if (!map->active.ring)
+		return;
+
+	free_pages((unsigned long)map->active.data.in,
+			map->active.ring->ring_order);
+	free_page((unsigned long)map->active.ring);
+}
+
+static int alloc_active_ring(struct sock_mapping *map)
+{
+	void *bytes;
+
+	map->active.ring = (struct pvcalls_data_intf *)
+		get_zeroed_page(GFP_KERNEL);
+	if (!map->active.ring)
+		goto out;
+
+	map->active.ring->ring_order = PVCALLS_RING_ORDER;
+	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					PVCALLS_RING_ORDER);
+	if (!bytes)
+		goto out;
+
+	map->active.data.in = bytes;
+	map->active.data.out = bytes +
+		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+	return 0;
+
+out:
+	free_active_ring(map);
+	return -ENOMEM;
+}
+
 static int create_active(struct sock_mapping *map, int *evtchn)
 {
 	void *bytes;
@@ -343,15 +385,7 @@
 	*evtchn = -1;
 	init_waitqueue_head(&map->active.inflight_conn_req);
 
-	map->active.ring = (struct pvcalls_data_intf *)
-		__get_free_page(GFP_KERNEL | __GFP_ZERO);
-	if (map->active.ring == NULL)
-		goto out_error;
-	map->active.ring->ring_order = PVCALLS_RING_ORDER;
-	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					PVCALLS_RING_ORDER);
-	if (bytes == NULL)
-		goto out_error;
+	bytes = map->active.data.in;
 	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
 		map->active.ring->ref[i] = gnttab_grant_foreign_access(
 			pvcalls_front_dev->otherend_id,
@@ -361,10 +395,6 @@
 		pvcalls_front_dev->otherend_id,
 		pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
 
-	map->active.data.in = bytes;
-	map->active.data.out = bytes +
-		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
-
 	ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
 	if (ret)
 		goto out_error;
@@ -385,8 +415,6 @@
 out_error:
 	if (*evtchn >= 0)
 		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
-	free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
-	free_page((unsigned long)map->active.ring);
 	return ret;
 }
 
@@ -406,17 +434,24 @@
 		return PTR_ERR(map);
 
 	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+	ret = alloc_active_ring(map);
+	if (ret < 0) {
+		pvcalls_exit_sock(sock);
+		return ret;
+	}
 
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
 	ret = create_active(map, &evtchn);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
@@ -560,15 +595,13 @@
 	error = intf->in_error;
 	/* get pointers before reading from the ring */
 	virt_rmb();
-	if (error < 0)
-		return error;
 
 	size = pvcalls_queued(prod, cons, array_size);
 	masked_prod = pvcalls_mask(prod, array_size);
 	masked_cons = pvcalls_mask(cons, array_size);
 
 	if (size == 0)
-		return 0;
+		return error ?: size;
 
 	if (len > size)
 		len = size;
@@ -780,25 +813,36 @@
 		}
 	}
 
+	map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+	if (map2 == NULL) {
+		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+			  (void *)&map->passive.flags);
+		pvcalls_exit_sock(sock);
+		return -ENOMEM;
+	}
+	ret = alloc_active_ring(map2);
+	if (ret < 0) {
+		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+				(void *)&map->passive.flags);
+		kfree(map2);
+		pvcalls_exit_sock(sock);
+		return ret;
+	}
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map2);
+		kfree(map2);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
-	map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
-	if (map2 == NULL) {
-		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
-			  (void *)&map->passive.flags);
-		spin_unlock(&bedata->socket_lock);
-		pvcalls_exit_sock(sock);
-		return -ENOMEM;
-	}
+
 	ret = create_active(map2, &evtchn);
 	if (ret < 0) {
+		free_active_ring(map2);
 		kfree(map2);
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
@@ -839,7 +883,7 @@
 
 received:
 	map2->sock = newsock;
-	newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
+	newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL,
+			       &pvcalls_proto, false);
 	if (!newsock->sk) {
 		bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
 		map->passive.inflight_req_id = PVCALLS_INVALID_ID;
@@ -1032,8 +1076,8 @@
 		spin_lock(&bedata->socket_lock);
 		list_del(&map->list);
 		spin_unlock(&bedata->socket_lock);
-		if (READ_ONCE(map->passive.inflight_req_id) !=
-		    PVCALLS_INVALID_ID) {
+		if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
+			READ_ONCE(map->passive.inflight_req_id) != 0) {
 			pvcalls_front_free_map(bedata,
 					       map->passive.accept_map);
 		}
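
The pvcalls-front changes above move the map2 allocation and the new alloc_active_ring() in front of the socket_lock acquisition, replacing a GFP_ATOMIC allocation under the spinlock with GFP_KERNEL allocations done while sleeping is still allowed, and freeing them on every error path. A small user-space sketch of that ordering follows, with a pthread mutex standing in for the spinlock and get_request() as an invented stub.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t socket_lock = PTHREAD_MUTEX_INITIALIZER;

struct sock_mapping { char ring[64]; };	/* stand-in for the real mapping */

static int get_request(int *req_id) { *req_id = 7; return 0; }	/* stub */

static int accept_path(void)
{
	struct sock_mapping *map2;
	int req_id, ret;

	/* Allocate while we are still allowed to sleep (GFP_KERNEL in the
	 * kernel patch)... */
	map2 = calloc(1, sizeof(*map2));
	if (!map2)
		return -ENOMEM;

	/* ...then take the lock only for the short, non-sleeping part. */
	pthread_mutex_lock(&socket_lock);
	ret = get_request(&req_id);
	pthread_mutex_unlock(&socket_lock);
	if (ret < 0) {
		free(map2);	/* every error path releases what it took */
		return ret;
	}

	printf("req_id=%d\n", req_id);
	free(map2);
	return 0;
}

int main(void) { return accept_path() ? 1 : 0; }
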
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index dc62d15..1bb300e 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -208,7 +208,7 @@
 		/* The new front of the queue now owns the state variables. */
 		next = list_entry(vnode->pending_locks.next,
 				  struct file_lock, fl_u.afs.link);
-		vnode->lock_key = afs_file_key(next->fl_file);
+		vnode->lock_key = key_get(afs_file_key(next->fl_file));
 		vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
 		vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
 		goto again;
@@ -413,7 +413,7 @@
 	/* The new front of the queue now owns the state variables. */
 	next = list_entry(vnode->pending_locks.next,
 			  struct file_lock, fl_u.afs.link);
-	vnode->lock_key = afs_file_key(next->fl_file);
+	vnode->lock_key = key_get(afs_file_key(next->fl_file));
 	vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
 	vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
 	afs_lock_may_be_available(vnode);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 071075d..0726e40 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -411,7 +411,6 @@
 	} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
 		valid = true;
 	} else {
-		vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
 		vnode->cb_v_break = vnode->volume->cb_v_break;
 		valid = false;
 	}
@@ -543,6 +542,8 @@
 #endif
 
 	afs_put_permits(rcu_access_pointer(vnode->permit_cache));
+	key_put(vnode->lock_key);
+	vnode->lock_key = NULL;
 	_leave("");
 }
 
diff --git a/fs/aio.c b/fs/aio.c
index 44551d9..45d5ef8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1661,6 +1661,7 @@
 	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
 	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
 	__poll_t mask = key_to_poll(key);
+	unsigned long flags;
 
 	req->woken = true;
 
@@ -1669,10 +1670,15 @@
 		if (!(mask & req->events))
 			return 0;
 
-		/* try to complete the iocb inline if we can: */
-		if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
+		/*
+		 * Try to complete the iocb inline if we can. Use
+		 * irqsave/irqrestore because not all filesystems (e.g. fuse)
+		 * call this function with IRQs disabled and because IRQs
+		 * have to be disabled before ctx_lock is obtained.
+		 */
+		if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
 			list_del(&iocb->ki_list);
-			spin_unlock(&iocb->ki_ctx->ctx_lock);
+			spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
 
 			list_del_init(&req->wait.entry);
 			aio_poll_complete(iocb, mask);
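
The aio hunk above switches to spin_trylock_irqsave() because callers such as fuse may invoke the wakeup with IRQs still enabled. The toy model below shows only the save/disable/restore discipline that makes the lock safe either way; irqs_enabled is a plain variable standing in for the CPU interrupt flag, not the kernel's implementation.

#include <stdio.h>

static int irqs_enabled = 1;	/* models the CPU interrupt-enable flag */
static int lock;		/* models the spinlock */

static unsigned long local_irq_save(void)
{
	unsigned long flags = irqs_enabled;	/* remember caller's state */
	irqs_enabled = 0;
	return flags;
}

static void local_irq_restore(unsigned long flags)
{
	irqs_enabled = (int)flags;	/* put it back exactly as found */
}

static void lock_irqsave(unsigned long *flags)
{
	*flags = local_irq_save();
	lock = 1;
}

static void unlock_irqrestore(unsigned long flags)
{
	lock = 0;
	local_irq_restore(flags);
}

int main(void)
{
	unsigned long flags;

	/* The caller may or may not have IRQs disabled already. */
	lock_irqsave(&flags);
	/* ... critical section ... */
	unlock_irqrestore(flags);

	printf("lock=%d irqs_enabled=%d\n", lock, irqs_enabled);
	return 0;
}
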
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index d441244..28d9c2b 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -596,7 +596,6 @@
 	pkt.len = dentry->d_name.len;
 	memcpy(pkt.name, dentry->d_name.name, pkt.len);
 	pkt.name[pkt.len] = '\0';
-	dput(dentry);
 
 	if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
 		ret = -EFAULT;
@@ -609,6 +608,8 @@
 	complete_all(&ino->expire_complete);
 	spin_unlock(&sbi->fs_lock);
 
+	dput(dentry);
+
 	return ret;
 }
 
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 846c052..3c14a8e 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -255,8 +255,10 @@
 	}
 	root_inode = autofs_get_inode(s, S_IFDIR | 0755);
 	root = d_make_root(root_inode);
-	if (!root)
+	if (!root) {
+		ret = -ENOMEM;
 		goto fail_ino;
+	}
 	pipe = NULL;
 
 	root->d_fsdata = ino;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 38b8ce0..cdbb888 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -104,6 +104,20 @@
 }
 EXPORT_SYMBOL(invalidate_bdev);
 
+static void set_init_blocksize(struct block_device *bdev)
+{
+	unsigned bsize = bdev_logical_block_size(bdev);
+	loff_t size = i_size_read(bdev->bd_inode);
+
+	while (bsize < PAGE_SIZE) {
+		if (size & bsize)
+			break;
+		bsize <<= 1;
+	}
+	bdev->bd_block_size = bsize;
+	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+}
+
 int set_blocksize(struct block_device *bdev, int size)
 {
 	/* Size must be a power of two, and between 512 and PAGE_SIZE */
@@ -1408,18 +1422,9 @@
 
 void bd_set_size(struct block_device *bdev, loff_t size)
 {
-	unsigned bsize = bdev_logical_block_size(bdev);
-
 	inode_lock(bdev->bd_inode);
 	i_size_write(bdev->bd_inode, size);
 	inode_unlock(bdev->bd_inode);
-	while (bsize < PAGE_SIZE) {
-		if (size & bsize)
-			break;
-		bsize <<= 1;
-	}
-	bdev->bd_block_size = bsize;
-	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
 }
 EXPORT_SYMBOL(bd_set_size);
 
@@ -1496,8 +1501,10 @@
 				}
 			}
 
-			if (!ret)
+			if (!ret) {
 				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+				set_init_blocksize(bdev);
+			}
 
 			/*
 			 * If the device is invalidated, rescan partition
@@ -1532,6 +1539,7 @@
 				goto out_clear;
 			}
 			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
+			set_init_blocksize(bdev);
 		}
 
 		if (bdev->bd_bdi == &noop_backing_dev_info)
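
set_init_blocksize() above keeps doubling the block size while the device size remains aligned to it, stopping at PAGE_SIZE. A compilable sketch of just that arithmetic, with illustrative sizes (512-byte logical blocks, 4 KiB pages):

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned init_blocksize(unsigned bsize, unsigned long long size)
{
	while (bsize < PAGE_SIZE) {
		if (size & bsize)	/* size no longer aligned: stop here */
			break;
		bsize <<= 1;
	}
	return bsize;
}

int main(void)
{
	/* 1 MiB device: aligned all the way up, so we end at PAGE_SIZE. */
	printf("%u\n", init_blocksize(512, 1 << 20));		/* -> 4096 */
	/* 1 MiB + 1 KiB: the 1024 bit is set, so we stop at 1024. */
	printf("%u\n", init_blocksize(512, (1 << 20) + 1024));	/* -> 1024 */
	return 0;
}
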
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 1343ac5..45f5cf9 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -29,6 +29,7 @@
 	BTRFS_INODE_IN_DELALLOC_LIST,
 	BTRFS_INODE_READDIO_NEED_LOCK,
 	BTRFS_INODE_HAS_PROPS,
+	BTRFS_INODE_SNAPSHOT_FLUSH,
 };
 
 /* in memory btrfs inode */
@@ -147,6 +148,12 @@
 	u64 last_unlink_trans;
 
 	/*
+	 * Track the transaction id of the last transaction used to create a
+	 * hard link for the inode. This is used by the log tree (fsync).
+	 */
+	u64 last_link_trans;
+
+	/*
 	 * Number of bytes outstanding that are going to need csums.  This is
 	 * used in ENOSPC accounting.
 	 */
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 089b46c..48ac8b7 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1003,6 +1003,48 @@
 	return 0;
 }
 
+static struct extent_buffer *alloc_tree_block_no_bg_flush(
+					  struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  u64 parent_start,
+					  const struct btrfs_disk_key *disk_key,
+					  int level,
+					  u64 hint,
+					  u64 empty_size)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct extent_buffer *ret;
+
+	/*
+	 * If we are COWing a node/leaf from the extent, chunk, device or free
+	 * space trees, make sure that we do not finish block group creation of
+	 * pending block groups. We do this to avoid a deadlock.
+	 * COWing can result in allocation of a new chunk, and flushing pending
+	 * block groups (btrfs_create_pending_block_groups()) can be triggered
+	 * when finishing allocation of a new chunk. Creation of a pending block
+	 * group modifies the extent, chunk, device and free space trees,
+	 * therefore we could deadlock with ourselves since we are holding a
+	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
+	 * try to COW later.
+	 * For similar reasons, we also need to delay flushing pending block
+	 * groups when splitting a leaf or node from one of those trees, since
+	 * we are holding a write lock on it and its parent, or when inserting
+	 * a new root node for one of those trees.
+	 */
+	if (root == fs_info->extent_root ||
+	    root == fs_info->chunk_root ||
+	    root == fs_info->dev_root ||
+	    root == fs_info->free_space_root)
+		trans->can_flush_pending_bgs = false;
+
+	ret = btrfs_alloc_tree_block(trans, root, parent_start,
+				     root->root_key.objectid, disk_key, level,
+				     hint, empty_size);
+	trans->can_flush_pending_bgs = true;
+
+	return ret;
+}
+
 /*
  * does the dirty work in cow of a single block.  The parent block (if
  * supplied) is updated to point to the new cow copy.  The new buffer is marked
@@ -1050,26 +1092,8 @@
 	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
 		parent_start = parent->start;
 
-	/*
-	 * If we are COWing a node/leaf from the extent, chunk or device trees,
-	 * make sure that we do not finish block group creation of pending block
-	 * groups. We do this to avoid a deadlock.
-	 * COWing can result in allocation of a new chunk, and flushing pending
-	 * block groups (btrfs_create_pending_block_groups()) can be triggered
-	 * when finishing allocation of a new chunk. Creation of a pending block
-	 * group modifies the extent, chunk and device trees, therefore we could
-	 * deadlock with ourselves since we are holding a lock on an extent
-	 * buffer that btrfs_create_pending_block_groups() may try to COW later.
-	 */
-	if (root == fs_info->extent_root ||
-	    root == fs_info->chunk_root ||
-	    root == fs_info->dev_root)
-		trans->can_flush_pending_bgs = false;
-
-	cow = btrfs_alloc_tree_block(trans, root, parent_start,
-			root->root_key.objectid, &disk_key, level,
-			search_start, empty_size);
-	trans->can_flush_pending_bgs = true;
+	cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
+					   level, search_start, empty_size);
 	if (IS_ERR(cow))
 		return PTR_ERR(cow);
 
@@ -2624,14 +2648,27 @@
 	root_lock = BTRFS_READ_LOCK;
 
 	if (p->search_commit_root) {
-		/* The commit roots are read only so we always do read locks */
-		if (p->need_commit_sem)
+		/*
+		 * The commit roots are read only, so we always do read locks,
+		 * and we must always hold the commit_root_sem when doing
+		 * searches on them. The only exception is send, where we don't
+		 * want to block transaction commits for a long time, so
+		 * we need to clone the commit root in order to avoid races
+		 * with transaction commits that create a snapshot of one of
+		 * the roots used by a send operation.
+		 */
+		if (p->need_commit_sem) {
 			down_read(&fs_info->commit_root_sem);
-		b = root->commit_root;
-		extent_buffer_get(b);
-		level = btrfs_header_level(b);
-		if (p->need_commit_sem)
+			b = btrfs_clone_extent_buffer(root->commit_root);
 			up_read(&fs_info->commit_root_sem);
+			if (!b)
+				return ERR_PTR(-ENOMEM);
+
+		} else {
+			b = root->commit_root;
+			extent_buffer_get(b);
+		}
+		level = btrfs_header_level(b);
 		/*
 		 * Ensure that all callers have set skip_locking when
 		 * p->search_commit_root = 1.
@@ -2757,6 +2794,10 @@
 again:
 	prev_cmp = -1;
 	b = btrfs_search_slot_get_root(root, p, write_lock_level);
+	if (IS_ERR(b)) {
+		ret = PTR_ERR(b);
+		goto done;
+	}
 
 	while (b) {
 		level = btrfs_header_level(b);
@@ -3364,8 +3405,8 @@
 	else
 		btrfs_node_key(lower, &lower_key, 0);
 
-	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-				   &lower_key, level, root->node->start, 0);
+	c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
+					 root->node->start, 0);
 	if (IS_ERR(c))
 		return PTR_ERR(c);
 
@@ -3494,8 +3535,8 @@
 	mid = (c_nritems + 1) / 2;
 	btrfs_node_key(c, &disk_key, mid);
 
-	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-			&disk_key, level, c->start, 0);
+	split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
+					     c->start, 0);
 	if (IS_ERR(split))
 		return PTR_ERR(split);
 
@@ -4279,8 +4320,8 @@
 	else
 		btrfs_item_key(l, &disk_key, mid);
 
-	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-			&disk_key, 0, l->start, 0);
+	right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
+					     l->start, 0);
 	if (IS_ERR(right))
 		return PTR_ERR(right);
 
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2cddfe7..82682da 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3155,7 +3155,7 @@
 			       struct inode *inode, u64 new_size,
 			       u32 min_type);
 
-int btrfs_start_delalloc_inodes(struct btrfs_root *root);
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 			      unsigned int extra_bits,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 9814347..8fed470 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -800,39 +800,58 @@
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
 		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
 		btrfs_dev_replace_write_unlock(dev_replace);
-		goto leave;
+		break;
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
+		tgt_device = dev_replace->tgtdev;
+		src_device = dev_replace->srcdev;
+		btrfs_dev_replace_write_unlock(dev_replace);
+		btrfs_scrub_cancel(fs_info);
+		/* btrfs_dev_replace_finishing() will handle the cleanup part */
+		btrfs_info_in_rcu(fs_info,
+			"dev_replace from %s (devid %llu) to %s canceled",
+			btrfs_dev_name(src_device), src_device->devid,
+			btrfs_dev_name(tgt_device));
+		break;
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+		/*
+		 * The scrub doing the replace isn't running, so we need to do
+		 * the cleanup step of btrfs_dev_replace_finishing() here.
+		 */
 		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
 		tgt_device = dev_replace->tgtdev;
 		src_device = dev_replace->srcdev;
 		dev_replace->tgtdev = NULL;
 		dev_replace->srcdev = NULL;
+		dev_replace->replace_state =
+				BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
+		dev_replace->time_stopped = ktime_get_real_seconds();
+		dev_replace->item_needs_writeback = 1;
+
+		btrfs_dev_replace_write_unlock(dev_replace);
+
+		btrfs_scrub_cancel(fs_info);
+
+		trans = btrfs_start_transaction(root, 0);
+		if (IS_ERR(trans)) {
+			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+			return PTR_ERR(trans);
+		}
+		ret = btrfs_commit_transaction(trans);
+		WARN_ON(ret);
+
+		btrfs_info_in_rcu(fs_info,
+		"suspended dev_replace from %s (devid %llu) to %s canceled",
+			btrfs_dev_name(src_device), src_device->devid,
+			btrfs_dev_name(tgt_device));
+
+		if (tgt_device)
+			btrfs_destroy_dev_replace_tgtdev(tgt_device);
 		break;
+	default:
+		result = -EINVAL;
 	}
-	dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
-	dev_replace->time_stopped = ktime_get_real_seconds();
-	dev_replace->item_needs_writeback = 1;
-	btrfs_dev_replace_write_unlock(dev_replace);
-	btrfs_scrub_cancel(fs_info);
 
-	trans = btrfs_start_transaction(root, 0);
-	if (IS_ERR(trans)) {
-		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
-		return PTR_ERR(trans);
-	}
-	ret = btrfs_commit_transaction(trans);
-	WARN_ON(ret);
-
-	btrfs_info_in_rcu(fs_info,
-		"dev_replace from %s (devid %llu) to %s canceled",
-		btrfs_dev_name(src_device), src_device->devid,
-		btrfs_dev_name(tgt_device));
-
-	if (tgt_device)
-		btrfs_destroy_dev_replace_tgtdev(tgt_device);
-
-leave:
 	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 	return result;
 }
@@ -887,6 +906,8 @@
 			   "cannot continue dev_replace, tgtdev is missing");
 		btrfs_info(fs_info,
 			   "you may cancel the operation after 'mount -o degraded'");
+		dev_replace->replace_state =
+					BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
 		btrfs_dev_replace_write_unlock(dev_replace);
 		return 0;
 	}
@@ -898,6 +919,10 @@
 	 * dev-replace to start anyway.
 	 */
 	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+		btrfs_dev_replace_write_lock(dev_replace);
+		dev_replace->replace_state =
+					BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
+		btrfs_dev_replace_write_unlock(dev_replace);
 		btrfs_info(fs_info,
 		"cannot resume dev-replace, other exclusive operation running");
 		return 0;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d4a7f7c..d96d139 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4155,6 +4155,14 @@
 		spin_lock(&fs_info->ordered_root_lock);
 	}
 	spin_unlock(&fs_info->ordered_root_lock);
+
+	/*
+	 * We need this here because if we've been flipped read-only we won't
+	 * get sync() from the umount, so we need to make sure any ordered
+	 * extents that haven't had their dirty pages IO start writeout yet
+	 * extents whose dirty pages haven't started writeout yet
+	 */
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 }
 
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 51e41e5..a16760b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -8911,6 +8911,10 @@
 		goto out_free;
 	}
 
+	err = btrfs_run_delayed_items(trans);
+	if (err)
+		goto out_end_trans;
+
 	if (block_rsv)
 		trans->block_rsv = block_rsv;
 
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4dd6faa..79f82f2 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3928,12 +3928,25 @@
 			range_whole = 1;
 		scanned = 1;
 	}
-	if (wbc->sync_mode == WB_SYNC_ALL)
+
+	/*
+	 * We do the tagged writepage as long as the snapshot flush bit is set
+	 * and we are the first one to do the filemap_flush() on this inode.
+	 *
+	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
+	 * not race in and drop the bit.
+	 */
+	if (range_whole && wbc->nr_to_write == LONG_MAX &&
+	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
+			       &BTRFS_I(inode)->runtime_flags))
+		wbc->tagged_writepages = 1;
+
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag = PAGECACHE_TAG_TOWRITE;
 	else
 		tag = PAGECACHE_TAG_DIRTY;
 retry:
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag_pages_for_writeback(mapping, index, end);
 	done_index = index;
 	while (!done && !nr_to_write_done && (index <= end) &&
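
The writepage hunk above relies on test_and_clear_bit() so that exactly one flusher wins the BTRFS_INODE_SNAPSHOT_FLUSH bit and upgrades itself to tagged writepages; later racers see the bit already clear. A user-space model of that one-shot behaviour using C11 atomics (not the kernel's bitops):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint runtime_flags;
#define SNAPSHOT_FLUSH (1u << 0)

/* Atomically clear the bit and report whether it was previously set. */
static int test_and_clear(atomic_uint *flags, unsigned bit)
{
	return (atomic_fetch_and(flags, ~bit) & bit) != 0;
}

int main(void)
{
	atomic_fetch_or(&runtime_flags, SNAPSHOT_FLUSH);

	/* Only the first caller observes the bit set. */
	printf("%d\n", test_and_clear(&runtime_flags, SNAPSHOT_FLUSH)); /* 1 */
	printf("%d\n", test_and_clear(&runtime_flags, SNAPSHOT_FLUSH)); /* 0 */
	return 0;
}
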
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7158b5b..59f361f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1373,7 +1373,8 @@
 			 * Do the same check as in btrfs_cross_ref_exist but
 			 * without the unnecessary search.
 			 */
-			if (btrfs_file_extent_generation(leaf, fi) <=
+			if (!nolock &&
+			    btrfs_file_extent_generation(leaf, fi) <=
 			    btrfs_root_last_snapshot(&root->root_item))
 				goto out_check;
 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
@@ -3150,9 +3151,6 @@
 	/* once for the tree */
 	btrfs_put_ordered_extent(ordered_extent);
 
-	/* Try to release some metadata so we don't get an OOM but don't wait */
-	btrfs_btree_balance_dirty_nodelay(fs_info);
-
 	return ret;
 }
 
@@ -3688,6 +3686,21 @@
 	 * inode is not a directory, logging its parent unnecessarily.
 	 */
 	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
+	/*
+	 * Similar reasoning for last_link_trans, needs to be set otherwise
+	 * for a case like the following:
+	 *
+	 * mkdir A
+	 * touch foo
+	 * ln foo A/bar
+	 * echo 2 > /proc/sys/vm/drop_caches
+	 * fsync foo
+	 * <power failure>
+	 *
+	 * Would result in link bar and directory A not existing after the power
+	 * failure.
+	 */
+	BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
 
 	path->slots[0]++;
 	if (inode->i_nlink != 1 ||
@@ -6427,14 +6440,19 @@
 		err = btrfs_del_root_ref(trans, key.objectid,
 					 root->root_key.objectid, parent_ino,
 					 &local_index, name, name_len);
-
+		if (err)
+			btrfs_abort_transaction(trans, err);
 	} else if (add_backref) {
 		u64 local_index;
 		int err;
 
 		err = btrfs_del_inode_ref(trans, root, name, name_len,
 					  ino, parent_ino, &local_index);
+		if (err)
+			btrfs_abort_transaction(trans, err);
 	}
+
+	/* Return the original error code */
 	return ret;
 }
 
@@ -6646,6 +6664,7 @@
 			if (err)
 				goto fail;
 		}
+		BTRFS_I(inode)->last_link_trans = trans->transid;
 		d_instantiate(dentry, inode);
 		ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
 					 true, NULL);
@@ -9174,6 +9193,7 @@
 	ei->index_cnt = (u64)-1;
 	ei->dir_index = 0;
 	ei->last_unlink_trans = 0;
+	ei->last_link_trans = 0;
 	ei->last_log_commit = 0;
 
 	spin_lock_init(&ei->lock);
@@ -9985,7 +10005,7 @@
  * some fairly slow code that needs optimization. This walks the list
  * of all the inodes with pending delalloc and forces them to disk.
  */
-static int start_delalloc_inodes(struct btrfs_root *root, int nr)
+static int start_delalloc_inodes(struct btrfs_root *root, int nr, bool snapshot)
 {
 	struct btrfs_inode *binode;
 	struct inode *inode;
@@ -10013,6 +10033,9 @@
 		}
 		spin_unlock(&root->delalloc_lock);
 
+		if (snapshot)
+			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
+				&binode->runtime_flags);
 		work = btrfs_alloc_delalloc_work(inode);
 		if (!work) {
 			iput(inode);
@@ -10046,7 +10069,7 @@
 	return ret;
 }
 
-int btrfs_start_delalloc_inodes(struct btrfs_root *root)
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
@@ -10054,7 +10077,7 @@
 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
 		return -EROFS;
 
-	ret = start_delalloc_inodes(root, -1);
+	ret = start_delalloc_inodes(root, -1, true);
 	if (ret > 0)
 		ret = 0;
 	return ret;
@@ -10083,7 +10106,7 @@
 			       &fs_info->delalloc_roots);
 		spin_unlock(&fs_info->delalloc_root_lock);
 
-		ret = start_delalloc_inodes(root, nr);
+		ret = start_delalloc_inodes(root, nr, false);
 		btrfs_put_fs_root(root);
 		if (ret < 0)
 			goto out;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c915215..8bf9cce1 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -778,7 +778,7 @@
 	wait_event(root->subv_writers->wait,
 		   percpu_counter_sum(&root->subv_writers->counter) == 0);
 
-	ret = btrfs_start_delalloc_inodes(root);
+	ret = btrfs_start_delalloc_snapshot(root);
 	if (ret)
 		goto dec_and_free;
 
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index ff43466..e1fcb28 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1013,16 +1013,22 @@
 		btrfs_abort_transaction(trans, ret);
 		goto out_free_path;
 	}
-	spin_lock(&fs_info->qgroup_lock);
-	fs_info->quota_root = quota_root;
-	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
-	spin_unlock(&fs_info->qgroup_lock);
 
 	ret = btrfs_commit_transaction(trans);
 	trans = NULL;
 	if (ret)
 		goto out_free_path;
 
+	/*
+	 * Set quota enabled flag after committing the transaction, to avoid
+	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
+	 * creation.
+	 */
+	spin_lock(&fs_info->qgroup_lock);
+	fs_info->quota_root = quota_root;
+	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+	spin_unlock(&fs_info->qgroup_lock);
+
 	ret = qgroup_rescan_init(fs_info, 0, 1);
 	if (!ret) {
 	        qgroup_rescan_zero_tracking(fs_info);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8ad1458..8888337 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1677,6 +1677,7 @@
 				flags | SB_RDONLY, device_name, data);
 			if (IS_ERR(mnt_root)) {
 				root = ERR_CAST(mnt_root);
+				kfree(subvol_name);
 				goto out;
 			}
 
@@ -1686,12 +1687,14 @@
 			if (error < 0) {
 				root = ERR_PTR(error);
 				mntput(mnt_root);
+				kfree(subvol_name);
 				goto out;
 			}
 		}
 	}
 	if (IS_ERR(mnt_root)) {
 		root = ERR_CAST(mnt_root);
+		kfree(subvol_name);
 		goto out;
 	}
 
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 16ecb76..0805f8c 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -5781,6 +5781,22 @@
 			goto end_trans;
 	}
 
+	/*
+	 * If a new hard link was added to the inode in the current transaction
+	 * and its link count is now greater than 1, we need to fall back to a
+	 * transaction commit, otherwise we can end up not logging all its new
+	 * parents for all the hard links. Here, just from the dentry used to
+	 * fsync, we can not visit the ancestor inodes for all the other hard
+	 * links to figure out if any is new, so we fall back to a transaction
+	 * commit (instead of adding a lot of complexity to scan a btree,
+	 * since this scenario is not a common use case).
+	 */
+	if (inode->vfs_inode.i_nlink > 1 &&
+	    inode->last_link_trans > last_committed) {
+		ret = -EMLINK;
+		goto end_trans;
+	}
+
 	while (1) {
 		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
 			break;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f4405e4..285f64f 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -850,6 +850,35 @@
 			return ERR_PTR(-EEXIST);
 		}
 
+		/*
+		 * We are going to replace the device path for a given devid,
+		 * make sure it's the same device if the device is mounted
+		 */
+		if (device->bdev) {
+			struct block_device *path_bdev;
+
+			path_bdev = lookup_bdev(path);
+			if (IS_ERR(path_bdev)) {
+				mutex_unlock(&fs_devices->device_list_mutex);
+				return ERR_CAST(path_bdev);
+			}
+
+			if (device->bdev != path_bdev) {
+				bdput(path_bdev);
+				mutex_unlock(&fs_devices->device_list_mutex);
+				btrfs_warn_in_rcu(device->fs_info,
+			"duplicate device fsid:devid for %pU:%llu old:%s new:%s",
+					disk_super->fsid, devid,
+					rcu_str_deref(device->name), path);
+				return ERR_PTR(-EEXIST);
+			}
+			bdput(path_bdev);
+			btrfs_info_in_rcu(device->fs_info,
+				"device fsid %pU devid %llu moved old:%s new:%s",
+				disk_super->fsid, devid,
+				rcu_str_deref(device->name), path);
+		}
+
 		name = rcu_string_strdup(path, GFP_NOFS);
 		if (!name) {
 			mutex_unlock(&fs_devices->device_list_mutex);
@@ -3712,6 +3741,7 @@
 	int ret;
 	u64 num_devices;
 	unsigned seq;
+	bool reducing_integrity;
 
 	if (btrfs_fs_closing(fs_info) ||
 	    atomic_read(&fs_info->balance_pause_req) ||
@@ -3796,24 +3826,30 @@
 		     !(bctl->sys.target & allowed)) ||
 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
-		     !(bctl->meta.target & allowed))) {
-			if (bctl->flags & BTRFS_BALANCE_FORCE) {
-				btrfs_info(fs_info,
-				"balance: force reducing metadata integrity");
-			} else {
-				btrfs_err(fs_info,
-	"balance: reduces metadata integrity, use --force if you want this");
-				ret = -EINVAL;
-				goto out;
-			}
-		}
+		     !(bctl->meta.target & allowed)))
+			reducing_integrity = true;
+		else
+			reducing_integrity = false;
+
+		/* if we're not converting, the target field is uninitialized */
+		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
+		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+			bctl->data.target : fs_info->avail_data_alloc_bits;
 	} while (read_seqretry(&fs_info->profiles_lock, seq));
 
-	/* if we're not converting, the target field is uninitialized */
-	meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
-		bctl->meta.target : fs_info->avail_metadata_alloc_bits;
-	data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
-		bctl->data.target : fs_info->avail_data_alloc_bits;
+	if (reducing_integrity) {
+		if (bctl->flags & BTRFS_BALANCE_FORCE) {
+			btrfs_info(fs_info,
+				   "balance: force reducing metadata integrity");
+		} else {
+			btrfs_err(fs_info,
+	  "balance: reduces metadata integrity, use --force if you want this");
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
 	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
 		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
 		int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
@@ -4761,19 +4797,17 @@
 	/*
 	 * Use the number of data stripes to figure out how big this chunk
 	 * is really going to be in terms of logical address space,
-	 * and compare that answer with the max chunk size
+	 * and compare that answer with the max chunk size. If it's higher,
+	 * we try to reduce stripe_size.
 	 */
 	if (stripe_size * data_stripes > max_chunk_size) {
-		stripe_size = div_u64(max_chunk_size, data_stripes);
-
-		/* bump the answer up to a 16MB boundary */
-		stripe_size = round_up(stripe_size, SZ_16M);
-
 		/*
-		 * But don't go higher than the limits we found while searching
-		 * for free extents
+		 * Reduce stripe_size, round it up to a 16MB boundary again and
+		 * then use it, unless it ends up being even bigger than the
+		 * previous value we had already.
 		 */
-		stripe_size = min(devices_info[ndevs - 1].max_avail,
+		stripe_size = min(round_up(div_u64(max_chunk_size,
+						   data_stripes), SZ_16M),
 				  stripe_size);
 	}
 
@@ -7467,6 +7501,8 @@
 	struct btrfs_path *path;
 	struct btrfs_root *root = fs_info->dev_root;
 	struct btrfs_key key;
+	u64 prev_devid = 0;
+	u64 prev_dev_ext_end = 0;
 	int ret = 0;
 
 	key.objectid = 1;
@@ -7511,10 +7547,22 @@
 		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
 		physical_len = btrfs_dev_extent_length(leaf, dext);
 
+		/* Check if this dev extent overlaps with the previous one */
+		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
+			btrfs_err(fs_info,
+"dev extent devid %llu physical offset %llu overlaps with previous dev extent end %llu",
+				  devid, physical_offset, prev_dev_ext_end);
+			ret = -EUCLEAN;
+			goto out;
+		}
+
 		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
 					    physical_offset, physical_len);
 		if (ret < 0)
 			goto out;
+		prev_devid = devid;
+		prev_dev_ext_end = physical_offset + physical_len;
+
 		ret = btrfs_next_item(root, path);
 		if (ret < 0)
 			goto out;
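
The dev-extent verification above rejects an extent that starts before the previous extent on the same device ended, relying on the items being iterated in (devid, physical offset) order. The same check in a self-contained sketch:

#include <stdio.h>

struct ext { unsigned long long devid, start, len; };

static int check(const struct ext *e, int n)
{
	unsigned long long prev_devid = 0, prev_end = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* Same device and starts inside the previous extent. */
		if (e[i].devid == prev_devid && e[i].start < prev_end)
			return -1;	/* overlap: corrupt metadata */
		prev_devid = e[i].devid;
		prev_end = e[i].start + e[i].len;
	}
	return 0;
}

int main(void)
{
	struct ext good[] = { {1, 0, 100}, {1, 100, 50}, {2, 0, 10} };
	struct ext bad[]  = { {1, 0, 100}, {1, 90, 50} };

	printf("%d %d\n", check(good, 3), check(bad, 2));	/* 0 -1 */
	return 0;
}
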
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index ea78c3d..f141b45 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -11,6 +11,7 @@
 #include <linux/security.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/iversion.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "btrfs_inode.h"
 #include "transaction.h"
@@ -422,9 +423,15 @@
 {
 	const struct xattr *xattr;
 	struct btrfs_trans_handle *trans = fs_info;
+	unsigned int nofs_flag;
 	char *name;
 	int err = 0;
 
+	/*
+	 * We're holding a transaction handle, so use a NOFS memory allocation
+	 * context to avoid deadlock if reclaim happens.
+	 */
+	nofs_flag = memalloc_nofs_save();
 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
 		name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
 			       strlen(xattr->name) + 1, GFP_KERNEL);
@@ -440,6 +447,7 @@
 		if (err < 0)
 			break;
 	}
+	memalloc_nofs_restore(nofs_flag);
 	return err;
 }
 
diff --git a/fs/buffer.c b/fs/buffer.c
index 6f1ae3a..c083c4b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -200,6 +200,7 @@
 	struct buffer_head *head;
 	struct page *page;
 	int all_mapped = 1;
+	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
 
 	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
 	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
@@ -227,15 +228,15 @@
 	 * file io on the block device and getblk.  It gets dealt with
 	 * elsewhere, don't buffer_error if we had some unmapped buffers
 	 */
-	if (all_mapped) {
-		printk("__find_get_block_slow() failed. "
-			"block=%llu, b_blocknr=%llu\n",
-			(unsigned long long)block,
-			(unsigned long long)bh->b_blocknr);
-		printk("b_state=0x%08lx, b_size=%zu\n",
-			bh->b_state, bh->b_size);
-		printk("device %pg blocksize: %d\n", bdev,
-			1 << bd_inode->i_blkbits);
+	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
+	if (all_mapped && __ratelimit(&last_warned)) {
+		printk("__find_get_block_slow() failed. block=%llu, "
+		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
+		       "device %pg blocksize: %d\n",
+		       (unsigned long long)block,
+		       (unsigned long long)bh->b_blocknr,
+		       bh->b_state, bh->b_size, bdev,
+		       1 << bd_inode->i_blkbits);
 	}
 out_unlock:
 	spin_unlock(&bd_mapping->private_lock);
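
The buffer.c hunk above replaces unconditional printk()s with a one-per-second ratelimit via DEFINE_RATELIMIT_STATE(HZ, 1). A rough user-space model of that budget-per-window behaviour, using time() instead of jiffies and a hand-rolled struct rather than the kernel's:

#include <stdio.h>
#include <time.h>

struct ratelimit { time_t window; int burst, printed; };

static int ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (now != rs->window) {	/* new window: reset the budget */
		rs->window = now;
		rs->printed = 0;
	}
	return rs->printed++ < rs->burst;
}

int main(void)
{
	struct ratelimit rs = { .burst = 1 };
	int i, shown = 0;

	for (i = 0; i < 1000; i++)
		if (ratelimit_ok(&rs))
			shown++;

	/* Usually 1; can be 2 if the loop straddles a second boundary. */
	printf("shown=%d of 1000\n", shown);
	return 0;
}
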
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index dd7dfdd..c7542e8 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1030,6 +1030,8 @@
 	list_del_init(&ci->i_snap_realm_item);
 	ci->i_snap_realm_counter++;
 	ci->i_snap_realm = NULL;
+	if (realm->ino == ci->i_vino.ino)
+		realm->inode = NULL;
 	spin_unlock(&realm->inodes_with_caps_lock);
 	ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
 			    realm);
@@ -3566,7 +3568,6 @@
 			tcap->cap_id = t_cap_id;
 			tcap->seq = t_seq - 1;
 			tcap->issue_seq = t_seq - 1;
-			tcap->mseq = t_mseq;
 			tcap->issued |= issued;
 			tcap->implemented |= issued;
 			if (cap == ci->i_auth_cap)
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 041c27e..f74193d 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -616,7 +616,8 @@
 	     capsnap->size);
 
 	spin_lock(&mdsc->snap_flush_lock);
-	list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
+	if (list_empty(&ci->i_snap_flush_item))
+		list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
 	spin_unlock(&mdsc->snap_flush_lock);
 	return 1;  /* caller may want to ceph_flush_snaps */
 }
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5657b79..269471c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1458,18 +1458,26 @@
 }
 
 static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+		     bool malformed)
 {
 	int length;
-	struct cifs_readdata *rdata = mid->callback_data;
 
 	length = cifs_discard_remaining_data(server);
-	dequeue_mid(mid, rdata->result);
+	dequeue_mid(mid, malformed);
 	mid->resp_buf = server->smallbuf;
 	server->smallbuf = NULL;
 	return length;
 }
 
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+	struct cifs_readdata *rdata = mid->callback_data;
+
+	return __cifs_readv_discard(server, mid, rdata->result);
+}
+
 int
 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
@@ -1511,12 +1519,23 @@
 		return -1;
 	}
 
+	/* set up first two iov for signature check and to get credits */
+	rdata->iov[0].iov_base = buf;
+	rdata->iov[0].iov_len = 4;
+	rdata->iov[1].iov_base = buf + 4;
+	rdata->iov[1].iov_len = server->total_read - 4;
+	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
 	/* Was the SMB read successful? */
 	rdata->result = server->ops->map_error(buf, false);
 	if (rdata->result != 0) {
 		cifs_dbg(FYI, "%s: server returned error %d\n",
 			 __func__, rdata->result);
-		return cifs_readv_discard(server, mid);
+		/* normal error on read response */
+		return __cifs_readv_discard(server, mid, false);
 	}
 
 	/* Is there enough to get to the rest of the READ_RSP header? */
@@ -1560,14 +1579,6 @@
 		server->total_read += length;
 	}
 
-	/* set up first iov for signature check */
-	rdata->iov[0].iov_base = buf;
-	rdata->iov[0].iov_len = 4;
-	rdata->iov[1].iov_base = buf + 4;
-	rdata->iov[1].iov_len = server->total_read - 4;
-	cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
-		 rdata->iov[0].iov_base, server->total_read);
-
 	/* how much data is in the response? */
 #ifdef CONFIG_CIFS_SMB_DIRECT
 	use_rdma_mr = rdata->mr;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 52d71b6..a5ea742 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -50,6 +50,7 @@
 #include "cifs_unicode.h"
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
+#include "dns_resolve.h"
 #include "ntlmssp.h"
 #include "nterr.h"
 #include "rfc1002pdu.h"
@@ -318,6 +319,53 @@
 					const char *devname, bool is_smb3);
 
 /*
+ * Resolve the hostname and set the IP address in the TCP session. Useful for
+ * hostnames whose IP addresses may change at some point.
+ *
+ * This should be called with server->srv_mutex held.
+ */
+#ifdef CONFIG_CIFS_DFS_UPCALL
+static int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+	int rc;
+	int len;
+	char *unc, *ipaddr = NULL;
+
+	if (!server->hostname)
+		return -EINVAL;
+
+	len = strlen(server->hostname) + 3;
+
+	unc = kmalloc(len, GFP_KERNEL);
+	if (!unc) {
+		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
+		return -ENOMEM;
+	}
+	snprintf(unc, len, "\\\\%s", server->hostname);
+
+	rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
+	kfree(unc);
+
+	if (rc < 0) {
+		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
+			 __func__, server->hostname, rc);
+		return rc;
+	}
+
+	rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
+				  strlen(ipaddr));
+	kfree(ipaddr);
+
+	return !rc ? -1 : 0;
+}
+#else
+static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+	return 0;
+}
+#endif
+
+/*
  * cifs tcp session reconnection
  *
  * mark tcp session as reconnecting so temporarily locked
@@ -417,6 +465,11 @@
 			rc = generic_ip_connect(server);
 		if (rc) {
 			cifs_dbg(FYI, "reconnect error %d\n", rc);
+			rc = reconn_set_ipaddr(server);
+			if (rc) {
+				cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+					 __func__, rc);
+			}
 			mutex_unlock(&server->srv_mutex);
 			msleep(3000);
 		} else {
@@ -533,6 +586,21 @@
 	return false;
 }
 
+static inline bool
+zero_credits(struct TCP_Server_Info *server)
+{
+	int val;
+
+	spin_lock(&server->req_lock);
+	val = server->credits + server->echo_credits + server->oplock_credits;
+	if (server->in_flight == 0 && val == 0) {
+		spin_unlock(&server->req_lock);
+		return true;
+	}
+	spin_unlock(&server->req_lock);
+	return false;
+}
+
 static int
 cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
 {
@@ -545,6 +613,12 @@
 	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
 		try_to_freeze();
 
+		/* reconnect if no credits and no requests in flight */
+		if (zero_credits(server)) {
+			cifs_reconnect(server);
+			return -ECONNABORTED;
+		}
+
 		if (server_unresponsive(server))
 			return -ECONNABORTED;
 		if (cifs_rdma_enabled(server) && server->smbd_conn)
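
reconn_set_ipaddr() above rebuilds a "\\host" UNC string before re-resolving the hostname; the buffer is sized strlen(hostname) + 3 to cover the two leading backslashes plus the terminating NUL. A standalone sketch of just the string construction (the hostname is made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *make_unc(const char *host)
{
	size_t len = strlen(host) + 3;	/* "\\" prefix + host + '\0' */
	char *unc = malloc(len);

	if (!unc)
		return NULL;
	snprintf(unc, len, "\\\\%s", host);
	return unc;
}

int main(void)
{
	char *unc = make_unc("fileserver.example.com");

	if (!unc)
		return 1;
	printf("%s\n", unc);	/* prints \\fileserver.example.com */
	free(unc);
	return 0;
}
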
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8d41ca7..23db881 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1120,14 +1120,18 @@
 
 	/*
 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
-	 * and check it for zero before using.
+	 * and check it before using.
 	 */
 	max_buf = tcon->ses->server->maxBuf;
-	if (!max_buf) {
+	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
 		free_xid(xid);
 		return -EINVAL;
 	}
 
+	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+		     PAGE_SIZE);
+	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+			PAGE_SIZE);
 	max_num = (max_buf - sizeof(struct smb_hdr)) /
 						sizeof(LOCKING_ANDX_RANGE);
 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -1460,12 +1464,16 @@
 
 	/*
 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
-	 * and check it for zero before using.
+	 * and check it before using.
 	 */
 	max_buf = tcon->ses->server->maxBuf;
-	if (!max_buf)
+	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
 		return -EINVAL;
 
+	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+		     PAGE_SIZE);
+	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+			PAGE_SIZE);
 	max_num = (max_buf - sizeof(struct smb_hdr)) /
 						sizeof(LOCKING_ANDX_RANGE);
 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
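
The locking hunks above stop treating maxBuf as merely non-zero: they require room for a header plus at least one lock record and cap the usable payload at PAGE_SIZE before computing how many LOCKING_ANDX_RANGE entries fit. A simplified, compilable model of that clamp; HDR_SIZE and LOCK_SIZE are invented stand-ins for the real struct sizes, and the arithmetic is slightly simplified relative to the patch.

#include <stdio.h>

#define PAGE_SIZE 4096u
#define HDR_SIZE  32u	/* stand-in for sizeof(struct smb_hdr) */
#define LOCK_SIZE 24u	/* stand-in for sizeof(LOCKING_ANDX_RANGE) */

static int max_locks(unsigned int max_buf)
{
	unsigned int payload;

	if (max_buf < HDR_SIZE + LOCK_SIZE)
		return -22;	/* -EINVAL: not even room for one record */

	payload = max_buf - HDR_SIZE;
	if (payload > PAGE_SIZE)	/* cap, like min_t(..., PAGE_SIZE) */
		payload = PAGE_SIZE;
	return (int)(payload / LOCK_SIZE);
}

int main(void)
{
	printf("%d %d %d\n", max_locks(0), max_locks(200),
	       max_locks(1u << 20));	/* -22 7 170 */
	return 0;
}
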
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index e169e1a..3925a7b 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -655,7 +655,14 @@
 		/* scan and find it */
 		int i;
 		char *cur_ent;
-		char *end_of_smb = cfile->srch_inf.ntwrk_buf_start +
+		char *end_of_smb;
+
+		if (cfile->srch_inf.ntwrk_buf_start == NULL) {
+			cifs_dbg(VFS, "ntwrk_buf_start is NULL during readdir\n");
+			return -EIO;
+		}
+
+		end_of_smb = cfile->srch_inf.ntwrk_buf_start +
 			server->ops->calc_smb_size(
 					cfile->srch_inf.ntwrk_buf_start,
 					server);
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 4ed10dd..b204e84 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -122,12 +122,14 @@
 
 	/*
 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
-	 * and check it for zero before using.
+	 * and check it before using.
 	 */
 	max_buf = tcon->ses->server->maxBuf;
-	if (!max_buf)
+	if (max_buf < sizeof(struct smb2_lock_element))
 		return -EINVAL;
 
+	BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+	max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
 	max_num = max_buf / sizeof(struct smb2_lock_element);
 	buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
 	if (!buf)
@@ -264,6 +266,8 @@
 		return -EINVAL;
 	}
 
+	BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+	max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
 	max_num = max_buf / sizeof(struct smb2_lock_element);
 	buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
 	if (!buf) {
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 20a2d30..c3ae8c1 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -379,8 +379,8 @@
 	{STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
 	{STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
 	{STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
-	{STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
-	{STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
+	{STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
+	{STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
 	{STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
 	{STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
 	"STATUS_CTL_FILE_NOT_SUPPORTED"},
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 6a9c475..7b8b58f 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -648,6 +648,13 @@
 	if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
 		return false;
 
+	if (rsp->sync_hdr.CreditRequest) {
+		spin_lock(&server->req_lock);
+		server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
+		spin_unlock(&server->req_lock);
+		wake_up(&server->request_q);
+	}
+
 	if (rsp->StructureSize !=
 				smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
 		if (le16_to_cpu(rsp->StructureSize) == 44)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 812da3e..237d728 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -34,6 +34,7 @@
 #include "cifs_ioctl.h"
 #include "smbdirect.h"
 
+/* Change credits for different ops and return the total number of credits */
 static int
 change_conf(struct TCP_Server_Info *server)
 {
@@ -41,17 +42,15 @@
 	server->oplock_credits = server->echo_credits = 0;
 	switch (server->credits) {
 	case 0:
-		return -1;
+		return 0;
 	case 1:
 		server->echoes = false;
 		server->oplocks = false;
-		cifs_dbg(VFS, "disabling echoes and oplocks\n");
 		break;
 	case 2:
 		server->echoes = true;
 		server->oplocks = false;
 		server->echo_credits = 1;
-		cifs_dbg(FYI, "disabling oplocks\n");
 		break;
 	default:
 		server->echoes = true;
@@ -64,14 +63,15 @@
 		server->echo_credits = 1;
 	}
 	server->credits -= server->echo_credits + server->oplock_credits;
-	return 0;
+	return server->credits + server->echo_credits + server->oplock_credits;
 }
 
 static void
 smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
 		 const int optype)
 {
-	int *val, rc = 0;
+	int *val, rc = -1;
+
 	spin_lock(&server->req_lock);
 	val = server->ops->get_credits_field(server, optype);
 	*val += add;
@@ -95,8 +95,26 @@
 	}
 	spin_unlock(&server->req_lock);
 	wake_up(&server->request_q);
-	if (rc)
-		cifs_reconnect(server);
+
+	if (server->tcpStatus == CifsNeedReconnect)
+		return;
+
+	switch (rc) {
+	case -1:
+		/* change_conf hasn't been executed */
+		break;
+	case 0:
+		cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
+		break;
+	case 1:
+		cifs_dbg(VFS, "disabling echoes and oplocks\n");
+		break;
+	case 2:
+		cifs_dbg(FYI, "disabling oplocks\n");
+		break;
+	default:
+		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
+	}
 }
 
 static void
@@ -154,14 +172,14 @@
 
 			scredits = server->credits;
 			/* can deadlock with reopen */
-			if (scredits == 1) {
+			if (scredits <= 8) {
 				*num = SMB2_MAX_BUFFER_SIZE;
 				*credits = 0;
 				break;
 			}
 
-			/* leave one credit for a possible reopen */
-			scredits--;
+			/* leave some credits for reopen and other ops */
+			scredits -= 8;
 			*num = min_t(unsigned int, size,
 				     scredits * SMB2_MAX_BUFFER_SIZE);
 
@@ -2901,11 +2919,23 @@
 			server->ops->is_status_pending(buf, server, 0))
 		return -1;
 
-	rdata->result = server->ops->map_error(buf, false);
+	/* set up first two iov to get credits */
+	rdata->iov[0].iov_base = buf;
+	rdata->iov[0].iov_len = 4;
+	rdata->iov[1].iov_base = buf + 4;
+	rdata->iov[1].iov_len =
+		min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
+	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
+	rdata->result = server->ops->map_error(buf, true);
 	if (rdata->result != 0) {
 		cifs_dbg(FYI, "%s: server returned error %d\n",
 			 __func__, rdata->result);
-		dequeue_mid(mid, rdata->result);
+		/* normal error on read response */
+		dequeue_mid(mid, false);
 		return 0;
 	}
 
@@ -2978,14 +3008,6 @@
 		return 0;
 	}
 
-	/* set up first iov for signature check */
-	rdata->iov[0].iov_base = buf;
-	rdata->iov[0].iov_len = 4;
-	rdata->iov[1].iov_base = buf + 4;
-	rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
-	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
-		 rdata->iov[0].iov_base, server->vals->read_rsp_size);
-
 	length = rdata->copy_into_pages(server, rdata, &iter);
 
 	kfree(bvec);
@@ -3184,8 +3206,10 @@
 	}
 
 	/* TODO: add support for compounds containing READ. */
-	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
+	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
+		*num_mids = 1;
 		return receive_encrypted_read(server, &mids[0]);
+	}
 
 	return receive_encrypted_standard(server, mids, bufs, num_mids);
 }
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index f54d07b..a2d7017 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2243,10 +2243,12 @@
 {
 	int i;
 
-	cifs_small_buf_release(rqst->rq_iov[0].iov_base);
-	for (i = 1; i < rqst->rq_nvec; i++)
-		if (rqst->rq_iov[i].iov_base != smb2_padding)
-			kfree(rqst->rq_iov[i].iov_base);
+	if (rqst && rqst->rq_iov) {
+		cifs_small_buf_release(rqst->rq_iov[0].iov_base);
+		for (i = 1; i < rqst->rq_nvec; i++)
+			if (rqst->rq_iov[i].iov_base != smb2_padding)
+				kfree(rqst->rq_iov[i].iov_base);
+	}
 }
 
 int
@@ -2535,7 +2537,8 @@
 void
 SMB2_close_free(struct smb_rqst *rqst)
 {
-	cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+	if (rqst && rqst->rq_iov)
+		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
 }
 
 int
@@ -2685,7 +2688,8 @@
 void
 SMB2_query_info_free(struct smb_rqst *rqst)
 {
-	cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+	if (rqst && rqst->rq_iov)
+		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
 }
 
 static int
@@ -2814,9 +2818,10 @@
 {
 	struct TCP_Server_Info *server = mid->callback_data;
 	struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
-	unsigned int credits_received = 1;
+	unsigned int credits_received = 0;
 
-	if (mid->mid_state == MID_RESPONSE_RECEIVED)
+	if (mid->mid_state == MID_RESPONSE_RECEIVED
+	    || mid->mid_state == MID_RESPONSE_MALFORMED)
 		credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
 
 	DeleteMidQEntry(mid);
@@ -3073,7 +3078,7 @@
 	struct TCP_Server_Info *server = tcon->ses->server;
 	struct smb2_sync_hdr *shdr =
 				(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
-	unsigned int credits_received = 1;
+	unsigned int credits_received = 0;
 	struct smb_rqst rqst = { .rq_iov = rdata->iov,
 				 .rq_nvec = 2,
 				 .rq_pages = rdata->pages,
@@ -3112,6 +3117,9 @@
 		task_io_account_read(rdata->got_bytes);
 		cifs_stats_bytes_read(tcon, rdata->got_bytes);
 		break;
+	case MID_RESPONSE_MALFORMED:
+		credits_received = le16_to_cpu(shdr->CreditRequest);
+		/* fall through */
 	default:
 		if (rdata->result != -ENODATA)
 			rdata->result = -EIO;
@@ -3127,8 +3135,17 @@
 		rdata->mr = NULL;
 	}
 #endif
-	if (rdata->result)
+	if (rdata->result && rdata->result != -ENODATA) {
 		cifs_stats_fail_inc(tcon, SMB2_READ_HE);
+		trace_smb3_read_err(0 /* xid */,
+				    rdata->cfile->fid.persistent_fid,
+				    tcon->tid, tcon->ses->Suid, rdata->offset,
+				    rdata->bytes, rdata->result);
+	} else
+		trace_smb3_read_done(0 /* xid */,
+				     rdata->cfile->fid.persistent_fid,
+				     tcon->tid, tcon->ses->Suid,
+				     rdata->offset, rdata->got_bytes);
 
 	queue_work(cifsiod_wq, &rdata->work);
 	DeleteMidQEntry(mid);
@@ -3185,12 +3202,14 @@
 	if (rdata->credits) {
 		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
 						SMB2_MAX_BUFFER_SIZE));
-		shdr->CreditRequest = shdr->CreditCharge;
+		shdr->CreditRequest =
+			cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
 		spin_lock(&server->req_lock);
 		server->credits += rdata->credits -
 						le16_to_cpu(shdr->CreditCharge);
 		spin_unlock(&server->req_lock);
 		wake_up(&server->request_q);
+		rdata->credits = le16_to_cpu(shdr->CreditCharge);
 		flags |= CIFS_HAS_CREDITS;
 	}
 
@@ -3201,13 +3220,11 @@
 	if (rc) {
 		kref_put(&rdata->refcount, cifs_readdata_release);
 		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
-		trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid,
-				   io_parms.tcon->tid, io_parms.tcon->ses->Suid,
-				   io_parms.offset, io_parms.length);
-	} else
-		trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid,
-				   io_parms.tcon->tid, io_parms.tcon->ses->Suid,
-				   io_parms.offset, io_parms.length);
+		trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
+				    io_parms.tcon->tid,
+				    io_parms.tcon->ses->Suid,
+				    io_parms.offset, io_parms.length, rc);
+	}
 
 	cifs_small_buf_release(buf);
 	return rc;
@@ -3251,10 +3268,11 @@
 		if (rc != -ENODATA) {
 			cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
 			cifs_dbg(VFS, "Send error in read = %d\n", rc);
+			trace_smb3_read_err(xid, req->PersistentFileId,
+					    io_parms->tcon->tid, ses->Suid,
+					    io_parms->offset, io_parms->length,
+					    rc);
 		}
-		trace_smb3_read_err(rc, xid, req->PersistentFileId,
-				    io_parms->tcon->tid, ses->Suid,
-				    io_parms->offset, io_parms->length);
 		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
 		return rc == -ENODATA ? 0 : rc;
 	} else
@@ -3295,7 +3313,7 @@
 	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
 	unsigned int written;
 	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
-	unsigned int credits_received = 1;
+	unsigned int credits_received = 0;
 
 	switch (mid->mid_state) {
 	case MID_RESPONSE_RECEIVED:
@@ -3323,6 +3341,9 @@
 	case MID_RETRY_NEEDED:
 		wdata->result = -EAGAIN;
 		break;
+	case MID_RESPONSE_MALFORMED:
+		credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+		/* fall through */
 	default:
 		wdata->result = -EIO;
 		break;
@@ -3340,8 +3361,17 @@
 		wdata->mr = NULL;
 	}
 #endif
-	if (wdata->result)
+	if (wdata->result) {
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
+		trace_smb3_write_err(0 /* no xid */,
+				     wdata->cfile->fid.persistent_fid,
+				     tcon->tid, tcon->ses->Suid, wdata->offset,
+				     wdata->bytes, wdata->result);
+	} else
+		trace_smb3_write_done(0 /* no xid */,
+				      wdata->cfile->fid.persistent_fid,
+				      tcon->tid, tcon->ses->Suid,
+				      wdata->offset, wdata->bytes);
 
 	queue_work(cifsiod_wq, &wdata->work);
 	DeleteMidQEntry(mid);
@@ -3462,12 +3492,14 @@
 	if (wdata->credits) {
 		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
 						    SMB2_MAX_BUFFER_SIZE));
-		shdr->CreditRequest = shdr->CreditCharge;
+		shdr->CreditRequest =
+			cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
 		spin_lock(&server->req_lock);
 		server->credits += wdata->credits -
 						le16_to_cpu(shdr->CreditCharge);
 		spin_unlock(&server->req_lock);
 		wake_up(&server->request_q);
+		wdata->credits = le16_to_cpu(shdr->CreditCharge);
 		flags |= CIFS_HAS_CREDITS;
 	}
 
@@ -3481,10 +3513,7 @@
 				     wdata->bytes, rc);
 		kref_put(&wdata->refcount, release);
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
-	} else
-		trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
-				     tcon->tid, tcon->ses->Suid, wdata->offset,
-				     wdata->bytes);
+	}
 
 async_writev_out:
 	cifs_small_buf_release(req);
@@ -3710,8 +3739,8 @@
 		    rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
 			srch_inf->endOfSearch = true;
 			rc = 0;
-		}
-		cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+		} else
+			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
 		goto qdir_exit;
 	}
 
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 8fb7887..437257d 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -84,8 +84,8 @@
 
 #define NUMBER_OF_SMB2_COMMANDS	0x0013
 
-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
-#define MAX_SMB2_HDR_SIZE 0x00b0
+/* 52 transform hdr + 64 hdr + 88 create rsp */
+#define MAX_SMB2_HDR_SIZE 204
 
 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
 #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 333729c..66348b3d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -378,7 +378,7 @@
 	if (rc < 0 && rc != -EINTR)
 		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
 			 rc);
-	else
+	else if (rc > 0)
 		rc = 0;
 
 	return rc;
@@ -786,7 +786,8 @@
 	int i, j, rc = 0;
 	int timeout, optype;
 	struct mid_q_entry *midQ[MAX_COMPOUND];
-	unsigned int credits = 0;
+	bool cancelled_mid[MAX_COMPOUND] = {false};
+	unsigned int credits[MAX_COMPOUND] = {0};
 	char *buf;
 
 	timeout = flags & CIFS_TIMEOUT_MASK;
@@ -804,13 +805,31 @@
 		return -ENOENT;
 
 	/*
-	 * Ensure that we do not send more than 50 overlapping requests
-	 * to the same server. We may make this configurable later or
-	 * use ses->maxReq.
+	 * Ensure we obtain 1 credit per request in the compound chain.
+	 * This could be optimized further by waiting for all the credits
+	 * at once, but that wait can take long if we don't have enough
+	 * credits due to some heavy operations in progress or the server
+	 * not granting us much, so a fallback to the current approach
+	 * would be needed anyway.
 	 */
-	rc = wait_for_free_request(ses->server, timeout, optype);
-	if (rc)
-		return rc;
+	for (i = 0; i < num_rqst; i++) {
+		rc = wait_for_free_request(ses->server, timeout, optype);
+		if (rc) {
+			/*
+			 * We haven't sent an SMB packet to the server yet but
+			 * we already obtained credits for i requests in the
+			 * compound chain - need to return those credits
+			 * for future use. Note that we need to call add_credits
+			 * multiple times to match the way we obtained credits
+			 * in the first place and to account for in flight
+			 * requests correctly.
+			 */
+			for (j = 0; j < i; j++)
+				add_credits(ses->server, 1, optype);
+			return rc;
+		}
+		credits[i] = 1;
+	}
 
 	/*
 	 * Make sure that we sign in the same order that we send on this socket
@@ -826,8 +845,10 @@
 			for (j = 0; j < i; j++)
 				cifs_delete_mid(midQ[j]);
 			mutex_unlock(&ses->server->srv_mutex);
+
 			/* Update # of requests on wire to server */
-			add_credits(ses->server, 1, optype);
+			for (j = 0; j < num_rqst; j++)
+				add_credits(ses->server, credits[j], optype);
 			return PTR_ERR(midQ[i]);
 		}
 
@@ -874,19 +895,16 @@
 			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
 				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
 				midQ[i]->callback = DeleteMidQEntry;
-				spin_unlock(&GlobalMid_Lock);
-				add_credits(ses->server, 1, optype);
-				return rc;
+				cancelled_mid[i] = true;
 			}
 			spin_unlock(&GlobalMid_Lock);
 		}
 	}
 
 	for (i = 0; i < num_rqst; i++)
-		if (midQ[i]->resp_buf)
-			credits += ses->server->ops->get_credits(midQ[i]);
-	if (!credits)
-		credits = 1;
+		if (!cancelled_mid[i] && midQ[i]->resp_buf
+		    && (midQ[i]->mid_state == MID_RESPONSE_RECEIVED))
+			credits[i] = ses->server->ops->get_credits(midQ[i]);
 
 	for (i = 0; i < num_rqst; i++) {
 		if (rc < 0)
@@ -894,8 +912,9 @@
 
 		rc = cifs_sync_mid_result(midQ[i], ses->server);
 		if (rc != 0) {
-			add_credits(ses->server, credits, optype);
-			return rc;
+			/* mark this mid as cancelled to not free it below */
+			cancelled_mid[i] = true;
+			goto out;
 		}
 
 		if (!midQ[i]->resp_buf ||
@@ -942,9 +961,11 @@
 	 * This is prevented above by using a noop callback that will not
 	 * wake this thread except for the very last PDU.
 	 */
-	for (i = 0; i < num_rqst; i++)
-		cifs_delete_mid(midQ[i]);
-	add_credits(ses->server, credits, optype);
+	for (i = 0; i < num_rqst; i++) {
+		if (!cancelled_mid[i])
+			cifs_delete_mid(midQ[i]);
+		add_credits(ses->server, credits[i], optype);
+	}
 
 	return rc;
 }
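
The rewritten compound path above tracks one credit per request instead of a
single shared counter, rolling back on failure and skipping cancelled mids.
A compilable sketch of that accounting; acquire_credit() and release_credit()
are invented stand-ins for wait_for_free_request() and add_credits():

#include <stdbool.h>
#include <stdio.h>

#define MAX_COMPOUND 5

static int server_credits = 3;

/* models a successful (or failed) wait_for_free_request() */
static bool acquire_credit(void)
{
	if (server_credits <= 0)
		return false;
	server_credits--;
	return true;
}

/* models add_credits(server, 1, optype) */
static void release_credit(void)
{
	server_credits++;
}

static int send_compound(int num_rqst)
{
	unsigned int credits[MAX_COMPOUND] = {0};
	int i, j;

	/* obtain one credit per request; on failure return the i we hold */
	for (i = 0; i < num_rqst; i++) {
		if (!acquire_credit()) {
			for (j = 0; j < i; j++)
				release_credit();
			return -1;
		}
		credits[i] = 1;
	}

	/* ... send; each response would update its own credits[i] ... */

	for (i = 0; i < num_rqst; i++)
		for (j = 0; j < (int)credits[i]; j++)
			release_credit();
	return 0;
}

int main(void)
{
	int rc = send_compound(4);	/* needs 4 credits, only 3 available */

	printf("rc=%d credits=%d\n", rc, server_credits);	/* rc=-1 credits=3 */
	return 0;
}
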
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index cb49698..cc42e5e 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,4 +1,7 @@
 obj-$(CONFIG_FS_ENCRYPTION)	+= fscrypto.o
 
-fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
+ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/f2fs
+
+fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o fscrypt_ice.o
 fscrypto-$(CONFIG_BLOCK) += bio.o
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 0959044..93cd5e5 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -33,14 +33,17 @@
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
-		int ret = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
-
-		if (ret) {
-			WARN_ON_ONCE(1);
-			SetPageError(page);
-		} else if (done) {
+		if (fscrypt_using_hardware_encryption(page->mapping->host)) {
 			SetPageUptodate(page);
+		} else {
+			int ret = fscrypt_decrypt_page(page->mapping->host,
+				page, PAGE_SIZE, 0, page->index);
+			if (ret) {
+				WARN_ON_ONCE(1);
+				SetPageError(page);
+			} else if (done) {
+				SetPageUptodate(page);
+			}
 		}
 		if (done)
 			unlock_page(page);
diff --git a/fs/crypto/fscrypt_ice.c b/fs/crypto/fscrypt_ice.c
new file mode 100644
index 0000000..1de53d4
--- /dev/null
+++ b/fs/crypto/fscrypt_ice.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "fscrypt_ice.h"
+
+int fscrypt_using_hardware_encryption(const struct inode *inode)
+{
+	struct fscrypt_info *ci = inode->i_crypt_info;
+
+	return S_ISREG(inode->i_mode) && ci &&
+		ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+EXPORT_SYMBOL(fscrypt_using_hardware_encryption);
+
+/*
+ * Retrieves encryption key from the inode
+ */
+char *fscrypt_get_ice_encryption_key(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	if (!inode)
+		return NULL;
+
+	ci = inode->i_crypt_info;
+	if (!ci)
+		return NULL;
+
+	return &(ci->ci_raw_key[0]);
+}
+
+/*
+ * Retrieves encryption salt from the inode
+ */
+char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	if (!inode)
+		return NULL;
+
+	ci = inode->i_crypt_info;
+	if (!ci)
+		return NULL;
+
+	return &(ci->ci_raw_key[fscrypt_get_ice_encryption_key_size(inode)]);
+}
+
+/*
+ * returns true if the cipher mode in inode is AES XTS
+ */
+int fscrypt_is_aes_xts_cipher(const struct inode *inode)
+{
+	struct fscrypt_info *ci = inode->i_crypt_info;
+
+	if (!ci)
+		return 0;
+
+	return (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE);
+}
+
+/*
+ * returns true if encryption info in both inodes is equal
+ */
+bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
+					const struct inode *inode2)
+{
+	char *key1 = NULL;
+	char *key2 = NULL;
+	char *salt1 = NULL;
+	char *salt2 = NULL;
+
+	if (!inode1 || !inode2)
+		return false;
+
+	if (inode1 == inode2)
+		return true;
+
+	/*
+	 * neither inode belongs to ICE, so we don't care; they are equal
+	 * for our purposes
+	 */
+	if (!fscrypt_should_be_processed_by_ice(inode1) &&
+			!fscrypt_should_be_processed_by_ice(inode2))
+		return true;
+
+	/* one belongs to ice, the other does not -> not equal */
+	if (fscrypt_should_be_processed_by_ice(inode1) ^
+			fscrypt_should_be_processed_by_ice(inode2))
+		return false;
+
+	key1 = fscrypt_get_ice_encryption_key(inode1);
+	key2 = fscrypt_get_ice_encryption_key(inode2);
+	salt1 = fscrypt_get_ice_encryption_salt(inode1);
+	salt2 = fscrypt_get_ice_encryption_salt(inode2);
+
+	/* key and salt should not be null by this point */
+	if (!key1 || !key2 || !salt1 || !salt2 ||
+		(fscrypt_get_ice_encryption_key_size(inode1) !=
+		 fscrypt_get_ice_encryption_key_size(inode2)) ||
+		(fscrypt_get_ice_encryption_salt_size(inode1) !=
+		 fscrypt_get_ice_encryption_salt_size(inode2)))
+		return false;
+
+	if ((memcmp(key1, key2,
+			fscrypt_get_ice_encryption_key_size(inode1)) == 0) &&
+		(memcmp(salt1, salt2,
+			fscrypt_get_ice_encryption_salt_size(inode1)) == 0))
+		return true;
+
+	return false;
+}
+
+void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun)
+{
+	if (fscrypt_should_be_processed_by_ice(inode))
+		bio->bi_iter.bi_dun = dun;
+}
+EXPORT_SYMBOL(fscrypt_set_ice_dun);
+
+/*
+ * This function is used by filesystems when deciding whether to merge bios.
+ * The basic assumption is that, with inline encryption set, a single bio
+ * has to guarantee consecutive LBAs as well as a consecutive ino|pg->index.
+ */
+bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted)
+{
+	if (!bio)
+		return true;
+
+	/* if both of them are not encrypted, no further check is needed */
+	if (!bio_dun(bio) && !bio_encrypted)
+		return true;
+
+	/* ICE allows only consecutive iv_key stream. */
+	return bio_end_dun(bio) == dun;
+}
+EXPORT_SYMBOL(fscrypt_mergeable_bio);
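
fscrypt_mergeable_bio() above only permits a merge when the incoming DUN
continues the bio's IV stream. A standalone model of that contiguity rule;
struct toy_bio and its helpers are invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_bio {
	uint64_t dun;		/* DUN of the first page in the bio */
	unsigned int pages;	/* pages already in the bio */
};

/* mirrors bio_end_dun(): the DUN the next merged page would need */
static uint64_t toy_bio_end_dun(const struct toy_bio *bio)
{
	return bio->dun + bio->pages;
}

static bool toy_mergeable(const struct toy_bio *bio, uint64_t next_dun)
{
	/* ICE derives the IV stream from the DUN, so only a
	 * consecutive DUN may join this bio. */
	return toy_bio_end_dun(bio) == next_dun;
}

int main(void)
{
	struct toy_bio bio = { .dun = 100, .pages = 4 };

	printf("DUN 104 mergeable: %d\n", toy_mergeable(&bio, 104));	/* 1 */
	printf("DUN 106 mergeable: %d\n", toy_mergeable(&bio, 106));	/* 0 */
	return 0;
}
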
diff --git a/fs/crypto/fscrypt_ice.h b/fs/crypto/fscrypt_ice.h
new file mode 100644
index 0000000..84de010
--- /dev/null
+++ b/fs/crypto/fscrypt_ice.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _FSCRYPT_ICE_H
+#define _FSCRYPT_ICE_H
+
+#include <linux/blkdev.h>
+#include "fscrypt_private.h"
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
+{
+	if (!inode->i_sb->s_cop)
+		return false;
+	if (!inode->i_sb->s_cop->is_encrypted((struct inode *)inode))
+		return false;
+
+	return fscrypt_using_hardware_encryption(inode);
+}
+
+static inline int fscrypt_is_ice_capable(const struct super_block *sb)
+{
+	return blk_queue_inlinecrypt(bdev_get_queue(sb->s_bdev));
+}
+
+int fscrypt_is_aes_xts_cipher(const struct inode *inode);
+
+char *fscrypt_get_ice_encryption_key(const struct inode *inode);
+char *fscrypt_get_ice_encryption_salt(const struct inode *inode);
+
+bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
+					const struct inode *inode2);
+
+static inline size_t fscrypt_get_ice_encryption_key_size(
+					const struct inode *inode)
+{
+	return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+static inline size_t fscrypt_get_ice_encryption_salt_size(
+					const struct inode *inode)
+{
+	return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+#else
+static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
+{
+	return false;
+}
+
+static inline int fscrypt_is_ice_capable(const struct super_block *sb)
+{
+	return false;
+}
+
+static inline char *fscrypt_get_ice_encryption_key(const struct inode *inode)
+{
+	return NULL;
+}
+
+static inline char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
+{
+	return NULL;
+}
+
+static inline size_t fscrypt_get_ice_encryption_key_size(
+					const struct inode *inode)
+{
+	return 0;
+}
+
+static inline size_t fscrypt_get_ice_encryption_salt_size(
+					const struct inode *inode)
+{
+	return 0;
+}
+
+static inline int fscrypt_is_xts_cipher(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline bool fscrypt_is_ice_encryption_info_equal(
+					const struct inode *inode1,
+					const struct inode *inode2)
+{
+	return false;
+}
+
+static inline int fscrypt_is_aes_xts_cipher(const struct inode *inode)
+{
+	return 0;
+}
+
+#endif
+
+#endif	/* _FSCRYPT_ICE_H */
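
The accessors in this header slice ci_raw_key in half: the XTS key occupies
the first FS_AES_256_XTS_KEY_SIZE/2 bytes and the salt the second. A
compilable illustration of that layout (the toy_* names are invented):

#include <stdio.h>

#define FS_AES_256_XTS_KEY_SIZE 64

struct toy_crypt_info {
	char ci_raw_key[FS_AES_256_XTS_KEY_SIZE];
};

static char *toy_key(struct toy_crypt_info *ci)
{
	return &ci->ci_raw_key[0];
}

static char *toy_salt(struct toy_crypt_info *ci)
{
	return &ci->ci_raw_key[FS_AES_256_XTS_KEY_SIZE / 2];
}

int main(void)
{
	struct toy_crypt_info ci;

	printf("key at +%td, salt at +%td\n",
	       toy_key(&ci) - ci.ci_raw_key,
	       toy_salt(&ci) - ci.ci_raw_key);	/* key at +0, salt at +32 */
	return 0;
}
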
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 7424f85..f06a8c0 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -12,11 +12,23 @@
 #ifndef _FSCRYPT_PRIVATE_H
 #define _FSCRYPT_PRIVATE_H
 
+#ifndef __FS_HAS_ENCRYPTION
 #define __FS_HAS_ENCRYPTION 1
+#endif
 #include <linux/fscrypt.h>
 #include <crypto/hash.h>
+#include <linux/pfk.h>
 
 /* Encryption parameters */
+
+#define FS_AES_128_ECB_KEY_SIZE		16
+#define FS_AES_128_CBC_KEY_SIZE		16
+#define FS_AES_128_CTS_KEY_SIZE		16
+#define FS_AES_256_GCM_KEY_SIZE		32
+#define FS_AES_256_CBC_KEY_SIZE		32
+#define FS_AES_256_CTS_KEY_SIZE		32
+#define FS_AES_256_XTS_KEY_SIZE		64
+
 #define FS_KEY_DERIVATION_NONCE_SIZE	16
 
 /**
@@ -82,11 +94,13 @@
 	struct fscrypt_master_key *ci_master_key;
 
 	/* fields from the fscrypt_context */
+
 	u8 ci_data_mode;
 	u8 ci_filename_mode;
 	u8 ci_flags;
 	u8 ci_master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
 	u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+	u8 ci_raw_key[FS_MAX_KEY_SIZE];
 };
 
 typedef enum {
@@ -112,6 +126,10 @@
 	    filenames_mode == FS_ENCRYPTION_MODE_ADIANTUM)
 		return true;
 
+	if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE &&
+	    filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
+		return true;
+
 	return false;
 }
 
@@ -168,6 +186,7 @@
 	int ivsize;
 	bool logged_impl_name;
 	bool needs_essiv;
+	bool inline_encryption;
 };
 
 extern void __exit fscrypt_essiv_cleanup(void);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 1e11a68..33fd585 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -18,6 +18,7 @@
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
 #include "fscrypt_private.h"
+#include "fscrypt_ice.h"
 
 static struct crypto_shash *essiv_hash_tfm;
 
@@ -161,11 +162,20 @@
 		.keysize = 32,
 		.ivsize = 32,
 	},
+	[FS_ENCRYPTION_MODE_PRIVATE] = {
+		.friendly_name = "ice",
+		.cipher_str = "xts(aes)",
+		.keysize = 64,
+		.ivsize = 16,
+		.inline_encryption = true,
+	},
 };
 
 static struct fscrypt_mode *
 select_encryption_mode(const struct fscrypt_info *ci, const struct inode *inode)
 {
+	struct fscrypt_mode *mode = NULL;
+
 	if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) {
 		fscrypt_warn(inode->i_sb,
 			     "inode %lu uses unsupported encryption modes (contents mode %d, filenames mode %d)",
@@ -174,8 +184,19 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (S_ISREG(inode->i_mode))
-		return &available_modes[ci->ci_data_mode];
+	if (S_ISREG(inode->i_mode)) {
+		mode = &available_modes[ci->ci_data_mode];
+		if (IS_ERR(mode)) {
+			fscrypt_warn(inode->i_sb, "Invalid mode");
+			return ERR_PTR(-EINVAL);
+		}
+		if (mode->inline_encryption &&
+				!fscrypt_is_ice_capable(inode->i_sb)) {
+			fscrypt_warn(inode->i_sb, "ICE support not available");
+			return ERR_PTR(-EINVAL);
+		}
+		return mode;
+	}
 
 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
 		return &available_modes[ci->ci_filename_mode];
@@ -220,6 +241,9 @@
 			memcpy(derived_key, payload->raw, mode->keysize);
 			err = 0;
 		}
+	} else if (mode->inline_encryption) {
+		memcpy(derived_key, payload->raw, mode->keysize);
+		err = 0;
 	} else {
 		err = derive_key_aes(payload->raw, ctx, derived_key,
 				     mode->keysize);
@@ -495,12 +519,21 @@
 	if (ci->ci_master_key) {
 		put_master_key(ci->ci_master_key);
 	} else {
-		crypto_free_skcipher(ci->ci_ctfm);
-		crypto_free_cipher(ci->ci_essiv_tfm);
+		if (ci->ci_ctfm)
+			crypto_free_skcipher(ci->ci_ctfm);
+		if (ci->ci_essiv_tfm)
+			crypto_free_cipher(ci->ci_essiv_tfm);
 	}
+	memset(ci->ci_raw_key, 0, sizeof(ci->ci_raw_key));
 	kmem_cache_free(fscrypt_info_cachep, ci);
 }
 
+static int fscrypt_data_encryption_mode(struct inode *inode)
+{
+	return fscrypt_should_be_processed_by_ice(inode) ?
+		FS_ENCRYPTION_MODE_PRIVATE : FS_ENCRYPTION_MODE_AES_256_XTS;
+}
+
 int fscrypt_get_encryption_info(struct inode *inode)
 {
 	struct fscrypt_info *crypt_info;
@@ -524,7 +557,8 @@
 		/* Fake up a context for an unencrypted directory */
 		memset(&ctx, 0, sizeof(ctx));
 		ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
-		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+		ctx.contents_encryption_mode =
+			fscrypt_data_encryption_mode(inode);
 		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
 		memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
 	} else if (res != sizeof(ctx)) {
@@ -569,9 +603,13 @@
 	if (res)
 		goto out;
 
-	res = setup_crypto_transform(crypt_info, mode, raw_key, inode);
-	if (res)
-		goto out;
+	if (!mode->inline_encryption) {
+		res = setup_crypto_transform(crypt_info, mode, raw_key, inode);
+		if (res)
+			goto out;
+	} else {
+		memcpy(crypt_info->ci_raw_key, raw_key, mode->keysize);
+	}
 
 	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
 		crypt_info = NULL;
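
The keyinfo changes above split key setup: inline (ICE) modes copy the raw
master key through for the hardware, while software modes still derive a
per-file key. A simplified model of the split; derive_key() is only a
placeholder for derive_key_aes(), which really encrypts the master key with
the per-file nonce:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define KEYSIZE 64

static void derive_key(const char *master, char *out)
{
	/* placeholder derivation; the real code is AES-based */
	for (int i = 0; i < KEYSIZE; i++)
		out[i] = master[i] ^ 0x5a;
}

static void setup_key(bool inline_encryption, const char *master, char *out)
{
	if (inline_encryption)
		memcpy(out, master, KEYSIZE);	/* raw key goes to ICE */
	else
		derive_key(master, out);	/* software crypto path */
}

int main(void)
{
	char master[KEYSIZE] = "master", key[KEYSIZE];

	setup_key(true, master, key);
	printf("inline path keeps raw key: %d\n",
	       memcmp(key, master, KEYSIZE) == 0);	/* 1 */
	return 0;
}
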
diff --git a/fs/dax.c b/fs/dax.c
index 3a2682a..09fa706 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -229,8 +229,8 @@
  *
  * Must be called with the i_pages lock held.
  */
-static void *__get_unlocked_mapping_entry(struct address_space *mapping,
-		pgoff_t index, void ***slotp, bool (*wait_fn)(void))
+static void *get_unlocked_mapping_entry(struct address_space *mapping,
+		pgoff_t index, void ***slotp)
 {
 	void *entry, **slot;
 	struct wait_exceptional_entry_queue ewait;
@@ -240,8 +240,6 @@
 	ewait.wait.func = wake_exceptional_entry_func;
 
 	for (;;) {
-		bool revalidate;
-
 		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
 					  &slot);
 		if (!entry ||
@@ -256,30 +254,37 @@
 		prepare_to_wait_exclusive(wq, &ewait.wait,
 					  TASK_UNINTERRUPTIBLE);
 		xa_unlock_irq(&mapping->i_pages);
-		revalidate = wait_fn();
+		schedule();
 		finish_wait(wq, &ewait.wait);
 		xa_lock_irq(&mapping->i_pages);
-		if (revalidate) {
-			put_unlocked_mapping_entry(mapping, index, entry);
-			return ERR_PTR(-EAGAIN);
-		}
 	}
 }
 
-static bool entry_wait(void)
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages).
+ * After we call xa_unlock_irq(), we cannot touch the mapping's i_pages.
+ */
+static void wait_entry_unlocked(struct address_space *mapping, pgoff_t index,
+		void ***slotp, void *entry)
 {
-	schedule();
-	/*
-	 * Never return an ERR_PTR() from
-	 * __get_unlocked_mapping_entry(), just keep looping.
-	 */
-	return false;
-}
+	struct wait_exceptional_entry_queue ewait;
+	wait_queue_head_t *wq;
 
-static void *get_unlocked_mapping_entry(struct address_space *mapping,
-		pgoff_t index, void ***slotp)
-{
-	return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
+	init_wait(&ewait.wait);
+	ewait.wait.func = wake_exceptional_entry_func;
+
+	wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
+	/*
+	 * Unlike get_unlocked_mapping_entry(), there is no guarantee that
+	 * this path ever successfully retrieves an unlocked entry before an
+	 * inode dies. Perform a non-exclusive wait in case this path
+	 * never successfully performs its own wake up.
+	 */
+	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+	xa_unlock_irq(&mapping->i_pages);
+	schedule();
+	finish_wait(wq, &ewait.wait);
 }
 
 static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
@@ -398,19 +403,6 @@
 	return NULL;
 }
 
-static bool entry_wait_revalidate(void)
-{
-	rcu_read_unlock();
-	schedule();
-	rcu_read_lock();
-
-	/*
-	 * Tell __get_unlocked_mapping_entry() to take a break, we need
-	 * to revalidate page->mapping after dropping locks
-	 */
-	return true;
-}
-
 bool dax_lock_mapping_entry(struct page *page)
 {
 	pgoff_t index;
@@ -446,14 +438,15 @@
 		}
 		index = page->index;
 
-		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
-				entry_wait_revalidate);
+		entry = __radix_tree_lookup(&mapping->i_pages, index,
+						NULL, &slot);
 		if (!entry) {
 			xa_unlock_irq(&mapping->i_pages);
 			break;
-		} else if (IS_ERR(entry)) {
-			xa_unlock_irq(&mapping->i_pages);
-			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
+		} else if (slot_locked(mapping, slot)) {
+			rcu_read_unlock();
+			wait_entry_unlocked(mapping, index, &slot, entry);
+			rcu_read_lock();
 			continue;
 		}
 		lock_slot(mapping, slot);
diff --git a/fs/dcache.c b/fs/dcache.c
index 2e7e8d8..cb515f1 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1202,15 +1202,11 @@
  */
 void shrink_dcache_sb(struct super_block *sb)
 {
-	long freed;
-
 	do {
 		LIST_HEAD(dispose);
 
-		freed = list_lru_walk(&sb->s_dentry_lru,
+		list_lru_walk(&sb->s_dentry_lru,
 			dentry_lru_isolate_shrink, &dispose, 1024);
-
-		this_cpu_sub(nr_dentry_unused, freed);
 		shrink_dentry_list(&dispose);
 	} while (list_lru_count(&sb->s_dentry_lru) > 0);
 }
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 13b0135..41ef452 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -787,6 +787,13 @@
 	struct dentry *dentry = NULL, *trap;
 	struct name_snapshot old_name;
 
+	if (IS_ERR(old_dir))
+		return old_dir;
+	if (IS_ERR(new_dir))
+		return new_dir;
+	if (IS_ERR_OR_NULL(old_dentry))
+		return old_dentry;
+
 	trap = lock_rename(new_dir, old_dir);
 	/* Source or destination directories don't exist? */
 	if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1991460..1dcd800 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -37,6 +37,8 @@
 #include <linux/uio.h>
 #include <linux/atomic.h>
 #include <linux/prefetch.h>
+#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
+#include <linux/fscrypt.h>
 
 /*
  * How many user pages to map in one call to get_user_pages().  This determines
@@ -451,6 +453,23 @@
 	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
 }
 
+#ifdef CONFIG_PFK
+static bool is_inode_filesystem_type(const struct inode *inode,
+					const char *fs_type)
+{
+	if (!inode || !fs_type)
+		return false;
+
+	if (!inode->i_sb)
+		return false;
+
+	if (!inode->i_sb->s_type)
+		return false;
+
+	return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
+}
+#endif
+
 /*
  * In the AIO read case we speculatively dirty the pages before starting IO.
  * During IO completion, any of these pages which happen to have been written
@@ -473,7 +492,17 @@
 		bio_set_pages_dirty(bio);
 
 	dio->bio_disk = bio->bi_disk;
+#ifdef CONFIG_PFK
+	bio->bi_dio_inode = dio->inode;
 
+/* IV sector for security/pfe/pfk_fscrypt.c; f2fs keeps a copy in fs/f2fs/f2fs.h */
+#define PG_DUN_NEW(i, p)                                            \
+	(((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p) & 0xffffffff))
+
+	if (is_inode_filesystem_type(dio->inode, "f2fs"))
+		fscrypt_set_ice_dun(dio->inode, bio, PG_DUN_NEW(dio->inode,
+			(sdio->logical_offset_in_bio >> PAGE_SHIFT)));
+#endif
 	if (sdio->submit_io) {
 		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
 		dio->bio_cookie = BLK_QC_T_NONE;
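
PG_DUN_NEW() above packs the inode number into the high 32 bits of the DUN and
the page index into the low 32, giving ICE a per-file, per-page IV origin. A
worked example of the packing:

#include <stdint.h>
#include <stdio.h>

#define PG_DUN_NEW(ino, pg) \
	((((uint64_t)(ino) & 0xffffffff) << 32) | ((pg) & 0xffffffff))

int main(void)
{
	/* inode #18, byte offset 0x3000 -> page index 3 (4 KiB pages) */
	uint64_t dun = PG_DUN_NEW(18, 0x3000 >> 12);

	printf("dun = 0x%016llx\n",
	       (unsigned long long)dun);	/* 0x0000001200000003 */
	return 0;
}
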
@@ -485,6 +514,18 @@
 	sdio->logical_offset_in_bio = 0;
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio)
+{
+	struct inode *inode = NULL;
+
+	if (bio == NULL)
+		return NULL;
+#ifdef CONFIG_PFK
+	inode = bio->bi_dio_inode;
+#endif
+	return inode;
+}
+
 /*
  * Release any resources in case of a failure
  */
@@ -679,6 +720,7 @@
 	unsigned long fs_count;	/* Number of filesystem-sized blocks */
 	int create;
 	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
+	loff_t i_size;
 
 	/*
 	 * If there was a memory error and we've overwritten all the
@@ -708,8 +750,8 @@
 		 */
 		create = dio->op == REQ_OP_WRITE;
 		if (dio->flags & DIO_SKIP_HOLES) {
-			if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
-							i_blkbits))
+			i_size = i_size_read(dio->inode);
+			if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
 				create = 0;
 		}
 
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 562fa8c..47ee66d 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -292,6 +292,8 @@
 		flush_workqueue(ls->ls_callback_wq);
 }
 
+#define MAX_CB_QUEUE 25
+
 void dlm_callback_resume(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb, *safe;
@@ -302,15 +304,23 @@
 	if (!ls->ls_callback_wq)
 		return;
 
+more:
 	mutex_lock(&ls->ls_cb_mutex);
 	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
 		list_del_init(&lkb->lkb_cb_list);
 		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
 		count++;
+		if (count == MAX_CB_QUEUE)
+			break;
 	}
 	mutex_unlock(&ls->ls_cb_mutex);
 
 	if (count)
 		log_rinfo(ls, "dlm_callback_resume %d", count);
+	if (count == MAX_CB_QUEUE) {
+		count = 0;
+		cond_resched();
+		goto more;
+	}
 }
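
dlm_callback_resume() now drains the delayed list in batches of MAX_CB_QUEUE,
yielding between passes so a huge backlog cannot monopolize the CPU. The loop
shape, as a standalone sketch with the list and workqueue details elided:

#include <stdio.h>

#define MAX_CB_QUEUE 25

static int pending = 60;	/* stand-in for the ls_cb_delay list length */

static void yield_cpu(void)	/* models cond_resched() */
{
}

int main(void)
{
	int count, passes = 0;

	do {
		for (count = 0; pending > 0 && count < MAX_CB_QUEUE; count++)
			pending--;	/* models queue_work() per lkb */
		passes++;
		if (count == MAX_CB_QUEUE)
			yield_cpu();
	} while (count == MAX_CB_QUEUE);

	printf("drained in %d passes\n", passes);	/* 3: 25 + 25 + 10 */
	return 0;
}
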
 
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index cc91963..a928ba0 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1209,6 +1209,7 @@
 
 	if (rv < 0) {
 		log_error(ls, "create_lkb idr error %d", rv);
+		dlm_free_lkb(lkb);
 		return rv;
 	}
 
@@ -4179,6 +4180,7 @@
 			  (unsigned long long)lkb->lkb_recover_seq,
 			  ms->m_header.h_nodeid, ms->m_lkid);
 		error = -ENOENT;
+		dlm_put_lkb(lkb);
 		goto fail;
 	}
 
@@ -4232,6 +4234,7 @@
 			  lkb->lkb_id, lkb->lkb_remid,
 			  ms->m_header.h_nodeid, ms->m_lkid);
 		error = -ENOENT;
+		dlm_put_lkb(lkb);
 		goto fail;
 	}
 
@@ -5792,20 +5795,20 @@
 			goto out;
 		}
 	}
-
-	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
-	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
-	   lock and that lkb_astparam is the dlm_user_args structure. */
-
 	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
 			      fake_astfn, ua, fake_bastfn, &args);
-	lkb->lkb_flags |= DLM_IFL_USER;
-
 	if (error) {
+		kfree(ua->lksb.sb_lvbptr);
+		ua->lksb.sb_lvbptr = NULL;
+		kfree(ua);
 		__put_lkb(ls, lkb);
 		goto out;
 	}
 
+	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
+	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
+	   lock and that lkb_astparam is the dlm_user_args structure. */
+	lkb->lkb_flags |= DLM_IFL_USER;
 	error = request_lock(ls, lkb, name, namelen, &args);
 
 	switch (error) {
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 5ba94be..6a1529e 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -680,11 +680,11 @@
 	kfree(ls->ls_recover_buf);
  out_lkbidr:
 	idr_destroy(&ls->ls_lkbidr);
+ out_rsbtbl:
 	for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
 		if (ls->ls_remove_names[i])
 			kfree(ls->ls_remove_names[i]);
 	}
- out_rsbtbl:
 	vfree(ls->ls_rsbtbl);
  out_lsfree:
 	if (do_unreg)
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 8237701..d31b6c7 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -21,8 +21,13 @@
 	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 		spin_lock(&inode->i_lock);
+		/*
+		 * We must skip inodes in an unusual state. We could also skip
+		 * inodes without pages, but we deliberately don't when a
+		 * reschedule is needed, to avoid softlockups.
+		 */
 		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
-		    (inode->i_mapping->nrpages == 0)) {
+		    (inode->i_mapping->nrpages == 0 && !need_resched())) {
 			spin_unlock(&inode->i_lock);
 			continue;
 		}
@@ -30,6 +35,7 @@
 		spin_unlock(&inode->i_lock);
 		spin_unlock(&sb->s_inode_list_lock);
 
+		cond_resched();
 		invalidate_mapping_pages(inode->i_mapping, 0, -1);
 		iput(toput_inode);
 		toput_inode = inode;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 779b741..8b4ded9 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1155,7 +1155,7 @@
 	 * semantics). All the events that happen during that period of time are
 	 * chained in ep->ovflist and requeued later on.
 	 */
-	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
+	if (ep->ovflist != EP_UNACTIVE_PTR) {
 		if (epi->next == EP_UNACTIVE_PTR) {
 			epi->next = ep->ovflist;
 			ep->ovflist = epi;
diff --git a/fs/exec.c b/fs/exec.c
index c7e3417..77c03ce 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -929,7 +929,7 @@
 		bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
 		if (bytes < 0) {
 			ret = bytes;
-			goto out;
+			goto out_free;
 		}
 
 		if (bytes == 0)
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index a453cc8..dcf7ee9 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -107,10 +107,16 @@
 	  decrypted pages in the page cache.
 
 config EXT4_FS_ENCRYPTION
-	bool
-	default y
+	bool "Ext4 FS Encryption"
+	default n
 	depends on EXT4_ENCRYPTION
 
+config EXT4_FS_ICE_ENCRYPTION
+	bool "Ext4 Encryption with ICE support"
+	default n
+	depends on EXT4_FS_ENCRYPTION
+	depends on PFK
+
 config EXT4_DEBUG
 	bool "EXT4 debugging support"
 	depends on EXT4_FS
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 5cfb1e2..032cf9b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2459,8 +2459,19 @@
 #define FALL_BACK_TO_NONDELALLOC 1
 #define CONVERT_INLINE_DATA	 2
 
-extern struct inode *ext4_iget(struct super_block *, unsigned long);
-extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
+typedef enum {
+	EXT4_IGET_NORMAL =	0,
+	EXT4_IGET_SPECIAL =	0x0001, /* OK to iget a system inode */
+	EXT4_IGET_HANDLE =	0x0002	/* Inode # is from a handle */
+} ext4_iget_flags;
+
+extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+				 ext4_iget_flags flags, const char *function,
+				 unsigned int line);
+
+#define ext4_iget(sb, ino, flags) \
+	__ext4_iget((sb), (ino), (flags), __func__, __LINE__)
+
 extern int  ext4_write_inode(struct inode *, struct writeback_control *);
 extern int  ext4_setattr(struct dentry *, struct iattr *);
 extern int  ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
@@ -2542,6 +2553,8 @@
 extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
 
 /* super.c */
+extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
+					 sector_t block, int op_flags);
 extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
 extern int ext4_calculate_overhead(struct super_block *sb);
 extern void ext4_superblock_csum_set(struct super_block *sb);
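
The __ext4_iget() flags above let callers say how strict inode-number
validation should be. A standalone sketch of how the three cases differ; the
descriptions paraphrase the validation added in fs/ext4/inode.c below:

#include <stdio.h>

typedef enum {
	EXT4_IGET_NORMAL  = 0,
	EXT4_IGET_SPECIAL = 0x0001,	/* OK to iget a system inode */
	EXT4_IGET_HANDLE  = 0x0002	/* inode # came from an NFS handle */
} ext4_iget_flags;

static const char *describe(ext4_iget_flags flags)
{
	if (flags & EXT4_IGET_HANDLE)
		return "handle lookup: bad # means -ESTALE, not fs corruption";
	if (flags & EXT4_IGET_SPECIAL)
		return "system inode: numbers below EXT4_FIRST_INO allowed";
	return "normal lookup: reserved inode numbers rejected";
}

int main(void)
{
	printf("%s\n", describe(EXT4_IGET_HANDLE));
	return 0;
}
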
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 26a7fe5..5508baa 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -159,6 +159,9 @@
 			ret = err;
 	}
 out:
+	err = file_check_and_advance_wb_err(file);
+	if (ret == 0)
+		ret = err;
 	trace_ext4_sync_file_exit(inode, ret);
 	return ret;
 }
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2addcb8..091a18a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1225,7 +1225,7 @@
 	if (!ext4_test_bit(bit, bitmap_bh->b_data))
 		goto bad_orphan;
 
-	inode = ext4_iget(sb, ino);
+	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 1463f5c..8a7394c 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -719,8 +719,11 @@
 
 	if (!PageUptodate(page)) {
 		ret = ext4_read_inline_page(inode, page);
-		if (ret < 0)
+		if (ret < 0) {
+			unlock_page(page);
+			put_page(page);
 			goto out_up_read;
+		}
 	}
 
 	ret = 1;
@@ -1901,12 +1904,12 @@
 	physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
 	physical += offsetof(struct ext4_inode, i_block);
 
-	if (physical)
-		error = fiemap_fill_next_extent(fieinfo, start, physical,
-						inline_len, flags);
 	brelse(iloc.bh);
 out:
 	up_read(&EXT4_I(inode)->xattr_sem);
+	if (physical)
+		error = fiemap_fill_next_extent(fieinfo, start, physical,
+						inline_len, flags);
 	return (error < 0 ? error : 0);
 }
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index de6fb66..7292718 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1219,7 +1219,8 @@
 			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++ = bh;
 			decrypt = ext4_encrypted_inode(inode) &&
-				S_ISREG(inode->i_mode);
+				S_ISREG(inode->i_mode) &&
+				!fscrypt_using_hardware_encryption(inode);
 		}
 	}
 	/*
@@ -2761,7 +2762,8 @@
 		 * We may need to convert up to one extent per block in
 		 * the page and we may dirty the inode.
 		 */
-		rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
+		rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
+						PAGE_SIZE >> inode->i_blkbits);
 	}
 
 	/*
@@ -3764,9 +3766,14 @@
 		get_block_func = ext4_dio_get_block_unwritten_async;
 		dio_flags = DIO_LOCKING;
 	}
-	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
-				   get_block_func, ext4_end_io_dio, NULL,
-				   dio_flags);
+#if defined(CONFIG_EXT4_FS_ENCRYPTION)
+	WARN_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+		&& !fscrypt_using_hardware_encryption(inode));
+#endif
+	ret = __blockdev_direct_IO(iocb, inode,
+				   inode->i_sb->s_bdev, iter,
+				   get_block_func,
+				   ext4_end_io_dio, NULL, dio_flags);
 
 	if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
 						EXT4_STATE_DIO_UNWRITTEN)) {
@@ -3873,8 +3880,9 @@
 	ssize_t ret;
 	int rw = iov_iter_rw(iter);
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+#if defined(CONFIG_EXT4_FS_ENCRYPTION)
+	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+		&& !fscrypt_using_hardware_encryption(inode))
 		return 0;
 #endif
 
@@ -4089,7 +4097,8 @@
 		if (!buffer_uptodate(bh))
 			goto unlock;
 		if (S_ISREG(inode->i_mode) &&
-		    ext4_encrypted_inode(inode)) {
+				ext4_encrypted_inode(inode) &&
+				!fscrypt_using_hardware_encryption(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!fscrypt_has_encryption_key(inode));
 			BUG_ON(blocksize != PAGE_SIZE);
@@ -4848,7 +4857,9 @@
 		return inode_peek_iversion(inode);
 }
 
-struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+			  ext4_iget_flags flags, const char *function,
+			  unsigned int line)
 {
 	struct ext4_iloc iloc;
 	struct ext4_inode *raw_inode;
@@ -4862,6 +4873,18 @@
 	gid_t i_gid;
 	projid_t i_projid;
 
+	if ((!(flags & EXT4_IGET_SPECIAL) &&
+	     (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
+	    (ino < EXT4_ROOT_INO) ||
+	    (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
+		if (flags & EXT4_IGET_HANDLE)
+			return ERR_PTR(-ESTALE);
+		__ext4_error(sb, function, line,
+			     "inode #%lu: comm %s: iget: illegal inode #",
+			     ino, current->comm);
+		return ERR_PTR(-EFSCORRUPTED);
+	}
+
 	inode = iget_locked(sb, ino);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
@@ -4877,18 +4900,26 @@
 	raw_inode = ext4_raw_inode(&iloc);
 
 	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
-		EXT4_ERROR_INODE(inode, "root inode unallocated");
+		ext4_error_inode(inode, function, line, 0,
+				 "iget: root inode unallocated");
 		ret = -EFSCORRUPTED;
 		goto bad_inode;
 	}
 
+	if ((flags & EXT4_IGET_HANDLE) &&
+	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
+		ret = -ESTALE;
+		goto bad_inode;
+	}
+
 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
 			EXT4_INODE_SIZE(inode->i_sb) ||
 		    (ei->i_extra_isize & 3)) {
-			EXT4_ERROR_INODE(inode,
-					 "bad extra_isize %u (inode size %u)",
+			ext4_error_inode(inode, function, line, 0,
+					 "iget: bad extra_isize %u "
+					 "(inode size %u)",
 					 ei->i_extra_isize,
 					 EXT4_INODE_SIZE(inode->i_sb));
 			ret = -EFSCORRUPTED;
@@ -4910,7 +4941,8 @@
 	}
 
 	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
-		EXT4_ERROR_INODE(inode, "checksum invalid");
+		ext4_error_inode(inode, function, line, 0,
+				 "iget: checksum invalid");
 		ret = -EFSBADCRC;
 		goto bad_inode;
 	}
@@ -4967,7 +4999,8 @@
 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
 	inode->i_size = ext4_isize(sb, raw_inode);
 	if ((size = i_size_read(inode)) < 0) {
-		EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
+		ext4_error_inode(inode, function, line, 0,
+				 "iget: bad i_size value: %lld", size);
 		ret = -EFSCORRUPTED;
 		goto bad_inode;
 	}
@@ -5043,7 +5076,8 @@
 	ret = 0;
 	if (ei->i_file_acl &&
 	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
-		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
+		ext4_error_inode(inode, function, line, 0,
+				 "iget: bad extended attribute block %llu",
 				 ei->i_file_acl);
 		ret = -EFSCORRUPTED;
 		goto bad_inode;
@@ -5071,8 +5105,9 @@
 	} else if (S_ISLNK(inode->i_mode)) {
 		/* VFS does not allow setting these so must be corruption */
 		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
-			EXT4_ERROR_INODE(inode,
-			  "immutable or append flags not allowed on symlinks");
+			ext4_error_inode(inode, function, line, 0,
+					 "iget: immutable or append flags "
+					 "not allowed on symlinks");
 			ret = -EFSCORRUPTED;
 			goto bad_inode;
 		}
@@ -5102,7 +5137,8 @@
 		make_bad_inode(inode);
 	} else {
 		ret = -EFSCORRUPTED;
-		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
+		ext4_error_inode(inode, function, line, 0,
+				 "iget: bogus i_mode (%o)", inode->i_mode);
 		goto bad_inode;
 	}
 	brelse(iloc.bh);
@@ -5116,13 +5152,6 @@
 	return ERR_PTR(ret);
 }
 
-struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
-{
-	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
-		return ERR_PTR(-EFSCORRUPTED);
-	return ext4_iget(sb, ino);
-}
-
 static int ext4_inode_blocks_set(handle_t *handle,
 				struct ext4_inode *raw_inode,
 				struct ext4_inode_info *ei)
@@ -5411,9 +5440,13 @@
 {
 	int err;
 
-	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
+	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
+	    sb_rdonly(inode->i_sb))
 		return 0;
 
+	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+		return -EIO;
+
 	if (EXT4_SB(inode->i_sb)->s_journal) {
 		if (ext4_journal_current_handle()) {
 			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
@@ -5429,7 +5462,8 @@
 		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
 			return 0;
 
-		err = ext4_force_commit(inode->i_sb);
+		err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
+						EXT4_I(inode)->i_sync_tid);
 	} else {
 		struct ext4_iloc iloc;
 
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0edee31..d37dafa 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -125,7 +125,7 @@
 	    !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
+	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
 	if (IS_ERR(inode_bl))
 		return PTR_ERR(inode_bl);
 	ei_bl = EXT4_I(inode_bl);
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 61a9d19..a98bfca 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -116,9 +116,9 @@
 	int i, retval = 0;
 	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 
-	bh = sb_bread(inode->i_sb, pblock);
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	i_data = (__le32 *)bh->b_data;
 	for (i = 0; i < max_entries; i++) {
@@ -145,9 +145,9 @@
 	int i, retval = 0;
 	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 
-	bh = sb_bread(inode->i_sb, pblock);
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	i_data = (__le32 *)bh->b_data;
 	for (i = 0; i < max_entries; i++) {
@@ -175,9 +175,9 @@
 	int i, retval = 0;
 	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 
-	bh = sb_bread(inode->i_sb, pblock);
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	i_data = (__le32 *)bh->b_data;
 	for (i = 0; i < max_entries; i++) {
@@ -224,9 +224,9 @@
 	struct buffer_head *bh;
 	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 
-	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	tmp_idata = (__le32 *)bh->b_data;
 	for (i = 0; i < max_entries; i++) {
@@ -254,9 +254,9 @@
 	struct buffer_head *bh;
 	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 
-	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	tmp_idata = (__le32 *)bh->b_data;
 	for (i = 0; i < max_entries; i++) {
@@ -382,9 +382,9 @@
 	struct ext4_extent_header *eh;
 
 	block = ext4_idx_pblock(ix);
-	bh = sb_bread(inode->i_sb, block);
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, block, 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	eh = (struct ext4_extent_header *)bh->b_data;
 	if (eh->eh_depth != 0) {
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index ffa2575..4f8de2b 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1571,7 +1571,7 @@
 					 dentry);
 			return ERR_PTR(-EFSCORRUPTED);
 		}
-		inode = ext4_iget_normal(dir->i_sb, ino);
+		inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL);
 		if (inode == ERR_PTR(-ESTALE)) {
 			EXT4_ERROR_INODE(dir,
 					 "deleted inode referenced: %u",
@@ -1613,7 +1613,7 @@
 		return ERR_PTR(-EFSCORRUPTED);
 	}
 
-	return d_obtain_alias(ext4_iget_normal(child->d_sb, ino));
+	return d_obtain_alias(ext4_iget(child->d_sb, ino, EXT4_IGET_NORMAL));
 }
 
 /*
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index db75901..440dcee 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -482,8 +482,10 @@
 		gfp_t gfp_flags = GFP_NOFS;
 
 	retry_encrypt:
-		data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
-						page->index, gfp_flags);
+		if (!fscrypt_using_hardware_encryption(inode))
+			data_page = fscrypt_encrypt_page(inode,
+					page, PAGE_SIZE, 0,
+					page->index, gfp_flags);
 		if (IS_ERR(data_page)) {
 			ret = PTR_ERR(data_page);
 			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index aa1b9e1..8e5947f 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -280,6 +280,7 @@
 		}
 		if (bio == NULL) {
 			struct fscrypt_ctx *ctx = NULL;
+			unsigned int flags = 0;
 
 			if (ext4_encrypted_inode(inode) &&
 			    S_ISREG(inode->i_mode)) {
@@ -298,8 +299,9 @@
 			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
 			bio->bi_end_io = mpage_end_io;
 			bio->bi_private = ctx;
-			bio_set_op_attrs(bio, REQ_OP_READ,
-						is_readahead ? REQ_RAHEAD : 0);
+			if (is_readahead)
+				flags = flags | REQ_RAHEAD;
+			bio_set_op_attrs(bio, REQ_OP_READ, flags);
 		}
 
 		length = first_hole << blkbits;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index a5efee3..48421de 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -127,10 +127,12 @@
 	else if (free_blocks_count < 0)
 		ext4_warning(sb, "Bad blocks count %u",
 			     input->blocks_count);
-	else if (!(bh = sb_bread(sb, end - 1)))
+	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
+		err = PTR_ERR(bh);
+		bh = NULL;
 		ext4_warning(sb, "Cannot read last block (%llu)",
 			     end - 1);
-	else if (outside(input->block_bitmap, start, end))
+	} else if (outside(input->block_bitmap, start, end))
 		ext4_warning(sb, "Block bitmap not in group (block %llu)",
 			     (unsigned long long)input->block_bitmap);
 	else if (outside(input->inode_bitmap, start, end))
@@ -781,11 +783,11 @@
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
 	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
-	struct buffer_head **o_group_desc, **n_group_desc;
-	struct buffer_head *dind;
-	struct buffer_head *gdb_bh;
+	struct buffer_head **o_group_desc, **n_group_desc = NULL;
+	struct buffer_head *dind = NULL;
+	struct buffer_head *gdb_bh = NULL;
 	int gdbackups;
-	struct ext4_iloc iloc;
+	struct ext4_iloc iloc = { .bh = NULL };
 	__le32 *data;
 	int err;
 
@@ -794,21 +796,22 @@
 		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
 		       gdb_num);
 
-	gdb_bh = sb_bread(sb, gdblock);
-	if (!gdb_bh)
-		return -EIO;
+	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
+	if (IS_ERR(gdb_bh))
+		return PTR_ERR(gdb_bh);
 
 	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
 	if (gdbackups < 0) {
 		err = gdbackups;
-		goto exit_bh;
+		goto errout;
 	}
 
 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
-	dind = sb_bread(sb, le32_to_cpu(*data));
-	if (!dind) {
-		err = -EIO;
-		goto exit_bh;
+	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
+	if (IS_ERR(dind)) {
+		err = PTR_ERR(dind);
+		dind = NULL;
+		goto errout;
 	}
 
 	data = (__le32 *)dind->b_data;
@@ -816,18 +819,18 @@
 		ext4_warning(sb, "new group %u GDT block %llu not reserved",
 			     group, gdblock);
 		err = -EINVAL;
-		goto exit_dind;
+		goto errout;
 	}
 
 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
 	if (unlikely(err))
-		goto exit_dind;
+		goto errout;
 
 	BUFFER_TRACE(gdb_bh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, gdb_bh);
 	if (unlikely(err))
-		goto exit_dind;
+		goto errout;
 
 	BUFFER_TRACE(dind, "get_write_access");
 	err = ext4_journal_get_write_access(handle, dind);
@@ -837,7 +840,7 @@
 	/* ext4_reserve_inode_write() gets a reference on the iloc */
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
 	if (unlikely(err))
-		goto exit_dind;
+		goto errout;
 
 	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
 				     sizeof(struct buffer_head *),
@@ -846,7 +849,7 @@
 		err = -ENOMEM;
 		ext4_warning(sb, "not enough memory for %lu groups",
 			     gdb_num + 1);
-		goto exit_inode;
+		goto errout;
 	}
 
 	/*
@@ -862,7 +865,7 @@
 	err = ext4_handle_dirty_metadata(handle, NULL, dind);
 	if (unlikely(err)) {
 		ext4_std_error(sb, err);
-		goto exit_inode;
+		goto errout;
 	}
 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
 			   (9 - EXT4_SB(sb)->s_cluster_bits);
@@ -871,8 +874,7 @@
 	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
 	if (unlikely(err)) {
 		ext4_std_error(sb, err);
-		iloc.bh = NULL;
-		goto exit_inode;
+		goto errout;
 	}
 	brelse(dind);
 
@@ -888,15 +890,11 @@
 	err = ext4_handle_dirty_super(handle, sb);
 	if (err)
 		ext4_std_error(sb, err);
-
 	return err;
-
-exit_inode:
+errout:
 	kvfree(n_group_desc);
 	brelse(iloc.bh);
-exit_dind:
 	brelse(dind);
-exit_bh:
 	brelse(gdb_bh);
 
 	ext4_debug("leaving with error %d\n", err);
@@ -916,9 +914,9 @@
 
 	gdblock = ext4_meta_bg_first_block_no(sb, group) +
 		   ext4_bg_has_super(sb, group);
-	gdb_bh = sb_bread(sb, gdblock);
-	if (!gdb_bh)
-		return -EIO;
+	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
+	if (IS_ERR(gdb_bh))
+		return PTR_ERR(gdb_bh);
 	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
 				     sizeof(struct buffer_head *),
 				     GFP_NOFS);
@@ -975,9 +973,10 @@
 		return -ENOMEM;
 
 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
-	dind = sb_bread(sb, le32_to_cpu(*data));
-	if (!dind) {
-		err = -EIO;
+	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
+	if (IS_ERR(dind)) {
+		err = PTR_ERR(dind);
+		dind = NULL;
 		goto exit_free;
 	}
 
@@ -996,9 +995,10 @@
 			err = -EINVAL;
 			goto exit_bh;
 		}
-		primary[res] = sb_bread(sb, blk);
-		if (!primary[res]) {
-			err = -EIO;
+		primary[res] = ext4_sb_bread(sb, blk, 0);
+		if (IS_ERR(primary[res])) {
+			err = PTR_ERR(primary[res]);
+			primary[res] = NULL;
 			goto exit_bh;
 		}
 		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
@@ -1631,13 +1631,13 @@
 	}
 
 	if (reserved_gdb || gdb_off == 0) {
-		if (ext4_has_feature_resize_inode(sb) ||
+		if (!ext4_has_feature_resize_inode(sb) ||
 		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
 			ext4_warning(sb,
 				     "No reserved GDT blocks, can't resize");
 			return -EPERM;
 		}
-		inode = ext4_iget(sb, EXT4_RESIZE_INO);
+		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
 		if (IS_ERR(inode)) {
 			ext4_warning(sb, "Error opening resize inode");
 			return PTR_ERR(inode);
@@ -1965,7 +1965,8 @@
 		}
 
 		if (!resize_inode)
-			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
+			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
+						 EXT4_IGET_SPECIAL);
 		if (IS_ERR(resize_inode)) {
 			ext4_warning(sb, "Error opening resize inode");
 			return PTR_ERR(resize_inode);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8a149df..f266ab5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -140,6 +140,29 @@
 MODULE_ALIAS("ext3");
 #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
 
+/*
+ * This works like sb_bread() except it uses ERR_PTR for error
+ * returns.  Currently with sb_bread it's impossible to distinguish
+ * between ENOMEM and EIO situations (since both result in a NULL
+ * return).
+ */
+struct buffer_head *
+ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
+{
+	struct buffer_head *bh = sb_getblk(sb, block);
+
+	if (bh == NULL)
+		return ERR_PTR(-ENOMEM);
+	if (buffer_uptodate(bh))
+		return bh;
+	ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
+	wait_on_buffer(bh);
+	if (buffer_uptodate(bh))
+		return bh;
+	put_bh(bh);
+	return ERR_PTR(-EIO);
+}
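
Callers of ext4_sb_bread() switch from NULL checks to the IS_ERR()/PTR_ERR()
convention, as the conversions throughout this patch show. A userspace model
of that convention, with toy stand-ins for the kernel's ERR_PTR helpers:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel ERR_PTR helpers */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

/* models ext4_sb_bread(): -ENOMEM and -EIO are now distinct */
static void *toy_sb_bread(int fail)
{
	static char buf[4096];

	if (fail == 1)
		return ERR_PTR(-ENOMEM);
	if (fail == 2)
		return ERR_PTR(-EIO);
	return buf;
}

int main(void)
{
	void *bh = toy_sb_bread(2);

	if (IS_ERR(bh))
		printf("read failed: %ld\n", PTR_ERR(bh));	/* -5 (EIO) */
	return 0;
}
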
+
 static int ext4_verify_csum_type(struct super_block *sb,
 				 struct ext4_super_block *es)
 {
@@ -1150,20 +1173,11 @@
 {
 	struct inode *inode;
 
-	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
-		return ERR_PTR(-ESTALE);
-	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
-		return ERR_PTR(-ESTALE);
-
-	/* iget isn't really right if the inode is currently unallocated!!
-	 *
-	 * ext4_read_inode will return a bad_inode if the inode had been
-	 * deleted, so we should be safe.
-	 *
+	/*
 	 * Currently we don't know the generation for parent directory, so
 	 * a generation of 0 means "accept any"
 	 */
-	inode = ext4_iget_normal(sb, ino);
+	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
 	if (generation && inode->i_generation != generation) {
@@ -1188,6 +1202,16 @@
 				    ext4_nfs_get_inode);
 }
 
+static int ext4_nfs_commit_metadata(struct inode *inode)
+{
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_ALL
+	};
+
+	trace_ext4_nfs_commit_metadata(inode);
+	return ext4_write_inode(inode, &wbc);
+}
+
 /*
  * Try to release metadata pages (indirect blocks, directories) which are
  * mapped via the block device.  Since these pages could have journal heads
@@ -1304,6 +1328,11 @@
 	return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
 }
 
+static inline bool ext4_is_encrypted(struct inode *inode)
+{
+	return ext4_encrypted_inode(inode);
+}
+
 static const struct fscrypt_operations ext4_cryptops = {
 	.key_prefix		= "ext4:",
 	.get_context		= ext4_get_context,
@@ -1311,6 +1340,7 @@
 	.dummy_context		= ext4_dummy_context,
 	.empty_dir		= ext4_empty_dir,
 	.max_namelen		= EXT4_NAME_LEN,
+	.is_encrypted       = ext4_is_encrypted,
 };
 #endif
 
@@ -1392,6 +1422,7 @@
 	.fh_to_dentry = ext4_fh_to_dentry,
 	.fh_to_parent = ext4_fh_to_parent,
 	.get_parent = ext4_get_parent,
+	.commit_metadata = ext4_nfs_commit_metadata,
 };
 
 enum {
@@ -4327,7 +4358,7 @@
 	 * so we can safely mount the rest of the filesystem now.
 	 */
 
-	root = ext4_iget(sb, EXT4_ROOT_INO);
+	root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
 	if (IS_ERR(root)) {
 		ext4_msg(sb, KERN_ERR, "get root inode failed");
 		ret = PTR_ERR(root);
@@ -4597,7 +4628,7 @@
 	 * happen if we iget() an unused inode, as the subsequent iput()
 	 * will try to delete it.
 	 */
-	journal_inode = ext4_iget(sb, journal_inum);
+	journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
 	if (IS_ERR(journal_inode)) {
 		ext4_msg(sb, KERN_ERR, "no journal found");
 		return NULL;
@@ -4879,7 +4910,7 @@
 	ext4_superblock_csum_set(sb);
 	if (sync)
 		lock_buffer(sbh);
-	if (buffer_write_io_error(sbh)) {
+	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
 		/*
 		 * Oh, dear.  A previous attempt to write the
 		 * superblock failed.  This could happen because the
@@ -5679,7 +5710,7 @@
 	if (!qf_inums[type])
 		return -EPERM;
 
-	qf_inode = ext4_iget(sb, qf_inums[type]);
+	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
 	if (IS_ERR(qf_inode)) {
 		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
 		return PTR_ERR(qf_inode);
@@ -5689,9 +5720,9 @@
 	qf_inode->i_flags |= S_NOQUOTA;
 	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
 	err = dquot_enable(qf_inode, type, format_id, flags);
-	iput(qf_inode);
 	if (err)
 		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
+	iput(qf_inode);
 
 	return err;
 }
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 4380c86..c0ba520 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -384,7 +384,7 @@
 	struct inode *inode;
 	int err;
 
-	inode = ext4_iget(parent->i_sb, ea_ino);
+	inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		ext4_error(parent->i_sb,
@@ -522,14 +522,13 @@
 	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
 		  name_index, name, buffer, (long)buffer_size);
 
-	error = -ENODATA;
 	if (!EXT4_I(inode)->i_file_acl)
-		goto cleanup;
+		return -ENODATA;
 	ea_idebug(inode, "reading block %llu",
 		  (unsigned long long)EXT4_I(inode)->i_file_acl);
-	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-	if (!bh)
-		goto cleanup;
+	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 	ea_bdebug(bh, "b_count=%d, refcount=%d",
 		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
 	error = ext4_xattr_check_block(inode, bh);
@@ -696,26 +695,23 @@
 	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
 		  buffer, (long)buffer_size);
 
-	error = 0;
 	if (!EXT4_I(inode)->i_file_acl)
-		goto cleanup;
+		return 0;
 	ea_idebug(inode, "reading block %llu",
 		  (unsigned long long)EXT4_I(inode)->i_file_acl);
-	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-	error = -EIO;
-	if (!bh)
-		goto cleanup;
+	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 	ea_bdebug(bh, "b_count=%d, refcount=%d",
 		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
 	error = ext4_xattr_check_block(inode, bh);
 	if (error)
 		goto cleanup;
 	ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
-	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
-
+	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
+					buffer_size);
 cleanup:
 	brelse(bh);
-
 	return error;
 }
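/*
 * Every sb_bread() call site in this file migrates to ext4_sb_bread(),
 * which returns an ERR_PTR (e.g. -EIO or -ENOMEM) instead of NULL, so
 * callers can propagate the real failure instead of a hard-coded -EIO.
 * A minimal sketch of the new calling convention
 * (example_read_xattr_block() is hypothetical):
 */
static int example_read_xattr_block(struct inode *inode,
				    struct buffer_head **bhp)
{
	struct buffer_head *bh = ext4_sb_bread(inode->i_sb,
				EXT4_I(inode)->i_file_acl, REQ_PRIO);

	if (IS_ERR(bh))
		return PTR_ERR(bh);	/* -EIO, -ENOMEM, ... */
	*bhp = bh;
	return 0;
}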
 
@@ -830,9 +826,9 @@
 	}
 
 	if (EXT4_I(inode)->i_file_acl) {
-		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-		if (!bh) {
-			ret = -EIO;
+		bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+		if (IS_ERR(bh)) {
+			ret = PTR_ERR(bh);
 			goto out;
 		}
 
@@ -1490,7 +1486,8 @@
 	}
 
 	while (ce) {
-		ea_inode = ext4_iget(inode->i_sb, ce->e_value);
+		ea_inode = ext4_iget(inode->i_sb, ce->e_value,
+				     EXT4_IGET_NORMAL);
 		if (!IS_ERR(ea_inode) &&
 		    !is_bad_inode(ea_inode) &&
 		    (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
@@ -1825,16 +1822,15 @@
 
 	if (EXT4_I(inode)->i_file_acl) {
 		/* The inode already has an extended attribute block. */
-		bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
-		error = -EIO;
-		if (!bs->bh)
-			goto cleanup;
+		bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+		if (IS_ERR(bs->bh))
+			return PTR_ERR(bs->bh);
 		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
 			atomic_read(&(bs->bh->b_count)),
 			le32_to_cpu(BHDR(bs->bh)->h_refcount));
 		error = ext4_xattr_check_block(inode, bs->bh);
 		if (error)
-			goto cleanup;
+			return error;
 		/* Find the named attribute. */
 		bs->s.base = BHDR(bs->bh);
 		bs->s.first = BFIRST(bs->bh);
@@ -1843,13 +1839,10 @@
 		error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
 					 i->name_index, i->name, 1);
 		if (error && error != -ENODATA)
-			goto cleanup;
+			return error;
 		bs->s.not_found = error;
 	}
-	error = 0;
-
-cleanup:
-	return error;
+	return 0;
 }
 
 static int
@@ -2278,9 +2271,9 @@
 
 	if (!EXT4_I(inode)->i_file_acl)
 		return NULL;
-	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-	if (!bh)
-		return ERR_PTR(-EIO);
+	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+	if (IS_ERR(bh))
+		return bh;
 	error = ext4_xattr_check_block(inode, bh);
 	if (error) {
 		brelse(bh);
@@ -2733,7 +2726,7 @@
 	base = IFIRST(header);
 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
 	min_offs = end - base;
-	total_ino = sizeof(struct ext4_xattr_ibody_header);
+	total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
 
 	error = xattr_check_inode(inode, header, end);
 	if (error)
@@ -2750,10 +2743,11 @@
 	if (EXT4_I(inode)->i_file_acl) {
 		struct buffer_head *bh;
 
-		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-		error = -EIO;
-		if (!bh)
+		bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+		if (IS_ERR(bh)) {
+			error = PTR_ERR(bh);
 			goto cleanup;
+		}
 		error = ext4_xattr_check_block(inode, bh);
 		if (error) {
 			brelse(bh);
@@ -2907,11 +2901,12 @@
 	}
 
 	if (EXT4_I(inode)->i_file_acl) {
-		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-		if (!bh) {
-			EXT4_ERROR_INODE(inode, "block %llu read error",
-					 EXT4_I(inode)->i_file_acl);
-			error = -EIO;
+		bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+		if (IS_ERR(bh)) {
+			error = PTR_ERR(bh);
+			if (error == -EIO)
+				EXT4_ERROR_INODE(inode, "block %llu read error",
+						 EXT4_I(inode)->i_file_acl);
 			goto cleanup;
 		}
 		error = ext4_xattr_check_block(inode, bh);
@@ -3064,8 +3059,10 @@
 	while (ce) {
 		struct buffer_head *bh;
 
-		bh = sb_bread(inode->i_sb, ce->e_value);
-		if (!bh) {
+		bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
+		if (IS_ERR(bh)) {
+			if (PTR_ERR(bh) == -ENOMEM)
+				return NULL;
 			EXT4_ERROR_INODE(inode, "block %lu read error",
 					 (unsigned long)ce->e_value);
 		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index fa707cd..63e5995 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -160,7 +160,7 @@
 	return (void *)f2fs_acl;
 
 fail:
-	kfree(f2fs_acl);
+	kvfree(f2fs_acl);
 	return ERR_PTR(-EINVAL);
 }
 
@@ -190,7 +190,7 @@
 		acl = NULL;
 	else
 		acl = ERR_PTR(retval);
-	kfree(value);
+	kvfree(value);
 
 	return acl;
 }
@@ -240,7 +240,7 @@
 
 	error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0);
 
-	kfree(value);
+	kvfree(value);
 	if (!error)
 		set_cached_acl(inode, type, acl);
 
@@ -352,12 +352,14 @@
 		return PTR_ERR(p);
 
 	clone = f2fs_acl_clone(p, GFP_NOFS);
-	if (!clone)
-		goto no_mem;
+	if (!clone) {
+		ret = -ENOMEM;
+		goto release_acl;
+	}
 
 	ret = f2fs_acl_create_masq(clone, mode);
 	if (ret < 0)
-		goto no_mem_clone;
+		goto release_clone;
 
 	if (ret == 0)
 		posix_acl_release(clone);
@@ -371,11 +373,11 @@
 
 	return 0;
 
-no_mem_clone:
+release_clone:
 	posix_acl_release(clone);
-no_mem:
+release_acl:
 	posix_acl_release(p);
-	return -ENOMEM;
+	return ret;
 }
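/*
 * The relabeled error path now returns whatever f2fs_acl_create_masq()
 * reported rather than collapsing every failure into -ENOMEM.  Sketch
 * of the idiom in isolation (example_clone_and_masq() is hypothetical;
 * the helper names follow the hunk above):
 */
static int example_clone_and_masq(struct posix_acl *p, umode_t *mode)
{
	struct posix_acl *clone = f2fs_acl_clone(p, GFP_NOFS);
	int ret;

	if (!clone)
		return -ENOMEM;		/* allocation failure */
	ret = f2fs_acl_create_masq(clone, mode);
	if (ret < 0)
		posix_acl_release(clone);
	return ret < 0 ? ret : 0;	/* propagate the real error */
}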
 
 int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 9c28ea4..f955cd3 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -44,7 +44,7 @@
 		cond_resched();
 		goto repeat;
 	}
-	f2fs_wait_on_page_writeback(page, META, true);
+	f2fs_wait_on_page_writeback(page, META, true, true);
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 	return page;
@@ -370,9 +370,8 @@
 				goto continue_unlock;
 			}
 
-			f2fs_wait_on_page_writeback(page, META, true);
+			f2fs_wait_on_page_writeback(page, META, true, true);
 
-			BUG_ON(PageWriteback(page));
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
@@ -911,7 +910,7 @@
 	f2fs_put_page(cp1, 1);
 	f2fs_put_page(cp2, 1);
 fail_no_cp:
-	kfree(sbi->ckpt);
+	kvfree(sbi->ckpt);
 	return -EINVAL;
 }
 
@@ -1290,11 +1289,11 @@
 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
 	int err;
 
-	memcpy(page_address(page), src, PAGE_SIZE);
-	set_page_dirty(page);
+	f2fs_wait_on_page_writeback(page, META, true, true);
 
-	f2fs_wait_on_page_writeback(page, META, true);
-	f2fs_bug_on(sbi, PageWriteback(page));
+	memcpy(page_address(page), src, PAGE_SIZE);
+
+	set_page_dirty(page);
 	if (unlikely(!clear_page_dirty_for_io(page)))
 		f2fs_bug_on(sbi, 1);
 
@@ -1328,11 +1327,9 @@
 	int err;
 
 	/* Flush all the NAT/SIT pages */
-	while (get_pages(sbi, F2FS_DIRTY_META)) {
-		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
-		if (unlikely(f2fs_cp_error(sbi)))
-			break;
-	}
+	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
+					!f2fs_cp_error(sbi));
 
 	/*
 	 * modify checkpoint
@@ -1405,14 +1402,6 @@
 		for (i = 0; i < nm_i->nat_bits_blocks; i++)
 			f2fs_update_meta_page(sbi, nm_i->nat_bits +
 					(i << F2FS_BLKSIZE_BITS), blk + i);
-
-		/* Flush all the NAT BITS pages */
-		while (get_pages(sbi, F2FS_DIRTY_META)) {
-			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
-							FS_CP_META_IO);
-			if (unlikely(f2fs_cp_error(sbi)))
-				break;
-		}
 	}
 
 	/* write out checkpoint buffer at block 0 */
@@ -1448,6 +1437,8 @@
 
 	/* Here, we have one bio containing the CP pack, except the cp pack 2 page */
 	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
+					!f2fs_cp_error(sbi));
 
 	/* wait for writeback of previously submitted meta pages */
 	f2fs_wait_on_all_pages_writeback(sbi);
@@ -1465,7 +1456,7 @@
 	 * invalidate intermediate page cache borrowed from meta inode
 	 * which are used for migration of encrypted inode's blocks.
 	 */
-	if (f2fs_sb_has_encrypt(sbi->sb))
+	if (f2fs_sb_has_encrypt(sbi))
 		invalidate_mapping_pages(META_MAPPING(sbi),
 				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
 
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e9681f8..6cab353 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -143,6 +143,8 @@
 
 static void f2fs_read_end_io(struct bio *bio)
 {
+	struct page *first_page = bio->bi_io_vec[0].bv_page;
+
 	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
 						FAULT_READ_IO)) {
 		f2fs_show_injection_info(FAULT_READ_IO);
@@ -157,6 +159,13 @@
 		return;
 	}
 
+	if (first_page != NULL &&
+		__read_io_type(first_page) == F2FS_RD_DATA) {
+		trace_android_fs_dataread_end(first_page->mapping->host,
+						page_offset(first_page),
+						bio->bi_iter.bi_size);
+	}
+
 	__read_end_io(bio);
 }
 
@@ -324,6 +333,32 @@
 	submit_bio(bio);
 }
 
+static void __f2fs_submit_read_bio(struct f2fs_sb_info *sbi,
+				struct bio *bio, enum page_type type)
+{
+	if (trace_android_fs_dataread_start_enabled() && (type == DATA)) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL &&
+			__read_io_type(first_page) == F2FS_RD_DATA) {
+			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+			path = android_fstrace_get_pathname(pathbuf,
+						MAX_TRACE_PATHBUF_LEN,
+						first_page->mapping->host);
+
+			trace_android_fs_dataread_start(
+				first_page->mapping->host,
+				page_offset(first_page),
+				bio->bi_iter.bi_size,
+				current->pid,
+				path,
+				current->comm);
+		}
+	}
+	__submit_bio(sbi, bio, type);
+}
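/*
 * Tracing contract established above, as a sketch: every DATA bio
 * routed through __f2fs_submit_read_bio() emits
 * trace_android_fs_dataread_start() keyed on the bio's first page
 * (inode, page_offset(first_page), bi_size), and f2fs_read_end_io()
 * emits the matching trace_android_fs_dataread_end() with the same key
 * once the device completes the bio.  Read paths therefore must submit
 * through this wrapper, not __submit_bio(), or start/end events will
 * not pair up.
 */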
+
 static void __submit_merged_bio(struct f2fs_bio_info *io)
 {
 	struct f2fs_io_info *fio = &io->fio;
@@ -373,29 +408,6 @@
 	return false;
 }
 
-static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
-						struct page *page, nid_t ino,
-						enum page_type type)
-{
-	enum page_type btype = PAGE_TYPE_OF_BIO(type);
-	enum temp_type temp;
-	struct f2fs_bio_info *io;
-	bool ret = false;
-
-	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
-		io = sbi->write_io[btype] + temp;
-
-		down_read(&io->io_rwsem);
-		ret = __has_merged_page(io, inode, page, ino);
-		up_read(&io->io_rwsem);
-
-		/* TODO: use HOT temp only for meta pages now. */
-		if (ret || btype == META)
-			break;
-	}
-	return ret;
-}
-
 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
 				enum page_type type, enum temp_type temp)
 {
@@ -421,13 +433,19 @@
 				nid_t ino, enum page_type type, bool force)
 {
 	enum temp_type temp;
-
-	if (!force && !has_merged_page(sbi, inode, page, ino, type))
-		return;
+	bool ret = true;
 
 	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
+		if (!force) {
+			enum page_type btype = PAGE_TYPE_OF_BIO(type);
+			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
 
-		__f2fs_submit_merged_write(sbi, type, temp);
+			down_read(&io->io_rwsem);
+			ret = __has_merged_page(io, inode, page, ino);
+			up_read(&io->io_rwsem);
+		}
+		if (ret)
+			__f2fs_submit_merged_write(sbi, type, temp);
 
 		/* TODO: use HOT temp only for meta pages now. */
 		if (type >= META)
@@ -463,6 +481,7 @@
 	struct bio *bio;
 	struct page *page = fio->encrypted_page ?
 			fio->encrypted_page : fio->page;
+	struct inode *inode = fio->page->mapping->host;
 
 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
 			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
@@ -475,20 +494,19 @@
 	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
 				1, is_read_io(fio->op), fio->type, fio->temp);
 
+	if (f2fs_may_encrypt_bio(inode, fio))
+		fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
+
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
 		return -EFAULT;
 	}
-
-	if (fio->io_wbc && !is_read_io(fio->op))
-		wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
-
 	bio_set_op_attrs(bio, fio->op, fio->op_flags);
 
 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
 			__read_io_type(page): WB_DATA_TYPE(fio->page));
 
-	__submit_bio(fio->sbi, bio, fio->type);
+	__f2fs_submit_read_bio(fio->sbi, bio, fio->type);
 	return 0;
 }
 
@@ -498,6 +516,9 @@
 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
 	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
 	struct page *bio_page;
+	struct inode *inode;
+	bool bio_encrypted;
+	u64 dun;
 
 	f2fs_bug_on(sbi, is_read_io(fio->op));
 
@@ -520,6 +541,9 @@
 	verify_block_addr(fio, fio->new_blkaddr);
 
 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+	inode = fio->page->mapping->host;
+	dun = PG_DUN(inode, fio->page);
+	bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
 
 	/* set submitted = true as a return value */
 	fio->submitted = true;
@@ -530,6 +554,11 @@
 	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
 			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
 		__submit_merged_bio(io);
+
+	/* ICE support */
+	if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted))
+		__submit_merged_bio(io);
+
 alloc_new:
 	if (io->bio == NULL) {
 		if ((fio->type == DATA || fio->type == NODE) &&
@@ -541,6 +570,8 @@
 		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
 						BIO_MAX_PAGES, false,
 						fio->type, fio->temp);
+		if (bio_encrypted)
+			fscrypt_set_ice_dun(inode, io->bio, dun);
 		io->fio = *fio;
 	}
 
@@ -582,9 +613,10 @@
 		return ERR_PTR(-ENOMEM);
 	f2fs_target_device(sbi, blkaddr, bio);
 	bio->bi_end_io = f2fs_read_end_io;
-	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
+	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
-	if (f2fs_encrypted_file(inode))
+	if (f2fs_encrypted_file(inode) &&
+		!fscrypt_using_hardware_encryption(inode))
 		post_read_steps |= 1 << STEP_DECRYPT;
 	if (post_read_steps) {
 		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
@@ -609,6 +641,9 @@
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
+	if (f2fs_may_encrypt_bio(inode, NULL))
+		fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page));
+
 	/* wait for GCed page writeback via META_MAPPING */
 	f2fs_wait_on_block_writeback(inode, blkaddr);
 
@@ -618,7 +653,7 @@
 	}
 	ClearPageError(page);
 	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
-	__submit_bio(F2FS_I_SB(inode), bio, DATA);
+	__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
 	return 0;
 }
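/*
 * With inline-crypto (ICE), a bio carries a DUN (data unit number)
 * that must stay contiguous across the pages it covers.
 * fscrypt_mergeable_bio() rejects adding a page whose DUN or
 * encryption state does not match, and fscrypt_set_ice_dun() stamps a
 * freshly allocated bio.  A minimal sketch of the submit-side rule,
 * assuming the Android/QC fscrypt ICE API used above
 * (example_add_encrypted_page() is hypothetical):
 */
static struct bio *example_add_encrypted_page(struct inode *inode,
				struct bio *bio, struct page *page,
				block_t blkaddr)
{
	u64 dun = PG_DUN(inode, page);
	bool encrypted = f2fs_may_encrypt_bio(inode, NULL);

	if (bio && !fscrypt_mergeable_bio(bio, dun, encrypted)) {
		/* cannot merge: flush the old bio, start a new one */
		__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	if (!bio) {
		bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
		if (!IS_ERR(bio) && encrypted)
			fscrypt_set_ice_dun(inode, bio, dun);
	}
	return bio;
}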
 
@@ -644,7 +679,7 @@
  */
 void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
 {
-	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
 	__set_data_blkaddr(dn);
 	if (set_page_dirty(dn->node_page))
 		dn->node_changed = true;
@@ -674,7 +709,7 @@
 	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
 						dn->ofs_in_node, count);
 
-	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
 
 	for (; count > 0; dn->ofs_in_node++) {
 		block_t blkaddr = datablock_addr(dn->inode,
@@ -958,6 +993,9 @@
 			return err;
 	}
 
+	if (direct_io && allow_outplace_dio(inode, iocb, from))
+		return 0;
+
 	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
 		return 0;
 
@@ -971,6 +1009,7 @@
 	map.m_next_pgofs = NULL;
 	map.m_next_extent = NULL;
 	map.m_seg_type = NO_CHECK_TYPE;
+	map.m_may_create = true;
 
 	if (direct_io) {
 		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
@@ -1029,7 +1068,7 @@
 	unsigned int maxblocks = map->m_len;
 	struct dnode_of_data dn;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
+	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
 	pgoff_t pgofs, end_offset, end;
 	int err = 0, ofs = 1;
 	unsigned int ofs_in_node, last_ofs_in_node;
@@ -1049,6 +1088,10 @@
 	end = pgofs + maxblocks;
 
 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
+							map->m_may_create)
+			goto next_dnode;
+
 		map->m_pblk = ei.blk + pgofs - ei.fofs;
 		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
 		map->m_flags = F2FS_MAP_MAPPED;
@@ -1063,7 +1106,7 @@
 	}
 
 next_dnode:
-	if (create)
+	if (map->m_may_create)
 		__do_map_lock(sbi, flag, true);
 
 	/* When reading holes, we need its node page */
@@ -1100,11 +1143,13 @@
 
 	if (is_valid_data_blkaddr(sbi, blkaddr)) {
 		/* use out-place-update for direct IO under LFS mode */
-		if (test_opt(sbi, LFS) && create &&
-				flag == F2FS_GET_BLOCK_DIO) {
+		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
+							map->m_may_create) {
 			err = __allocate_data_block(&dn, map->m_seg_type);
-			if (!err)
+			if (!err) {
+				blkaddr = dn.data_blkaddr;
 				set_inode_flag(inode, FI_APPEND_WRITE);
+			}
 		}
 	} else {
 		if (create) {
@@ -1210,7 +1255,7 @@
 
 	f2fs_put_dnode(&dn);
 
-	if (create) {
+	if (map->m_may_create) {
 		__do_map_lock(sbi, flag, false);
 		f2fs_balance_fs(sbi, dn.node_changed);
 	}
@@ -1236,7 +1281,7 @@
 	}
 	f2fs_put_dnode(&dn);
 unlock_out:
-	if (create) {
+	if (map->m_may_create) {
 		__do_map_lock(sbi, flag, false);
 		f2fs_balance_fs(sbi, dn.node_changed);
 	}
@@ -1258,6 +1303,7 @@
 	map.m_next_pgofs = NULL;
 	map.m_next_extent = NULL;
 	map.m_seg_type = NO_CHECK_TYPE;
+	map.m_may_create = false;
 	last_lblk = F2FS_BLK_ALIGN(pos + len);
 
 	while (map.m_lblk < last_lblk) {
@@ -1272,7 +1318,7 @@
 
 static int __get_data_block(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh, int create, int flag,
-			pgoff_t *next_pgofs, int seg_type)
+			pgoff_t *next_pgofs, int seg_type, bool may_write)
 {
 	struct f2fs_map_blocks map;
 	int err;
@@ -1282,6 +1328,7 @@
 	map.m_next_pgofs = next_pgofs;
 	map.m_next_extent = NULL;
 	map.m_seg_type = seg_type;
+	map.m_may_create = may_write;
 
 	err = f2fs_map_blocks(inode, &map, create, flag);
 	if (!err) {
@@ -1298,16 +1345,25 @@
 {
 	return __get_data_block(inode, iblock, bh_result, create,
 							flag, next_pgofs,
-							NO_CHECK_TYPE);
+							NO_CHECK_TYPE, create);
+}
+
+static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return __get_data_block(inode, iblock, bh_result, create,
+				F2FS_GET_BLOCK_DIO, NULL,
+				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
+				true);
 }
 
 static int get_data_block_dio(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create)
 {
 	return __get_data_block(inode, iblock, bh_result, create,
-						F2FS_GET_BLOCK_DIO, NULL,
-						f2fs_rw_hint_to_seg_type(
-							inode->i_write_hint));
+				F2FS_GET_BLOCK_DIO, NULL,
+				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
+				false);
 }
 
 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -1319,7 +1375,7 @@
 
 	return __get_data_block(inode, iblock, bh_result, create,
 						F2FS_GET_BLOCK_BMAP, NULL,
-						NO_CHECK_TYPE);
+						NO_CHECK_TYPE, create);
 }
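/*
 * m_may_create separates intent from the legacy "create" flag: the
 * read-side helpers above pass false (f2fs_map_blocks() then uses
 * LOOKUP_NODE and skips __do_map_lock()), while
 * get_data_block_dio_write() passes true (ALLOC_NODE, map lock held,
 * out-of-place allocation under LFS).  Sketch of the two call shapes
 * (example_map() is hypothetical):
 */
static int example_map(struct inode *inode, struct f2fs_map_blocks *map,
		       bool for_write)
{
	map->m_may_create = for_write;	/* false: pure lookup */
	return f2fs_map_blocks(inode, map, for_write, F2FS_GET_BLOCK_DIO);
}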
 
 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -1518,6 +1574,8 @@
 	sector_t last_block_in_file;
 	sector_t block_nr;
 	struct f2fs_map_blocks map;
+	bool bio_encrypted;
+	u64 dun;
 
 	map.m_pblk = 0;
 	map.m_lblk = 0;
@@ -1526,6 +1584,7 @@
 	map.m_next_pgofs = NULL;
 	map.m_next_extent = NULL;
 	map.m_seg_type = NO_CHECK_TYPE;
+	map.m_may_create = false;
 
 	for (; nr_pages; nr_pages--) {
 		if (pages) {
@@ -1596,9 +1655,17 @@
 		if (bio && (last_block_in_bio != block_nr - 1 ||
 			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
 submit_and_realloc:
+			__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
+			bio = NULL;
+		}
+
+		dun = PG_DUN(inode, page);
+		bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
+		if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted)) {
 			__submit_bio(F2FS_I_SB(inode), bio, DATA);
 			bio = NULL;
 		}
+
 		if (bio == NULL) {
 			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
 					is_readahead ? REQ_RAHEAD : 0);
@@ -1606,6 +1673,8 @@
 				bio = NULL;
 				goto set_error_page;
 			}
+			if (bio_encrypted)
+				fscrypt_set_ice_dun(inode, bio, dun);
 		}
 
 		/*
@@ -1628,7 +1697,7 @@
 		goto next_page;
 confused:
 		if (bio) {
-			__submit_bio(F2FS_I_SB(inode), bio, DATA);
+			__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
 			bio = NULL;
 		}
 		unlock_page(page);
@@ -1638,7 +1707,7 @@
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
-		__submit_bio(F2FS_I_SB(inode), bio, DATA);
+		__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
 	return 0;
 }
 
@@ -1686,6 +1755,9 @@
 	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
 
 retry_encrypt:
+	if (fscrypt_using_hardware_encryption(inode))
+		return 0;
+
 	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
 			PAGE_SIZE, 0, fio->page->index, gfp_flags);
 	if (IS_ERR(fio->encrypted_page)) {
@@ -1856,6 +1928,8 @@
 		if (fio->need_lock == LOCK_REQ)
 			f2fs_unlock_op(fio->sbi);
 		err = f2fs_inplace_write_data(fio);
+		if (err && PageWriteback(page))
+			end_page_writeback(page);
 		trace_f2fs_do_write_data_page(fio->page, IPU);
 		set_inode_flag(inode, FI_UPDATE_WRITE);
 		return err;
@@ -2143,12 +2217,11 @@
 			if (PageWriteback(page)) {
 				if (wbc->sync_mode != WB_SYNC_NONE)
 					f2fs_wait_on_page_writeback(page,
-								DATA, true);
+							DATA, true, true);
 				else
 					goto continue_unlock;
 			}
 
-			BUG_ON(PageWriteback(page));
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
@@ -2325,6 +2398,7 @@
 	bool locked = false;
 	struct extent_info ei = {0,0,0};
 	int err = 0;
+	int flag;
 
 	/*
 	 * we already allocated all the blocks, so we don't need to get
@@ -2334,9 +2408,15 @@
 			!is_inode_flag_set(inode, FI_NO_PREALLOC))
 		return 0;
 
+	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
+	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
+		flag = F2FS_GET_BLOCK_DEFAULT;
+	else
+		flag = F2FS_GET_BLOCK_PRE_AIO;
+
 	if (f2fs_has_inline_data(inode) ||
 			(pos & PAGE_MASK) >= i_size_read(inode)) {
-		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+		__do_map_lock(sbi, flag, true);
 		locked = true;
 	}
 restart:
@@ -2374,6 +2454,7 @@
 				f2fs_put_dnode(&dn);
 				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
 								true);
+				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
 				locked = true;
 				goto restart;
 			}
@@ -2387,7 +2468,7 @@
 	f2fs_put_dnode(&dn);
 unlock_out:
 	if (locked)
-		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+		__do_map_lock(sbi, flag, false);
 	return err;
 }
 
@@ -2468,7 +2549,7 @@
 		}
 	}
 
-	f2fs_wait_on_page_writeback(page, DATA, false);
+	f2fs_wait_on_page_writeback(page, DATA, false, true);
 
 	if (len == PAGE_SIZE || PageUptodate(page))
 		return 0;
@@ -2560,6 +2641,53 @@
 	return 0;
 }
 
+static void f2fs_dio_end_io(struct bio *bio)
+{
+	struct f2fs_private_dio *dio = bio->bi_private;
+
+	dec_page_count(F2FS_I_SB(dio->inode),
+			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
+
+	bio->bi_private = dio->orig_private;
+	bio->bi_end_io = dio->orig_end_io;
+
+	kvfree(dio);
+
+	bio_endio(bio);
+}
+
+static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
+							loff_t file_offset)
+{
+	struct f2fs_private_dio *dio;
+	bool write = (bio_op(bio) == REQ_OP_WRITE);
+	int err;
+
+	dio = f2fs_kzalloc(F2FS_I_SB(inode),
+			sizeof(struct f2fs_private_dio), GFP_NOFS);
+	if (!dio) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	dio->inode = inode;
+	dio->orig_end_io = bio->bi_end_io;
+	dio->orig_private = bio->bi_private;
+	dio->write = write;
+
+	bio->bi_end_io = f2fs_dio_end_io;
+	bio->bi_private = dio;
+
+	inc_page_count(F2FS_I_SB(inode),
+			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
+
+	submit_bio(bio);
+	return;
+out:
+	bio->bi_status = BLK_STS_IOERR;
+	bio_endio(bio);
+}
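/*
 * The wrapper above interposes on bio completion: it saves the
 * original bi_end_io/bi_private, bumps F2FS_DIO_READ/WRITE, and
 * f2fs_dio_end_io() restores both and chains to the original handler
 * after dropping the count.  Those counters feed is_idle(), keeping
 * background discard/GC quiet while direct IO is in flight.  The
 * interposition pattern in isolation (example_* names hypothetical;
 * ctx is assumed kmalloc()ed at submit time):
 */
struct example_dio_ctx {			/* cf. f2fs_private_dio */
	void *orig_private;
	bio_end_io_t *orig_end_io;
};

static void example_end_io(struct bio *bio)
{
	struct example_dio_ctx *ctx = bio->bi_private;

	bio->bi_private = ctx->orig_private;	/* restore ... */
	bio->bi_end_io = ctx->orig_end_io;
	kfree(ctx);
	bio_endio(bio);				/* ... then chain */
}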
+
 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
@@ -2629,7 +2757,10 @@
 			down_read(&fi->i_gc_rwsem[READ]);
 	}
 
-	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
+	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+			iter, rw == WRITE ? get_data_block_dio_write :
+			get_data_block_dio, NULL, f2fs_dio_submit_bio,
+			DIO_LOCKING | DIO_SKIP_HOLES);
 
 	if (do_opu)
 		up_read(&fi->i_gc_rwsem[READ]);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 139b4d5..503fde8 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -53,6 +53,8 @@
 	si->vw_cnt = atomic_read(&sbi->vw_cnt);
 	si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
 	si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
+	si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ);
+	si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE);
 	si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
 	si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
 	si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA);
@@ -62,7 +64,7 @@
 		si->nr_flushed =
 			atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
 		si->nr_flushing =
-			atomic_read(&SM_I(sbi)->fcc_info->issing_flush);
+			atomic_read(&SM_I(sbi)->fcc_info->queued_flush);
 		si->flush_list_empty =
 			llist_empty(&SM_I(sbi)->fcc_info->issue_list);
 	}
@@ -70,7 +72,7 @@
 		si->nr_discarded =
 			atomic_read(&SM_I(sbi)->dcc_info->issued_discard);
 		si->nr_discarding =
-			atomic_read(&SM_I(sbi)->dcc_info->issing_discard);
+			atomic_read(&SM_I(sbi)->dcc_info->queued_discard);
 		si->nr_discard_cmd =
 			atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
 		si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
@@ -94,8 +96,10 @@
 	si->free_secs = free_sections(sbi);
 	si->prefree_count = prefree_segments(sbi);
 	si->dirty_count = dirty_segments(sbi);
-	si->node_pages = NODE_MAPPING(sbi)->nrpages;
-	si->meta_pages = META_MAPPING(sbi)->nrpages;
+	if (sbi->node_inode)
+		si->node_pages = NODE_MAPPING(sbi)->nrpages;
+	if (sbi->meta_inode)
+		si->meta_pages = META_MAPPING(sbi)->nrpages;
 	si->nats = NM_I(sbi)->nat_cnt;
 	si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
 	si->sits = MAIN_SEGS(sbi);
@@ -173,7 +177,6 @@
 static void update_mem_info(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_stat_info *si = F2FS_STAT(sbi);
-	unsigned npages;
 	int i;
 
 	if (si->base_mem)
@@ -197,7 +200,7 @@
 	si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
 	si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
 	si->base_mem += SIT_VBLOCK_MAP_SIZE;
-	if (sbi->segs_per_sec > 1)
+	if (__is_large_section(sbi))
 		si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
 	si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
 
@@ -256,10 +259,14 @@
 						sizeof(struct extent_node);
 
 	si->page_mem = 0;
-	npages = NODE_MAPPING(sbi)->nrpages;
-	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
-	npages = META_MAPPING(sbi)->nrpages;
-	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+	if (sbi->node_inode) {
+		unsigned npages = NODE_MAPPING(sbi)->nrpages;
+		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+	}
+	if (sbi->meta_inode) {
+		unsigned npages = META_MAPPING(sbi)->nrpages;
+		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+	}
 }
 
 static int stat_show(struct seq_file *s, void *v)
@@ -374,6 +381,8 @@
 		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
 				si->ext_tree, si->zombie_tree, si->ext_node);
 		seq_puts(s, "\nBalancing F2FS Async:\n");
+		seq_printf(s, "  - DIO (R: %4d, W: %4d)\n",
+			   si->nr_dio_read, si->nr_dio_write);
 		seq_printf(s, "  - IO_R (Data: %4d, Node: %4d, Meta: %4d\n",
 			   si->nr_rd_data, si->nr_rd_node, si->nr_rd_meta);
 		seq_printf(s, "  - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
@@ -444,18 +453,7 @@
 	return 0;
 }
 
-static int stat_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, stat_show, inode->i_private);
-}
-
-static const struct file_operations stat_fops = {
-	.owner = THIS_MODULE,
-	.open = stat_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(stat);
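/*
 * DEFINE_SHOW_ATTRIBUTE(stat) from <linux/seq_file.h> generates the
 * same helpers that were open-coded above, so the debugfs registration
 * keeps using &stat_fops unchanged.  It expands (roughly) to:
 *
 *	static int stat_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, stat_show, inode->i_private);
 *	}
 *	static const struct file_operations stat_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = stat_open,
 *		.read    = seq_read,
 *		.llseek  = seq_lseek,
 *		.release = single_release,
 *	};
 */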
 
 int f2fs_build_stats(struct f2fs_sb_info *sbi)
 {
@@ -510,7 +508,7 @@
 	list_del(&si->stat_list);
 	mutex_unlock(&f2fs_stat_mutex);
 
-	kfree(si);
+	kvfree(si);
 }
 
 int __init f2fs_create_root_stats(void)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 2ef84b4..7ff9e99 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -293,7 +293,7 @@
 {
 	enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
 	lock_page(page);
-	f2fs_wait_on_page_writeback(page, type, true);
+	f2fs_wait_on_page_writeback(page, type, true, true);
 	de->ino = cpu_to_le32(inode->i_ino);
 	set_de_type(de, inode->i_mode);
 	set_page_dirty(page);
@@ -307,7 +307,7 @@
 {
 	struct f2fs_inode *ri;
 
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 
 	/* copy name info. to this inode page */
 	ri = F2FS_INODE(ipage);
@@ -550,7 +550,7 @@
 	++level;
 	goto start;
 add_dentry:
-	f2fs_wait_on_page_writeback(dentry_page, DATA, true);
+	f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);
 
 	if (inode) {
 		down_write(&F2FS_I(inode)->i_sem);
@@ -705,7 +705,7 @@
 		return f2fs_delete_inline_entry(dentry, page, dir, inode);
 
 	lock_page(page);
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 
 	dentry_blk = page_address(page);
 	bit_pos = dentry - dentry_blk->dentry;
@@ -808,6 +808,17 @@
 		de_name.name = d->filename[bit_pos];
 		de_name.len = le16_to_cpu(de->name_len);
 
+		/* check memory boundary before moving forward */
+		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+		if (unlikely(bit_pos > d->max)) {
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"%s: corrupted namelen=%d, run fsck to fix.",
+				__func__, le16_to_cpu(de->name_len));
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			err = -EINVAL;
+			goto out;
+		}
+
 		if (f2fs_encrypted_inode(d->inode)) {
 			int save_len = fstr->len;
 
@@ -830,7 +841,6 @@
 		if (readdir_ra)
 			f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
 
-		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
 		ctx->pos = start_pos + bit_pos;
 	}
 out:
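/*
 * The relocated bit_pos advance validates de->name_len against the
 * bitmap bound (d->max) *before* the name bytes are read, so a
 * corrupted on-disk name_len can no longer walk past the dentry block.
 * The check in isolation (example_check_dentry() is hypothetical):
 */
static int example_check_dentry(struct f2fs_dentry_ptr *d,
				struct f2fs_dir_entry *de,
				unsigned int bit_pos)
{
	bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	if (unlikely(bit_pos > d->max))
		return -EINVAL;	/* corrupted namelen: stop, request fsck */
	return 0;
}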
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 56204a8..fbd4a5d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -67,7 +67,7 @@
 	unsigned int inject_type;
 };
 
-extern char *f2fs_fault_name[FAULT_MAX];
+extern const char *f2fs_fault_name[FAULT_MAX];
 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
 #endif
 
@@ -152,12 +152,13 @@
 #define F2FS_FEATURE_VERITY		0x0400	/* reserved */
 #define F2FS_FEATURE_SB_CHKSUM		0x0800
 
-#define F2FS_HAS_FEATURE(sb, mask)					\
-	((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
-#define F2FS_SET_FEATURE(sb, mask)					\
-	(F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask))
-#define F2FS_CLEAR_FEATURE(sb, mask)					\
-	(F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask))
+#define __F2FS_HAS_FEATURE(raw_super, mask)				\
+	((raw_super->feature & cpu_to_le32(mask)) != 0)
+#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
+#define F2FS_SET_FEATURE(sbi, mask)					\
+	(sbi->raw_super->feature |= cpu_to_le32(mask))
+#define F2FS_CLEAR_FEATURE(sbi, mask)					\
+	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
 
 /*
  * Default values for user and/or group using reserved blocks
@@ -284,7 +285,7 @@
 	struct block_device *bdev;	/* bdev */
 	unsigned short ref;		/* reference count */
 	unsigned char state;		/* state */
-	unsigned char issuing;		/* issuing discard */
+	unsigned char queued;		/* queued discard */
 	int error;			/* bio error */
 	spinlock_t lock;		/* for state/bio_ref updating */
 	unsigned short bio_ref;		/* bio reference count */
@@ -326,7 +327,7 @@
 	unsigned int undiscard_blks;		/* # of undiscard blocks */
 	unsigned int next_pos;			/* next discard position */
 	atomic_t issued_discard;		/* # of issued discard */
-	atomic_t issing_discard;		/* # of issing discard */
+	atomic_t queued_discard;		/* # of queued discard */
 	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
 	struct rb_root_cached root;		/* root of discard rb-tree */
 	bool rbtree_check;			/* config for consistency check */
@@ -416,6 +417,7 @@
 #define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
 #define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down */
 #define F2FS_GOING_DOWN_METAFLUSH	0x3	/* going down with meta flush */
+#define F2FS_GOING_DOWN_NEED_FSCK	0x4	/* going down to trigger fsck */
 
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /*
@@ -557,16 +559,8 @@
 };
 
 struct extent_node {
-	struct rb_node rb_node;
-	union {
-		struct {
-			unsigned int fofs;
-			unsigned int len;
-			u32 blk;
-		};
-		struct extent_info ei;	/* extent info */
-
-	};
+	struct rb_node rb_node;		/* rb node located in rb-tree */
+	struct extent_info ei;		/* extent info */
 	struct list_head list;		/* node in global extent list of sbi */
 	struct extent_tree *et;		/* extent tree pointer */
 };
@@ -601,6 +595,7 @@
 	pgoff_t *m_next_pgofs;		/* points to next possible non-hole pgofs */
 	pgoff_t *m_next_extent;		/* point to next possible extent */
 	int m_seg_type;
+	bool m_may_create;		/* indicates the request comes from the write path */
 };
 
 /* for flag in get_data_block */
@@ -889,7 +884,7 @@
 	struct task_struct *f2fs_issue_flush;	/* flush thread */
 	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
 	atomic_t issued_flush;			/* # of issued flushes */
-	atomic_t issing_flush;			/* # of issing flushes */
+	atomic_t queued_flush;			/* # of queued flushes */
 	struct llist_head issue_list;		/* list for command issue */
 	struct llist_node *dispatch_list;	/* list for command dispatch */
 };
@@ -956,6 +951,8 @@
 	F2FS_RD_DATA,
 	F2FS_RD_NODE,
 	F2FS_RD_META,
+	F2FS_DIO_WRITE,
+	F2FS_DIO_READ,
 	NR_COUNT_TYPE,
 };
 
@@ -1170,8 +1167,6 @@
 
 	/* for bio operations */
 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
-	struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
-						/* bio ordering for NODE/DATA */
 	/* keep migration IO order for LFS mode */
 	struct rw_semaphore io_order_lock;
 	mempool_t *write_io_dummy;		/* Dummy pages */
@@ -1263,6 +1258,7 @@
 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
 	unsigned int cur_victim_sec;		/* current victim section num */
 	unsigned int gc_mode;			/* current GC state */
+	unsigned int next_victim_seg[2];	/* next segment in victim section */
 	/* for skip statistic */
 	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
@@ -1272,6 +1268,8 @@
 
 	/* maximum # of trials to find a victim segment for SSR and GC */
 	unsigned int max_victim_search;
+	/* migration granularity of garbage collection, unit: segment */
+	unsigned int migration_granularity;
 
 	/*
 	 * for stat information.
@@ -1330,6 +1328,13 @@
 	__u32 s_chksum_seed;
 };
 
+struct f2fs_private_dio {
+	struct inode *inode;
+	void *orig_private;
+	bio_end_io_t *orig_end_io;
+	bool write;
+};
+
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 #define f2fs_show_injection_info(type)					\
 	printk_ratelimited("%sF2FS-fs : inject %s in %s of %pF\n",	\
@@ -1608,12 +1613,16 @@
 {
 	unsigned long flags;
 
-	set_sbi_flag(sbi, SBI_NEED_FSCK);
+	/*
+	 * Re-enabling nat_bits would require running fsck.f2fs via
+	 * set_sbi_flag(sbi, SBI_NEED_FSCK), but that would be too costly,
+	 * so rely on a regular fsck or an unclean shutdown instead.
+	 */
 
 	if (lock)
 		spin_lock_irqsave(&sbi->cp_lock, flags);
 	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
-	kfree(NM_I(sbi)->nat_bits);
+	kvfree(NM_I(sbi)->nat_bits);
 	NM_I(sbi)->nat_bits = NULL;
 	if (lock)
 		spin_unlock_irqrestore(&sbi->cp_lock, flags);
@@ -2146,7 +2155,11 @@
 {
 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
-		get_pages(sbi, F2FS_WB_CP_DATA))
+		get_pages(sbi, F2FS_WB_CP_DATA) ||
+		get_pages(sbi, F2FS_DIO_READ) ||
+		get_pages(sbi, F2FS_DIO_WRITE) ||
+		atomic_read(&SM_I(sbi)->dcc_info->queued_discard) ||
+		atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
 		return false;
 	return f2fs_time_over(sbi, type);
 }
@@ -2370,6 +2383,7 @@
 	case FI_NEW_INODE:
 		if (set)
 			return;
+		/* fall through */
 	case FI_DATA_EXIST:
 	case FI_INLINE_DOTS:
 	case FI_PIN_FILE:
@@ -2672,22 +2686,37 @@
 
 static inline bool f2fs_may_extent_tree(struct inode *inode)
 {
-	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+	if (!test_opt(sbi, EXTENT_CACHE) ||
 			is_inode_flag_set(inode, FI_NO_EXTENT))
 		return false;
 
+	/*
+	 * For files recovered during mount, do not create extents
+	 * if the shrinker is not yet registered.
+	 */
+	if (list_empty(&sbi->s_list))
+		return false;
+
 	return S_ISREG(inode->i_mode);
 }
 
 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
 					size_t size, gfp_t flags)
 {
+	void *ret;
+
 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 		f2fs_show_injection_info(FAULT_KMALLOC);
 		return NULL;
 	}
 
-	return kmalloc(size, flags);
+	ret = kmalloc(size, flags);
+	if (ret)
+		return ret;
+
+	return kvmalloc(size, flags);
 }
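/*
 * f2fs_kmalloc() now falls back to kvmalloc() when the slab allocation
 * fails, so a buffer may be slab- or vmalloc-backed.  That is why the
 * kfree() calls throughout this patch become kvfree(), which handles
 * both.  Sketch of a correct round trip (example_roundtrip() is
 * hypothetical):
 */
static int example_roundtrip(struct f2fs_sb_info *sbi)
{
	void *p = f2fs_kmalloc(sbi, 64 << 10, GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	kvfree(p);	/* safe for either backing allocator */
	return 0;
}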
 
 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
@@ -2762,6 +2791,8 @@
 	spin_unlock(&sbi->iostat_lock);
 }
 
+#define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)
+
 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META &&	\
 				(!is_read_io(fio->op) || fio->is_meta))
 
@@ -3007,7 +3038,7 @@
 			struct f2fs_summary *sum, int type,
 			struct f2fs_io_info *fio, bool add_list);
 void f2fs_wait_on_page_writeback(struct page *page,
-			enum page_type type, bool ordered);
+			enum page_type type, bool ordered, bool locked);
 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
 								block_t len);
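/*
 * f2fs_wait_on_page_writeback() grows a "locked" flag, and the callers
 * converted in this patch wait *before* set_page_dirty() rather than
 * after it, which is why the old BUG_ON(PageWriteback(page))
 * assertions disappear.  The recurring caller pattern after this
 * change (example_redirty() is hypothetical):
 */
static void example_redirty(struct page *page)
{
	/* wait first, so the page cannot still be under writeback ... */
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	/* ... when it is redirtied */
	set_page_dirty(page);
}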
@@ -3147,6 +3178,7 @@
 	int total_count, utilization;
 	int bg_gc, nr_wb_cp_data, nr_wb_data;
 	int nr_rd_data, nr_rd_node, nr_rd_meta;
+	int nr_dio_read, nr_dio_write;
 	unsigned int io_skip_bggc, other_skip_bggc;
 	int nr_flushing, nr_flushed, flush_list_empty;
 	int nr_discarding, nr_discarded;
@@ -3459,9 +3491,9 @@
 }
 
 #define F2FS_FEATURE_FUNCS(name, flagname) \
-static inline int f2fs_sb_has_##name(struct super_block *sb) \
+static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
 { \
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_##flagname); \
+	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
 }
 
 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
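/*
 * Each F2FS_FEATURE_FUNCS() line now generates a helper taking the
 * sb_info directly; e.g. the first invocation above expands to:
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 *
 * which is why every f2fs_sb_has_*(...->sb) caller in this patch drops
 * the ->sb dereference.
 */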
@@ -3491,7 +3523,7 @@
 
 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
 {
-	return f2fs_sb_has_blkzoned(sbi->sb);
+	return f2fs_sb_has_blkzoned(sbi);
 }
 
 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
@@ -3558,6 +3590,10 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int rw = iov_iter_rw(iter);
 
+	if ((f2fs_encrypted_file(inode)) &&
+		!fscrypt_using_hardware_encryption(inode))
+		return true;
+
 	if (f2fs_post_read_required(inode))
 		return true;
 	if (sbi->s_ndevs)
@@ -3566,7 +3602,7 @@
 	 * for a blkzoned device, fall back from direct IO to buffered IO,
 	 * so that all IOs can be serialized by log-structured writes.
 	 */
-	if (f2fs_sb_has_blkzoned(sbi->sb))
+	if (f2fs_sb_has_blkzoned(sbi))
 		return true;
 	if (test_opt(sbi, LFS) && (rw == WRITE) &&
 				block_unaligned_IO(inode, iocb, iter))
@@ -3577,6 +3613,16 @@
 	return false;
 }
 
+static inline bool f2fs_may_encrypt_bio(struct inode *inode,
+		struct f2fs_io_info *fio)
+{
+	if (fio && (fio->type != DATA || fio->encrypted_page))
+		return false;
+
+	return (f2fs_encrypted_file(inode) &&
+			fscrypt_using_hardware_encryption(inode));
+}
+
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
 							unsigned int type);
@@ -3589,7 +3635,7 @@
 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
 {
 #ifdef CONFIG_QUOTA
-	if (f2fs_sb_has_quota_ino(sbi->sb))
+	if (f2fs_sb_has_quota_ino(sbi))
 		return true;
 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 88b1246..ae2b45e 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -82,7 +82,7 @@
 	}
 
 	/* fill the page */
-	f2fs_wait_on_page_writeback(page, DATA, false);
+	f2fs_wait_on_page_writeback(page, DATA, false, true);
 
 	/* wait for GCed page writeback via META_MAPPING */
 	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -216,6 +216,9 @@
 
 	trace_f2fs_sync_file_enter(inode);
 
+	if (S_ISDIR(inode->i_mode))
+		goto go_write;
+
 	/* if fdatasync is triggered, let's do in-place-update */
 	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
 		set_inode_flag(inode, FI_NEED_IPU);
@@ -575,7 +578,7 @@
 	if (IS_ERR(page))
 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
 truncate_out:
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	zero_user(page, offset, PAGE_SIZE - offset);
 
 	/* An encrypted inode should have a key and truncate the last page. */
@@ -696,7 +699,7 @@
 	unsigned int flags;
 
 	if (f2fs_has_extra_attr(inode) &&
-			f2fs_sb_has_inode_crtime(inode->i_sb) &&
+			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
 		stat->result_mask |= STATX_BTIME;
 		stat->btime.tv_sec = fi->i_crtime.tv_sec;
@@ -892,7 +895,7 @@
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	zero_user(page, start, len);
 	set_page_dirty(page);
 	f2fs_put_page(page, 1);
@@ -1496,7 +1499,8 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
-			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
+			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
+			.m_may_create = true };
 	pgoff_t pg_end;
 	loff_t new_size = i_size_read(inode);
 	loff_t off_end;
@@ -1681,7 +1685,7 @@
 
 	inode->i_ctime = current_time(inode);
 	f2fs_set_inode_flags(inode);
-	f2fs_mark_inode_dirty_sync(inode, false);
+	f2fs_mark_inode_dirty_sync(inode, true);
 	return 0;
 }
 
@@ -1746,10 +1750,12 @@
 
 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
-	if (!get_dirty_pages(inode))
-		goto skip_flush;
-
-	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+	/*
+	 * Wait for end_io so that F2FS_WB_CP_DATA is counted correctly
+	 * by f2fs_is_atomic_file().
+	 */
+	if (get_dirty_pages(inode))
+		f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
 		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
 					inode->i_ino, get_dirty_pages(inode));
 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
@@ -1757,7 +1763,7 @@
 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		goto out;
 	}
-skip_flush:
+
 	set_inode_flag(inode, FI_ATOMIC_FILE);
 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -1962,6 +1968,13 @@
 		f2fs_stop_checkpoint(sbi, false);
 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
+	case F2FS_GOING_DOWN_NEED_FSCK:
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		/* do checkpoint only */
+		ret = f2fs_sync_fs(sb, 1);
+		if (ret)
+			goto out;
+		break;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -2030,7 +2043,7 @@
 {
 	struct inode *inode = file_inode(filp);
 
-	if (!f2fs_sb_has_encrypt(inode->i_sb))
+	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
 		return -EOPNOTSUPP;
 
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
@@ -2040,7 +2053,7 @@
 
 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
 {
-	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
+	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
 		return -EOPNOTSUPP;
 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
 }
@@ -2051,7 +2064,7 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int err;
 
-	if (!f2fs_sb_has_encrypt(inode->i_sb))
+	if (!f2fs_sb_has_encrypt(sbi))
 		return -EOPNOTSUPP;
 
 	err = mnt_want_write_file(filp);
@@ -2155,7 +2168,7 @@
 	}
 
 	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
-	range.start += sbi->blocks_per_seg;
+	range.start += BLKS_PER_SEC(sbi);
 	if (range.start <= end)
 		goto do_more;
 out:
@@ -2197,7 +2210,8 @@
 {
 	struct inode *inode = file_inode(filp);
 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
-					.m_seg_type = NO_CHECK_TYPE };
+					.m_seg_type = NO_CHECK_TYPE,
+					.m_may_create = false };
 	struct extent_info ei = {0, 0, 0};
 	pgoff_t pg_start, pg_end, next_pgofs;
 	unsigned int blk_per_seg = sbi->blocks_per_seg;
@@ -2560,7 +2574,7 @@
 		return -EFAULT;
 
 	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
-			sbi->segs_per_sec != 1) {
+			__is_large_section(sbi)) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"Can't flush %u in %d for segs_per_sec %u != 1\n",
 				range.dev_num, sbi->s_ndevs,
@@ -2635,12 +2649,11 @@
 	struct inode *inode = file_inode(filp);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct super_block *sb = sbi->sb;
 	struct page *ipage;
 	kprojid_t kprojid;
 	int err;
 
-	if (!f2fs_sb_has_project_quota(sb)) {
+	if (!f2fs_sb_has_project_quota(sbi)) {
 		if (projid != F2FS_DEF_PROJID)
 			return -EOPNOTSUPP;
 		else
@@ -2757,7 +2770,7 @@
 	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
 				F2FS_FL_USER_VISIBLE);
 
-	if (f2fs_sb_has_project_quota(inode->i_sb))
+	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
 		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
 							fi->i_projid);
 
@@ -2932,6 +2945,7 @@
 	map.m_next_pgofs = NULL;
 	map.m_next_extent = &m_next_extent;
 	map.m_seg_type = NO_CHECK_TYPE;
+	map.m_may_create = false;
 	end = F2FS_I_SB(inode)->max_file_blocks;
 
 	while (map.m_lblk < end) {
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index a07241f..195cf0f 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -142,7 +142,7 @@
 			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(gc_th->f2fs_gc_task)) {
 		err = PTR_ERR(gc_th->f2fs_gc_task);
-		kfree(gc_th);
+		kvfree(gc_th);
 		sbi->gc_thread = NULL;
 	}
 out:
@@ -155,7 +155,7 @@
 	if (!gc_th)
 		return;
 	kthread_stop(gc_th->f2fs_gc_task);
-	kfree(gc_th);
+	kvfree(gc_th);
 	sbi->gc_thread = NULL;
 }
 
@@ -323,8 +323,7 @@
 	p.min_cost = get_max_cost(sbi, &p);
 
 	if (*result != NULL_SEGNO) {
-		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
-			get_valid_blocks(sbi, *result, false) &&
+		if (get_valid_blocks(sbi, *result, false) &&
 			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
 			p.min_segno = *result;
 		goto out;
@@ -333,6 +332,22 @@
 	if (p.max_search == 0)
 		goto out;
 
+	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
+		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
+			p.min_segno = sbi->next_victim_seg[BG_GC];
+			*result = p.min_segno;
+			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+			goto got_result;
+		}
+		if (gc_type == FG_GC &&
+				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
+			p.min_segno = sbi->next_victim_seg[FG_GC];
+			*result = p.min_segno;
+			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
+			goto got_result;
+		}
+	}
+
 	last_victim = sm->last_victim[p.gc_mode];
 	if (p.alloc_mode == LFS && gc_type == FG_GC) {
 		p.min_segno = check_bg_victims(sbi);
@@ -395,6 +410,8 @@
 	}
 	if (p.min_segno != NULL_SEGNO) {
 got_it:
+		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
+got_result:
 		if (p.alloc_mode == LFS) {
 			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
 			if (gc_type == FG_GC)
@@ -402,13 +419,13 @@
 			else
 				set_bit(secno, dirty_i->victim_secmap);
 		}
-		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
 
+	}
+out:
+	if (p.min_segno != NULL_SEGNO)
 		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
 				sbi->cur_victim_sec,
 				prefree_segments(sbi), free_segments(sbi));
-	}
-out:
 	mutex_unlock(&dirty_i->seglist_lock);
 
 	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
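/*
 * In large-section mode (__is_large_section()) a victim section spans
 * several segments.  GC now migrates at most
 * sbi->migration_granularity segments per round and records the resume
 * point in sbi->next_victim_seg[gc_type]; the branch added above
 * consumes that resume point before running the cost search.  Control
 * flow of one round, as a sketch (example_gc_round() is hypothetical,
 * the per-segment migration itself elided):
 */
static void example_gc_round(struct f2fs_sb_info *sbi, int gc_type,
			     unsigned int segno, unsigned int end_segno)
{
	unsigned int migrated = 0;

	for (; segno < end_segno; segno++) {
		if (migrated >= sbi->migration_granularity) {
			sbi->next_victim_seg[gc_type] = segno;
			return;	/* resume here on the next round */
		}
		/* ... migrate one segment ... */
		migrated++;
	}
}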
@@ -658,6 +675,14 @@
 	fio.page = page;
 	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 
+	/*
+	 * don't cache encrypted data into meta inode until previous dirty
+	 * data has been written back, to avoid racing between GC and flush.
+	 */
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
+
+	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
+
 	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
 					dn.data_blkaddr,
 					FGP_LOCK | FGP_CREAT, GFP_NOFS);
@@ -743,7 +768,9 @@
 	 * don't cache encrypted data into meta inode until previous dirty
 	 * data has been written back, to avoid racing between GC and flush.
 	 */
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
+
+	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
 
 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
 	if (err)
@@ -802,8 +829,8 @@
 	}
 
 write_page:
+	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
 	set_page_dirty(fio.encrypted_page);
-	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
 	if (clear_page_dirty_for_io(fio.encrypted_page))
 		dec_page_count(fio.sbi, F2FS_DIRTY_META);
 
@@ -811,7 +838,7 @@
 	ClearPageError(page);
 
 	/* allocate block address */
-	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
 
 	fio.op = REQ_OP_WRITE;
 	fio.op_flags = REQ_SYNC;
@@ -897,8 +924,9 @@
 		bool is_dirty = PageDirty(page);
 
 retry:
+		f2fs_wait_on_page_writeback(page, DATA, true, true);
+
 		set_page_dirty(page);
-		f2fs_wait_on_page_writeback(page, DATA, true);
 		if (clear_page_dirty_for_io(page)) {
 			inode_dec_dirty_pages(inode);
 			f2fs_remove_dirty_inode(inode);
@@ -1093,15 +1121,18 @@
 	struct blk_plug plug;
 	unsigned int segno = start_segno;
 	unsigned int end_segno = start_segno + sbi->segs_per_sec;
-	int seg_freed = 0;
+	int seg_freed = 0, migrated = 0;
 	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
 						SUM_TYPE_DATA : SUM_TYPE_NODE;
 	int submitted = 0;
 
+	if (__is_large_section(sbi))
+		end_segno = rounddown(end_segno, sbi->segs_per_sec);
+
 	/* readahead multi ssa blocks those have contiguous address */
-	if (sbi->segs_per_sec > 1)
+	if (__is_large_section(sbi))
 		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
-					sbi->segs_per_sec, META_SSA, true);
+					end_segno - segno, META_SSA, true);
 
 	/* reference all summary page */
 	while (segno < end_segno) {
@@ -1130,10 +1161,13 @@
 					GET_SUM_BLOCK(sbi, segno));
 		f2fs_put_page(sum_page, 0);
 
-		if (get_valid_blocks(sbi, segno, false) == 0 ||
-				!PageUptodate(sum_page) ||
-				unlikely(f2fs_cp_error(sbi)))
-			goto next;
+		if (get_valid_blocks(sbi, segno, false) == 0)
+			goto freed;
+		if (__is_large_section(sbi) &&
+				migrated >= sbi->migration_granularity)
+			goto skip;
+		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
+			goto skip;
 
 		sum = page_address(sum_page);
 		if (type != GET_SUM_TYPE((&sum->footer))) {
@@ -1141,7 +1175,7 @@
 				"type [%d, %d] in SSA and SIT",
 				segno, type, GET_SUM_TYPE((&sum->footer)));
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
-			goto next;
+			goto skip;
 		}
 
 		/*
@@ -1160,10 +1194,15 @@
 
 		stat_inc_seg_count(sbi, type, gc_type);
 
+freed:
 		if (gc_type == FG_GC &&
 				get_valid_blocks(sbi, segno, false) == 0)
 			seg_freed++;
-next:
+		migrated++;
+
+		if (__is_large_section(sbi) && segno + 1 < end_segno)
+			sbi->next_victim_seg[gc_type] = segno + 1;
+skip:
 		f2fs_put_page(sum_page, 0);
 	}
 
@@ -1307,7 +1346,7 @@
 	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
 
 	/* give warm/cold data area from slower device */
-	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
+	if (sbi->s_ndevs && !__is_large_section(sbi))
 		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
 				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
 }
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 3638927..b8676a2 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -73,7 +73,7 @@
 
 	addr = inline_data_addr(inode, ipage);
 
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
 	set_page_dirty(ipage);
 
@@ -179,7 +179,7 @@
 	fio.old_blkaddr = dn->data_blkaddr;
 	set_inode_flag(dn->inode, FI_HOT_DATA);
 	f2fs_outplace_write_data(dn, &fio);
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	if (dirty) {
 		inode_dec_dirty_pages(dn->inode);
 		f2fs_remove_dirty_inode(dn->inode);
@@ -254,7 +254,7 @@
 
 	f2fs_bug_on(F2FS_I_SB(inode), page->index);
 
-	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
 	src_addr = kmap_atomic(page);
 	dst_addr = inline_data_addr(inode, dn.inode_page);
 	memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
@@ -295,7 +295,7 @@
 		ipage = f2fs_get_node_page(sbi, inode->i_ino);
 		f2fs_bug_on(sbi, IS_ERR(ipage));
 
-		f2fs_wait_on_page_writeback(ipage, NODE, true);
+		f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 
 		src_addr = inline_data_addr(inode, npage);
 		dst_addr = inline_data_addr(inode, ipage);
@@ -409,7 +409,7 @@
 		goto out;
 	}
 
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 
 	dentry_blk = page_address(page);
 
@@ -519,18 +519,18 @@
 
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(dir, FI_INLINE_DENTRY);
-	kfree(backup_dentry);
+	kvfree(backup_dentry);
 	return 0;
 recover:
 	lock_page(ipage);
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
 	f2fs_i_depth_write(dir, 0);
 	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
 	set_page_dirty(ipage);
 	f2fs_put_page(ipage, 1);
 
-	kfree(backup_dentry);
+	kvfree(backup_dentry);
 	return err;
 }
 
@@ -583,7 +583,7 @@
 		}
 	}
 
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 
 	name_hash = f2fs_dentry_hash(new_name, NULL);
 	f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
@@ -615,7 +615,7 @@
 	int i;
 
 	lock_page(page);
-	f2fs_wait_on_page_writeback(page, NODE, true);
+	f2fs_wait_on_page_writeback(page, NODE, true, true);
 
 	inline_dentry = inline_data_addr(dir, page);
 	make_dentry_ptr_inline(dir, &d, inline_dentry);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 91ceee0..bec5296 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -103,7 +103,7 @@
 
 	while (start < end) {
 		if (*start++) {
-			f2fs_wait_on_page_writeback(ipage, NODE, true);
+			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 
 			set_inode_flag(inode, FI_DATA_EXIST);
 			set_raw_inline(inode, F2FS_INODE(ipage));
@@ -118,7 +118,7 @@
 {
 	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
 
-	if (!f2fs_sb_has_inode_chksum(sbi->sb))
+	if (!f2fs_sb_has_inode_chksum(sbi))
 		return false;
 
 	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
@@ -218,7 +218,7 @@
 		return false;
 	}
 
-	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
+	if (f2fs_sb_has_flexible_inline_xattr(sbi)
 			&& !f2fs_has_extra_attr(inode)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_msg(sbi->sb, KERN_WARNING,
@@ -228,7 +228,7 @@
 	}
 
 	if (f2fs_has_extra_attr(inode) &&
-			!f2fs_sb_has_extra_attr(sbi->sb)) {
+			!f2fs_sb_has_extra_attr(sbi)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"%s: inode (ino=%lx) is with extra_attr, "
@@ -340,7 +340,7 @@
 	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
 					le16_to_cpu(ri->i_extra_isize) : 0;
 
-	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
+	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
 		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
 	} else if (f2fs_has_inline_xattr(inode) ||
 				f2fs_has_inline_dentry(inode)) {
@@ -390,14 +390,14 @@
 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
 		set_inode_flag(inode, FI_PROJ_INHERIT);
 
-	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
+	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
 		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
 	else
 		i_projid = F2FS_DEF_PROJID;
 	fi->i_projid = make_kprojid(&init_user_ns, i_projid);
 
-	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
+	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
 		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
 		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
@@ -497,7 +497,7 @@
 	struct f2fs_inode *ri;
 	struct extent_tree *et = F2FS_I(inode)->extent_tree;
 
-	f2fs_wait_on_page_writeback(node_page, NODE, true);
+	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
 	set_page_dirty(node_page);
 
 	f2fs_inode_synced(inode);
@@ -542,11 +542,11 @@
 	if (f2fs_has_extra_attr(inode)) {
 		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
 
-		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb))
+		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
 			ri->i_inline_xattr_size =
 				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
 
-		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
 								i_projid)) {
 			projid_t i_projid;
@@ -556,7 +556,7 @@
 			ri->i_projid = cpu_to_le32(i_projid);
 		}
 
-		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
+		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
 								i_crtime)) {
 			ri->i_crtime =
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 99299ed..62d9829 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -61,7 +61,7 @@
 		goto fail;
 	}
 
-	if (f2fs_sb_has_project_quota(sbi->sb) &&
+	if (f2fs_sb_has_project_quota(sbi) &&
 		(F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
 		F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid;
 	else
@@ -79,7 +79,7 @@
 				f2fs_may_encrypt(inode))
 		f2fs_set_encrypted_inode(inode);
 
-	if (f2fs_sb_has_extra_attr(sbi->sb)) {
+	if (f2fs_sb_has_extra_attr(sbi)) {
 		set_inode_flag(inode, FI_EXTRA_ATTR);
 		F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
 	}
@@ -92,7 +92,7 @@
 	if (f2fs_may_inline_dentry(inode))
 		set_inode_flag(inode, FI_INLINE_DENTRY);
 
-	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
+	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
 		f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
 		if (f2fs_has_inline_xattr(inode))
 			xattr_size = F2FS_OPTION(sbi).inline_xattr_size;
@@ -635,7 +635,7 @@
 	f2fs_handle_failed_inode(inode);
 out_free_encrypted_link:
 	if (disk_link.name != (unsigned char *)symname)
-		kfree(disk_link.name);
+		kvfree(disk_link.name);
 	return err;
 }
 
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 2b342064..6162d2c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -826,6 +826,7 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct node_info ni;
 	int err;
+	pgoff_t index;
 
 	err = f2fs_get_node_info(sbi, dn->nid, &ni);
 	if (err)
@@ -845,10 +846,11 @@
 	clear_node_page_dirty(dn->node_page);
 	set_sbi_flag(sbi, SBI_IS_DIRTY);
 
+	index = dn->node_page->index;
 	f2fs_put_page(dn->node_page, 1);
 
 	invalidate_mapping_pages(NODE_MAPPING(sbi),
-			dn->node_page->index, dn->node_page->index);
+			index, index);
 
 	dn->node_page = NULL;
 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
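
The hunk above fixes a use-after-free: dn->node_page->index was evaluated after f2fs_put_page() may have dropped the last reference to the page, so caching the index first is the whole fix. A minimal userspace sketch of the pattern, with hypothetical stand-ins for the page APIs:

    #include <stdio.h>
    #include <stdlib.h>

    struct page { unsigned long index; };

    /* stand-in for f2fs_put_page(): may free the object */
    static void put_page(struct page *page) { free(page); }

    int main(void)
    {
    	struct page *page = malloc(sizeof(*page));

    	if (!page)
    		return 1;
    	page->index = 42;

    	/* copy the field first; the put may drop the last reference */
    	unsigned long index = page->index;
    	put_page(page);

    	/* safe: uses the cached copy, never page->index */
    	printf("invalidate [%lu, %lu]\n", index, index);
    	return 0;
    }
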
@@ -1104,7 +1106,7 @@
 				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
 			lock_page(page);
 			BUG_ON(page->mapping != NODE_MAPPING(sbi));
-			f2fs_wait_on_page_writeback(page, NODE, true);
+			f2fs_wait_on_page_writeback(page, NODE, true, true);
 			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
 			set_page_dirty(page);
 			unlock_page(page);
@@ -1232,7 +1234,7 @@
 	new_ni.version = 0;
 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 
-	f2fs_wait_on_page_writeback(page, NODE, true);
+	f2fs_wait_on_page_writeback(page, NODE, true, true);
 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
 	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
 	if (!PageUptodate(page))
@@ -1598,10 +1600,10 @@
 			.for_reclaim = 0,
 		};
 
-		set_page_dirty(node_page);
-		f2fs_wait_on_page_writeback(node_page, NODE, true);
+		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
 
-		f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
+		set_page_dirty(node_page);
+
 		if (!clear_page_dirty_for_io(node_page)) {
 			err = -EAGAIN;
 			goto out_page;
@@ -1689,8 +1691,7 @@
 				goto continue_unlock;
 			}
 
-			f2fs_wait_on_page_writeback(page, NODE, true);
-			BUG_ON(PageWriteback(page));
+			f2fs_wait_on_page_writeback(page, NODE, true, true);
 
 			set_fsync_mark(page, 0);
 			set_dentry_mark(page, 0);
@@ -1741,7 +1742,7 @@
 			"Retry to write fsync mark: ino=%u, idx=%lx",
 					ino, last_page->index);
 		lock_page(last_page);
-		f2fs_wait_on_page_writeback(last_page, NODE, true);
+		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
 		set_page_dirty(last_page);
 		unlock_page(last_page);
 		goto retry;
@@ -1822,9 +1823,8 @@
 				goto lock_node;
 			}
 
-			f2fs_wait_on_page_writeback(page, NODE, true);
+			f2fs_wait_on_page_writeback(page, NODE, true, true);
 
-			BUG_ON(PageWriteback(page));
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
@@ -1891,7 +1891,7 @@
 		get_page(page);
 		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 
-		f2fs_wait_on_page_writeback(page, NODE, true);
+		f2fs_wait_on_page_writeback(page, NODE, true, false);
 		if (TestClearPageError(page))
 			ret = -EIO;
 
@@ -2469,7 +2469,7 @@
 	src_addr = inline_xattr_addr(inode, page);
 	inline_size = inline_xattr_size(inode);
 
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	memcpy(dst_addr, src_addr, inline_size);
 update_inode:
 	f2fs_update_inode(inode, ipage);
@@ -2563,17 +2563,17 @@
 	if (dst->i_inline & F2FS_EXTRA_ATTR) {
 		dst->i_extra_isize = src->i_extra_isize;
 
-		if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) &&
+		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
 							i_inline_xattr_size))
 			dst->i_inline_xattr_size = src->i_inline_xattr_size;
 
-		if (f2fs_sb_has_project_quota(sbi->sb) &&
+		if (f2fs_sb_has_project_quota(sbi) &&
 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
 								i_projid))
 			dst->i_projid = src->i_projid;
 
-		if (f2fs_sb_has_inode_crtime(sbi->sb) &&
+		if (f2fs_sb_has_inode_crtime(sbi) &&
 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
 							i_crtime_nsec)) {
 			dst->i_crtime = src->i_crtime;
@@ -3115,17 +3115,17 @@
 
 		for (i = 0; i < nm_i->nat_blocks; i++)
 			kvfree(nm_i->free_nid_bitmap[i]);
-		kfree(nm_i->free_nid_bitmap);
+		kvfree(nm_i->free_nid_bitmap);
 	}
 	kvfree(nm_i->free_nid_count);
 
-	kfree(nm_i->nat_bitmap);
-	kfree(nm_i->nat_bits);
+	kvfree(nm_i->nat_bitmap);
+	kvfree(nm_i->nat_bits);
 #ifdef CONFIG_F2FS_CHECK_FS
-	kfree(nm_i->nat_bitmap_mir);
+	kvfree(nm_i->nat_bitmap_mir);
 #endif
 	sbi->nm_info = NULL;
-	kfree(nm_i);
+	kvfree(nm_i);
 }
 
 int __init f2fs_create_node_manager_caches(void)
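
The kfree()-to-kvfree() conversions throughout this series pair these frees with allocation sites that may fall back to vmalloc (kvmalloc()/f2fs_kvzalloc()). kvfree() is correct for either origin, while kfree() on vmalloc memory corrupts the slab allocator. A kernel-style sketch of the invariant (compiles only in-tree):

    #include <linux/mm.h>

    /* kvmalloc() returns kmalloc memory when possible and falls back
     * to vmalloc for large or fragmented requests. */
    static void *grow_table(size_t size)
    {
    	return kvmalloc(size, GFP_KERNEL);
    }

    /* kvfree() detects the origin and frees accordingly; it is also
     * safe on memory that came from plain kmalloc(). */
    static void drop_table(void *table)
    {
    	kvfree(table);
    }
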
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 1c73d87..e05af5d 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -361,7 +361,7 @@
 {
 	struct f2fs_node *rn = F2FS_NODE(p);
 
-	f2fs_wait_on_page_writeback(p, NODE, true);
+	f2fs_wait_on_page_writeback(p, NODE, true, true);
 
 	if (i)
 		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 1dfb17f..e3883db 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -250,7 +250,7 @@
 	i_gid_write(inode, le32_to_cpu(raw->i_gid));
 
 	if (raw->i_inline & F2FS_EXTRA_ATTR) {
-		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
 								i_projid)) {
 			projid_t i_projid;
@@ -539,7 +539,7 @@
 		goto out;
 	}
 
-	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
 
 	err = f2fs_get_node_info(sbi, dn.nid, &ni);
 	if (err)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 6edcf83..9b79056 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -229,7 +229,7 @@
 
 		lock_page(page);
 
-		f2fs_wait_on_page_writeback(page, DATA, true);
+		f2fs_wait_on_page_writeback(page, DATA, true, true);
 
 		if (recover) {
 			struct dnode_of_data dn;
@@ -387,8 +387,9 @@
 		if (page->mapping == inode->i_mapping) {
 			trace_f2fs_commit_inmem_page(page, INMEM);
 
+			f2fs_wait_on_page_writeback(page, DATA, true, true);
+
 			set_page_dirty(page);
-			f2fs_wait_on_page_writeback(page, DATA, true);
 			if (clear_page_dirty_for_io(page)) {
 				inode_dec_dirty_pages(inode);
 				f2fs_remove_dirty_inode(inode);
@@ -620,14 +621,16 @@
 		return 0;
 
 	if (!test_opt(sbi, FLUSH_MERGE)) {
+		atomic_inc(&fcc->queued_flush);
 		ret = submit_flush_wait(sbi, ino);
+		atomic_dec(&fcc->queued_flush);
 		atomic_inc(&fcc->issued_flush);
 		return ret;
 	}
 
-	if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) {
+	if (atomic_inc_return(&fcc->queued_flush) == 1 || sbi->s_ndevs > 1) {
 		ret = submit_flush_wait(sbi, ino);
-		atomic_dec(&fcc->issing_flush);
+		atomic_dec(&fcc->queued_flush);
 
 		atomic_inc(&fcc->issued_flush);
 		return ret;
@@ -646,14 +649,14 @@
 
 	if (fcc->f2fs_issue_flush) {
 		wait_for_completion(&cmd.wait);
-		atomic_dec(&fcc->issing_flush);
+		atomic_dec(&fcc->queued_flush);
 	} else {
 		struct llist_node *list;
 
 		list = llist_del_all(&fcc->issue_list);
 		if (!list) {
 			wait_for_completion(&cmd.wait);
-			atomic_dec(&fcc->issing_flush);
+			atomic_dec(&fcc->queued_flush);
 		} else {
 			struct flush_cmd *tmp, *next;
 
@@ -662,7 +665,7 @@
 			llist_for_each_entry_safe(tmp, next, list, llnode) {
 				if (tmp == &cmd) {
 					cmd.ret = ret;
-					atomic_dec(&fcc->issing_flush);
+					atomic_dec(&fcc->queued_flush);
 					continue;
 				}
 				tmp->ret = ret;
@@ -691,7 +694,7 @@
 	if (!fcc)
 		return -ENOMEM;
 	atomic_set(&fcc->issued_flush, 0);
-	atomic_set(&fcc->issing_flush, 0);
+	atomic_set(&fcc->queued_flush, 0);
 	init_waitqueue_head(&fcc->flush_wait_queue);
 	init_llist_head(&fcc->issue_list);
 	SM_I(sbi)->fcc_info = fcc;
@@ -703,7 +706,7 @@
 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(fcc->f2fs_issue_flush)) {
 		err = PTR_ERR(fcc->f2fs_issue_flush);
-		kfree(fcc);
+		kvfree(fcc);
 		SM_I(sbi)->fcc_info = NULL;
 		return err;
 	}
@@ -722,7 +725,7 @@
 		kthread_stop(flush_thread);
 	}
 	if (free) {
-		kfree(fcc);
+		kvfree(fcc);
 		SM_I(sbi)->fcc_info = NULL;
 	}
 }
@@ -907,7 +910,7 @@
 	dc->len = len;
 	dc->ref = 0;
 	dc->state = D_PREP;
-	dc->issuing = 0;
+	dc->queued = 0;
 	dc->error = 0;
 	init_completion(&dc->wait);
 	list_add_tail(&dc->list, pend_list);
@@ -940,7 +943,7 @@
 							struct discard_cmd *dc)
 {
 	if (dc->state == D_DONE)
-		atomic_sub(dc->issuing, &dcc->issing_discard);
+		atomic_sub(dc->queued, &dcc->queued_discard);
 
 	list_del(&dc->list);
 	rb_erase_cached(&dc->rb_node, &dcc->root);
@@ -1143,12 +1146,12 @@
 		dc->bio_ref++;
 		spin_unlock_irqrestore(&dc->lock, flags);
 
-		atomic_inc(&dcc->issing_discard);
-		dc->issuing++;
+		atomic_inc(&dcc->queued_discard);
+		dc->queued++;
 		list_move_tail(&dc->list, wait_list);
 
 		/* sanity check on discard range */
-		__check_sit_bitmap(sbi, start, start + len);
+		__check_sit_bitmap(sbi, lstart, lstart + len);
 
 		bio->bi_private = dc;
 		bio->bi_end_io = f2fs_submit_discard_endio;
@@ -1649,6 +1652,10 @@
 		if (dcc->discard_wake)
 			dcc->discard_wake = 0;
 
+		/* clean up pending candidates before going to sleep */
+		if (atomic_read(&dcc->queued_discard))
+			__wait_all_discard_cmd(sbi, NULL);
+
 		if (try_to_freeze())
 			continue;
 		if (f2fs_readonly(sbi->sb))
@@ -1734,7 +1741,7 @@
 		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
 #ifdef CONFIG_BLK_DEV_ZONED
-	if (f2fs_sb_has_blkzoned(sbi->sb) &&
+	if (f2fs_sb_has_blkzoned(sbi) &&
 				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
 #endif
@@ -1882,7 +1889,7 @@
 	unsigned int start = 0, end = -1;
 	unsigned int secno, start_segno;
 	bool force = (cpc->reason & CP_DISCARD);
-	bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
+	bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
 
 	mutex_lock(&dirty_i->seglist_lock);
 
@@ -1914,7 +1921,7 @@
 					(end - 1) <= cpc->trim_end)
 				continue;
 
-		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
+		if (!test_opt(sbi, LFS) || !__is_large_section(sbi)) {
 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
 				(end - start) << sbi->log_blocks_per_seg);
 			continue;
@@ -1946,7 +1953,7 @@
 					sbi->blocks_per_seg, cur_pos);
 			len = next_pos - cur_pos;
 
-			if (f2fs_sb_has_blkzoned(sbi->sb) ||
+			if (f2fs_sb_has_blkzoned(sbi) ||
 			    (force && len < cpc->trim_minlen))
 				goto skip;
 
@@ -1994,7 +2001,7 @@
 	INIT_LIST_HEAD(&dcc->fstrim_list);
 	mutex_init(&dcc->cmd_lock);
 	atomic_set(&dcc->issued_discard, 0);
-	atomic_set(&dcc->issing_discard, 0);
+	atomic_set(&dcc->queued_discard, 0);
 	atomic_set(&dcc->discard_cmd_cnt, 0);
 	dcc->nr_discards = 0;
 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
@@ -2010,7 +2017,7 @@
 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(dcc->f2fs_issue_discard)) {
 		err = PTR_ERR(dcc->f2fs_issue_discard);
-		kfree(dcc);
+		kvfree(dcc);
 		SM_I(sbi)->dcc_info = NULL;
 		return err;
 	}
@@ -2027,7 +2034,7 @@
 
 	f2fs_stop_discard_thread(sbi);
 
-	kfree(dcc);
+	kvfree(dcc);
 	SM_I(sbi)->dcc_info = NULL;
 }
 
@@ -2146,7 +2153,7 @@
 	/* update total number of valid blocks to be written in ckpt area */
 	SIT_I(sbi)->written_valid_blocks += del;
 
-	if (sbi->segs_per_sec > 1)
+	if (__is_large_section(sbi))
 		get_sec_entry(sbi, segno)->valid_blocks += del;
 }
 
@@ -2412,7 +2419,7 @@
 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
 {
 	/* if segs_per_sec is larger than 1, we need to keep original policy. */
-	if (sbi->segs_per_sec != 1)
+	if (__is_large_section(sbi))
 		return CURSEG_I(sbi, type)->segno;
 
 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
@@ -2722,7 +2729,7 @@
 	struct discard_policy dpolicy;
 	unsigned long long trimmed = 0;
 	int err = 0;
-	bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
+	bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
 
 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
 		return -EINVAL;
@@ -3272,16 +3279,18 @@
 }
 
 void f2fs_wait_on_page_writeback(struct page *page,
-				enum page_type type, bool ordered)
+				enum page_type type, bool ordered, bool locked)
 {
 	if (PageWriteback(page)) {
 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 
 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
-		if (ordered)
+		if (ordered) {
 			wait_on_page_writeback(page);
-		else
+			f2fs_bug_on(sbi, locked && PageWriteback(page));
+		} else {
 			wait_for_stable_page(page);
+		}
 	}
 }
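
Note how the callers converted above now wait for writeback before set_page_dirty() rather than after, and the new fourth argument lets the wait itself assert that a locked page cannot re-enter writeback, replacing the open-coded BUG_ON(PageWriteback(page)) checks removed earlier in node.c. A kernel-style sketch of the converted caller pattern (illustrative, not standalone):

    static void touch_node_page(struct page *page)
    {
    	lock_page(page);
    	/* ordered wait; 'locked = true' asserts that writeback
    	 * cannot restart while we hold the page lock */
    	f2fs_wait_on_page_writeback(page, NODE, true, true);
    	set_page_dirty(page);
    	unlock_page(page);
    }
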
 
@@ -3298,7 +3307,7 @@
 
 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
 	if (cpage) {
-		f2fs_wait_on_page_writeback(cpage, DATA, true);
+		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
 		f2fs_put_page(cpage, 1);
 	}
 }
@@ -3880,7 +3889,7 @@
 	if (!sit_i->tmp_map)
 		return -ENOMEM;
 
-	if (sbi->segs_per_sec > 1) {
+	if (__is_large_section(sbi)) {
 		sit_i->sec_entries =
 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
 						      MAIN_SECS(sbi)),
@@ -4035,7 +4044,7 @@
 					se->valid_blocks;
 			}
 
-			if (sbi->segs_per_sec > 1)
+			if (__is_large_section(sbi))
 				get_sec_entry(sbi, start)->valid_blocks +=
 							se->valid_blocks;
 		}
@@ -4079,7 +4088,7 @@
 			sbi->discard_blks -= se->valid_blocks;
 		}
 
-		if (sbi->segs_per_sec > 1) {
+		if (__is_large_section(sbi)) {
 			get_sec_entry(sbi, start)->valid_blocks +=
 							se->valid_blocks;
 			get_sec_entry(sbi, start)->valid_blocks -=
@@ -4314,7 +4323,7 @@
 
 	destroy_victim_secmap(sbi);
 	SM_I(sbi)->dirty_info = NULL;
-	kfree(dirty_i);
+	kvfree(dirty_i);
 }
 
 static void destroy_curseg(struct f2fs_sb_info *sbi)
@@ -4326,10 +4335,10 @@
 		return;
 	SM_I(sbi)->curseg_array = NULL;
 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
-		kfree(array[i].sum_blk);
-		kfree(array[i].journal);
+		kvfree(array[i].sum_blk);
+		kvfree(array[i].journal);
 	}
-	kfree(array);
+	kvfree(array);
 }
 
 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
@@ -4340,7 +4349,7 @@
 	SM_I(sbi)->free_info = NULL;
 	kvfree(free_i->free_segmap);
 	kvfree(free_i->free_secmap);
-	kfree(free_i);
+	kvfree(free_i);
 }
 
 static void destroy_sit_info(struct f2fs_sb_info *sbi)
@@ -4353,26 +4362,26 @@
 
 	if (sit_i->sentries) {
 		for (start = 0; start < MAIN_SEGS(sbi); start++) {
-			kfree(sit_i->sentries[start].cur_valid_map);
+			kvfree(sit_i->sentries[start].cur_valid_map);
 #ifdef CONFIG_F2FS_CHECK_FS
-			kfree(sit_i->sentries[start].cur_valid_map_mir);
+			kvfree(sit_i->sentries[start].cur_valid_map_mir);
 #endif
-			kfree(sit_i->sentries[start].ckpt_valid_map);
-			kfree(sit_i->sentries[start].discard_map);
+			kvfree(sit_i->sentries[start].ckpt_valid_map);
+			kvfree(sit_i->sentries[start].discard_map);
 		}
 	}
-	kfree(sit_i->tmp_map);
+	kvfree(sit_i->tmp_map);
 
 	kvfree(sit_i->sentries);
 	kvfree(sit_i->sec_entries);
 	kvfree(sit_i->dirty_sentries_bitmap);
 
 	SM_I(sbi)->sit_info = NULL;
-	kfree(sit_i->sit_bitmap);
+	kvfree(sit_i->sit_bitmap);
 #ifdef CONFIG_F2FS_CHECK_FS
-	kfree(sit_i->sit_bitmap_mir);
+	kvfree(sit_i->sit_bitmap_mir);
 #endif
-	kfree(sit_i);
+	kvfree(sit_i);
 }
 
 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
@@ -4388,7 +4397,7 @@
 	destroy_free_segmap(sbi);
 	destroy_sit_info(sbi);
 	sbi->sm_info = NULL;
-	kfree(sm_info);
+	kvfree(sm_info);
 }
 
 int __init f2fs_create_segment_manager_caches(void)
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index ab3465f..a77f76f 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -333,7 +333,7 @@
 	 * In order to get # of valid blocks in a section instantly from many
 	 * segments, f2fs manages two counting structures separately.
 	 */
-	if (use_section && sbi->segs_per_sec > 1)
+	if (use_section && __is_large_section(sbi))
 		return get_sec_entry(sbi, segno)->valid_blocks;
 	else
 		return get_seg_entry(sbi, segno)->valid_blocks;
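
The repeated open-coded test sbi->segs_per_sec > 1 is replaced by a named predicate across segment.c and segment.h. Judging from these call sites the helper reduces to a one-line macro; the definition below is an assumption reconstructed from the usage (the real one lives in f2fs.h), shown with a self-contained demo:

    #include <stdio.h>

    struct f2fs_sb_info { unsigned int segs_per_sec; };

    /* assumed definition, reconstructed from the call sites above:
     * a "large section" spans more than one segment, so per-section
     * valid-block counters must be maintained separately */
    #define __is_large_section(sbi)	((sbi)->segs_per_sec > 1)

    int main(void)
    {
    	struct f2fs_sb_info small = { .segs_per_sec = 1 };
    	struct f2fs_sb_info large = { .segs_per_sec = 4 };

    	printf("%d %d\n", __is_large_section(&small),
    			  __is_large_section(&large));	/* prints: 0 1 */
    	return 0;
    }
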
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 9e13db9..a467aca 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -135,6 +135,6 @@
 	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
 
 	spin_lock(&f2fs_list_lock);
-	list_del(&sbi->s_list);
+	list_del_init(&sbi->s_list);
 	spin_unlock(&f2fs_list_lock);
 }
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index af58b2c..db8f2f9 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -38,7 +38,7 @@
 
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 
-char *f2fs_fault_name[FAULT_MAX] = {
+const char *f2fs_fault_name[FAULT_MAX] = {
 	[FAULT_KMALLOC]		= "kmalloc",
 	[FAULT_KVMALLOC]	= "kvmalloc",
 	[FAULT_PAGE_ALLOC]	= "page alloc",
@@ -259,7 +259,7 @@
 			"quota options when quota turned on");
 		return -EINVAL;
 	}
-	if (f2fs_sb_has_quota_ino(sb)) {
+	if (f2fs_sb_has_quota_ino(sbi)) {
 		f2fs_msg(sb, KERN_INFO,
 			"QUOTA feature is enabled, so ignore qf_name");
 		return 0;
@@ -289,7 +289,7 @@
 	set_opt(sbi, QUOTA);
 	return 0;
 errout:
-	kfree(qname);
+	kvfree(qname);
 	return ret;
 }
 
@@ -302,7 +302,7 @@
 			" when quota turned on");
 		return -EINVAL;
 	}
-	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
+	kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
 	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
 	return 0;
 }
@@ -314,7 +314,7 @@
 	 * 'grpquota' mount options are allowed even without quota feature
 	 * to support legacy quotas in quota files.
 	 */
-	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
+	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
 		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
 			 "Cannot enable project quota enforcement.");
 		return -1;
@@ -348,7 +348,7 @@
 		}
 	}
 
-	if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) {
+	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
 		f2fs_msg(sbi->sb, KERN_INFO,
 			"QUOTA feature is enabled, so ignore jquota_fmt");
 		F2FS_OPTION(sbi).s_jquota_fmt = 0;
@@ -399,10 +399,10 @@
 				set_opt(sbi, BG_GC);
 				set_opt(sbi, FORCE_FG_GC);
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		case Opt_disable_roll_forward:
 			set_opt(sbi, DISABLE_ROLL_FORWARD);
@@ -417,7 +417,7 @@
 			set_opt(sbi, DISCARD);
 			break;
 		case Opt_nodiscard:
-			if (f2fs_sb_has_blkzoned(sb)) {
+			if (f2fs_sb_has_blkzoned(sbi)) {
 				f2fs_msg(sb, KERN_WARNING,
 					"discard is required for zoned block devices");
 				return -EINVAL;
@@ -566,11 +566,11 @@
 				return -ENOMEM;
 			if (strlen(name) == 8 &&
 					!strncmp(name, "adaptive", 8)) {
-				if (f2fs_sb_has_blkzoned(sb)) {
+				if (f2fs_sb_has_blkzoned(sbi)) {
 					f2fs_msg(sb, KERN_WARNING,
 						 "adaptive mode is not allowed with "
 						 "zoned block device feature");
-					kfree(name);
+					kvfree(name);
 					return -EINVAL;
 				}
 				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
@@ -578,10 +578,10 @@
 					!strncmp(name, "lfs", 3)) {
 				set_opt_mode(sbi, F2FS_MOUNT_LFS);
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		case Opt_io_size_bits:
 			if (args->from && match_int(args, &arg))
@@ -714,10 +714,10 @@
 					!strncmp(name, "fs-based", 8)) {
 				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		case Opt_alloc:
 			name = match_strdup(&args[0]);
@@ -731,10 +731,10 @@
 					!strncmp(name, "reuse", 5)) {
 				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		case Opt_fsync:
 			name = match_strdup(&args[0]);
@@ -751,14 +751,14 @@
 				F2FS_OPTION(sbi).fsync_mode =
 							FSYNC_MODE_NOBARRIER;
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		case Opt_test_dummy_encryption:
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
-			if (!f2fs_sb_has_encrypt(sb)) {
+			if (!f2fs_sb_has_encrypt(sbi)) {
 				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
 				return -EINVAL;
 			}
@@ -783,10 +783,10 @@
 					!strncmp(name, "disable", 7)) {
 				set_opt(sbi, DISABLE_CHECKPOINT);
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		default:
 			f2fs_msg(sb, KERN_ERR,
@@ -799,13 +799,13 @@
 	if (f2fs_check_quota_options(sbi))
 		return -EINVAL;
 #else
-	if (f2fs_sb_has_quota_ino(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
 		f2fs_msg(sbi->sb, KERN_INFO,
 			 "Filesystem with quota feature cannot be mounted RDWR "
 			 "without CONFIG_QUOTA");
 		return -EINVAL;
 	}
-	if (f2fs_sb_has_project_quota(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
 		f2fs_msg(sb, KERN_ERR,
 			"Filesystem with project quota feature cannot be "
 			"mounted RDWR without CONFIG_QUOTA");
@@ -821,8 +821,8 @@
 	}
 
 	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
-		if (!f2fs_sb_has_extra_attr(sb) ||
-			!f2fs_sb_has_flexible_inline_xattr(sb)) {
+		if (!f2fs_sb_has_extra_attr(sbi) ||
+			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
 			f2fs_msg(sb, KERN_ERR,
 					"extra_attr or flexible_inline_xattr "
 					"feature is off");
@@ -1017,10 +1017,10 @@
 	for (i = 0; i < sbi->s_ndevs; i++) {
 		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
 #ifdef CONFIG_BLK_DEV_ZONED
-		kfree(FDEV(i).blkz_type);
+		kvfree(FDEV(i).blkz_type);
 #endif
 	}
-	kfree(sbi->devs);
+	kvfree(sbi->devs);
 }
 
 static void f2fs_put_super(struct super_block *sb)
@@ -1058,9 +1058,6 @@
 		f2fs_write_checkpoint(sbi, &cpc);
 	}
 
-	/* f2fs_write_checkpoint can update stat informaion */
-	f2fs_destroy_stats(sbi);
-
 	/*
 	 * normally superblock is clean, so we need to release this.
 	 * In addition, EIO will skip do checkpoint, we need this as well.
@@ -1078,31 +1075,40 @@
 	f2fs_bug_on(sbi, sbi->fsync_node_num);
 
 	iput(sbi->node_inode);
+	sbi->node_inode = NULL;
+
 	iput(sbi->meta_inode);
+	sbi->meta_inode = NULL;
+
+	/*
+	 * iput() can update stat information, if f2fs_write_checkpoint()
+	 * above failed with error.
+	 */
+	f2fs_destroy_stats(sbi);
 
 	/* destroy f2fs internal modules */
 	f2fs_destroy_node_manager(sbi);
 	f2fs_destroy_segment_manager(sbi);
 
-	kfree(sbi->ckpt);
+	kvfree(sbi->ckpt);
 
 	f2fs_unregister_sysfs(sbi);
 
 	sb->s_fs_info = NULL;
 	if (sbi->s_chksum_driver)
 		crypto_free_shash(sbi->s_chksum_driver);
-	kfree(sbi->raw_super);
+	kvfree(sbi->raw_super);
 
 	destroy_device_list(sbi);
 	mempool_destroy(sbi->write_io_dummy);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < MAXQUOTAS; i++)
-		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
 #endif
 	destroy_percpu_info(sbi);
 	for (i = 0; i < NR_PAGE_TYPE; i++)
-		kfree(sbi->write_io[i]);
-	kfree(sbi);
+		kvfree(sbi->write_io[i]);
+	kvfree(sbi);
 }
 
 int f2fs_sync_fs(struct super_block *sb, int sync)
@@ -1431,7 +1437,7 @@
 	sbi->sb->s_flags |= SB_LAZYTIME;
 	set_opt(sbi, FLUSH_MERGE);
 	set_opt(sbi, DISCARD);
-	if (f2fs_sb_has_blkzoned(sbi->sb))
+	if (f2fs_sb_has_blkzoned(sbi))
 		set_opt_mode(sbi, F2FS_MOUNT_LFS);
 	else
 		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
@@ -1457,19 +1463,16 @@
 
 	sbi->sb->s_flags |= SB_ACTIVE;
 
-	mutex_lock(&sbi->gc_mutex);
 	f2fs_update_time(sbi, DISABLE_TIME);
 
 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+		mutex_lock(&sbi->gc_mutex);
 		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
 		if (err == -ENODATA)
 			break;
-		if (err && err != -EAGAIN) {
-			mutex_unlock(&sbi->gc_mutex);
+		if (err && err != -EAGAIN)
 			return err;
-		}
 	}
-	mutex_unlock(&sbi->gc_mutex);
 
 	err = sync_filesystem(sbi->sb);
 	if (err)
@@ -1531,7 +1534,7 @@
 				GFP_KERNEL);
 			if (!org_mount_opt.s_qf_names[i]) {
 				for (j = 0; j < i; j++)
-					kfree(org_mount_opt.s_qf_names[j]);
+					kvfree(org_mount_opt.s_qf_names[j]);
 				return -ENOMEM;
 			}
 		} else {
@@ -1575,7 +1578,7 @@
 		sb->s_flags &= ~SB_RDONLY;
 		if (sb_any_quota_suspended(sb)) {
 			dquot_resume(sb, -1);
-		} else if (f2fs_sb_has_quota_ino(sb)) {
+		} else if (f2fs_sb_has_quota_ino(sbi)) {
 			err = f2fs_enable_quotas(sb);
 			if (err)
 				goto restore_opts;
@@ -1651,7 +1654,7 @@
 #ifdef CONFIG_QUOTA
 	/* Release old quota file names */
 	for (i = 0; i < MAXQUOTAS; i++)
-		kfree(org_mount_opt.s_qf_names[i]);
+		kvfree(org_mount_opt.s_qf_names[i]);
 #endif
 	/* Update the POSIXACL Flag */
 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
@@ -1672,7 +1675,7 @@
 #ifdef CONFIG_QUOTA
 	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
 	for (i = 0; i < MAXQUOTAS; i++) {
-		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
 		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
 	}
 #endif
@@ -1817,7 +1820,7 @@
 	int enabled = 0;
 	int i, err;
 
-	if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
+	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
 		err = f2fs_enable_quotas(sbi->sb);
 		if (err) {
 			f2fs_msg(sbi->sb, KERN_ERR,
@@ -1848,7 +1851,7 @@
 	unsigned long qf_inum;
 	int err;
 
-	BUG_ON(!f2fs_sb_has_quota_ino(sb));
+	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
 
 	qf_inum = f2fs_qf_ino(sb, type);
 	if (!qf_inum)
@@ -1993,7 +1996,7 @@
 		goto out_put;
 
 	err = dquot_quota_off(sb, type);
-	if (err || f2fs_sb_has_quota_ino(sb))
+	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
 		goto out_put;
 
 	inode_lock(inode);
@@ -2173,7 +2176,7 @@
 	 * if LOST_FOUND feature is enabled.
 	 *
 	 */
-	if (f2fs_sb_has_lost_found(sbi->sb) &&
+	if (f2fs_sb_has_lost_found(sbi) &&
 			inode->i_ino == F2FS_ROOT_INO(sbi))
 		return -EPERM;
 
@@ -2187,6 +2190,11 @@
 	return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
 }
 
+static inline bool f2fs_is_encrypted(struct inode *inode)
+{
+	return f2fs_encrypted_file(inode);
+}
+
 static const struct fscrypt_operations f2fs_cryptops = {
 	.key_prefix	= "f2fs:",
 	.get_context	= f2fs_get_context,
@@ -2194,6 +2202,7 @@
 	.dummy_context	= f2fs_dummy_context,
 	.empty_dir	= f2fs_empty_dir,
 	.max_namelen	= F2FS_NAME_LEN,
+	.is_encrypted	= f2fs_is_encrypted,
 };
 #endif
 
@@ -2396,7 +2405,7 @@
 	__u32 crc = 0;
 
 	/* Check checksum_offset and crc in superblock */
-	if (le32_to_cpu(raw_super->feature) & F2FS_FEATURE_SB_CHKSUM) {
+	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
 		crc_offset = le32_to_cpu(raw_super->checksum_offset);
 		if (crc_offset !=
 			offsetof(struct f2fs_super_block, crc)) {
@@ -2496,10 +2505,10 @@
 		return 1;
 	}
 
-	if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
+	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
 		f2fs_msg(sb, KERN_INFO,
-			"Wrong segment_count / block_count (%u > %u)",
-			segment_count, le32_to_cpu(raw_super->block_count));
+			"Wrong segment_count / block_count (%u > %llu)",
+			segment_count, le64_to_cpu(raw_super->block_count));
 		return 1;
 	}
 
@@ -2674,7 +2683,7 @@
 static void init_sb_info(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_super_block *raw_super = sbi->raw_super;
-	int i, j;
+	int i;
 
 	sbi->log_sectors_per_block =
 		le32_to_cpu(raw_super->log_sectors_per_block);
@@ -2692,7 +2701,10 @@
 	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
 	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
 	sbi->cur_victim_sec = NULL_SECNO;
+	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
 	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
+	sbi->migration_granularity = sbi->segs_per_sec;
 
 	sbi->dir_level = DEF_DIR_LEVEL;
 	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
@@ -2710,9 +2722,6 @@
 
 	INIT_LIST_HEAD(&sbi->s_list);
 	mutex_init(&sbi->umount_mutex);
-	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
-		for (j = HOT; j < NR_TEMP_TYPE; j++)
-			mutex_init(&sbi->wio_mutex[i][j]);
 	init_rwsem(&sbi->io_order_lock);
 	spin_lock_init(&sbi->cp_lock);
 
@@ -2749,7 +2758,7 @@
 	unsigned int n = 0;
 	int err = -EIO;
 
-	if (!f2fs_sb_has_blkzoned(sbi->sb))
+	if (!f2fs_sb_has_blkzoned(sbi))
 		return 0;
 
 	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
@@ -2800,7 +2809,7 @@
 		}
 	}
 
-	kfree(zones);
+	kvfree(zones);
 
 	return err;
 }
@@ -2860,7 +2869,7 @@
 
 	/* No valid superblock */
 	if (!*raw_super)
-		kfree(super);
+		kvfree(super);
 	else
 		err = 0;
 
@@ -2880,7 +2889,7 @@
 	}
 
 	/* we should update superblock crc here */
-	if (!recover && f2fs_sb_has_sb_chksum(sbi->sb)) {
+	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
 		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
 				offsetof(struct f2fs_super_block, crc));
 		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
@@ -2968,7 +2977,7 @@
 
 #ifdef CONFIG_BLK_DEV_ZONED
 		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
-				!f2fs_sb_has_blkzoned(sbi->sb)) {
+				!f2fs_sb_has_blkzoned(sbi)) {
 			f2fs_msg(sbi->sb, KERN_ERR,
 				"Zoned block device feature not enabled\n");
 			return -EINVAL;
@@ -3064,7 +3073,7 @@
 	sbi->raw_super = raw_super;
 
 	/* precompute checksum seed for metadata */
-	if (f2fs_sb_has_inode_chksum(sb))
+	if (f2fs_sb_has_inode_chksum(sbi))
 		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
 						sizeof(raw_super->uuid));
 
@@ -3074,7 +3083,7 @@
 	 * devices, but mandatory for host-managed zoned block devices.
 	 */
 #ifndef CONFIG_BLK_DEV_ZONED
-	if (f2fs_sb_has_blkzoned(sb)) {
+	if (f2fs_sb_has_blkzoned(sbi)) {
 		f2fs_msg(sb, KERN_ERR,
 			 "Zoned block device support is not enabled\n");
 		err = -EOPNOTSUPP;
@@ -3101,13 +3110,13 @@
 
 #ifdef CONFIG_QUOTA
 	sb->dq_op = &f2fs_quota_operations;
-	if (f2fs_sb_has_quota_ino(sb))
+	if (f2fs_sb_has_quota_ino(sbi))
 		sb->s_qcop = &dquot_quotactl_sysfile_ops;
 	else
 		sb->s_qcop = &f2fs_quotactl_ops;
 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
 
-	if (f2fs_sb_has_quota_ino(sbi->sb)) {
+	if (f2fs_sb_has_quota_ino(sbi)) {
 		for (i = 0; i < MAXQUOTAS; i++) {
 			if (f2fs_qf_ino(sbi->sb, i))
 				sbi->nquota_files++;
@@ -3259,30 +3268,30 @@
 
 	f2fs_build_gc_manager(sbi);
 
+	err = f2fs_build_stats(sbi);
+	if (err)
+		goto free_nm;
+
 	/* get an inode for node space */
 	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
 	if (IS_ERR(sbi->node_inode)) {
 		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
 		err = PTR_ERR(sbi->node_inode);
-		goto free_nm;
+		goto free_stats;
 	}
 
-	err = f2fs_build_stats(sbi);
-	if (err)
-		goto free_node_inode;
-
 	/* read root inode and dentry */
 	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
 	if (IS_ERR(root)) {
 		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
 		err = PTR_ERR(root);
-		goto free_stats;
+		goto free_node_inode;
 	}
 	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
 			!root->i_size || !root->i_nlink) {
 		iput(root);
 		err = -EINVAL;
-		goto free_stats;
+		goto free_node_inode;
 	}
 
 	sb->s_root = d_make_root(root); /* allocate root dentry */
@@ -3297,7 +3306,7 @@
 
 #ifdef CONFIG_QUOTA
 	/* Enable quota usage during mount */
-	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
+	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
 		err = f2fs_enable_quotas(sb);
 		if (err)
 			f2fs_msg(sb, KERN_ERR,
@@ -3369,7 +3378,7 @@
 		if (err)
 			goto free_meta;
 	}
-	kfree(options);
+	kvfree(options);
 
 	/* recover broken superblock */
 	if (recovery) {
@@ -3392,7 +3401,7 @@
 free_meta:
 #ifdef CONFIG_QUOTA
 	f2fs_truncate_quota_inode_pages(sb);
-	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
+	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
 		f2fs_quota_off_umount(sbi->sb);
 #endif
 	/*
@@ -3406,41 +3415,43 @@
 free_root_inode:
 	dput(sb->s_root);
 	sb->s_root = NULL;
-free_stats:
-	f2fs_destroy_stats(sbi);
 free_node_inode:
 	f2fs_release_ino_entry(sbi, true);
 	truncate_inode_pages_final(NODE_MAPPING(sbi));
 	iput(sbi->node_inode);
+	sbi->node_inode = NULL;
+free_stats:
+	f2fs_destroy_stats(sbi);
 free_nm:
 	f2fs_destroy_node_manager(sbi);
 free_sm:
 	f2fs_destroy_segment_manager(sbi);
 free_devices:
 	destroy_device_list(sbi);
-	kfree(sbi->ckpt);
+	kvfree(sbi->ckpt);
 free_meta_inode:
 	make_bad_inode(sbi->meta_inode);
 	iput(sbi->meta_inode);
+	sbi->meta_inode = NULL;
 free_io_dummy:
 	mempool_destroy(sbi->write_io_dummy);
 free_percpu:
 	destroy_percpu_info(sbi);
 free_bio_info:
 	for (i = 0; i < NR_PAGE_TYPE; i++)
-		kfree(sbi->write_io[i]);
+		kvfree(sbi->write_io[i]);
 free_options:
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < MAXQUOTAS; i++)
-		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
 #endif
-	kfree(options);
+	kvfree(options);
 free_sb_buf:
-	kfree(raw_super);
+	kvfree(raw_super);
 free_sbi:
 	if (sbi->s_chksum_driver)
 		crypto_free_shash(sbi->s_chksum_driver);
-	kfree(sbi);
+	kvfree(sbi);
 
 	/* give only one another chance */
 	if (retry) {
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index b777cbd..0575edb 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -90,34 +90,34 @@
 	if (!sb->s_bdev->bd_part)
 		return snprintf(buf, PAGE_SIZE, "0\n");
 
-	if (f2fs_sb_has_encrypt(sb))
+	if (f2fs_sb_has_encrypt(sbi))
 		len += snprintf(buf, PAGE_SIZE - len, "%s",
 						"encryption");
-	if (f2fs_sb_has_blkzoned(sb))
+	if (f2fs_sb_has_blkzoned(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "blkzoned");
-	if (f2fs_sb_has_extra_attr(sb))
+	if (f2fs_sb_has_extra_attr(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "extra_attr");
-	if (f2fs_sb_has_project_quota(sb))
+	if (f2fs_sb_has_project_quota(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "projquota");
-	if (f2fs_sb_has_inode_chksum(sb))
+	if (f2fs_sb_has_inode_chksum(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "inode_checksum");
-	if (f2fs_sb_has_flexible_inline_xattr(sb))
+	if (f2fs_sb_has_flexible_inline_xattr(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "flexible_inline_xattr");
-	if (f2fs_sb_has_quota_ino(sb))
+	if (f2fs_sb_has_quota_ino(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "quota_ino");
-	if (f2fs_sb_has_inode_crtime(sb))
+	if (f2fs_sb_has_inode_crtime(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "inode_crtime");
-	if (f2fs_sb_has_lost_found(sb))
+	if (f2fs_sb_has_lost_found(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "lost_found");
-	if (f2fs_sb_has_sb_chksum(sb))
+	if (f2fs_sb_has_sb_chksum(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "sb_checksum");
 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -246,6 +246,11 @@
 		return count;
 	}
 
+	if (!strcmp(a->attr.name, "migration_granularity")) {
+		if (t == 0 || t > sbi->segs_per_sec)
+			return -EINVAL;
+	}
+
 	if (!strcmp(a->attr.name, "trim_sections"))
 		return -EINVAL;
 
@@ -406,6 +411,7 @@
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, migration_granularity, migration_granularity);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
@@ -460,6 +466,7 @@
 	ATTR_LIST(min_hot_blocks),
 	ATTR_LIST(min_ssr_sections),
 	ATTR_LIST(max_victim_search),
+	ATTR_LIST(migration_granularity),
 	ATTR_LIST(dir_level),
 	ATTR_LIST(ram_thresh),
 	ATTR_LIST(ra_nid_pages),
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 7261245..18d5ffb 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -288,7 +288,7 @@
 static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 				unsigned int index, unsigned int len,
 				const char *name, struct f2fs_xattr_entry **xe,
-				void **base_addr)
+				void **base_addr, int *base_size)
 {
 	void *cur_addr, *txattr_addr, *last_addr = NULL;
 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
@@ -299,8 +299,8 @@
 	if (!size && !inline_size)
 		return -ENODATA;
 
-	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
-			inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
+	*base_size = inline_size + size + XATTR_PADDING_SIZE;
+	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
 	if (!txattr_addr)
 		return -ENOMEM;
 
@@ -312,8 +312,10 @@
 
 		*xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
 						index, len, name);
-		if (*xe)
+		if (*xe) {
+			*base_size = inline_size;
 			goto check;
+		}
 	}
 
 	/* read from xattr node block */
@@ -415,7 +417,7 @@
 		}
 
 		f2fs_wait_on_page_writeback(ipage ? ipage : in_page,
-							NODE, true);
+							NODE, true, true);
 		/* no need to use xattr node block */
 		if (hsize <= inline_size) {
 			err = f2fs_truncate_xattr_node(inode);
@@ -439,7 +441,7 @@
 			goto in_page_out;
 		}
 		f2fs_bug_on(sbi, new_nid);
-		f2fs_wait_on_page_writeback(xpage, NODE, true);
+		f2fs_wait_on_page_writeback(xpage, NODE, true, true);
 	} else {
 		struct dnode_of_data dn;
 		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
@@ -474,6 +476,7 @@
 	int error = 0;
 	unsigned int size, len;
 	void *base_addr = NULL;
+	int base_size;
 
 	if (name == NULL)
 		return -EINVAL;
@@ -484,7 +487,7 @@
 
 	down_read(&F2FS_I(inode)->i_xattr_sem);
 	error = lookup_all_xattrs(inode, ipage, index, len, name,
-				&entry, &base_addr);
+				&entry, &base_addr, &base_size);
 	up_read(&F2FS_I(inode)->i_xattr_sem);
 	if (error)
 		return error;
@@ -498,6 +501,11 @@
 
 	if (buffer) {
 		char *pval = entry->e_name + entry->e_name_len;
+
+		if (base_size - (pval - (char *)base_addr) < size) {
+			error = -ERANGE;
+			goto out;
+		}
 		memcpy(buffer, pval, size);
 	}
 	error = size;
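
The hunk above guards getxattr against a corrupted on-disk entry whose recorded value size extends past the buffer that lookup_all_xattrs() allocated, returning -ERANGE instead of reading out of bounds. A userspace sketch of the check, using a hypothetical helper:

    #include <stdio.h>
    #include <string.h>

    /* Validate that 'size' bytes starting at 'pval' lie inside the
     * 'base_size'-byte buffer at 'base' before copying; this mirrors
     * the bounds check the hunk above adds. */
    static int copy_value(char *dst, const char *base, int base_size,
    		      const char *pval, int size)
    {
    	if (base_size - (int)(pval - base) < size)
    		return -1;		/* -ERANGE in the kernel */
    	memcpy(dst, pval, size);
    	return size;
    }

    int main(void)
    {
    	char base[32] = "name\0value";
    	char out[16];

    	/* value starts at offset 5 inside the 32-byte buffer */
    	printf("%d\n", copy_value(out, base, sizeof(base), base + 5, 5));
    	/* claimed size runs past the buffer end: rejected */
    	printf("%d\n", copy_value(out, base, sizeof(base), base + 30, 8));
    	return 0;
    }
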
diff --git a/fs/file_table.c b/fs/file_table.c
index e49af4c..d58c11a 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -326,6 +326,12 @@
 
 static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
 
+void flush_delayed_fput_wait(void)
+{
+	delayed_fput(NULL);
+	flush_delayed_work(&delayed_fput_work);
+}
+
 void fput(struct file *file)
 {
 	if (atomic_long_dec_and_test(&file->f_count)) {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 471d863..82ce6d4 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -331,11 +331,22 @@
 	struct work_struct	work;
 };
 
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+	down_write(&bdi->wb_switch_rwsem);
+}
+
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+	up_write(&bdi->wb_switch_rwsem);
+}
+
 static void inode_switch_wbs_work_fn(struct work_struct *work)
 {
 	struct inode_switch_wbs_context *isw =
 		container_of(work, struct inode_switch_wbs_context, work);
 	struct inode *inode = isw->inode;
+	struct backing_dev_info *bdi = inode_to_bdi(inode);
 	struct address_space *mapping = inode->i_mapping;
 	struct bdi_writeback *old_wb = inode->i_wb;
 	struct bdi_writeback *new_wb = isw->new_wb;
@@ -344,6 +355,12 @@
 	void **slot;
 
 	/*
+	 * If @inode switches cgwb membership while sync_inodes_sb() is
+	 * being issued, sync_inodes_sb() might miss it.  Synchronize.
+	 */
+	down_read(&bdi->wb_switch_rwsem);
+
+	/*
 	 * By the time control reaches here, RCU grace period has passed
 	 * since I_WB_SWITCH assertion and all wb stat update transactions
 	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
@@ -435,6 +452,8 @@
 	spin_unlock(&new_wb->list_lock);
 	spin_unlock(&old_wb->list_lock);
 
+	up_read(&bdi->wb_switch_rwsem);
+
 	if (switched) {
 		wb_wakeup(new_wb);
 		wb_put(old_wb);
@@ -475,9 +494,18 @@
 	if (inode->i_state & I_WB_SWITCH)
 		return;
 
+	/*
+	 * Avoid starting new switches while sync_inodes_sb() is in
+	 * progress.  Otherwise, if the down_write protected issue path
+	 * blocks heavily, we might end up starting a large number of
+	 * switches which will block on the rwsem.
+	 */
+	if (!down_read_trylock(&bdi->wb_switch_rwsem))
+		return;
+
 	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
 	if (!isw)
-		return;
+		goto out_unlock;
 
 	/* find and pin the new wb */
 	rcu_read_lock();
@@ -511,12 +539,14 @@
 	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 	 */
 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
-	return;
+	goto out_unlock;
 
 out_free:
 	if (isw->new_wb)
 		wb_put(isw->new_wb);
 	kfree(isw);
+out_unlock:
+	up_read(&bdi->wb_switch_rwsem);
 }
 
 /**
@@ -894,6 +924,9 @@
 
 #else	/* CONFIG_CGROUP_WRITEBACK */
 
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+
 static struct bdi_writeback *
 locked_inode_to_wb_and_lock_list(struct inode *inode)
 	__releases(&inode->i_lock)
@@ -2420,8 +2453,11 @@
 		return;
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
+	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
+	bdi_down_write_wb_switch_rwsem(bdi);
 	bdi_split_work_to_wbs(bdi, &work, false);
 	wb_wait_for_completion(bdi, &done);
+	bdi_up_write_wb_switch_rwsem(bdi);
 
 	wait_sb_inodes(sb);
 }
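
The rwsem added here inverts the usual bias: the many short readers (cgwb switch attempts) use down_read_trylock() and simply skip the switch while sync_inodes_sb() holds the write side, so switches can never pile up behind a slow sync. A userspace analogue with POSIX rwlocks (hypothetical names):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t switch_rwsem = PTHREAD_RWLOCK_INITIALIZER;

    /* switch path: back off instead of queueing behind the writer */
    static int try_start_switch(void)
    {
    	if (pthread_rwlock_tryrdlock(&switch_rwsem) != 0)
    		return 0;	/* sync in progress: skip this switch */
    	/* ... queue the switch work here ... */
    	pthread_rwlock_unlock(&switch_rwsem);
    	return 1;
    }

    /* sync path: exclude new switches for the whole issue phase */
    static void sync_all_inodes(void)
    {
    	pthread_rwlock_wrlock(&switch_rwsem);
    	/* ... split work to wbs and wait for completion ... */
    	pthread_rwlock_unlock(&switch_rwsem);
    }

    int main(void)
    {
    	printf("switched: %d\n", try_start_switch());
    	sync_all_inodes();
    	return 0;
    }
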
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a8d24c0..414c534 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1699,7 +1699,6 @@
 	req->in.h.nodeid = outarg->nodeid;
 	req->in.numargs = 2;
 	req->in.argpages = 1;
-	req->page_descs[0].offset = offset;
 	req->end = fuse_retrieve_end;
 
 	index = outarg->offset >> PAGE_SHIFT;
@@ -1714,6 +1713,7 @@
 
 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
 		req->pages[req->num_pages] = page;
+		req->page_descs[req->num_pages].offset = offset;
 		req->page_descs[req->num_pages].length = this_num;
 		req->num_pages++;
 
@@ -2039,8 +2039,10 @@
 
 	ret = fuse_dev_do_write(fud, &cs, len);
 
+	pipe_lock(pipe);
 	for (idx = 0; idx < nbuf; idx++)
 		pipe_buf_release(pipe, &bufs[idx]);
+	pipe_unlock(pipe);
 
 out:
 	kvfree(bufs);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f3519d6..baa8fbc 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1778,7 +1778,7 @@
 		spin_unlock(&fc->lock);
 
 		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
-		dec_node_page_state(page, NR_WRITEBACK_TEMP);
+		dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
 		wb_writeout_inc(&bdi->wb);
 		fuse_writepage_free(fc, new_req);
 		fuse_request_free(new_req);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4614ee2..9d566e6 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -107,7 +107,7 @@
 
 static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
 {
-	u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
+	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
 
 	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
 }
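
The gfs2 fix hashes only the rhashtable key length instead of sizeof(*name): the struct carries compiler padding that jhash2() would otherwise mix in, so two equal lock names could hash to different wait queues. A userspace sketch of the distinction, with a hypothetical struct layout:

    #include <stdio.h>
    #include <stddef.h>

    struct lockname {
    	unsigned long long	ln_number;
    	unsigned int		ln_type;
    	/* 4 bytes of uninitialized tail padding on LP64 */
    };

    int main(void)
    {
    	/* hash this many bytes (the defined key fields) ... */
    	size_t key_len = offsetof(struct lockname, ln_type)
    			 + sizeof(unsigned int);
    	/* ... never this many, which includes the padding */
    	size_t full = sizeof(struct lockname);

    	printf("key_len=%zu sizeof=%zu\n", key_len, full);	/* 12 16 */
    	return 0;
    }
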
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 648f0ca..998051c 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -744,17 +744,19 @@
 			       the gfs2 structures. */
 	if (default_acl) {
 		error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+		if (error)
+			goto fail_gunlock3;
 		posix_acl_release(default_acl);
+		default_acl = NULL;
 	}
 	if (acl) {
-		if (!error)
-			error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+		error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+		if (error)
+			goto fail_gunlock3;
 		posix_acl_release(acl);
+		acl = NULL;
 	}
 
-	if (error)
-		goto fail_gunlock3;
-
 	error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
 					     &gfs2_initxattrs, NULL);
 	if (error)
@@ -789,10 +791,8 @@
 	}
 	gfs2_rsqa_delete(ip, NULL);
 fail_free_acls:
-	if (default_acl)
-		posix_acl_release(default_acl);
-	if (acl)
-		posix_acl_release(acl);
+	posix_acl_release(default_acl);
+	posix_acl_release(acl);
 fail_gunlock:
 	gfs2_dir_no_add(&da);
 	gfs2_glock_dq_uninit(ghs);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 1978581..b0eef00 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -859,6 +859,18 @@
 	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
+
+	/*
+	 * page_private is subpool pointer in hugetlb pages.  Transfer to
+	 * new page.  PagePrivate is not associated with page_private for
+	 * hugetlb pages and can not be set here as only page_huge_active
+	 * pages can be migrated.
+	 */
+	if (page_private(page)) {
+		set_page_private(newpage, page_private(page));
+		set_page_private(page, 0);
+	}
+
 	if (mode != MIGRATE_SYNC_NO_COPY)
 		migrate_page_copy(newpage, page);
 	else
diff --git a/fs/inode.c b/fs/inode.c
index 39f86b5..790552e 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -730,11 +730,8 @@
 		return LRU_REMOVED;
 	}
 
-	/*
-	 * Recently referenced inodes and inodes with many attached pages
-	 * get one more pass.
-	 */
-	if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
+	/* recently referenced inodes get one more pass */
+	if (inode->i_state & I_REFERENCED) {
 		inode->i_state &= ~I_REFERENCED;
 		spin_unlock(&inode->i_lock);
 		return LRU_ROTATE;
diff --git a/fs/iomap.c b/fs/iomap.c
index 37da7a6..fac4520 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -495,16 +495,29 @@
 }
 EXPORT_SYMBOL_GPL(iomap_readpages);
 
+/*
+ * iomap_is_partially_uptodate checks whether blocks within a page are
+ * uptodate or not.
+ *
+ * Returns true if all blocks which correspond to a file portion
+ * we want to read within the page are uptodate.
+ */
 int
 iomap_is_partially_uptodate(struct page *page, unsigned long from,
 		unsigned long count)
 {
 	struct iomap_page *iop = to_iomap_page(page);
 	struct inode *inode = page->mapping->host;
-	unsigned first = from >> inode->i_blkbits;
-	unsigned last = (from + count - 1) >> inode->i_blkbits;
+	unsigned len, first, last;
 	unsigned i;
 
+	/* Limit range to one page */
+	len = min_t(unsigned, PAGE_SIZE - from, count);
+
+	/* First and last blocks in range within page */
+	first = from >> inode->i_blkbits;
+	last = (from + len - 1) >> inode->i_blkbits;
+
 	if (iop) {
 		for (i = first; i <= last; i++)
 			if (!test_bit(i, iop->uptodate))
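
Without the clamp added above, a count spanning several pages made 'last' index past the per-page uptodate bitmap; limiting the range to the current page first keeps first/last inside the bitmap. A userspace sketch of the clamp:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Clamp a (from, count) range to the single page being examined
     * before computing block indices, as the hunk above now does. */
    static unsigned clamp_to_page(unsigned from, unsigned long count)
    {
    	unsigned long space = PAGE_SIZE - from;

    	return count < space ? (unsigned)count : (unsigned)space;
    }

    int main(void)
    {
    	/* a 64KiB read starting mid-page only covers this page's tail */
    	printf("%u\n", clamp_to_page(512, 65536));	/* prints: 3584 */
    	return 0;
    }
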
@@ -559,8 +572,10 @@
 
 	if (page_has_private(page)) {
 		ClearPagePrivate(page);
+		get_page(newpage);
 		set_page_private(newpage, page_private(page));
 		set_page_private(page, 0);
+		put_page(page);
 		SetPagePrivate(newpage);
 	}
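
Because page_private carries a reference-holding payload (the iomap_page structure here), migration must move the reference along with the pointer: take one on the new page before it owns the payload, drop the old page's afterwards. A userspace analogue of the ownership hand-off, with hypothetical types:

    #include <stdio.h>

    struct obj { int refs; void *private; };

    static void get(struct obj *o) { o->refs++; }
    static void put(struct obj *o) { o->refs--; }

    /* Transfer a reference-holding field between objects, mirroring
     * the get_page()/put_page() pair in the hunk above. */
    static void transfer_private(struct obj *from, struct obj *to)
    {
    	get(to);			/* new holder pins itself */
    	to->private = from->private;
    	from->private = NULL;
    	put(from);			/* old holder releases its pin */
    }

    int main(void)
    {
    	struct obj a = { .refs = 1, .private = &a }, b = { .refs = 1 };

    	transfer_private(&a, &b);
    	printf("a.refs=%d b.refs=%d\n", a.refs, b.refs);	/* 0 2 */
    	return 0;
    }
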
 
@@ -1772,6 +1787,7 @@
 	loff_t pos = iocb->ki_pos, start = pos;
 	loff_t end = iocb->ki_pos + count - 1, ret = 0;
 	unsigned int flags = IOMAP_DIRECT;
+	bool wait_for_completion = is_sync_kiocb(iocb);
 	struct blk_plug plug;
 	struct iomap_dio *dio;
 
@@ -1791,7 +1807,6 @@
 	dio->end_io = end_io;
 	dio->error = 0;
 	dio->flags = 0;
-	dio->wait_for_completion = is_sync_kiocb(iocb);
 
 	dio->submit.iter = iter;
 	dio->submit.waiter = current;
@@ -1846,7 +1861,7 @@
 		dio_warn_stale_pagecache(iocb->ki_filp);
 	ret = 0;
 
-	if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
+	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
 	    !inode->i_sb->s_dio_done_wq) {
 		ret = sb_init_dio_done_wq(inode->i_sb);
 		if (ret < 0)
@@ -1862,7 +1877,7 @@
 		if (ret <= 0) {
 			/* magic error code to fall back to buffered I/O */
 			if (ret == -ENOTBLK) {
-				dio->wait_for_completion = true;
+				wait_for_completion = true;
 				ret = 0;
 			}
 			break;
@@ -1884,8 +1899,24 @@
 	if (dio->flags & IOMAP_DIO_WRITE_FUA)
 		dio->flags &= ~IOMAP_DIO_NEED_SYNC;
 
+	/*
+	 * We are about to drop our additional submission reference, which
+	 * might be the last reference to the dio.  There are three
+	 * different ways we can progress here:
+	 *
+	 *  (a) If this is the last reference we will always complete and free
+	 *	the dio ourselves.
+	 *  (b) If this is not the last reference, and we serve an asynchronous
+	 *	iocb, we must never touch the dio after the decrement, the
+	 *	I/O completion handler will complete and free it.
+	 *  (c) If this is not the last reference, but we serve a synchronous
+	 *	iocb, the I/O completion handler will wake us up on the drop
+	 *	of the final reference, and we will complete and free it here
+	 *	after we got woken by the I/O completion handler.
+	 */
+	dio->wait_for_completion = wait_for_completion;
 	if (!atomic_dec_and_test(&dio->ref)) {
-		if (!dio->wait_for_completion)
+		if (!wait_for_completion)
 			return -EIOCBQUEUED;
 
 		for (;;) {
@@ -1902,9 +1933,7 @@
 		__set_current_state(TASK_RUNNING);
 	}
 
-	ret = iomap_dio_complete(dio);
-
-	return ret;
+	return iomap_dio_complete(dio);
 
 out_free_dio:
 	kfree(dio);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 902a7dd..bb6ae38 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -101,7 +101,8 @@
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
 
 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
-	cancel_delayed_work_sync(&c->wbuf_dwork);
+	if (jffs2_is_writebuffered(c))
+		cancel_delayed_work_sync(&c->wbuf_dwork);
 #endif
 
 	mutex_lock(&c->alloc_sem);
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index dbf5bc2..2d8b91f 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -832,26 +832,35 @@
  * to see if it supports poll (Neither 'poll' nor 'select' return
  * an appropriate error code).  When in doubt, set a suitable timeout value.
  */
+__poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
+{
+	struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
+	struct kernfs_open_node *on = kn->attr.open;
+
+	poll_wait(of->file, &on->poll, wait);
+
+	if (of->event != atomic_read(&on->event))
+		return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+
+	return DEFAULT_POLLMASK;
+}
+
 static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
 {
 	struct kernfs_open_file *of = kernfs_of(filp);
 	struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
-	struct kernfs_open_node *on = kn->attr.open;
+	__poll_t ret;
 
 	if (!kernfs_get_active(kn))
-		goto trigger;
+		return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
 
-	poll_wait(filp, &on->poll, wait);
+	if (kn->attr.ops->poll)
+		ret = kn->attr.ops->poll(of, wait);
+	else
+		ret = kernfs_generic_poll(of, wait);
 
 	kernfs_put_active(kn);
-
-	if (of->event != atomic_read(&on->event))
-		goto trigger;
-
-	return DEFAULT_POLLMASK;
-
- trigger:
-	return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+	return ret;
 }
 
 static void kernfs_notify_workfn(struct work_struct *work)
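
The refactor exports the default behaviour as kernfs_generic_poll() so that a kernfs_ops ->poll hook can wrap it rather than reimplement the open-node bookkeeping. A kernel-style sketch of such a hook (illustrative only; assumes kernfs internals):

    static __poll_t my_poll(struct kernfs_open_file *of, poll_table *wait)
    {
    	/* reuse the default event counter and poll_wait() handling */
    	__poll_t ret = kernfs_generic_poll(of, wait);

    	/* layer attribute-specific readiness on top, e.g. report
    	 * readability whenever an event was observed */
    	if (ret & EPOLLPRI)
    		ret |= EPOLLIN;
    	return ret;
    }
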
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index d20b92f..0a67dd4 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -442,7 +442,7 @@
 			fl->fl_start = req->a_res.lock.fl.fl_start;
 			fl->fl_end = req->a_res.lock.fl.fl_end;
 			fl->fl_type = req->a_res.lock.fl.fl_type;
-			fl->fl_pid = 0;
+			fl->fl_pid = -req->a_res.lock.fl.fl_pid;
 			break;
 		default:
 			status = nlm_stat_to_errno(req->a_res.status);
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 7147e4a..9846f7e 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -127,7 +127,7 @@
 
 	locks_init_lock(fl);
 	fl->fl_owner = current->files;
-	fl->fl_pid   = (pid_t)lock->svid;
+	fl->fl_pid   = current->tgid;
 	fl->fl_flags = FL_POSIX;
 	fl->fl_type  = F_RDLCK;		/* as good as anything else */
 	start = ntohl(*p++);
@@ -269,7 +269,7 @@
 	memset(lock, 0, sizeof(*lock));
 	locks_init_lock(&lock->fl);
 	lock->svid = ~(u32) 0;
-	lock->fl.fl_pid = (pid_t)lock->svid;
+	lock->fl.fl_pid = current->tgid;
 
 	if (!(p = nlm_decode_cookie(p, &argp->cookie))
 	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index 7ed9edf..70154f3 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -119,7 +119,7 @@
 
 	locks_init_lock(fl);
 	fl->fl_owner = current->files;
-	fl->fl_pid   = (pid_t)lock->svid;
+	fl->fl_pid   = current->tgid;
 	fl->fl_flags = FL_POSIX;
 	fl->fl_type  = F_RDLCK;		/* as good as anything else */
 	p = xdr_decode_hyper(p, &start);
@@ -266,7 +266,7 @@
 	memset(lock, 0, sizeof(*lock));
 	locks_init_lock(&lock->fl);
 	lock->svid = ~(u32) 0;
-	lock->fl.fl_pid = (pid_t)lock->svid;
+	lock->fl.fl_pid = current->tgid;
 
 	if (!(p = nlm4_decode_cookie(p, &argp->cookie))
 	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
diff --git a/fs/namei.c b/fs/namei.c
index bd04eef..dfb16d8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2926,6 +2926,11 @@
 	if (error)
 		return error;
 	error = dir->i_op->create(dir, dentry, mode, want_excl);
+	if (error)
+		return error;
+	error = security_inode_post_create(dir, dentry, mode);
+	if (error)
+		return error;
 	if (!error)
 		fsnotify_create(dir, dentry);
 	return error;
@@ -3735,8 +3740,7 @@
 	if (error)
 		return error;
 
-	if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
-	    !ns_capable(dentry->d_sb->s_user_ns, CAP_MKNOD))
+	if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
 		return -EPERM;
 
 	if (!dir->i_op->mknod)
@@ -3751,6 +3755,11 @@
 		return error;
 
 	error = dir->i_op->mknod(dir, dentry, mode, dev);
+	if (error)
+		return error;
+	error = security_inode_post_create(dir, dentry, mode);
+	if (error)
+		return error;
 	if (!error)
 		fsnotify_create(dir, dentry);
 	return error;
diff --git a/fs/namespace.c b/fs/namespace.c
index 6b14059..c2340e8 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -21,6 +21,7 @@
 #include <linux/fs_struct.h>	/* get_fs_root et.al. */
 #include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
 #include <linux/uaccess.h>
+#include <linux/file.h>
 #include <linux/proc_ns.h>
 #include <linux/magic.h>
 #include <linux/bootmem.h>
@@ -1134,6 +1135,12 @@
 }
 static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
 
+void flush_delayed_mntput_wait(void)
+{
+	delayed_mntput(NULL);
+	flush_delayed_work(&delayed_mntput_work);
+}
+
 static void mntput_no_expire(struct mount *mnt)
 {
 	rcu_read_lock();
@@ -1650,6 +1657,7 @@
 	struct mount *mnt;
 	int retval;
 	int lookup_flags = 0;
+	bool user_request = !(current->flags & PF_KTHREAD);
 
 	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
 		return -EINVAL;
@@ -1675,11 +1683,36 @@
 	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
 		goto dput_and_out;
 
+	/* flush delayed_fput to put mnt_count */
+	if (user_request)
+		flush_delayed_fput_wait();
+
 	retval = do_umount(mnt, flags);
 dput_and_out:
 	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
 	dput(path.dentry);
 	mntput_no_expire(mnt);
+
+	if (!user_request)
+		goto out;
+
+	if (!retval) {
+		/*
+		 * If the last delayed_fput() runs during do_umount() and
+		 * drops mnt_count to zero, wait for the delayed_fput work
+		 * again to guarantee that delayed_mntput gets registered.
+		 */
+		flush_delayed_fput_wait();
+
+		/* flush delayed_mntput_work to put sb->s_active */
+		flush_delayed_mntput_wait();
+	}
+	if (!retval || (flags & MNT_FORCE)) {
+		/* filesystem needs to handle unclosed namespaces */
+		if (mnt->mnt.mnt_sb->s_op->umount_end)
+			mnt->mnt.mnt_sb->s_op->umount_end(mnt->mnt.mnt_sb,
+					flags);
+	}
 out:
 	return retval;
 }
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ac4b2f0..6b666d1 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1906,6 +1906,11 @@
 	size_t len;
 	char *end;
 
+	if (unlikely(!dev_name || !*dev_name)) {
+		dfprintk(MOUNT, "NFS: device name not specified\n");
+		return -EINVAL;
+	}
+
 	/* Is the host name protected with square brackets? */
 	if (*dev_name == '[') {
 		end = strchr(++dev_name, ']');
@@ -2409,8 +2414,7 @@
 		goto Ebusy;
 	if (a->acdirmax != b->acdirmax)
 		goto Ebusy;
-	if (b->auth_info.flavor_len > 0 &&
-	   clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
+	if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
 		goto Ebusy;
 	return 1;
 Ebusy:
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 586726a..d790faf 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -621,11 +621,12 @@
 	nfs_set_page_writeback(page);
 	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
 
-	ret = 0;
+	ret = req->wb_context->error;
 	/* If there is a fatal error that covers this write, just exit */
-	if (nfs_error_is_fatal_on_server(req->wb_context->error))
+	if (nfs_error_is_fatal_on_server(ret))
 		goto out_launder;
 
+	ret = 0;
 	if (!nfs_pageio_add_request(pgio, req)) {
 		ret = pgio->pg_error;
 		/*
@@ -635,9 +636,9 @@
 			nfs_context_set_write_error(req->wb_context, ret);
 			if (nfs_error_is_fatal_on_server(ret))
 				goto out_launder;
-		}
+		} else
+			ret = -EAGAIN;
 		nfs_redirty_request(req);
-		ret = -EAGAIN;
 	} else
 		nfs_add_stats(page_file_mapping(page)->host,
 				NFSIOS_WRITEPAGES, 1);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 9d6b4f0..f35aa9f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1015,8 +1015,6 @@
 
 	nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist,
 				      &write->wr_head, write->wr_buflen);
-	if (!nvecs)
-		return nfserr_io;
 	WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
 
 	status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 7fb9f7c..39b835d 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1126,6 +1126,8 @@
 		case 'Y':
 		case 'y':
 		case '1':
+			if (nn->nfsd_serv)
+				return -EBUSY;
 			nfsd4_end_grace(nn);
 			break;
 		default:
@@ -1237,8 +1239,8 @@
 	retval = nfsd_idmap_init(net);
 	if (retval)
 		goto out_idmap_error;
-	nn->nfsd4_lease = 45;	/* default lease time */
-	nn->nfsd4_grace = 45;
+	nn->nfsd4_lease = 90;	/* default lease time */
+	nn->nfsd4_grace = 90;
 	nn->somebody_reclaimed = false;
 	nn->clverifier_counter = prandom_u32();
 	nn->clientid_counter = prandom_u32();
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index f9a95fb..0bc716c 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -726,8 +726,10 @@
 		return -EBADF;
 
 	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
-	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)))
-		return -EINVAL;
+	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
+		ret = -EINVAL;
+		goto fput_and_out;
+	}
 
 	/* verify that this is indeed an inotify instance */
 	if (unlikely(f.file->f_op != &inotify_fops)) {
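
With the fix, the conflicting mask bits now release the fd reference taken by fdget() instead of leaking it on the early return. From userspace the combination simply fails with EINVAL; a small sketch (IN_MASK_CREATE's value is defined as a fallback for older headers):

/* Hedged sketch: IN_MASK_ADD and IN_MASK_CREATE are mutually exclusive,
 * so this watch request should fail with EINVAL. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/inotify.h>

#ifndef IN_MASK_CREATE			/* older headers may lack it */
#define IN_MASK_CREATE 0x10000000
#endif

int main(void)
{
	int fd = inotify_init1(0);

	if (fd < 0)
		return 1;
	if (inotify_add_watch(fd, "/tmp",
			      IN_CREATE | IN_MASK_ADD | IN_MASK_CREATE) < 0)
		printf("expected failure: %s\n", strerror(errno));
	return 0;
}
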
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 99ee093..cc9b32b 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Ifs/ocfs2
+ccflags-y := -I$(src)
 
 obj-$(CONFIG_OCFS2_FS) += 	\
 	ocfs2.o			\
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 1d098c3..9f8250d 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -152,7 +152,6 @@
 #endif
 		}
 
-		clear_buffer_uptodate(bh);
 		get_bh(bh); /* for end_buffer_read_sync() */
 		bh->b_end_io = end_buffer_read_sync;
 		submit_bh(REQ_OP_READ, 0, bh);
@@ -306,7 +305,6 @@
 				continue;
 			}
 
-			clear_buffer_uptodate(bh);
 			get_bh(bh); /* for end_buffer_read_sync() */
 			if (validate)
 				set_buffer_needs_validate(bh);
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index bd1aab1f..ef28544 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Ifs/ocfs2
+ccflags-y := -I$(src)/..
 
 obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
 
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
index eed3db8c..33431a0 100644
--- a/fs/ocfs2/dlmfs/Makefile
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Ifs/ocfs2
+ccflags-y := -I$(src)/..
 
 obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
 
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 7642b67..3020823 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -345,13 +345,18 @@
 	if (num_used
 	    || alloc->id1.bitmap1.i_used
 	    || alloc->id1.bitmap1.i_total
-	    || la->la_bm_off)
-		mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
+	    || la->la_bm_off) {
+		mlog(ML_ERROR, "inconsistency detected: clean journal with"
+		     " unrecovered local alloc, please run fsck.ocfs2!\n"
 		     "found = %u, set = %u, taken = %u, off = %u\n",
 		     num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
 		     le32_to_cpu(alloc->id1.bitmap1.i_total),
 		     OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
 
+		status = -EINVAL;
+		goto bail;
+	}
+
 	osb->local_alloc_bh = alloc_bh;
 	osb->local_alloc_state = OCFS2_LA_ENABLED;
 
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 1cc797a..e6b5d62 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -829,7 +829,7 @@
 		dput(parent);
 		dput(next);
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index b2aadd3..2e4af5f 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -567,7 +567,8 @@
 		override_cred->fsgid = inode->i_gid;
 		if (!attr->hardlink) {
 			err = security_dentry_create_files_as(dentry,
-					attr->mode, &dentry->d_name, old_cred,
+					attr->mode, &dentry->d_name,
+					old_cred ? old_cred : current_cred(),
 					override_cred);
 			if (err) {
 				put_cred(override_cred);
@@ -583,7 +584,7 @@
 			err = ovl_create_over_whiteout(dentry, inode, attr);
 	}
 out_revert_creds:
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	return err;
 }
 
@@ -659,7 +660,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	err = ovl_set_redirect(dentry, false);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
@@ -857,7 +858,7 @@
 		err = ovl_remove_upper(dentry, is_dir, &list);
 	else
 		err = ovl_remove_and_whiteout(dentry, &list);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	if (!err) {
 		if (is_dir)
 			clear_nlink(dentry->d_inode);
@@ -1225,7 +1226,7 @@
 out_unlock:
 	unlock_rename(new_upperdir, old_upperdir);
 out_revert_creds:
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	ovl_nlink_end(new, locked);
 out_drop_write:
 	ovl_drop_write(old);
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 986313d..da7d785 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -33,7 +33,7 @@
 	old_cred = ovl_override_creds(inode->i_sb);
 	realfile = open_with_fake_path(&file->f_path, file->f_flags | O_NOATIME,
 				       realinode, current_cred());
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
 		 file, file, ovl_whatisit(inode, realinode), file->f_flags,
@@ -208,7 +208,7 @@
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
 			    ovl_iocb_to_rwf(iocb));
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	ovl_file_accessed(file);
 
@@ -244,7 +244,7 @@
 	ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
 			     ovl_iocb_to_rwf(iocb));
 	file_end_write(real.file);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	/* Update size */
 	ovl_copyattr(ovl_inode_real(inode), inode);
@@ -271,7 +271,7 @@
 	if (file_inode(real.file) == ovl_inode_upper(file_inode(file))) {
 		old_cred = ovl_override_creds(file_inode(file)->i_sb);
 		ret = vfs_fsync_range(real.file, start, end, datasync);
-		revert_creds(old_cred);
+		ovl_revert_creds(old_cred);
 	}
 
 	fdput(real);
@@ -295,7 +295,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = call_mmap(vma->vm_file, vma);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	if (ret) {
 		/* Drop reference count from new vm_file value */
@@ -323,7 +323,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = vfs_fallocate(real.file, mode, offset, len);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	/* Update size */
 	ovl_copyattr(ovl_inode_real(inode), inode);
@@ -345,7 +345,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = vfs_fadvise(real.file, offset, len, advice);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	fdput(real);
 
@@ -365,7 +365,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = vfs_ioctl(real.file, cmd, arg);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	fdput(real);
 
@@ -470,7 +470,7 @@
 						real_out.file, pos_out, len);
 		break;
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	/* Update size */
 	ovl_copyattr(ovl_inode_real(inode_out), inode_out);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 3b7ed5d..b3c6126 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -64,7 +64,7 @@
 		inode_lock(upperdentry->d_inode);
 		old_cred = ovl_override_creds(dentry->d_sb);
 		err = notify_change(upperdentry, attr, NULL);
-		revert_creds(old_cred);
+		ovl_revert_creds(old_cred);
 		if (!err)
 			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
 		inode_unlock(upperdentry->d_inode);
@@ -260,7 +260,7 @@
 		stat->nlink = dentry->d_inode->i_nlink;
 
 out:
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
@@ -294,7 +294,7 @@
 		mask |= MAY_READ;
 	}
 	err = inode_permission(realinode, mask);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
@@ -311,7 +311,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	p = vfs_get_link(ovl_dentry_real(dentry), done);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	return p;
 }
 
@@ -354,7 +354,7 @@
 		WARN_ON(flags != XATTR_REPLACE);
 		err = vfs_removexattr(realdentry, name);
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	/* copy c/mtime */
 	ovl_copyattr(d_inode(realdentry), inode);
@@ -375,7 +375,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	res = vfs_getxattr(realdentry, name, value, size);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	return res;
 }
 
@@ -399,7 +399,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	res = vfs_listxattr(realdentry, list, size);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	if (res <= 0 || size == 0)
 		return res;
 
@@ -434,7 +434,7 @@
 
 	old_cred = ovl_override_creds(inode->i_sb);
 	acl = get_acl(realinode, type);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return acl;
 }
@@ -472,7 +472,7 @@
 		filemap_write_and_wait(realinode->i_mapping);
 
 	err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index efd3723..2fd199e 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -1069,7 +1069,7 @@
 			goto out_free_oe;
 	}
 
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	if (origin_path) {
 		dput(origin_path->dentry);
 		kfree(origin_path);
@@ -1096,7 +1096,7 @@
 	kfree(upperredirect);
 out:
 	kfree(d.redirect);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	return ERR_PTR(err);
 }
 
@@ -1150,7 +1150,7 @@
 			dput(this);
 		}
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return positive;
 }
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index a3c0d95..552a19a 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -208,6 +208,7 @@
 void ovl_drop_write(struct dentry *dentry);
 struct dentry *ovl_workdir(struct dentry *dentry);
 const struct cred *ovl_override_creds(struct super_block *sb);
+void ovl_revert_creds(const struct cred *oldcred);
 struct super_block *ovl_same_sb(struct super_block *sb);
 int ovl_can_decode_fh(struct super_block *sb);
 struct dentry *ovl_indexdir(struct super_block *sb);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index ec23703..e38eea8 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -20,6 +20,7 @@
 	bool nfs_export;
 	int xino;
 	bool metacopy;
+	bool override_creds;
 };
 
 struct ovl_sb {
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index cc8303a..ec591b4 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -289,7 +289,7 @@
 		}
 		inode_unlock(dir->d_inode);
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
@@ -921,7 +921,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	err = ovl_dir_read_merged(dentry, list, &root);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	if (err)
 		return err;
 
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 0fb0a59..df77062 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -56,6 +56,11 @@
 MODULE_PARM_DESC(ovl_xino_auto_def,
 		 "Auto enable xino feature");
 
+static bool __read_mostly ovl_override_creds_def = true;
+module_param_named(override_creds, ovl_override_creds_def, bool, 0644);
+MODULE_PARM_DESC(ovl_override_creds_def,
+		 "Use mounter's credentials for accesses");
+
 static void ovl_entry_stack_free(struct ovl_entry *oe)
 {
 	unsigned int i;
@@ -362,6 +367,9 @@
 	if (ofs->config.metacopy != ovl_metacopy_def)
 		seq_printf(m, ",metacopy=%s",
 			   ofs->config.metacopy ? "on" : "off");
+	if (ofs->config.override_creds != ovl_override_creds_def)
+		seq_show_option(m, "override_creds",
+				ofs->config.override_creds ? "on" : "off");
 	return 0;
 }
 
@@ -401,6 +409,8 @@
 	OPT_XINO_AUTO,
 	OPT_METACOPY_ON,
 	OPT_METACOPY_OFF,
+	OPT_OVERRIDE_CREDS_ON,
+	OPT_OVERRIDE_CREDS_OFF,
 	OPT_ERR,
 };
 
@@ -419,6 +429,8 @@
 	{OPT_XINO_AUTO,			"xino=auto"},
 	{OPT_METACOPY_ON,		"metacopy=on"},
 	{OPT_METACOPY_OFF,		"metacopy=off"},
+	{OPT_OVERRIDE_CREDS_ON,		"override_creds=on"},
+	{OPT_OVERRIDE_CREDS_OFF,	"override_creds=off"},
 	{OPT_ERR,			NULL}
 };
 
@@ -477,6 +489,7 @@
 	config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
 	if (!config->redirect_mode)
 		return -ENOMEM;
+	config->override_creds = ovl_override_creds_def;
 
 	while ((p = ovl_next_opt(&opt)) != NULL) {
 		int token;
@@ -557,6 +570,14 @@
 			config->metacopy = false;
 			break;
 
+		case OPT_OVERRIDE_CREDS_ON:
+			config->override_creds = true;
+			break;
+
+		case OPT_OVERRIDE_CREDS_OFF:
+			config->override_creds = false;
+			break;
+
 		default:
 			pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
 			return -EINVAL;
@@ -1521,7 +1542,6 @@
 		       ovl_dentry_lower(root_dentry), NULL);
 
 	sb->s_root = root_dentry;
-
 	return 0;
 
 out_free_oe:
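
The new override_creds=on|off option (specific to this patch, not an upstream overlayfs option) lets a mount opt out of impersonating the mounter. A hedged mount(2) sketch with placeholder directories:

/* Hedged sketch: mounting overlayfs with the override_creds=off option
 * added by this patch (paths are placeholders; needs CAP_SYS_ADMIN). */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	const char *opts = "lowerdir=/lower,upperdir=/upper,"
			   "workdir=/work,override_creds=off";

	if (mount("overlay", "/merged", "overlay", 0, opts) != 0)
		fprintf(stderr, "mount: %s\n", strerror(errno));
	return 0;
}
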
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index ace4fe4..470310e 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -40,9 +40,17 @@
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
 
+	if (!ofs->config.override_creds)
+		return NULL;
 	return override_creds(ofs->creator_cred);
 }
 
+void ovl_revert_creds(const struct cred *old_cred)
+{
+	if (old_cred)
+		revert_creds(old_cred);
+}
+
 struct super_block *ovl_same_sb(struct super_block *sb)
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
@@ -783,7 +791,7 @@
 	 * value relative to the upper inode nlink in an upper inode xattr.
 	 */
 	err = ovl_set_nlink_upper(dentry);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 out:
 	if (err)
@@ -803,7 +811,7 @@
 
 			old_cred = ovl_override_creds(dentry->d_sb);
 			ovl_cleanup_index(dentry);
-			revert_creds(old_cred);
+			ovl_revert_creds(old_cred);
 		}
 
 		mutex_unlock(&OVL_I(d_inode(dentry))->lock);
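
Since ovl_override_creds() can now return NULL when the override is disabled, every call site pairs it with ovl_revert_creds(), which treats NULL as a no-op. A standalone sketch of that NULL-tolerant pairing, with stand-in types for the kernel's cred handling:

/* Hedged sketch of the NULL-tolerant override/revert pairing used above;
 * the cred type and enable flag are stand-ins for the kernel structures. */
#include <stdio.h>
#include <stddef.h>

struct cred { const char *who; };

static int override_enabled;
static struct cred mounter = { "mounter" };
static struct cred caller  = { "caller"  };
static struct cred *current_cred = &caller;

static struct cred *sketch_override_creds(void)
{
	struct cred *old;

	if (!override_enabled)
		return NULL;		/* caller keeps its own creds */
	old = current_cred;
	current_cred = &mounter;
	return old;
}

static void sketch_revert_creds(struct cred *old)
{
	if (old)			/* NULL means nothing was overridden */
		current_cred = old;
}

int main(void)
{
	struct cred *old = sketch_override_creds();

	printf("acting as %s\n", current_cred->who);
	sketch_revert_creds(old);
	printf("back to %s\n", current_cred->who);
	return 0;
}
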
diff --git a/fs/pnode.c b/fs/pnode.c
index 56f9a28..681916d 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -608,36 +608,18 @@
 	return 0;
 }
 
-/*
- *  Iterates over all slaves, and slaves of slaves.
- */
-static struct mount *next_descendent(struct mount *root, struct mount *cur)
-{
-	if (!IS_MNT_NEW(cur) && !list_empty(&cur->mnt_slave_list))
-		return first_slave(cur);
-	do {
-		struct mount *master = cur->mnt_master;
-
-		if (!master || cur->mnt_slave.next != &master->mnt_slave_list) {
-			struct mount *next = next_slave(cur);
-
-			return (next == root) ? NULL : next;
-		}
-		cur = master;
-	} while (cur != root);
-	return NULL;
-}
-
 void propagate_remount(struct mount *mnt)
 {
-	struct mount *m = mnt;
+	struct mount *parent = mnt->mnt_parent;
+	struct mount *p = mnt, *m;
 	struct super_block *sb = mnt->mnt.mnt_sb;
 
-	if (sb->s_op->copy_mnt_data) {
-		m = next_descendent(mnt, m);
-		while (m) {
+	if (!sb->s_op->copy_mnt_data)
+		return;
+	for (p = propagation_next(parent, parent); p;
+				p = propagation_next(p, parent)) {
+		m = __lookup_mnt(&p->mnt, mnt->mnt_mountpoint);
+		if (m)
 			sb->s_op->copy_mnt_data(m->mnt.data, mnt->mnt.data);
-			m = next_descendent(mnt, m);
-		}
 	}
 }
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 4d96a7c..cad2c60 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -100,7 +100,6 @@
 
 config PROC_UID
 	bool "Include /proc/uid/ files"
-	default y
 	depends on PROC_FS && RT_MUTEXES
 	help
 	Provides aggregated per-uid information under /proc/uid.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c6a8cba..1d56f1f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1085,10 +1085,6 @@
 
 			task_lock(p);
 			if (!p->vfork_done && process_shares_mm(p, mm)) {
-				pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
-						task_pid_nr(p), p->comm,
-						p->signal->oom_score_adj, oom_adj,
-						task_pid_nr(task), task->comm);
 				p->signal->oom_score_adj = oom_adj;
 				if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
 					p->signal->oom_score_adj_min = (short)oom_adj;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 8ae1094..e39bac9 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -256,7 +256,7 @@
 		inode = proc_get_inode(dir->i_sb, de);
 		if (!inode)
 			return ERR_PTR(-ENOMEM);
-		d_set_d_op(dentry, &proc_misc_dentry_ops);
+		d_set_d_op(dentry, de->proc_dops);
 		return d_splice_alias(inode, dentry);
 	}
 	read_unlock(&proc_subdir_lock);
@@ -429,6 +429,8 @@
 	INIT_LIST_HEAD(&ent->pde_openers);
 	proc_set_user(ent, (*parent)->uid, (*parent)->gid);
 
+	ent->proc_dops = &proc_misc_dentry_ops;
+
 out:
 	return ent;
 }
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index c0c7abb..bacad3e 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -44,6 +44,7 @@
 	struct completion *pde_unload_completion;
 	const struct inode_operations *proc_iops;
 	const struct file_operations *proc_fops;
+	const struct dentry_operations *proc_dops;
 	union {
 		const struct seq_operations *seq_ops;
 		int (*single_show)(struct seq_file *, void *);
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index d066947..8468bae 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -10,9 +10,6 @@
 #include <linux/seqlock.h>
 #include <linux/time.h>
 
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-
 static int loadavg_proc_show(struct seq_file *m, void *v)
 {
 	unsigned long avnrun[3];
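
LOAD_INT/LOAD_FRAC are removed here because they move to a shared header; they convert the kernel's fixed-point load averages for display. A standalone sketch of that arithmetic using the usual FSHIFT of 11:

/* Hedged sketch of the fixed-point load-average formatting that
 * LOAD_INT/LOAD_FRAC implement (FSHIFT is 11 on Linux). */
#include <stdio.h>

#define FSHIFT		11
#define FIXED_1		(1 << FSHIFT)
#define LOAD_INT(x)	((x) >> FSHIFT)
#define LOAD_FRAC(x)	LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	unsigned long avnrun = 3 * FIXED_1 + FIXED_1 / 4;	/* 3.25 */

	printf("%lu.%02lu\n", LOAD_INT(avnrun), LOAD_FRAC(avnrun));
	return 0;
}
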
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index d5e0fcb..a7b1243 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -38,6 +38,22 @@
 	return maybe_get_net(PDE_NET(PDE(inode)));
 }
 
+static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	return 0;
+}
+
+static const struct dentry_operations proc_net_dentry_ops = {
+	.d_revalidate	= proc_net_d_revalidate,
+	.d_delete	= always_delete_dentry,
+};
+
+static void pde_force_lookup(struct proc_dir_entry *pde)
+{
+	/* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
+	pde->proc_dops = &proc_net_dentry_ops;
+}
+
 static int seq_open_net(struct inode *inode, struct file *file)
 {
 	unsigned int state_size = PDE(inode)->state_size;
@@ -90,6 +106,7 @@
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_seq_fops;
 	p->seq_ops = ops;
 	p->state_size = state_size;
@@ -133,6 +150,7 @@
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_seq_fops;
 	p->seq_ops = ops;
 	p->state_size = state_size;
@@ -181,6 +199,7 @@
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_single_fops;
 	p->single_show = show;
 	return proc_register(parent, p);
@@ -223,6 +242,7 @@
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_single_fops;
 	p->single_show = show;
 	p->write = write;
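
Forcing a fresh lookup matters because the same /proc/net dentry must resolve differently once the task enters another network namespace. A hedged userspace demonstration (requires privilege to unshare):

/* Hedged sketch: /proc/net contents change when the task switches
 * network namespaces, which is why the dentries above always force
 * a fresh lookup. Needs CAP_SYS_ADMIN to actually unshare. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>

static void dump_dev(const char *tag)
{
	char line[256];
	FILE *f = fopen("/proc/net/dev", "r");

	if (!f)
		return;
	printf("-- %s --\n", tag);
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	dump_dev("initial netns");
	if (unshare(CLONE_NEWNET) == 0)	/* fresh, empty namespace */
		dump_dev("new netns");
	else
		perror("unshare");
	return 0;
}
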
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 89921a0..4d598a3 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -464,7 +464,7 @@
 
 	inode = new_inode(sb);
 	if (!inode)
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
 	inode->i_ino = get_next_ino();
 
@@ -474,8 +474,7 @@
 	if (unlikely(head->unregistering)) {
 		spin_unlock(&sysctl_lock);
 		iput(inode);
-		inode = NULL;
-		goto out;
+		return ERR_PTR(-ENOENT);
 	}
 	ei->sysctl = head;
 	ei->sysctl_entry = table;
@@ -500,7 +499,6 @@
 	if (root->set_ownership)
 		root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
 
-out:
 	return inode;
 }
 
@@ -549,10 +547,11 @@
 			goto out;
 	}
 
-	err = ERR_PTR(-ENOMEM);
 	inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
-	if (!inode)
+	if (IS_ERR(inode)) {
+		err = ERR_CAST(inode);
 		goto out;
+	}
 
 	d_set_d_op(dentry, &proc_sys_dentry_operations);
 	err = d_splice_alias(inode, dentry);
@@ -685,7 +684,7 @@
 		if (d_in_lookup(child)) {
 			struct dentry *res;
 			inode = proc_sys_make_inode(dir->d_sb, head, table);
-			if (!inode) {
+			if (IS_ERR(inode)) {
 				d_lookup_done(child);
 				dput(child);
 				return false;
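
Returning ERR_PTR(-ENOMEM)/ERR_PTR(-ENOENT) instead of NULL lets callers propagate the precise error via ERR_CAST(), as the follow-up hunks do. A hedged userspace re-creation of the pointer-encoding idiom (the kernel's real helpers live in linux/err.h):

/* Hedged userspace re-implementation of the kernel's ERR_PTR/IS_ERR
 * idiom used above: error codes are encoded at the top of the pointer
 * range so a single return value carries pointer-or-errno. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)(intptr_t)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)(intptr_t)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *make_inode(int fail)
{
	static int inode = 42;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&inode;
}

int main(void)
{
	void *p = make_inode(1);

	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));	/* -12 (ENOMEM) */
	else
		printf("inode %d\n", *(int *)p);
	return 0;
}
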
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ab3a089..38ed88e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -480,7 +480,7 @@
 };
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
-		bool compound, bool young, bool dirty)
+		bool compound, bool young, bool dirty, bool locked)
 {
 	int i, nr = compound ? 1 << compound_order(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
@@ -507,24 +507,31 @@
 		else
 			mss->private_clean += size;
 		mss->pss += (u64)size << PSS_SHIFT;
+		if (locked)
+			mss->pss_locked += (u64)size << PSS_SHIFT;
 		return;
 	}
 
 	for (i = 0; i < nr; i++, page++) {
 		int mapcount = page_mapcount(page);
+		unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
 
 		if (mapcount >= 2) {
 			if (dirty || PageDirty(page))
 				mss->shared_dirty += PAGE_SIZE;
 			else
 				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+			mss->pss += pss / mapcount;
+			if (locked)
+				mss->pss_locked += pss / mapcount;
 		} else {
 			if (dirty || PageDirty(page))
 				mss->private_dirty += PAGE_SIZE;
 			else
 				mss->private_clean += PAGE_SIZE;
-			mss->pss += PAGE_SIZE << PSS_SHIFT;
+			mss->pss += pss;
+			if (locked)
+				mss->pss_locked += pss;
 		}
 	}
 }
@@ -547,6 +554,7 @@
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
 
 	if (pte_present(*pte)) {
@@ -589,7 +597,7 @@
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
+	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -598,6 +606,7 @@
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page;
 
 	/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -612,7 +621,7 @@
 		/* pass */;
 	else
 		VM_BUG_ON_PAGE(1, page);
-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
+	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -794,11 +803,8 @@
 		}
 	}
 #endif
-
 	/* mmap_sem is held in m_start */
 	walk_page_vma(vma, &smaps_walk);
-	if (vma->vm_flags & VM_LOCKED)
-		mss->pss_locked += mss->pss;
 }
 
 #define SEQ_PUT_DEC(str, val) \
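
The PSS change accounts locked pages per page rather than copying mss->pss afterwards, which matters because each page's contribution is divided by its mapcount in fixed point. A standalone sketch of that arithmetic with assumed PAGE_SIZE and PSS_SHIFT values:

/* Hedged sketch of the fixed-point PSS arithmetic above: each page
 * contributes PAGE_SIZE / mapcount bytes, kept in fixed point
 * (PSS_SHIFT) until printed. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PSS_SHIFT	12

int main(void)
{
	uint64_t pss = 0;
	int mapcounts[] = { 1, 2, 4 };	/* three pages, varying sharing */

	for (int i = 0; i < 3; i++) {
		uint64_t page_pss = (uint64_t)PAGE_SIZE << PSS_SHIFT;

		pss += page_pss / mapcounts[i];
	}
	/* 4096 + 2048 + 1024 = 7168 bytes */
	printf("pss = %llu bytes\n",
	       (unsigned long long)(pss >> PSS_SHIFT));
	return 0;
}
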
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 03cd593..eb67bb7 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -713,18 +713,15 @@
 {
 	struct device *dev = &pdev->dev;
 	struct ramoops_platform_data *pdata = dev->platform_data;
+	struct ramoops_platform_data pdata_local;
 	struct ramoops_context *cxt = &oops_cxt;
 	size_t dump_mem_sz;
 	phys_addr_t paddr;
 	int err = -EINVAL;
 
 	if (dev_of_node(dev) && !pdata) {
-		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-		if (!pdata) {
-			pr_err("cannot allocate platform data buffer\n");
-			err = -ENOMEM;
-			goto fail_out;
-		}
+		pdata = &pdata_local;
+		memset(pdata, 0, sizeof(*pdata));
 
 		err = ramoops_parse_dt(pdev, pdata);
 		if (err < 0)
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 0792595..3c777ec 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -496,6 +496,11 @@
 	sig ^= PERSISTENT_RAM_SIG;
 
 	if (prz->buffer->sig == sig) {
+		if (buffer_size(prz) == 0) {
+			pr_debug("found existing empty buffer\n");
+			return 0;
+		}
+
 		if (buffer_size(prz) > prz->buffer_size ||
 		    buffer_start(prz) > buffer_size(prz))
 			pr_info("found existing invalid buffer, size %zu, start %zu\n",
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index f0cbf58..fd5dd80 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -791,7 +791,8 @@
 /* Return true if quotactl command is manipulating quota on/off state */
 static bool quotactl_cmd_onoff(int cmd)
 {
-	return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF);
+	return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
+		 (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
 }
 
 /*
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 4844538..c6f9b22 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -210,6 +210,38 @@
 }
 
 /**
+ * inode_still_linked - check whether the inode in question will be re-linked.
+ * @c: UBIFS file-system description object
+ * @rino: replay entry to test
+ *
+ * O_TMPFILE files can be re-linked; this means the link count goes from 0 to 1.
+ * This case needs special care, otherwise all references to the inode would
+ * be removed as soon as the first replay entry for an inode with link count 0
+ * is found.
+ */
+static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
+{
+	struct replay_entry *r;
+
+	ubifs_assert(c, rino->deletion);
+	ubifs_assert(c, key_type(c, &rino->key) == UBIFS_INO_KEY);
+
+	/*
+	 * Find the most recent entry for the inode behind @rino and check
+	 * whether it is a deletion.
+	 */
+	list_for_each_entry_reverse(r, &c->replay_list, list) {
+		ubifs_assert(c, r->sqnum >= rino->sqnum);
+		if (key_inum(c, &r->key) == key_inum(c, &rino->key))
+			return r->deletion == 0;
+
+	}
+
+	ubifs_assert(c, 0);
+	return false;
+}
+
+/**
  * apply_replay_entry - apply a replay entry to the TNC.
  * @c: UBIFS file-system description object
  * @r: replay entry to apply
@@ -236,6 +268,11 @@
 			{
 				ino_t inum = key_inum(c, &r->key);
 
+				if (inode_still_linked(c, r)) {
+					err = 0;
+					break;
+				}
+
 				err = ubifs_tnc_remove_ino(c, inum);
 				break;
 			}
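
The situation inode_still_linked() guards against can be produced from userspace with O_TMPFILE plus linkat(), taking an unnamed inode from link count 0 to 1. A hedged sketch with placeholder paths:

/* Hedged sketch: creating an unnamed inode (link count 0) and
 * re-linking it to 1, the pattern inode_still_linked() must detect
 * during replay. Paths are placeholders. */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char procpath[64];
	int fd = open("/tmp", O_TMPFILE | O_WRONLY, 0600);

	if (fd < 0) {
		perror("open(O_TMPFILE)");
		return 1;
	}
	snprintf(procpath, sizeof(procpath), "/proc/self/fd/%d", fd);
	/* link count goes 0 -> 1 here */
	if (linkat(AT_FDCWD, procpath, AT_FDCWD, "/tmp/now-named",
		   AT_SYMLINK_FOLLOW) != 0)
		perror("linkat");
	close(fd);
	return 0;
}
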
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 5df554a9..ae796e1 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1357,6 +1357,12 @@
 
 	iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
 							ICBTAG_FLAG_AD_MASK;
+	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
+	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
+	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+		ret = -EIO;
+		goto out;
+	}
 	iinfo->i_unique = 0;
 	iinfo->i_lenEAttr = 0;
 	iinfo->i_lenExtents = 0;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 6f9ba3b..9beff19 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -736,10 +736,18 @@
 	struct userfaultfd_ctx *ctx;
 
 	ctx = vma->vm_userfaultfd_ctx.ctx;
-	if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
+
+	if (!ctx)
+		return;
+
+	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
 		vm_ctx->ctx = ctx;
 		userfaultfd_ctx_get(ctx);
 		WRITE_ONCE(ctx->mmap_changing, true);
+	} else {
+		/* Drop uffd context if remap feature not enabled */
+		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
 	}
 }
 
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 6fc5425..2652d00 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -243,7 +243,7 @@
 	struct xfs_mount		*mp = bp->b_target->bt_mount;
 	struct xfs_attr_leafblock	*leaf = bp->b_addr;
 	struct xfs_attr_leaf_entry	*entries;
-	uint16_t			end;
+	uint32_t			end;	/* must be 32bit - see below */
 	int				i;
 
 	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -293,6 +293,11 @@
 	/*
 	 * Quickly check the freemap information.  Attribute data has to be
 	 * aligned to 4-byte boundaries, and likewise for the free space.
+	 *
+	 * Note that for 64k block size filesystems, the freemap entries cannot
+	 * overflow as they are only be16 fields. However, when checking the
+	 * end pointer of the freemap, we have to be careful to detect
+	 * overflows, and so we use uint32_t for those checks.
 	 */
 	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
 		if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
@@ -303,7 +308,9 @@
 			return __this_address;
 		if (ichdr.freemap[i].size & 0x3)
 			return __this_address;
-		end = ichdr.freemap[i].base + ichdr.freemap[i].size;
+
+		/* beware of 16-bit overflows here */
+		end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
 		if (end < ichdr.freemap[i].base)
 			return __this_address;
 		if (end > mp->m_attr_geo->blksize)
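
The widened end variable matters because a be16 base plus size can exceed 0xFFFF; truncated to 16 bits, the sum can never trip a range check against a 64k block size. A standalone demonstration:

/* Hedged sketch of the 16-bit wrap the widened 'end' above guards
 * against: a be16 base + size sum can exceed 0xFFFF, and a 16-bit
 * result can never exceed a 64k block size in a range check. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t base = 0xFF00, size = 0x0200;
	uint32_t blksize = 0x10000;		/* 64k blocks */

	uint16_t end16 = base + size;		/* wraps to 0x0100 */
	uint32_t end32 = (uint32_t)base + size;	/* 0x10100 */

	printf("end16=0x%04x > blksize? %s\n", end16,
	       end16 > blksize ? "yes" : "no");	/* never true */
	printf("end32=0x%05x > blksize? %s\n", end32,
	       end32 > blksize ? "yes" : "no");	/* overflow caught */
	return 0;
}
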
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index a476703..3a496ff 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1683,10 +1683,13 @@
 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
 		/*
 		 * Filling in all of a previously delayed allocation extent.
-		 * The right neighbor is contiguous, the left is not.
+		 * The right neighbor is contiguous, the left is not. Take care
+		 * with delay -> unwritten extent allocation here because the
+		 * delalloc record we are overwriting is always written.
 		 */
 		PREV.br_startblock = new->br_startblock;
 		PREV.br_blockcount += RIGHT.br_blockcount;
+		PREV.br_state = new->br_state;
 
 		xfs_iext_next(ifp, &bma->icur);
 		xfs_iext_remove(bma->ip, &bma->icur, state);
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 34c6d7b..bbdae2b 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -330,7 +330,7 @@
 
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
-			return __this_address;
+			return false;
 		return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
 	}
 
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 49f5f58..b697866 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -449,6 +449,7 @@
 	}
 
 	wpc->imap = imap;
+	xfs_trim_extent_eof(&wpc->imap, ip);
 	trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
 	return 0;
 allocate_blocks:
@@ -459,6 +460,7 @@
 	ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
 	       imap.br_startoff + imap.br_blockcount <= cow_fsb);
 	wpc->imap = imap;
+	xfs_trim_extent_eof(&wpc->imap, ip);
 	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
 	return 0;
 }
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 6de8d90..211b06e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1175,9 +1175,9 @@
 	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
 	 * Writeback of the eof page will do this, albeit clumsily.
 	 */
-	if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
 		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-				(offset + len) & ~PAGE_MASK, LLONG_MAX);
+				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
 	}
 
 	return error;
@@ -1824,6 +1824,12 @@
 	if (error)
 		goto out_unlock;
 
+	if (xfs_inode_has_cow_data(tip)) {
+		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
+		if (error)
+			return error;
+	}
+
 	/*
 	 * Extent "swapping" with rmap requires a permanent reservation and
 	 * a block reservation because it's really just a remap operation
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 12d8455..010db5f 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1233,9 +1233,23 @@
 }
 
 /*
- * Requeue a failed buffer for writeback
+ * Requeue a failed buffer for writeback.
  *
- * Return true if the buffer has been re-queued properly, false otherwise
+ * We clear the log item failed state here as well, but we have to be careful
+ * about reference counts because the only active reference counts on the buffer
+ * may be the failed log items. Hence if we clear the log item failed state
+ * before queuing the buffer for IO we can release all active references to
+ * the buffer and free it, leading to use after free problems in
+ * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
+ * order we process them in - the buffer is locked, and we own the buffer list
+ * so nothing on them is going to change while we are performing this action.
+ *
+ * Hence we can safely queue the buffer for IO before we clear the failed log
+ * item state, therefore always having an active reference to the buffer and
+ * avoiding the transient zero-reference state that leads to use-after-free.
+ *
+ * Return true if the buffer was added to the buffer list, false if it was
+ * already on the buffer list.
  */
 bool
 xfs_buf_resubmit_failed_buffers(
@@ -1243,16 +1257,16 @@
 	struct list_head	*buffer_list)
 {
 	struct xfs_log_item	*lip;
+	bool			ret;
+
+	ret = xfs_buf_delwri_queue(bp, buffer_list);
 
 	/*
-	 * Clear XFS_LI_FAILED flag from all items before resubmit
-	 *
-	 * XFS_LI_FAILED set/clear is protected by ail_lock, caller  this
+	 * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this
 	 * function already has it acquired
 	 */
 	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
 		xfs_clear_li_failed(lip);
 
-	/* Add this buffer back to the delayed write list */
-	return xfs_buf_delwri_queue(bp, buffer_list);
+	return ret;
 }
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 0ef5ece..bad9047 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1616,7 +1616,7 @@
 	error = 0;
 out_free_buf:
 	kmem_free(buf);
-	return 0;
+	return error;
 }
 
 struct getfsmap_info {
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 73a1d77..3091e4b 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -40,7 +40,7 @@
 		statp->f_files = limit;
 		statp->f_ffree =
 			(statp->f_files > dqp->q_res_icount) ?
-			 (statp->f_ffree - dqp->q_res_icount) : 0;
+			 (statp->f_files - dqp->q_res_icount) : 0;
 	}
 }
 
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 42ea7ba..7088f44 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -302,6 +302,7 @@
 	if (error)
 		return error;
 
+	xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
 	trace_xfs_reflink_cow_alloc(ip, &got);
 	return 0;
 }
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 4e44231..740ac96 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -119,7 +119,7 @@
 	int j;
 
 	seq_printf(m, "qm");
-	for (j = XFSSTAT_END_IBT_V2; j < XFSSTAT_END_XQMSTAT; j++)
+	for (j = XFSSTAT_END_REFCOUNT; j < XFSSTAT_END_XQMSTAT; j++)
 		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
 	seq_putc(m, '\n');
 	return 0;
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 89f3b03..e3667c9 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -3,7 +3,7 @@
 #define _4LEVEL_FIXUP_H
 
 #define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 #define PUD_SHIFT			PGDIR_SHIFT
 #define PUD_SIZE			PGDIR_SIZE
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 9c2e070..73474bb 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -3,7 +3,7 @@
 #define _5LEVEL_FIXUP_H
 
 #define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
 
 #define P4D_SHIFT			PGDIR_SHIFT
 #define P4D_SIZE			PGDIR_SIZE
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 0c34215..1d6dd38 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -5,7 +5,7 @@
 #ifndef __ASSEMBLY__
 #include <asm-generic/5level-fixup.h>
 
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 /*
  * Having the pud type consist of a pgd gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 1a29b2a..04cb913 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -4,7 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
 
 typedef struct { pgd_t pgd; } p4d_t;
 
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index f35f6e8..b85b827 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -8,7 +8,7 @@
 
 struct mm_struct;
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 /*
  * Having the pmd type consist of a pud gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index e950b9c..9bef475 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -9,7 +9,7 @@
 #else
 #include <asm-generic/pgtable-nop4d.h>
 
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 /*
  * Having the pud type consist of a p4d gets the size right, and allows
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 88ebc61..15fd027 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1127,4 +1127,20 @@
 #endif
 #endif
 
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
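
Defining the *_FOLDED macros to 1 (instead of leaving them bare) is what allows the new mm_*_folded() helpers to use __is_defined(), the same preprocessor trick behind IS_ENABLED(). A hedged userspace re-creation of how it evaluates to 0 or 1:

/* Hedged userspace re-creation of the kernel's __is_defined() trick
 * that the "#define ... 1" changes above enable: a macro defined to 1
 * expands to an extra argument, selecting 1; otherwise we get 0. */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...)	val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define __PAGETABLE_PMD_FOLDED 1	/* defined to 1, as in the patch */
/* __PAGETABLE_PUD_FOLDED deliberately left undefined */

int main(void)
{
	printf("pmd folded: %d\n", __is_defined(__PAGETABLE_PMD_FOLDED));
	printf("pud folded: %d\n", __is_defined(__PAGETABLE_PUD_FOLDED));
	return 0;
}
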
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index bfe1639..97fc498 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -47,6 +47,24 @@
 	return false;
 #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
 	return false;
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+	/*
+	 * The DRM driver stack is designed to work with cache coherent devices
+	 * only, but permits an optimization to be enabled in some cases, where
+	 * for some buffers, both the CPU and the GPU use uncached mappings,
+	 * removing the need for DMA snooping and allocation in the CPU caches.
+	 *
+	 * The use of uncached GPU mappings relies on the correct implementation
+	 * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
+	 * will use cached mappings nonetheless. On x86 platforms, this does not
+	 * seem to matter, as uncached CPU mappings will snoop the caches in any
+	 * case. However, on ARM and arm64, enabling this optimization on a
+	 * platform where NoSnoop is ignored results in loss of coherency, which
+	 * breaks correct operation of the device. Since we have no way of
+	 * detecting whether NoSnoop works or not, just disable this
+	 * optimization entirely for ARM and arm64.
+	 */
+	return false;
 #else
 	return true;
 #endif
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7f78d26..a72efa0 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -634,4 +634,12 @@
 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_mst_port *port, bool power_up);
 
+int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+			   struct drm_dp_mst_port *port,
+			   int offset, int size, u8 *bytes);
+
+int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_port *port,
+				 int offset, int size, u8 *bytes);
+
 #endif
diff --git a/include/dt-bindings/clock/qcom,audio-ext-clk.h b/include/dt-bindings/clock/qcom,audio-ext-clk.h
index f4dbc28..dcdcb1c 100644
--- a/include/dt-bindings/clock/qcom,audio-ext-clk.h
+++ b/include/dt-bindings/clock/qcom,audio-ext-clk.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __AUDIO_EXT_CLK_H
@@ -15,5 +15,7 @@
 #define AUDIO_LPASS_MCLK_5	6
 #define AUDIO_LPASS_MCLK_6	7
 #define AUDIO_LPASS_MCLK_7	8
+#define AUDIO_LPASS_CORE_HW_VOTE	9
+#define AUDIO_LPASS_MCLK_8	10
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-kona.h b/include/dt-bindings/clock/qcom,dispcc-kona.h
index e85f00b..60b8d4a 100644
--- a/include/dt-bindings/clock/qcom,dispcc-kona.h
+++ b/include/dt-bindings/clock/qcom,dispcc-kona.h
@@ -1,98 +1,76 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
 
 #ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_KONA_H
 #define _DT_BINDINGS_CLK_QCOM_DISP_CC_KONA_H
 
-#define DISP_CC_DEBUG_CLK					0
-#define DISP_CC_MDSS_AHB_CLK					1
-#define DISP_CC_MDSS_AHB_CLK_SRC				2
-#define DISP_CC_MDSS_BYTE0_CLK					3
-#define DISP_CC_MDSS_BYTE0_CLK_SRC				4
-#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				5
-#define DISP_CC_MDSS_BYTE0_INTF_CLK				6
-#define DISP_CC_MDSS_BYTE1_CLK					7
-#define DISP_CC_MDSS_BYTE1_CLK_SRC				8
-#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				9
-#define DISP_CC_MDSS_BYTE1_INTF_CLK				10
-#define DISP_CC_MDSS_DP_AUX1_CLK				11
-#define DISP_CC_MDSS_DP_AUX1_CLK_SRC				12
-#define DISP_CC_MDSS_DP_AUX_CLK					13
-#define DISP_CC_MDSS_DP_AUX_CLK_SRC				14
-#define DISP_CC_MDSS_DP_CRYPTO1_CLK				15
-#define DISP_CC_MDSS_DP_CRYPTO1_CLK_SRC				16
-#define DISP_CC_MDSS_DP_CRYPTO_CLK				17
-#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC				18
-#define DISP_CC_MDSS_DP_LINK1_CLK				19
-#define DISP_CC_MDSS_DP_LINK1_CLK_SRC				20
-#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC			21
-#define DISP_CC_MDSS_DP_LINK1_INTF_CLK				22
-#define DISP_CC_MDSS_DP_LINK_CLK				23
-#define DISP_CC_MDSS_DP_LINK_CLK_SRC				24
-#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			25
-#define DISP_CC_MDSS_DP_LINK_INTF_CLK				26
-#define DISP_CC_MDSS_DP_PIXEL1_CLK				27
-#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC				28
-#define DISP_CC_MDSS_DP_PIXEL2_CLK				29
-#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC				30
-#define DISP_CC_MDSS_DP_PIXEL_CLK				31
-#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC				32
-#define DISP_CC_MDSS_EDP_AUX_CLK				33
-#define DISP_CC_MDSS_EDP_AUX_CLK_SRC				34
-#define DISP_CC_MDSS_EDP_GTC_CLK				35
-#define DISP_CC_MDSS_EDP_GTC_CLK_SRC				36
-#define DISP_CC_MDSS_EDP_LINK_CLK				37
-#define DISP_CC_MDSS_EDP_LINK_CLK_SRC				38
-#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC			39
-#define DISP_CC_MDSS_EDP_LINK_INTF_CLK				40
-#define DISP_CC_MDSS_EDP_PIXEL_CLK				41
-#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC				42
-#define DISP_CC_MDSS_ESC0_CLK					43
-#define DISP_CC_MDSS_ESC0_CLK_SRC				44
-#define DISP_CC_MDSS_ESC1_CLK					45
-#define DISP_CC_MDSS_ESC1_CLK_SRC				46
-#define DISP_CC_MDSS_MDP_CLK					47
-#define DISP_CC_MDSS_MDP_CLK_SRC				48
-#define DISP_CC_MDSS_MDP_LUT_CLK				49
-#define DISP_CC_MDSS_NON_GDSC_AHB_CLK				50
-#define DISP_CC_MDSS_PCLK0_CLK					51
-#define DISP_CC_MDSS_PCLK0_CLK_SRC				52
-#define DISP_CC_MDSS_PCLK1_CLK					53
-#define DISP_CC_MDSS_PCLK1_CLK_SRC				54
-#define DISP_CC_MDSS_ROT_CLK					55
-#define DISP_CC_MDSS_ROT_CLK_SRC				56
-#define DISP_CC_MDSS_RSCC_AHB_CLK				57
-#define DISP_CC_MDSS_RSCC_VSYNC_CLK				58
-#define DISP_CC_MDSS_SPDM_DEBUG_CLK				59
-#define DISP_CC_MDSS_SPDM_DP_CRYPTO_CLK				60
-#define DISP_CC_MDSS_SPDM_DP_CRYPTO_DIV_CLK_SRC			61
-#define DISP_CC_MDSS_SPDM_DP_PIXEL1_CLK				62
-#define DISP_CC_MDSS_SPDM_DP_PIXEL1_DIV_CLK_SRC			63
-#define DISP_CC_MDSS_SPDM_DP_PIXEL_CLK				64
-#define DISP_CC_MDSS_SPDM_DP_PIXEL_DIV_CLK_SRC			65
-#define DISP_CC_MDSS_SPDM_MDP_CLK				66
-#define DISP_CC_MDSS_SPDM_MDP_DIV_CLK_SRC			67
-#define DISP_CC_MDSS_SPDM_PCLK0_CLK				68
-#define DISP_CC_MDSS_SPDM_PCLK0_DIV_CLK_SRC			69
-#define DISP_CC_MDSS_SPDM_PCLK1_CLK				70
-#define DISP_CC_MDSS_SPDM_PCLK1_DIV_CLK_SRC			71
-#define DISP_CC_MDSS_SPDM_ROT_CLK				72
-#define DISP_CC_MDSS_SPDM_ROT_DIV_CLK_SRC			73
-#define DISP_CC_MDSS_VSYNC_CLK					74
-#define DISP_CC_MDSS_VSYNC_CLK_SRC				75
-#define DISP_CC_PLL0						76
-#define DISP_CC_PLL1						77
-#define DISP_CC_PLL_TEST_CLK					78
-#define DISP_CC_PLL_TEST_DIV_CLK_SRC				79
-#define DISP_CC_SLEEP_CLK					80
-#define DISP_CC_SLEEP_CLK_SRC					81
-#define DISP_CC_XO_CLK						82
-#define DISP_CC_XO_CLK_SRC					83
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB_CLK					0
+#define DISP_CC_MDSS_AHB_CLK_SRC				1
+#define DISP_CC_MDSS_BYTE0_CLK					2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC				3
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK				5
+#define DISP_CC_MDSS_BYTE1_CLK					6
+#define DISP_CC_MDSS_BYTE1_CLK_SRC				7
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				8
+#define DISP_CC_MDSS_BYTE1_INTF_CLK				9
+#define DISP_CC_MDSS_DP_AUX1_CLK				10
+#define DISP_CC_MDSS_DP_AUX1_CLK_SRC				11
+#define DISP_CC_MDSS_DP_AUX_CLK					12
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC				13
+#define DISP_CC_MDSS_DP_LINK1_CLK				14
+#define DISP_CC_MDSS_DP_LINK1_CLK_SRC				15
+#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC			16
+#define DISP_CC_MDSS_DP_LINK1_INTF_CLK				17
+#define DISP_CC_MDSS_DP_LINK_CLK				18
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC				19
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			20
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK				21
+#define DISP_CC_MDSS_DP_PIXEL1_CLK				22
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC				23
+#define DISP_CC_MDSS_DP_PIXEL2_CLK				24
+#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC				25
+#define DISP_CC_MDSS_DP_PIXEL_CLK				26
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC				27
+#define DISP_CC_MDSS_EDP_AUX_CLK				28
+#define DISP_CC_MDSS_EDP_AUX_CLK_SRC				29
+#define DISP_CC_MDSS_EDP_GTC_CLK				30
+#define DISP_CC_MDSS_EDP_GTC_CLK_SRC				31
+#define DISP_CC_MDSS_EDP_LINK_CLK				32
+#define DISP_CC_MDSS_EDP_LINK_CLK_SRC				33
+#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC			34
+#define DISP_CC_MDSS_EDP_LINK_INTF_CLK				35
+#define DISP_CC_MDSS_EDP_PIXEL_CLK				36
+#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC				37
+#define DISP_CC_MDSS_ESC0_CLK					38
+#define DISP_CC_MDSS_ESC0_CLK_SRC				39
+#define DISP_CC_MDSS_ESC1_CLK					40
+#define DISP_CC_MDSS_ESC1_CLK_SRC				41
+#define DISP_CC_MDSS_MDP_CLK					42
+#define DISP_CC_MDSS_MDP_CLK_SRC				43
+#define DISP_CC_MDSS_MDP_LUT_CLK				44
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK				45
+#define DISP_CC_MDSS_PCLK0_CLK					46
+#define DISP_CC_MDSS_PCLK0_CLK_SRC				47
+#define DISP_CC_MDSS_PCLK1_CLK					48
+#define DISP_CC_MDSS_PCLK1_CLK_SRC				49
+#define DISP_CC_MDSS_ROT_CLK					50
+#define DISP_CC_MDSS_ROT_CLK_SRC				51
+#define DISP_CC_MDSS_RSCC_AHB_CLK				52
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK				53
+#define DISP_CC_MDSS_VSYNC_CLK					54
+#define DISP_CC_MDSS_VSYNC_CLK_SRC				55
+#define DISP_CC_PLL0						56
+#define DISP_CC_PLL1						57
+#define DISP_CC_SLEEP_CLK					58
+#define DISP_CC_SLEEP_CLK_SRC					59
+#define DISP_CC_XO_CLK						60
 
-#define MDSS_CORE_GDSC						0
-
+/* DISP_CC resets */
 #define DISP_CC_MDSS_CORE_BCR					0
 #define DISP_CC_MDSS_RSCC_BCR					1
-#define DISP_CC_MDSS_SPDM_BCR					2
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-lito.h b/include/dt-bindings/clock/qcom,dispcc-lito.h
index 4a76f14c..de11f03 100644
--- a/include/dt-bindings/clock/qcom,dispcc-lito.h
+++ b/include/dt-bindings/clock/qcom,dispcc-lito.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
 
 #ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_LITO_H
 #define _DT_BINDINGS_CLK_QCOM_DISP_CC_LITO_H
@@ -44,5 +44,10 @@
 #define DISP_CC_MDSS_AHB_CLK					37
 #define DISP_CC_XO_CLK						38
 #define DISP_CC_XO_CLK_SRC					39
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				40
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			41
+#define DISP_CC_SLEEP_CLK_SRC					42
+#define DISP_CC_SLEEP_CLK					43
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				44
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gcc-kona.h b/include/dt-bindings/clock/qcom,gcc-kona.h
index 0f706be..f55639c 100644
--- a/include/dt-bindings/clock/qcom,gcc-kona.h
+++ b/include/dt-bindings/clock/qcom,gcc-kona.h
@@ -52,178 +52,176 @@
 #define GCC_GPU_IREF_EN						40
 #define GCC_GPU_MEMNOC_GFX_CLK					41
 #define GCC_GPU_SNOC_DVM_GFX_CLK				42
-#define GCC_NPU_AT_CLK						43
-#define GCC_NPU_AXI_CLK						44
-#define GCC_NPU_BWMON_AXI_CLK					45
-#define GCC_NPU_BWMON_CFG_AHB_CLK				46
-#define GCC_NPU_CFG_AHB_CLK					47
-#define GCC_NPU_DMA_CLK						48
-#define GCC_NPU_GPLL0_CLK_SRC					49
-#define GCC_NPU_GPLL0_DIV_CLK_SRC				50
-#define GCC_NPU_TRIG_CLK					51
-#define GCC_PCIE0_PHY_REFGEN_CLK				52
-#define GCC_PCIE1_PHY_REFGEN_CLK				53
-#define GCC_PCIE2_PHY_REFGEN_CLK				54
-#define GCC_PCIE_0_AUX_CLK					55
-#define GCC_PCIE_0_AUX_CLK_SRC					56
-#define GCC_PCIE_0_CFG_AHB_CLK					57
-#define GCC_PCIE_0_MSTR_AXI_CLK					58
-#define GCC_PCIE_0_PIPE_CLK					59
-#define GCC_PCIE_0_SLV_AXI_CLK					60
-#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				61
-#define GCC_PCIE_1_AUX_CLK					62
-#define GCC_PCIE_1_AUX_CLK_SRC					63
-#define GCC_PCIE_1_CFG_AHB_CLK					64
-#define GCC_PCIE_1_MSTR_AXI_CLK					65
-#define GCC_PCIE_1_PIPE_CLK					66
-#define GCC_PCIE_1_SLV_AXI_CLK					67
-#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				68
-#define GCC_PCIE_2_AUX_CLK					69
-#define GCC_PCIE_2_AUX_CLK_SRC					70
-#define GCC_PCIE_2_CFG_AHB_CLK					71
-#define GCC_PCIE_2_MSTR_AXI_CLK					72
-#define GCC_PCIE_2_PIPE_CLK					73
-#define GCC_PCIE_2_SLV_AXI_CLK					74
-#define GCC_PCIE_2_SLV_Q2A_AXI_CLK				75
-#define GCC_PCIE_MDM_CLKREF_EN					76
-#define GCC_PCIE_PHY_AUX_CLK					77
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC				78
-#define GCC_PCIE_WIFI_CLKREF_EN					79
-#define GCC_PCIE_WIGIG_CLKREF_EN				80
-#define GCC_PDM2_CLK						81
-#define GCC_PDM2_CLK_SRC					82
-#define GCC_PDM_AHB_CLK						83
-#define GCC_PDM_XO4_CLK						84
-#define GCC_PRNG_AHB_CLK					85
-#define GCC_QMIP_CAMERA_NRT_AHB_CLK				86
-#define GCC_QMIP_CAMERA_RT_AHB_CLK				87
-#define GCC_QMIP_DISP_AHB_CLK					88
-#define GCC_QMIP_VIDEO_CVP_AHB_CLK				89
-#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK				90
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK				91
-#define GCC_QUPV3_WRAP0_CORE_CLK				92
-#define GCC_QUPV3_WRAP0_S0_CLK					93
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				94
-#define GCC_QUPV3_WRAP0_S1_CLK					95
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				96
-#define GCC_QUPV3_WRAP0_S2_CLK					97
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				98
-#define GCC_QUPV3_WRAP0_S3_CLK					99
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				100
-#define GCC_QUPV3_WRAP0_S4_CLK					101
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				102
-#define GCC_QUPV3_WRAP0_S5_CLK					103
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				104
-#define GCC_QUPV3_WRAP0_S6_CLK					105
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC				106
-#define GCC_QUPV3_WRAP0_S7_CLK					107
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC				108
-#define GCC_QUPV3_WRAP1_CORE_2X_CLK				109
-#define GCC_QUPV3_WRAP1_CORE_CLK				110
-#define GCC_QUPV3_WRAP1_S0_CLK					111
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC				112
-#define GCC_QUPV3_WRAP1_S1_CLK					113
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC				114
-#define GCC_QUPV3_WRAP1_S2_CLK					115
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC				116
-#define GCC_QUPV3_WRAP1_S3_CLK					117
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC				118
-#define GCC_QUPV3_WRAP1_S4_CLK					119
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC				120
-#define GCC_QUPV3_WRAP1_S5_CLK					121
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC				122
-#define GCC_QUPV3_WRAP2_CORE_2X_CLK				123
-#define GCC_QUPV3_WRAP2_CORE_CLK				124
-#define GCC_QUPV3_WRAP2_S0_CLK					125
-#define GCC_QUPV3_WRAP2_S0_CLK_SRC				126
-#define GCC_QUPV3_WRAP2_S1_CLK					127
-#define GCC_QUPV3_WRAP2_S1_CLK_SRC				128
-#define GCC_QUPV3_WRAP2_S2_CLK					129
-#define GCC_QUPV3_WRAP2_S2_CLK_SRC				130
-#define GCC_QUPV3_WRAP2_S3_CLK					131
-#define GCC_QUPV3_WRAP2_S3_CLK_SRC				132
-#define GCC_QUPV3_WRAP2_S4_CLK					133
-#define GCC_QUPV3_WRAP2_S4_CLK_SRC				134
-#define GCC_QUPV3_WRAP2_S5_CLK					135
-#define GCC_QUPV3_WRAP2_S5_CLK_SRC				136
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				137
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				138
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK				139
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK				140
-#define GCC_QUPV3_WRAP_2_M_AHB_CLK				141
-#define GCC_QUPV3_WRAP_2_S_AHB_CLK				142
-#define GCC_SDCC2_AHB_CLK					143
-#define GCC_SDCC2_APPS_CLK					144
-#define GCC_SDCC2_APPS_CLK_SRC					145
-#define GCC_SDCC4_AHB_CLK					146
-#define GCC_SDCC4_APPS_CLK					147
-#define GCC_SDCC4_APPS_CLK_SRC					148
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				149
-#define GCC_TSIF_AHB_CLK					150
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK				151
-#define GCC_TSIF_REF_CLK					152
-#define GCC_TSIF_REF_CLK_SRC					153
-#define GCC_UFS_1X_CLKREF_EN					154
-#define GCC_UFS_CARD_AHB_CLK					155
-#define GCC_UFS_CARD_AXI_CLK					156
-#define GCC_UFS_CARD_AXI_CLK_SRC				157
-#define GCC_UFS_CARD_AXI_HW_CTL_CLK				158
-#define GCC_UFS_CARD_ICE_CORE_CLK				159
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				160
-#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK			161
-#define GCC_UFS_CARD_PHY_AUX_CLK				162
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				163
-#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				164
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				165
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				166
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				167
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK				168
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			169
-#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK			170
-#define GCC_UFS_PHY_AHB_CLK					171
-#define GCC_UFS_PHY_AXI_CLK					172
-#define GCC_UFS_PHY_AXI_CLK_SRC					173
-#define GCC_UFS_PHY_AXI_HW_CTL_CLK				174
-#define GCC_UFS_PHY_ICE_CORE_CLK				175
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				176
-#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK				177
-#define GCC_UFS_PHY_PHY_AUX_CLK					178
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				179
-#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				180
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				181
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				182
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				183
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				184
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				185
-#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK			186
-#define GCC_USB30_PRIM_MASTER_CLK				187
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				188
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				189
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			190
-#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC		191
-#define GCC_USB30_PRIM_SLEEP_CLK				192
-#define GCC_USB30_SEC_MASTER_CLK				193
-#define GCC_USB30_SEC_MASTER_CLK_SRC				194
-#define GCC_USB30_SEC_MOCK_UTMI_CLK				195
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				196
-#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC			197
-#define GCC_USB30_SEC_SLEEP_CLK					198
-#define GCC_USB3_PRIM_PHY_AUX_CLK				199
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				200
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				201
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				202
-#define GCC_USB3_SEC_CLKREF_EN					203
-#define GCC_USB3_SEC_PHY_AUX_CLK				204
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				205
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK				206
-#define GCC_USB3_SEC_PHY_PIPE_CLK				207
-#define GCC_VIDEO_AHB_CLK					208
-#define GCC_VIDEO_AXI0_CLK					209
-#define GCC_VIDEO_AXI1_CLK					210
-#define GCC_VIDEO_XO_CLK					211
-#define GPLL0							212
-#define GPLL0_OUT_EVEN						213
-#define GPLL9							214
+#define GCC_NPU_AXI_CLK						43
+#define GCC_NPU_BWMON_AXI_CLK					44
+#define GCC_NPU_BWMON_CFG_AHB_CLK				45
+#define GCC_NPU_CFG_AHB_CLK					46
+#define GCC_NPU_DMA_CLK						47
+#define GCC_NPU_GPLL0_CLK_SRC					48
+#define GCC_NPU_GPLL0_DIV_CLK_SRC				49
+#define GCC_PCIE0_PHY_REFGEN_CLK				50
+#define GCC_PCIE1_PHY_REFGEN_CLK				51
+#define GCC_PCIE2_PHY_REFGEN_CLK				52
+#define GCC_PCIE_0_AUX_CLK					53
+#define GCC_PCIE_0_AUX_CLK_SRC					54
+#define GCC_PCIE_0_CFG_AHB_CLK					55
+#define GCC_PCIE_0_MSTR_AXI_CLK					56
+#define GCC_PCIE_0_PIPE_CLK					57
+#define GCC_PCIE_0_SLV_AXI_CLK					58
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				59
+#define GCC_PCIE_1_AUX_CLK					60
+#define GCC_PCIE_1_AUX_CLK_SRC					61
+#define GCC_PCIE_1_CFG_AHB_CLK					62
+#define GCC_PCIE_1_MSTR_AXI_CLK					63
+#define GCC_PCIE_1_PIPE_CLK					64
+#define GCC_PCIE_1_SLV_AXI_CLK					65
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				66
+#define GCC_PCIE_2_AUX_CLK					67
+#define GCC_PCIE_2_AUX_CLK_SRC					68
+#define GCC_PCIE_2_CFG_AHB_CLK					69
+#define GCC_PCIE_2_MSTR_AXI_CLK					70
+#define GCC_PCIE_2_PIPE_CLK					71
+#define GCC_PCIE_2_SLV_AXI_CLK					72
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK				73
+#define GCC_PCIE_MDM_CLKREF_EN					74
+#define GCC_PCIE_PHY_AUX_CLK					75
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC				76
+#define GCC_PCIE_WIFI_CLKREF_EN					77
+#define GCC_PCIE_WIGIG_CLKREF_EN				78
+#define GCC_PDM2_CLK						79
+#define GCC_PDM2_CLK_SRC					80
+#define GCC_PDM_AHB_CLK						81
+#define GCC_PDM_XO4_CLK						82
+#define GCC_PRNG_AHB_CLK					83
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK				84
+#define GCC_QMIP_CAMERA_RT_AHB_CLK				85
+#define GCC_QMIP_DISP_AHB_CLK					86
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK				87
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK				88
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK				89
+#define GCC_QUPV3_WRAP0_CORE_CLK				90
+#define GCC_QUPV3_WRAP0_S0_CLK					91
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				92
+#define GCC_QUPV3_WRAP0_S1_CLK					93
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				94
+#define GCC_QUPV3_WRAP0_S2_CLK					95
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				96
+#define GCC_QUPV3_WRAP0_S3_CLK					97
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				98
+#define GCC_QUPV3_WRAP0_S4_CLK					99
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				100
+#define GCC_QUPV3_WRAP0_S5_CLK					101
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				102
+#define GCC_QUPV3_WRAP0_S6_CLK					103
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC				104
+#define GCC_QUPV3_WRAP0_S7_CLK					105
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC				106
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK				107
+#define GCC_QUPV3_WRAP1_CORE_CLK				108
+#define GCC_QUPV3_WRAP1_S0_CLK					109
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC				110
+#define GCC_QUPV3_WRAP1_S1_CLK					111
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC				112
+#define GCC_QUPV3_WRAP1_S2_CLK					113
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC				114
+#define GCC_QUPV3_WRAP1_S3_CLK					115
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC				116
+#define GCC_QUPV3_WRAP1_S4_CLK					117
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC				118
+#define GCC_QUPV3_WRAP1_S5_CLK					119
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC				120
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK				121
+#define GCC_QUPV3_WRAP2_CORE_CLK				122
+#define GCC_QUPV3_WRAP2_S0_CLK					123
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC				124
+#define GCC_QUPV3_WRAP2_S1_CLK					125
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC				126
+#define GCC_QUPV3_WRAP2_S2_CLK					127
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC				128
+#define GCC_QUPV3_WRAP2_S3_CLK					129
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC				130
+#define GCC_QUPV3_WRAP2_S4_CLK					131
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC				132
+#define GCC_QUPV3_WRAP2_S5_CLK					133
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC				134
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				135
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				136
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK				137
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK				138
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK				139
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK				140
+#define GCC_SDCC2_AHB_CLK					141
+#define GCC_SDCC2_APPS_CLK					142
+#define GCC_SDCC2_APPS_CLK_SRC					143
+#define GCC_SDCC4_AHB_CLK					144
+#define GCC_SDCC4_APPS_CLK					145
+#define GCC_SDCC4_APPS_CLK_SRC					146
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				147
+#define GCC_TSIF_AHB_CLK					148
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK				149
+#define GCC_TSIF_REF_CLK					150
+#define GCC_TSIF_REF_CLK_SRC					151
+#define GCC_UFS_1X_CLKREF_EN					152
+#define GCC_UFS_CARD_AHB_CLK					153
+#define GCC_UFS_CARD_AXI_CLK					154
+#define GCC_UFS_CARD_AXI_CLK_SRC				155
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK				156
+#define GCC_UFS_CARD_ICE_CORE_CLK				157
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				158
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK			159
+#define GCC_UFS_CARD_PHY_AUX_CLK				160
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				161
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				162
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				163
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				164
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				165
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK				166
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			167
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK			168
+#define GCC_UFS_PHY_AHB_CLK					169
+#define GCC_UFS_PHY_AXI_CLK					170
+#define GCC_UFS_PHY_AXI_CLK_SRC					171
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK				172
+#define GCC_UFS_PHY_ICE_CORE_CLK				173
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				174
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK				175
+#define GCC_UFS_PHY_PHY_AUX_CLK					176
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				177
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				178
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				179
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				180
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				181
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				182
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				183
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK			184
+#define GCC_USB30_PRIM_MASTER_CLK				185
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				186
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				187
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			188
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC		189
+#define GCC_USB30_PRIM_SLEEP_CLK				190
+#define GCC_USB30_SEC_MASTER_CLK				191
+#define GCC_USB30_SEC_MASTER_CLK_SRC				192
+#define GCC_USB30_SEC_MOCK_UTMI_CLK				193
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				194
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC			195
+#define GCC_USB30_SEC_SLEEP_CLK					196
+#define GCC_USB3_PRIM_PHY_AUX_CLK				197
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				198
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				199
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				200
+#define GCC_USB3_SEC_CLKREF_EN					201
+#define GCC_USB3_SEC_PHY_AUX_CLK				202
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				203
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK				204
+#define GCC_USB3_SEC_PHY_PIPE_CLK				205
+#define GCC_VIDEO_AHB_CLK					206
+#define GCC_VIDEO_AXI0_CLK					207
+#define GCC_VIDEO_AXI1_CLK					208
+#define GCC_VIDEO_XO_CLK					209
+#define GPLL0							210
+#define GPLL0_OUT_EVEN						211
+#define GPLL9							212
 
 /* GCC resets */
 #define GCC_DPM_BCR						0
diff --git a/include/dt-bindings/clock/qcom,gcc-lito.h b/include/dt-bindings/clock/qcom,gcc-lito.h
index 25c44e6..a8d86ec 100644
--- a/include/dt-bindings/clock/qcom,gcc-lito.h
+++ b/include/dt-bindings/clock/qcom,gcc-lito.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
 
 #ifndef _DT_BINDINGS_CLK_QCOM_GCC_LITO_H
 #define _DT_BINDINGS_CLK_QCOM_GCC_LITO_H
@@ -153,5 +153,8 @@
 #define GCC_UFS_PHY_BCR						12
 #define GCC_USB30_PRIM_BCR					13
 #define GCC_USB_PHY_CFG_AHB2PHY_BCR				14
+#define GCC_QUSB2PHY_PRIM_BCR					15
+#define GCC_USB3_DP_PHY_PRIM_BCR				16
+#define GCC_USB3_PHY_PRIM_BCR					17
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-lito.h b/include/dt-bindings/clock/qcom,gpucc-lito.h
index 883c55d..e998b25 100644
--- a/include/dt-bindings/clock/qcom,gpucc-lito.h
+++ b/include/dt-bindings/clock/qcom,gpucc-lito.h
@@ -1,29 +1,27 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
 
 #ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_LITO_H
 #define _DT_BINDINGS_CLK_QCOM_GPU_CC_LITO_H
 
-#define GPU_CC_PLL0						0
-#define GPU_CC_PLL0_OUT_EVEN					1
-#define GPU_CC_PLL1						2
-#define GPU_CC_CX_GFX3D_CLK					3
-#define GPU_CC_CX_GFX3D_SLV_CLK					4
-#define GPU_CC_CX_GMU_CLK					5
-#define GPU_CC_CX_SNOC_DVM_CLK					6
-#define GPU_CC_CXO_AON_CLK					7
-#define GPU_CC_CXO_CLK						8
-#define GPU_CC_GMU_CLK_SRC					9
-#define GPU_CC_GX_CXO_CLK					10
-#define GPU_CC_GX_GFX3D_CLK					11
-#define GPU_CC_GX_GFX3D_CLK_SRC					12
-#define GPU_CC_GX_GMU_CLK					13
-#define GPU_CC_GX_VSENSE_CLK					14
-#define GPU_CC_AHB_CLK						15
-#define GPU_CC_CRC_AHB_CLK					16
-#define GPU_CC_CX_APB_CLK					17
-#define GPU_CC_RBCPR_AHB_CLK					18
-#define GPU_CC_RBCPR_CLK					19
-#define GPU_CC_RBCPR_CLK_SRC					20
+#define MEASURE_ONLY_GPU_CC_CX_GFX3D_CLK			0
+#define MEASURE_ONLY_GPU_CC_CX_GFX3D_SLV_CLK			1
+#define MEASURE_ONLY_GPU_CC_GX_GFX3D_CLK			2
+#define GPU_CC_PLL1						3
+#define GPU_CC_CX_GMU_CLK					4
+#define GPU_CC_CX_SNOC_DVM_CLK					5
+#define GPU_CC_CXO_AON_CLK					6
+#define GPU_CC_CXO_CLK						7
+#define GPU_CC_GMU_CLK_SRC					8
+#define GPU_CC_GX_CXO_CLK					9
+#define GPU_CC_GX_GMU_CLK					10
+#define GPU_CC_GX_VSENSE_CLK					11
+#define GPU_CC_AHB_CLK						12
+#define GPU_CC_CRC_AHB_CLK					13
+#define GPU_CC_CX_APB_CLK					14
+#define GPU_CC_RBCPR_AHB_CLK					15
+#define GPU_CC_RBCPR_CLK					16
+#define GPU_CC_RBCPR_CLK_SRC					17
+#define GPU_CC_SLEEP_CLK					18
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index 2b122c1..d6c1dff 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -23,5 +23,7 @@
 #define RPMH_RF_CLKD3_A				15
 #define RPMH_RF_CLKD4				16
 #define RPMH_RF_CLKD4_A				17
+#define RPMH_RF_CLK4				18
+#define RPMH_RF_CLK4_A				19
 
 #endif
diff --git a/include/dt-bindings/phy/qcom,lito-qmp-usb3.h b/include/dt-bindings/phy/qcom,lito-qmp-usb3.h
new file mode 100644
index 0000000..cea50b9
--- /dev/null
+++ b/include/dt-bindings/phy/qcom,lito-qmp-usb3.h
@@ -0,0 +1,727 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_PHY_QCOM_LITO_QMP_USB_H
+#define _DT_BINDINGS_PHY_QCOM_LITO_QMP_USB_H
+
+/* USB3-DP Combo PHY register offsets */
+
+#define USB3_DP_COM_PHY_MODE_CTRL				0x0000
+#define USB3_DP_COM_SW_RESET					0x0004
+#define USB3_DP_COM_POWER_DOWN_CTRL				0x0008
+#define USB3_DP_COM_SWI_CTRL					0x000c
+#define USB3_DP_COM_TYPEC_CTRL					0x0010
+#define USB3_DP_COM_TYPEC_PWRDN_CTRL				0x0014
+#define USB3_DP_COM_DP_BIST_CFG_0				0x0018
+#define USB3_DP_COM_RESET_OVRD_CTRL				0x001c
+#define USB3_DP_COM_DBG_CLK_MUX_CTRL				0x0020
+#define USB3_DP_COM_TYPEC_STATUS				0x0024
+#define USB3_DP_COM_PLACEHOLDER_STATUS				0x0028
+#define USB3_DP_COM_REVISION_ID0				0x002c
+#define USB3_DP_COM_REVISION_ID1				0x0030
+#define USB3_DP_COM_REVISION_ID2				0x0034
+#define USB3_DP_COM_REVISION_ID3				0x0038
+#define USB3_DP_QSERDES_COM_ATB_SEL1				0x1000
+#define USB3_DP_QSERDES_COM_ATB_SEL2				0x1004
+#define USB3_DP_QSERDES_COM_FREQ_UPDATE				0x1008
+#define USB3_DP_QSERDES_COM_BG_TIMER				0x100c
+#define USB3_DP_QSERDES_COM_SSC_EN_CENTER			0x1010
+#define USB3_DP_QSERDES_COM_SSC_ADJ_PER1			0x1014
+#define USB3_DP_QSERDES_COM_SSC_ADJ_PER2			0x1018
+#define USB3_DP_QSERDES_COM_SSC_PER1				0x101c
+#define USB3_DP_QSERDES_COM_SSC_PER2				0x1020
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE1_MODE0		0x1024
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE2_MODE0		0x1028
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE3_MODE0		0x102c
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE1_MODE1		0x1030
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE2_MODE1		0x1034
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE3_MODE1		0x1038
+#define USB3_DP_QSERDES_COM_POST_DIV				0x103c
+#define USB3_DP_QSERDES_COM_POST_DIV_MUX			0x1040
+#define USB3_DP_QSERDES_COM_BIAS_EN_CLKBUFLR_EN			0x1044
+#define USB3_DP_QSERDES_COM_CLK_ENABLE1				0x1048
+#define USB3_DP_QSERDES_COM_SYS_CLK_CTRL			0x104c
+#define USB3_DP_QSERDES_COM_SYSCLK_BUF_ENABLE			0x1050
+#define USB3_DP_QSERDES_COM_PLL_EN				0x1054
+#define USB3_DP_QSERDES_COM_PLL_IVCO				0x1058
+#define USB3_DP_QSERDES_COM_CMN_IETRIM				0x105c
+#define USB3_DP_QSERDES_COM_CMN_IPTRIM				0x1060
+#define USB3_DP_QSERDES_COM_EP_CLOCK_DETECT_CTRL		0x1064
+#define USB3_DP_QSERDES_COM_SYSCLK_DET_COMP_STATUS		0x1068
+#define USB3_DP_QSERDES_COM_CLK_EP_DIV_MODE0			0x106c
+#define USB3_DP_QSERDES_COM_CLK_EP_DIV_MODE1			0x1070
+#define USB3_DP_QSERDES_COM_CP_CTRL_MODE0			0x1074
+#define USB3_DP_QSERDES_COM_CP_CTRL_MODE1			0x1078
+#define USB3_DP_QSERDES_COM_PLL_RCTRL_MODE0			0x107c
+#define USB3_DP_QSERDES_COM_PLL_RCTRL_MODE1			0x1080
+#define USB3_DP_QSERDES_COM_PLL_CCTRL_MODE0			0x1084
+#define USB3_DP_QSERDES_COM_PLL_CCTRL_MODE1			0x1088
+#define USB3_DP_QSERDES_COM_PLL_CNTRL				0x108c
+#define USB3_DP_QSERDES_COM_BIAS_EN_CTRL_BY_PSM			0x1090
+#define USB3_DP_QSERDES_COM_SYSCLK_EN_SEL			0x1094
+#define USB3_DP_QSERDES_COM_CML_SYSCLK_SEL			0x1098
+#define USB3_DP_QSERDES_COM_RESETSM_CNTRL			0x109c
+#define USB3_DP_QSERDES_COM_RESETSM_CNTRL2			0x10a0
+#define USB3_DP_QSERDES_COM_LOCK_CMP_EN				0x10a4
+#define USB3_DP_QSERDES_COM_LOCK_CMP_CFG			0x10a8
+#define USB3_DP_QSERDES_COM_LOCK_CMP1_MODE0			0x10ac
+#define USB3_DP_QSERDES_COM_LOCK_CMP2_MODE0			0x10b0
+#define USB3_DP_QSERDES_COM_LOCK_CMP1_MODE1			0x10b4
+#define USB3_DP_QSERDES_COM_LOCK_CMP2_MODE1			0x10b8
+#define USB3_DP_QSERDES_COM_DEC_START_MODE0			0x10bc
+#define USB3_DP_QSERDES_COM_DEC_START_MSB_MODE0			0x10c0
+#define USB3_DP_QSERDES_COM_DEC_START_MODE1			0x10c4
+#define USB3_DP_QSERDES_COM_DEC_START_MSB_MODE1			0x10c8
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START1_MODE0		0x10cc
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START2_MODE0		0x10d0
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START3_MODE0		0x10d4
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START1_MODE1		0x10d8
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START2_MODE1		0x10dc
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START3_MODE1		0x10e0
+#define USB3_DP_QSERDES_COM_INTEGLOOP_INITVAL			0x10e4
+#define USB3_DP_QSERDES_COM_INTEGLOOP_EN			0x10e8
+#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN0_MODE0		0x10ec
+#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN1_MODE0		0x10f0
+#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN0_MODE1		0x10f4
+#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN1_MODE1		0x10f8
+#define USB3_DP_QSERDES_COM_INTEGLOOP_P_PATH_GAIN0		0x10fc
+#define USB3_DP_QSERDES_COM_INTEGLOOP_P_PATH_GAIN1		0x1100
+#define USB3_DP_QSERDES_COM_VCOCAL_DEADMAN_CTRL			0x1104
+#define USB3_DP_QSERDES_COM_VCO_TUNE_CTRL			0x1108
+#define USB3_DP_QSERDES_COM_VCO_TUNE_MAP			0x110c
+#define USB3_DP_QSERDES_COM_VCO_TUNE1_MODE0			0x1110
+#define USB3_DP_QSERDES_COM_VCO_TUNE2_MODE0			0x1114
+#define USB3_DP_QSERDES_COM_VCO_TUNE1_MODE1			0x1118
+#define USB3_DP_QSERDES_COM_VCO_TUNE2_MODE1			0x111c
+#define USB3_DP_QSERDES_COM_VCO_TUNE_INITVAL1			0x1120
+#define USB3_DP_QSERDES_COM_VCO_TUNE_INITVAL2			0x1124
+#define USB3_DP_QSERDES_COM_VCO_TUNE_MINVAL1			0x1128
+#define USB3_DP_QSERDES_COM_VCO_TUNE_MINVAL2			0x112c
+#define USB3_DP_QSERDES_COM_VCO_TUNE_MAXVAL1			0x1130
+#define USB3_DP_QSERDES_COM_VCO_TUNE_MAXVAL2			0x1134
+#define USB3_DP_QSERDES_COM_VCO_TUNE_TIMER1			0x1138
+#define USB3_DP_QSERDES_COM_VCO_TUNE_TIMER2			0x113c
+#define USB3_DP_QSERDES_COM_CMN_STATUS				0x1140
+#define USB3_DP_QSERDES_COM_RESET_SM_STATUS			0x1144
+#define USB3_DP_QSERDES_COM_RESTRIM_CODE_STATUS			0x1148
+#define USB3_DP_QSERDES_COM_PLLCAL_CODE1_STATUS			0x114c
+#define USB3_DP_QSERDES_COM_PLLCAL_CODE2_STATUS			0x1150
+#define USB3_DP_QSERDES_COM_CLK_SELECT				0x1154
+#define USB3_DP_QSERDES_COM_HSCLK_SEL				0x1158
+#define USB3_DP_QSERDES_COM_HSCLK_HS_SWITCH_SEL			0x115c
+#define USB3_DP_QSERDES_COM_INTEGLOOP_BINCODE_STATUS		0x1160
+#define USB3_DP_QSERDES_COM_PLL_ANALOG				0x1164
+#define USB3_DP_QSERDES_COM_CORECLK_DIV_MODE0			0x1168
+#define USB3_DP_QSERDES_COM_CORECLK_DIV_MODE1			0x116c
+#define USB3_DP_QSERDES_COM_SW_RESET				0x1170
+#define USB3_DP_QSERDES_COM_CORE_CLK_EN				0x1174
+#define USB3_DP_QSERDES_COM_C_READY_STATUS			0x1178
+#define USB3_DP_QSERDES_COM_CMN_CONFIG				0x117c
+#define USB3_DP_QSERDES_COM_CMN_RATE_OVERRIDE			0x1180
+#define USB3_DP_QSERDES_COM_SVS_MODE_CLK_SEL			0x1184
+#define USB3_DP_QSERDES_COM_DEBUG_BUS0				0x1188
+#define USB3_DP_QSERDES_COM_DEBUG_BUS1				0x118c
+#define USB3_DP_QSERDES_COM_DEBUG_BUS2				0x1190
+#define USB3_DP_QSERDES_COM_DEBUG_BUS3				0x1194
+#define USB3_DP_QSERDES_COM_DEBUG_BUS_SEL			0x1198
+#define USB3_DP_QSERDES_COM_CMN_MISC1				0x119c
+#define USB3_DP_QSERDES_COM_CMN_MODE				0x11a0
+#define USB3_DP_QSERDES_COM_CMN_MODE_CONTD			0x11a4
+#define USB3_DP_QSERDES_COM_VCO_DC_LEVEL_CTRL			0x11a8
+#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0		0x11ac
+#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0		0x11b0
+#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE1		0x11b4
+#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE1		0x11b8
+#define USB3_DP_QSERDES_COM_BIN_VCOCAL_HSCLK_SEL		0x11bc
+#define USB3_DP_QSERDES_COM_RESERVED_1				0x11c0
+#define USB3_DP_QSERDES_TXA_BIST_MODE_LANENO			0x1200
+#define USB3_DP_QSERDES_TXA_BIST_INVERT				0x1204
+#define USB3_DP_QSERDES_TXA_CLKBUF_ENABLE			0x1208
+#define USB3_DP_QSERDES_TXA_TX_EMP_POST1_LVL			0x120c
+#define USB3_DP_QSERDES_TXA_TX_IDLE_LVL_LARGE_AMP		0x1210
+#define USB3_DP_QSERDES_TXA_TX_DRV_LVL				0x1214
+#define USB3_DP_QSERDES_TXA_TX_DRV_LVL_OFFSET			0x1218
+#define USB3_DP_QSERDES_TXA_RESET_TSYNC_EN			0x121c
+#define USB3_DP_QSERDES_TXA_PRE_STALL_LDO_BOOST_EN		0x1220
+#define USB3_DP_QSERDES_TXA_TX_BAND				0x1224
+#define USB3_DP_QSERDES_TXA_SLEW_CNTL				0x1228
+#define USB3_DP_QSERDES_TXA_INTERFACE_SELECT			0x122c
+#define USB3_DP_QSERDES_TXA_LPB_EN				0x1230
+#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_TX			0x1234
+#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_RX			0x1238
+#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_OFFSET_TX		0x123c
+#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_OFFSET_RX		0x1240
+#define USB3_DP_QSERDES_TXA_PERL_LENGTH1			0x1244
+#define USB3_DP_QSERDES_TXA_PERL_LENGTH2			0x1248
+#define USB3_DP_QSERDES_TXA_SERDES_BYP_EN_OUT			0x124c
+#define USB3_DP_QSERDES_TXA_DEBUG_BUS_SEL			0x1250
+#define USB3_DP_QSERDES_TXA_TRANSCEIVER_BIAS_EN			0x1254
+#define USB3_DP_QSERDES_TXA_HIGHZ_DRVR_EN			0x1258
+#define USB3_DP_QSERDES_TXA_TX_POL_INV				0x125c
+#define USB3_DP_QSERDES_TXA_PARRATE_REC_DETECT_IDLE_EN		0x1260
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN1			0x1264
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN2			0x1268
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN3			0x126c
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN4			0x1270
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN5			0x1274
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN6			0x1278
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN7			0x127c
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN8			0x1280
+#define USB3_DP_QSERDES_TXA_LANE_MODE_1				0x1284
+#define USB3_DP_QSERDES_TXA_LANE_MODE_2				0x1288
+#define USB3_DP_QSERDES_TXA_LANE_MODE_3				0x128c
+#define USB3_DP_QSERDES_TXA_LANE_MODE_4				0x1290
+#define USB3_DP_QSERDES_TXA_LANE_MODE_5				0x1294
+#define USB3_DP_QSERDES_TXA_ATB_SEL1				0x1298
+#define USB3_DP_QSERDES_TXA_ATB_SEL2				0x129c
+#define USB3_DP_QSERDES_TXA_RCV_DETECT_LVL			0x12a0
+#define USB3_DP_QSERDES_TXA_RCV_DETECT_LVL_2			0x12a4
+#define USB3_DP_QSERDES_TXA_PRBS_SEED1				0x12a8
+#define USB3_DP_QSERDES_TXA_PRBS_SEED2				0x12ac
+#define USB3_DP_QSERDES_TXA_PRBS_SEED3				0x12b0
+#define USB3_DP_QSERDES_TXA_PRBS_SEED4				0x12b4
+#define USB3_DP_QSERDES_TXA_RESET_GEN				0x12b8
+#define USB3_DP_QSERDES_TXA_RESET_GEN_MUXES			0x12bc
+#define USB3_DP_QSERDES_TXA_TRAN_DRVR_EMP_EN			0x12c0
+#define USB3_DP_QSERDES_TXA_TX_INTERFACE_MODE			0x12c4
+#define USB3_DP_QSERDES_TXA_VMODE_CTRL1				0x12c8
+#define USB3_DP_QSERDES_TXA_ALOG_OBSV_BUS_CTRL_1		0x12cc
+#define USB3_DP_QSERDES_TXA_BIST_STATUS				0x12d0
+#define USB3_DP_QSERDES_TXA_BIST_ERROR_COUNT1			0x12d4
+#define USB3_DP_QSERDES_TXA_BIST_ERROR_COUNT2			0x12d8
+#define USB3_DP_QSERDES_TXA_ALOG_OBSV_BUS_STATUS_1		0x12dc
+#define USB3_DP_QSERDES_TXA_LANE_DIG_CONFIG			0x12e0
+#define USB3_DP_QSERDES_TXA_PI_QEC_CTRL				0x12e4
+#define USB3_DP_QSERDES_TXA_PRE_EMPH				0x12e8
+#define USB3_DP_QSERDES_TXA_SW_RESET				0x12ec
+#define USB3_DP_QSERDES_TXA_DCC_OFFSET				0x12f0
+#define USB3_DP_QSERDES_TXA_DIG_BKUP_CTRL			0x12f4
+#define USB3_DP_QSERDES_TXA_DEBUG_BUS0				0x12f8
+#define USB3_DP_QSERDES_TXA_DEBUG_BUS1				0x12fc
+#define USB3_DP_QSERDES_TXA_DEBUG_BUS2				0x1300
+#define USB3_DP_QSERDES_TXA_DEBUG_BUS3				0x1304
+#define USB3_DP_QSERDES_TXA_READ_EQCODE				0x1308
+#define USB3_DP_QSERDES_TXA_READ_OFFSETCODE			0x130c
+#define USB3_DP_QSERDES_TXA_IA_ERROR_COUNTER_LOW		0x1310
+#define USB3_DP_QSERDES_TXA_IA_ERROR_COUNTER_HIGH		0x1314
+#define USB3_DP_QSERDES_TXA_VGA_READ_CODE			0x1318
+#define USB3_DP_QSERDES_TXA_VTH_READ_CODE			0x131c
+#define USB3_DP_QSERDES_TXA_DFE_TAP1_READ_CODE			0x1320
+#define USB3_DP_QSERDES_TXA_DFE_TAP2_READ_CODE			0x1324
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_I			0x1328
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_IBAR			0x132c
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_Q			0x1330
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_QBAR			0x1334
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_A			0x1338
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_ABAR			0x133c
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_SM_ON			0x1340
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_CAL_DONE		0x1344
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_SIGNERROR		0x1348
+#define USB3_DP_QSERDES_TXA_DCC_CAL_STATUS			0x134c
+#define USB3_DP_QSERDES_RXA_UCDR_FO_GAIN_HALF			0x1400
+#define USB3_DP_QSERDES_RXA_UCDR_FO_GAIN_QUARTER		0x1404
+#define USB3_DP_QSERDES_RXA_UCDR_FO_GAIN			0x1408
+#define USB3_DP_QSERDES_RXA_UCDR_SO_GAIN_HALF			0x140c
+#define USB3_DP_QSERDES_RXA_UCDR_SO_GAIN_QUARTER		0x1410
+#define USB3_DP_QSERDES_RXA_UCDR_SO_GAIN			0x1414
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_FO_GAIN_HALF		0x1418
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_FO_GAIN_QUARTER		0x141c
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_FO_GAIN			0x1420
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_SO_GAIN_HALF		0x1424
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_SO_GAIN_QUARTER		0x1428
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_SO_GAIN			0x142c
+#define USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_FO_GAIN		0x1430
+#define USB3_DP_QSERDES_RXA_UCDR_SO_SATURATION_AND_ENABLE	0x1434
+#define USB3_DP_QSERDES_RXA_UCDR_FO_TO_SO_DELAY			0x1438
+#define USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_COUNT_LOW		0x143c
+#define USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_COUNT_HIGH		0x1440
+#define USB3_DP_QSERDES_RXA_UCDR_PI_CONTROLS			0x1444
+#define USB3_DP_QSERDES_RXA_UCDR_PI_CTRL2			0x1448
+#define USB3_DP_QSERDES_RXA_UCDR_SB2_THRESH1			0x144c
+#define USB3_DP_QSERDES_RXA_UCDR_SB2_THRESH2			0x1450
+#define USB3_DP_QSERDES_RXA_UCDR_SB2_GAIN1			0x1454
+#define USB3_DP_QSERDES_RXA_UCDR_SB2_GAIN2			0x1458
+#define USB3_DP_QSERDES_RXA_AUX_CONTROL				0x145c
+#define USB3_DP_QSERDES_RXA_AUX_DATA_TCOARSE_TFINE		0x1460
+#define USB3_DP_QSERDES_RXA_RCLK_AUXDATA_SEL			0x1464
+#define USB3_DP_QSERDES_RXA_AC_JTAG_ENABLE			0x1468
+#define USB3_DP_QSERDES_RXA_AC_JTAG_INITP			0x146c
+#define USB3_DP_QSERDES_RXA_AC_JTAG_INITN			0x1470
+#define USB3_DP_QSERDES_RXA_AC_JTAG_LVL				0x1474
+#define USB3_DP_QSERDES_RXA_AC_JTAG_MODE			0x1478
+#define USB3_DP_QSERDES_RXA_AC_JTAG_RESET			0x147c
+#define USB3_DP_QSERDES_RXA_RX_TERM_BW				0x1480
+#define USB3_DP_QSERDES_RXA_RX_RCVR_IQ_EN			0x1484
+#define USB3_DP_QSERDES_RXA_RX_IDAC_I_DC_OFFSETS		0x1488
+#define USB3_DP_QSERDES_RXA_RX_IDAC_IBAR_DC_OFFSETS		0x148c
+#define USB3_DP_QSERDES_RXA_RX_IDAC_Q_DC_OFFSETS		0x1490
+#define USB3_DP_QSERDES_RXA_RX_IDAC_QBAR_DC_OFFSETS		0x1494
+#define USB3_DP_QSERDES_RXA_RX_IDAC_A_DC_OFFSETS		0x1498
+#define USB3_DP_QSERDES_RXA_RX_IDAC_ABAR_DC_OFFSETS		0x149c
+#define USB3_DP_QSERDES_RXA_RX_IDAC_EN				0x14a0
+#define USB3_DP_QSERDES_RXA_RX_IDAC_ENABLES			0x14a4
+#define USB3_DP_QSERDES_RXA_RX_IDAC_SIGN			0x14a8
+#define USB3_DP_QSERDES_RXA_RX_HIGHZ_HIGHRATE			0x14ac
+#define USB3_DP_QSERDES_RXA_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET	0x14b0
+#define USB3_DP_QSERDES_RXA_DFE_1				0x14b4
+#define USB3_DP_QSERDES_RXA_DFE_2				0x14b8
+#define USB3_DP_QSERDES_RXA_DFE_3				0x14bc
+#define USB3_DP_QSERDES_RXA_DFE_4				0x14c0
+#define USB3_DP_QSERDES_RXA_TX_ADAPT_PRE_THRESH1		0x14c4
+#define USB3_DP_QSERDES_RXA_TX_ADAPT_PRE_THRESH2		0x14c8
+#define USB3_DP_QSERDES_RXA_TX_ADAPT_POST_THRESH		0x14cc
+#define USB3_DP_QSERDES_RXA_TX_ADAPT_MAIN_THRESH		0x14d0
+#define USB3_DP_QSERDES_RXA_VGA_CAL_CNTRL1			0x14d4
+#define USB3_DP_QSERDES_RXA_VGA_CAL_CNTRL2			0x14d8
+#define USB3_DP_QSERDES_RXA_GM_CAL				0x14dc
+#define USB3_DP_QSERDES_RXA_RX_VGA_GAIN2_LSB			0x14e0
+#define USB3_DP_QSERDES_RXA_RX_VGA_GAIN2_MSB			0x14e4
+#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL1		0x14e8
+#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL2		0x14ec
+#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL3		0x14f0
+#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL4		0x14f4
+#define USB3_DP_QSERDES_RXA_RX_IDAC_TSETTLE_LOW			0x14f8
+#define USB3_DP_QSERDES_RXA_RX_IDAC_TSETTLE_HIGH		0x14fc
+#define USB3_DP_QSERDES_RXA_RX_IDAC_MEASURE_TIME		0x1500
+#define USB3_DP_QSERDES_RXA_RX_IDAC_ACCUMULATOR			0x1504
+#define USB3_DP_QSERDES_RXA_RX_EQ_OFFSET_LSB			0x1508
+#define USB3_DP_QSERDES_RXA_RX_EQ_OFFSET_MSB			0x150c
+#define USB3_DP_QSERDES_RXA_RX_EQ_OFFSET_ADAPTOR_CNTRL1		0x1510
+#define USB3_DP_QSERDES_RXA_RX_OFFSET_ADAPTOR_CNTRL2		0x1514
+#define USB3_DP_QSERDES_RXA_SIGDET_ENABLES			0x1518
+#define USB3_DP_QSERDES_RXA_SIGDET_CNTRL			0x151c
+#define USB3_DP_QSERDES_RXA_SIGDET_LVL				0x1520
+#define USB3_DP_QSERDES_RXA_SIGDET_DEGLITCH_CNTRL		0x1524
+#define USB3_DP_QSERDES_RXA_RX_BAND				0x1528
+#define USB3_DP_QSERDES_RXA_CDR_FREEZE_UP_DN			0x152c
+#define USB3_DP_QSERDES_RXA_CDR_RESET_OVERRIDE			0x1530
+#define USB3_DP_QSERDES_RXA_RX_INTERFACE_MODE			0x1534
+#define USB3_DP_QSERDES_RXA_JITTER_GEN_MODE			0x1538
+#define USB3_DP_QSERDES_RXA_SJ_AMP1				0x153c
+#define USB3_DP_QSERDES_RXA_SJ_AMP2				0x1540
+#define USB3_DP_QSERDES_RXA_SJ_PER1				0x1544
+#define USB3_DP_QSERDES_RXA_SJ_PER2				0x1548
+#define USB3_DP_QSERDES_RXA_PPM_OFFSET1				0x154c
+#define USB3_DP_QSERDES_RXA_PPM_OFFSET2				0x1550
+#define USB3_DP_QSERDES_RXA_SIGN_PPM_PERIOD1			0x1554
+#define USB3_DP_QSERDES_RXA_SIGN_PPM_PERIOD2			0x1558
+#define USB3_DP_QSERDES_RXA_RX_MODE_00_LOW			0x155c
+#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH			0x1560
+#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH2			0x1564
+#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH3			0x1568
+#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH4			0x156c
+#define USB3_DP_QSERDES_RXA_RX_MODE_01_LOW			0x1570
+#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH			0x1574
+#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH2			0x1578
+#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH3			0x157c
+#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH4			0x1580
+#define USB3_DP_QSERDES_RXA_RX_MODE_10_LOW			0x1584
+#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH			0x1588
+#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH2			0x158c
+#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH3			0x1590
+#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH4			0x1594
+#define USB3_DP_QSERDES_RXA_PHPRE_CTRL				0x1598
+#define USB3_DP_QSERDES_RXA_PHPRE_INITVAL			0x159c
+#define USB3_DP_QSERDES_RXA_DFE_EN_TIMER			0x15a0
+#define USB3_DP_QSERDES_RXA_DFE_CTLE_POST_CAL_OFFSET		0x15a4
+#define USB3_DP_QSERDES_RXA_DCC_CTRL1				0x15a8
+#define USB3_DP_QSERDES_RXA_DCC_CTRL2				0x15ac
+#define USB3_DP_QSERDES_RXA_VTH_CODE				0x15b0
+#define USB3_DP_QSERDES_RXA_VTH_MIN_THRESH			0x15b4
+#define USB3_DP_QSERDES_RXA_VTH_MAX_THRESH			0x15b8
+#define USB3_DP_QSERDES_RXA_ALOG_OBSV_BUS_CTRL_1		0x15bc
+#define USB3_DP_QSERDES_RXA_PI_CTRL1				0x15c0
+#define USB3_DP_QSERDES_RXA_PI_CTRL2				0x15c4
+#define USB3_DP_QSERDES_RXA_PI_QUAD				0x15c8
+#define USB3_DP_QSERDES_RXA_IDATA1				0x15cc
+#define USB3_DP_QSERDES_RXA_IDATA2				0x15d0
+#define USB3_DP_QSERDES_RXA_AUX_DATA1				0x15d4
+#define USB3_DP_QSERDES_RXA_AUX_DATA2				0x15d8
+#define USB3_DP_QSERDES_RXA_AC_JTAG_OUTP			0x15dc
+#define USB3_DP_QSERDES_RXA_AC_JTAG_OUTN			0x15e0
+#define USB3_DP_QSERDES_RXA_RX_SIGDET				0x15e4
+#define USB3_DP_QSERDES_RXA_ALOG_OBSV_BUS_STATUS_1		0x15e8
+#define USB3_DP_QSERDES_TXB_BIST_MODE_LANENO			0x1600
+#define USB3_DP_QSERDES_TXB_BIST_INVERT				0x1604
+#define USB3_DP_QSERDES_TXB_CLKBUF_ENABLE			0x1608
+#define USB3_DP_QSERDES_TXB_TX_EMP_POST1_LVL			0x160c
+#define USB3_DP_QSERDES_TXB_TX_IDLE_LVL_LARGE_AMP		0x1610
+#define USB3_DP_QSERDES_TXB_TX_DRV_LVL				0x1614
+#define USB3_DP_QSERDES_TXB_TX_DRV_LVL_OFFSET			0x1618
+#define USB3_DP_QSERDES_TXB_RESET_TSYNC_EN			0x161c
+#define USB3_DP_QSERDES_TXB_PRE_STALL_LDO_BOOST_EN		0x1620
+#define USB3_DP_QSERDES_TXB_TX_BAND				0x1624
+#define USB3_DP_QSERDES_TXB_SLEW_CNTL				0x1628
+#define USB3_DP_QSERDES_TXB_INTERFACE_SELECT			0x162c
+#define USB3_DP_QSERDES_TXB_LPB_EN				0x1630
+#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_TX			0x1634
+#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_RX			0x1638
+#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_OFFSET_TX		0x163c
+#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_OFFSET_RX		0x1640
+#define USB3_DP_QSERDES_TXB_PERL_LENGTH1			0x1644
+#define USB3_DP_QSERDES_TXB_PERL_LENGTH2			0x1648
+#define USB3_DP_QSERDES_TXB_SERDES_BYP_EN_OUT			0x164c
+#define USB3_DP_QSERDES_TXB_DEBUG_BUS_SEL			0x1650
+#define USB3_DP_QSERDES_TXB_TRANSCEIVER_BIAS_EN			0x1654
+#define USB3_DP_QSERDES_TXB_HIGHZ_DRVR_EN			0x1658
+#define USB3_DP_QSERDES_TXB_TX_POL_INV				0x165c
+#define USB3_DP_QSERDES_TXB_PARRATE_REC_DETECT_IDLE_EN		0x1660
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN1			0x1664
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN2			0x1668
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN3			0x166c
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN4			0x1670
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN5			0x1674
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN6			0x1678
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN7			0x167c
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN8			0x1680
+#define USB3_DP_QSERDES_TXB_LANE_MODE_1				0x1684
+#define USB3_DP_QSERDES_TXB_LANE_MODE_2				0x1688
+#define USB3_DP_QSERDES_TXB_LANE_MODE_3				0x168c
+#define USB3_DP_QSERDES_TXB_LANE_MODE_4				0x1690
+#define USB3_DP_QSERDES_TXB_LANE_MODE_5				0x1694
+#define USB3_DP_QSERDES_TXB_ATB_SEL1				0x1698
+#define USB3_DP_QSERDES_TXB_ATB_SEL2				0x169c
+#define USB3_DP_QSERDES_TXB_RCV_DETECT_LVL			0x16a0
+#define USB3_DP_QSERDES_TXB_RCV_DETECT_LVL_2			0x16a4
+#define USB3_DP_QSERDES_TXB_PRBS_SEED1				0x16a8
+#define USB3_DP_QSERDES_TXB_PRBS_SEED2				0x16ac
+#define USB3_DP_QSERDES_TXB_PRBS_SEED3				0x16b0
+#define USB3_DP_QSERDES_TXB_PRBS_SEED4				0x16b4
+#define USB3_DP_QSERDES_TXB_RESET_GEN				0x16b8
+#define USB3_DP_QSERDES_TXB_RESET_GEN_MUXES			0x16bc
+#define USB3_DP_QSERDES_TXB_TRAN_DRVR_EMP_EN			0x16c0
+#define USB3_DP_QSERDES_TXB_TX_INTERFACE_MODE			0x16c4
+#define USB3_DP_QSERDES_TXB_VMODE_CTRL1				0x16c8
+#define USB3_DP_QSERDES_TXB_ALOG_OBSV_BUS_CTRL_1		0x16cc
+#define USB3_DP_QSERDES_TXB_BIST_STATUS				0x16d0
+#define USB3_DP_QSERDES_TXB_BIST_ERROR_COUNT1			0x16d4
+#define USB3_DP_QSERDES_TXB_BIST_ERROR_COUNT2			0x16d8
+#define USB3_DP_QSERDES_TXB_ALOG_OBSV_BUS_STATUS_1		0x16dc
+#define USB3_DP_QSERDES_TXB_LANE_DIG_CONFIG			0x16e0
+#define USB3_DP_QSERDES_TXB_PI_QEC_CTRL				0x16e4
+#define USB3_DP_QSERDES_TXB_PRE_EMPH				0x16e8
+#define USB3_DP_QSERDES_TXB_SW_RESET				0x16ec
+#define USB3_DP_QSERDES_TXB_DCC_OFFSET				0x16f0
+#define USB3_DP_QSERDES_TXB_DIG_BKUP_CTRL			0x16f4
+#define USB3_DP_QSERDES_TXB_DEBUG_BUS0				0x16f8
+#define USB3_DP_QSERDES_TXB_DEBUG_BUS1				0x16fc
+#define USB3_DP_QSERDES_TXB_DEBUG_BUS2				0x1700
+#define USB3_DP_QSERDES_TXB_DEBUG_BUS3				0x1704
+#define USB3_DP_QSERDES_TXB_READ_EQCODE				0x1708
+#define USB3_DP_QSERDES_TXB_READ_OFFSETCODE			0x170c
+#define USB3_DP_QSERDES_TXB_IA_ERROR_COUNTER_LOW		0x1710
+#define USB3_DP_QSERDES_TXB_IA_ERROR_COUNTER_HIGH		0x1714
+#define USB3_DP_QSERDES_TXB_VGA_READ_CODE			0x1718
+#define USB3_DP_QSERDES_TXB_VTH_READ_CODE			0x171c
+#define USB3_DP_QSERDES_TXB_DFE_TAP1_READ_CODE			0x1720
+#define USB3_DP_QSERDES_TXB_DFE_TAP2_READ_CODE			0x1724
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_I			0x1728
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_IBAR			0x172c
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_Q			0x1730
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_QBAR			0x1734
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_A			0x1738
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_ABAR			0x173c
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_SM_ON			0x1740
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_CAL_DONE		0x1744
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_SIGNERROR		0x1748
+#define USB3_DP_QSERDES_TXB_DCC_CAL_STATUS			0x174c
+#define USB3_DP_QSERDES_RXB_UCDR_FO_GAIN_HALF			0x1800
+#define USB3_DP_QSERDES_RXB_UCDR_FO_GAIN_QUARTER		0x1804
+#define USB3_DP_QSERDES_RXB_UCDR_FO_GAIN			0x1808
+#define USB3_DP_QSERDES_RXB_UCDR_SO_GAIN_HALF			0x180c
+#define USB3_DP_QSERDES_RXB_UCDR_SO_GAIN_QUARTER		0x1810
+#define USB3_DP_QSERDES_RXB_UCDR_SO_GAIN			0x1814
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_FO_GAIN_HALF		0x1818
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_FO_GAIN_QUARTER		0x181c
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_FO_GAIN			0x1820
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_SO_GAIN_HALF		0x1824
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_SO_GAIN_QUARTER		0x1828
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_SO_GAIN			0x182c
+#define USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_FO_GAIN		0x1830
+#define USB3_DP_QSERDES_RXB_UCDR_SO_SATURATION_AND_ENABLE	0x1834
+#define USB3_DP_QSERDES_RXB_UCDR_FO_TO_SO_DELAY			0x1838
+#define USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_COUNT_LOW		0x183c
+#define USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_COUNT_HIGH		0x1840
+#define USB3_DP_QSERDES_RXB_UCDR_PI_CONTROLS			0x1844
+#define USB3_DP_QSERDES_RXB_UCDR_PI_CTRL2			0x1848
+#define USB3_DP_QSERDES_RXB_UCDR_SB2_THRESH1			0x184c
+#define USB3_DP_QSERDES_RXB_UCDR_SB2_THRESH2			0x1850
+#define USB3_DP_QSERDES_RXB_UCDR_SB2_GAIN1			0x1854
+#define USB3_DP_QSERDES_RXB_UCDR_SB2_GAIN2			0x1858
+#define USB3_DP_QSERDES_RXB_AUX_CONTROL				0x185c
+#define USB3_DP_QSERDES_RXB_AUX_DATA_TCOARSE_TFINE		0x1860
+#define USB3_DP_QSERDES_RXB_RCLK_AUXDATA_SEL			0x1864
+#define USB3_DP_QSERDES_RXB_AC_JTAG_ENABLE			0x1868
+#define USB3_DP_QSERDES_RXB_AC_JTAG_INITP			0x186c
+#define USB3_DP_QSERDES_RXB_AC_JTAG_INITN			0x1870
+#define USB3_DP_QSERDES_RXB_AC_JTAG_LVL				0x1874
+#define USB3_DP_QSERDES_RXB_AC_JTAG_MODE			0x1878
+#define USB3_DP_QSERDES_RXB_AC_JTAG_RESET			0x187c
+#define USB3_DP_QSERDES_RXB_RX_TERM_BW				0x1880
+#define USB3_DP_QSERDES_RXB_RX_RCVR_IQ_EN			0x1884
+#define USB3_DP_QSERDES_RXB_RX_IDAC_I_DC_OFFSETS		0x1888
+#define USB3_DP_QSERDES_RXB_RX_IDAC_IBAR_DC_OFFSETS		0x188c
+#define USB3_DP_QSERDES_RXB_RX_IDAC_Q_DC_OFFSETS		0x1890
+#define USB3_DP_QSERDES_RXB_RX_IDAC_QBAR_DC_OFFSETS		0x1894
+#define USB3_DP_QSERDES_RXB_RX_IDAC_A_DC_OFFSETS		0x1898
+#define USB3_DP_QSERDES_RXB_RX_IDAC_ABAR_DC_OFFSETS		0x189c
+#define USB3_DP_QSERDES_RXB_RX_IDAC_EN				0x18a0
+#define USB3_DP_QSERDES_RXB_RX_IDAC_ENABLES			0x18a4
+#define USB3_DP_QSERDES_RXB_RX_IDAC_SIGN			0x18a8
+#define USB3_DP_QSERDES_RXB_RX_HIGHZ_HIGHRATE			0x18ac
+#define USB3_DP_QSERDES_RXB_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET	0x18b0
+#define USB3_DP_QSERDES_RXB_DFE_1				0x18b4
+#define USB3_DP_QSERDES_RXB_DFE_2				0x18b8
+#define USB3_DP_QSERDES_RXB_DFE_3				0x18bc
+#define USB3_DP_QSERDES_RXB_DFE_4				0x18c0
+#define USB3_DP_QSERDES_RXB_TX_ADAPT_PRE_THRESH1		0x18c4
+#define USB3_DP_QSERDES_RXB_TX_ADAPT_PRE_THRESH2		0x18c8
+#define USB3_DP_QSERDES_RXB_TX_ADAPT_POST_THRESH		0x18cc
+#define USB3_DP_QSERDES_RXB_TX_ADAPT_MAIN_THRESH		0x18d0
+#define USB3_DP_QSERDES_RXB_VGA_CAL_CNTRL1			0x18d4
+#define USB3_DP_QSERDES_RXB_VGA_CAL_CNTRL2			0x18d8
+#define USB3_DP_QSERDES_RXB_GM_CAL				0x18dc
+#define USB3_DP_QSERDES_RXB_RX_VGA_GAIN2_LSB			0x18e0
+#define USB3_DP_QSERDES_RXB_RX_VGA_GAIN2_MSB			0x18e4
+#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL1		0x18e8
+#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL2		0x18ec
+#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL3		0x18f0
+#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL4		0x18f4
+#define USB3_DP_QSERDES_RXB_RX_IDAC_TSETTLE_LOW			0x18f8
+#define USB3_DP_QSERDES_RXB_RX_IDAC_TSETTLE_HIGH		0x18fc
+#define USB3_DP_QSERDES_RXB_RX_IDAC_MEASURE_TIME		0x1900
+#define USB3_DP_QSERDES_RXB_RX_IDAC_ACCUMULATOR			0x1904
+#define USB3_DP_QSERDES_RXB_RX_EQ_OFFSET_LSB			0x1908
+#define USB3_DP_QSERDES_RXB_RX_EQ_OFFSET_MSB			0x190c
+#define USB3_DP_QSERDES_RXB_RX_EQ_OFFSET_ADAPTOR_CNTRL1		0x1910
+#define USB3_DP_QSERDES_RXB_RX_OFFSET_ADAPTOR_CNTRL2		0x1914
+#define USB3_DP_QSERDES_RXB_SIGDET_ENABLES			0x1918
+#define USB3_DP_QSERDES_RXB_SIGDET_CNTRL			0x191c
+#define USB3_DP_QSERDES_RXB_SIGDET_LVL				0x1920
+#define USB3_DP_QSERDES_RXB_SIGDET_DEGLITCH_CNTRL		0x1924
+#define USB3_DP_QSERDES_RXB_RX_BAND				0x1928
+#define USB3_DP_QSERDES_RXB_CDR_FREEZE_UP_DN			0x192c
+#define USB3_DP_QSERDES_RXB_CDR_RESET_OVERRIDE			0x1930
+#define USB3_DP_QSERDES_RXB_RX_INTERFACE_MODE			0x1934
+#define USB3_DP_QSERDES_RXB_JITTER_GEN_MODE			0x1938
+#define USB3_DP_QSERDES_RXB_SJ_AMP1				0x193c
+#define USB3_DP_QSERDES_RXB_SJ_AMP2				0x1940
+#define USB3_DP_QSERDES_RXB_SJ_PER1				0x1944
+#define USB3_DP_QSERDES_RXB_SJ_PER2				0x1948
+#define USB3_DP_QSERDES_RXB_PPM_OFFSET1				0x194c
+#define USB3_DP_QSERDES_RXB_PPM_OFFSET2				0x1950
+#define USB3_DP_QSERDES_RXB_SIGN_PPM_PERIOD1			0x1954
+#define USB3_DP_QSERDES_RXB_SIGN_PPM_PERIOD2			0x1958
+#define USB3_DP_QSERDES_RXB_RX_MODE_00_LOW			0x195c
+#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH			0x1960
+#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH2			0x1964
+#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH3			0x1968
+#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH4			0x196c
+#define USB3_DP_QSERDES_RXB_RX_MODE_01_LOW			0x1970
+#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH			0x1974
+#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH2			0x1978
+#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH3			0x197c
+#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH4			0x1980
+#define USB3_DP_QSERDES_RXB_RX_MODE_10_LOW			0x1984
+#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH			0x1988
+#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH2			0x198c
+#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH3			0x1990
+#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH4			0x1994
+#define USB3_DP_QSERDES_RXB_PHPRE_CTRL				0x1998
+#define USB3_DP_QSERDES_RXB_PHPRE_INITVAL			0x199c
+#define USB3_DP_QSERDES_RXB_DFE_EN_TIMER			0x19a0
+#define USB3_DP_QSERDES_RXB_DFE_CTLE_POST_CAL_OFFSET		0x19a4
+#define USB3_DP_QSERDES_RXB_DCC_CTRL1				0x19a8
+#define USB3_DP_QSERDES_RXB_DCC_CTRL2				0x19ac
+#define USB3_DP_QSERDES_RXB_VTH_CODE				0x19b0
+#define USB3_DP_QSERDES_RXB_VTH_MIN_THRESH			0x19b4
+#define USB3_DP_QSERDES_RXB_VTH_MAX_THRESH			0x19b8
+#define USB3_DP_QSERDES_RXB_ALOG_OBSV_BUS_CTRL_1		0x19bc
+#define USB3_DP_QSERDES_RXB_PI_CTRL1				0x19c0
+#define USB3_DP_QSERDES_RXB_PI_CTRL2				0x19c4
+#define USB3_DP_QSERDES_RXB_PI_QUAD				0x19c8
+#define USB3_DP_QSERDES_RXB_IDATA1				0x19cc
+#define USB3_DP_QSERDES_RXB_IDATA2				0x19d0
+#define USB3_DP_QSERDES_RXB_AUX_DATA1				0x19d4
+#define USB3_DP_QSERDES_RXB_AUX_DATA2				0x19d8
+#define USB3_DP_QSERDES_RXB_AC_JTAG_OUTP			0x19dc
+#define USB3_DP_QSERDES_RXB_AC_JTAG_OUTN			0x19e0
+#define USB3_DP_QSERDES_RXB_RX_SIGDET				0x19e4
+#define USB3_DP_QSERDES_RXB_ALOG_OBSV_BUS_STATUS_1		0x19e8
+#define USB3_DP_PCS_MISC_TYPEC_CTRL				0x1a00
+#define USB3_DP_PCS_MISC_TYPEC_PWRDN_CTRL			0x1a04
+#define USB3_DP_PCS_MISC_PCS_MISC_CONFIG1			0x1a08
+#define USB3_DP_PCS_MISC_CLAMP_ENABLE				0x1a0c
+#define USB3_DP_PCS_MISC_TYPEC_STATUS				0x1a10
+#define USB3_DP_PCS_MISC_PLACEHOLDER_STATUS			0x1a14
+#define USB3_DP_PCS_LN_PCS_STATUS1				0x1b00
+#define USB3_DP_PCS_LN_PCS_STATUS2				0x1b04
+#define USB3_DP_PCS_LN_PCS_STATUS2_CLEAR			0x1b08
+#define USB3_DP_PCS_LN_PCS_STATUS3				0x1b0c
+#define USB3_DP_PCS_LN_BIST_CHK_ERR_CNT_L_STATUS		0x1b10
+#define USB3_DP_PCS_LN_BIST_CHK_ERR_CNT_H_STATUS		0x1b14
+#define USB3_DP_PCS_LN_BIST_CHK_STATUS				0x1b18
+#define USB3_DP_PCS_LN_INSIG_SW_CTRL1				0x1b1c
+#define USB3_DP_PCS_LN_INSIG_MX_CTRL1				0x1b20
+#define USB3_DP_PCS_LN_OUTSIG_SW_CTRL1				0x1b24
+#define USB3_DP_PCS_LN_OUTSIG_MX_CTRL1				0x1b28
+#define USB3_DP_PCS_LN_TEST_CONTROL1				0x1b2c
+#define USB3_DP_PCS_LN_BIST_CTRL				0x1b30
+#define USB3_DP_PCS_LN_PRBS_SEED0				0x1b34
+#define USB3_DP_PCS_LN_PRBS_SEED1				0x1b38
+#define USB3_DP_PCS_LN_FIXED_PAT_CTRL				0x1b3c
+#define USB3_DP_PCS_LN_EQ_CONFIG				0x1b40
+#define USB3_DP_PCS_LN_TEST_CONTROL2				0x1b44
+#define USB3_DP_PCS_LN_TEST_CONTROL3				0x1b48
+#define USB3_DP_PCS_SW_RESET					0x1c00
+#define USB3_DP_PCS_REVISION_ID0				0x1c04
+#define USB3_DP_PCS_REVISION_ID1				0x1c08
+#define USB3_DP_PCS_REVISION_ID2				0x1c0c
+#define USB3_DP_PCS_REVISION_ID3				0x1c10
+#define USB3_DP_PCS_PCS_STATUS1					0x1c14
+#define USB3_DP_PCS_PCS_STATUS2					0x1c18
+#define USB3_DP_PCS_PCS_STATUS3					0x1c1c
+#define USB3_DP_PCS_PCS_STATUS4					0x1c20
+#define USB3_DP_PCS_PCS_STATUS5					0x1c24
+#define USB3_DP_PCS_PCS_STATUS6					0x1c28
+#define USB3_DP_PCS_PCS_STATUS7					0x1c2c
+#define USB3_DP_PCS_DEBUG_BUS_0_STATUS				0x1c30
+#define USB3_DP_PCS_DEBUG_BUS_1_STATUS				0x1c34
+#define USB3_DP_PCS_DEBUG_BUS_2_STATUS				0x1c38
+#define USB3_DP_PCS_DEBUG_BUS_3_STATUS				0x1c3c
+#define USB3_DP_PCS_POWER_DOWN_CONTROL				0x1c40
+#define USB3_DP_PCS_START_CONTROL				0x1c44
+#define USB3_DP_PCS_INSIG_SW_CTRL1				0x1c48
+#define USB3_DP_PCS_INSIG_SW_CTRL2				0x1c4c
+#define USB3_DP_PCS_INSIG_SW_CTRL3				0x1c50
+#define USB3_DP_PCS_INSIG_SW_CTRL4				0x1c54
+#define USB3_DP_PCS_INSIG_SW_CTRL5				0x1c58
+#define USB3_DP_PCS_INSIG_SW_CTRL6				0x1c5c
+#define USB3_DP_PCS_INSIG_SW_CTRL7				0x1c60
+#define USB3_DP_PCS_INSIG_SW_CTRL8				0x1c64
+#define USB3_DP_PCS_INSIG_MX_CTRL1				0x1c68
+#define USB3_DP_PCS_INSIG_MX_CTRL2				0x1c6c
+#define USB3_DP_PCS_INSIG_MX_CTRL3				0x1c70
+#define USB3_DP_PCS_INSIG_MX_CTRL4				0x1c74
+#define USB3_DP_PCS_INSIG_MX_CTRL5				0x1c78
+#define USB3_DP_PCS_INSIG_MX_CTRL7				0x1c7c
+#define USB3_DP_PCS_INSIG_MX_CTRL8				0x1c80
+#define USB3_DP_PCS_OUTSIG_SW_CTRL1				0x1c84
+#define USB3_DP_PCS_OUTSIG_MX_CTRL1				0x1c88
+#define USB3_DP_PCS_CLAMP_ENABLE				0x1c8c
+#define USB3_DP_PCS_POWER_STATE_CONFIG1				0x1c90
+#define USB3_DP_PCS_POWER_STATE_CONFIG2				0x1c94
+#define USB3_DP_PCS_FLL_CNTRL1					0x1c98
+#define USB3_DP_PCS_FLL_CNTRL2					0x1c9c
+#define USB3_DP_PCS_FLL_CNT_VAL_L				0x1ca0
+#define USB3_DP_PCS_FLL_CNT_VAL_H_TOL				0x1ca4
+#define USB3_DP_PCS_FLL_MAN_CODE				0x1ca8
+#define USB3_DP_PCS_TEST_CONTROL1				0x1cac
+#define USB3_DP_PCS_TEST_CONTROL2				0x1cb0
+#define USB3_DP_PCS_TEST_CONTROL3				0x1cb4
+#define USB3_DP_PCS_TEST_CONTROL4				0x1cb8
+#define USB3_DP_PCS_TEST_CONTROL5				0x1cbc
+#define USB3_DP_PCS_TEST_CONTROL6				0x1cc0
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG1				0x1cc4
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG2				0x1cc8
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG3				0x1ccc
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG4				0x1cd0
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG5				0x1cd4
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG6				0x1cd8
+#define USB3_DP_PCS_REFGEN_REQ_CONFIG1				0x1cdc
+#define USB3_DP_PCS_REFGEN_REQ_CONFIG2				0x1ce0
+#define USB3_DP_PCS_REFGEN_REQ_CONFIG3				0x1ce4
+#define USB3_DP_PCS_BIST_CTRL					0x1ce8
+#define USB3_DP_PCS_PRBS_POLY0					0x1cec
+#define USB3_DP_PCS_PRBS_POLY1					0x1cf0
+#define USB3_DP_PCS_FIXED_PAT0					0x1cf4
+#define USB3_DP_PCS_FIXED_PAT1					0x1cf8
+#define USB3_DP_PCS_FIXED_PAT2					0x1cfc
+#define USB3_DP_PCS_FIXED_PAT3					0x1d00
+#define USB3_DP_PCS_FIXED_PAT4					0x1d04
+#define USB3_DP_PCS_FIXED_PAT5					0x1d08
+#define USB3_DP_PCS_FIXED_PAT6					0x1d0c
+#define USB3_DP_PCS_FIXED_PAT7					0x1d10
+#define USB3_DP_PCS_FIXED_PAT8					0x1d14
+#define USB3_DP_PCS_FIXED_PAT9					0x1d18
+#define USB3_DP_PCS_FIXED_PAT10					0x1d1c
+#define USB3_DP_PCS_FIXED_PAT11					0x1d20
+#define USB3_DP_PCS_FIXED_PAT12					0x1d24
+#define USB3_DP_PCS_FIXED_PAT13					0x1d28
+#define USB3_DP_PCS_FIXED_PAT14					0x1d2c
+#define USB3_DP_PCS_FIXED_PAT15					0x1d30
+#define USB3_DP_PCS_TXMGN_CONFIG				0x1d34
+#define USB3_DP_PCS_G12S1_TXMGN_V0				0x1d38
+#define USB3_DP_PCS_G12S1_TXMGN_V1				0x1d3c
+#define USB3_DP_PCS_G12S1_TXMGN_V2				0x1d40
+#define USB3_DP_PCS_G12S1_TXMGN_V3				0x1d44
+#define USB3_DP_PCS_G12S1_TXMGN_V4				0x1d48
+#define USB3_DP_PCS_G12S1_TXMGN_V0_RS				0x1d4c
+#define USB3_DP_PCS_G12S1_TXMGN_V1_RS				0x1d50
+#define USB3_DP_PCS_G12S1_TXMGN_V2_RS				0x1d54
+#define USB3_DP_PCS_G12S1_TXMGN_V3_RS				0x1d58
+#define USB3_DP_PCS_G12S1_TXMGN_V4_RS				0x1d5c
+#define USB3_DP_PCS_G3S2_TXMGN_MAIN				0x1d60
+#define USB3_DP_PCS_G3S2_TXMGN_MAIN_RS				0x1d64
+#define USB3_DP_PCS_G12S1_TXDEEMPH_M6DB				0x1d68
+#define USB3_DP_PCS_G12S1_TXDEEMPH_M3P5DB			0x1d6c
+#define USB3_DP_PCS_G3S2_PRE_GAIN				0x1d70
+#define USB3_DP_PCS_G3S2_POST_GAIN				0x1d74
+#define USB3_DP_PCS_G3S2_PRE_POST_OFFSET			0x1d78
+#define USB3_DP_PCS_G3S2_PRE_GAIN_RS				0x1d7c
+#define USB3_DP_PCS_G3S2_POST_GAIN_RS				0x1d80
+#define USB3_DP_PCS_G3S2_PRE_POST_OFFSET_RS			0x1d84
+#define USB3_DP_PCS_RX_SIGDET_LVL				0x1d88
+#define USB3_DP_PCS_RX_SIGDET_DTCT_CNTRL			0x1d8c
+#define USB3_DP_PCS_RCVR_DTCT_DLY_P1U2_L			0x1d90
+#define USB3_DP_PCS_RCVR_DTCT_DLY_P1U2_H			0x1d94
+#define USB3_DP_PCS_RATE_SLEW_CNTRL1				0x1d98
+#define USB3_DP_PCS_RATE_SLEW_CNTRL2				0x1d9c
+#define USB3_DP_PCS_PWRUP_RESET_DLY_TIME_AUXCLK			0x1da0
+#define USB3_DP_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L		0x1da4
+#define USB3_DP_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H		0x1da8
+#define USB3_DP_PCS_TSYNC_RSYNC_TIME				0x1dac
+#define USB3_DP_PCS_CDR_RESET_TIME				0x1db0
+#define USB3_DP_PCS_TSYNC_DLY_TIME				0x1db4
+#define USB3_DP_PCS_ELECIDLE_DLY_SEL				0x1db8
+#define USB3_DP_PCS_CMN_ACK_OUT_SEL				0x1dbc
+#define USB3_DP_PCS_ALIGN_DETECT_CONFIG1			0x1dc0
+#define USB3_DP_PCS_ALIGN_DETECT_CONFIG2			0x1dc4
+#define USB3_DP_PCS_ALIGN_DETECT_CONFIG3			0x1dc8
+#define USB3_DP_PCS_ALIGN_DETECT_CONFIG4			0x1dcc
+#define USB3_DP_PCS_PCS_TX_RX_CONFIG				0x1dd0
+#define USB3_DP_PCS_RX_IDLE_DTCT_CNTRL				0x1dd4
+#define USB3_DP_PCS_RX_DCC_CAL_CONFIG				0x1dd8
+#define USB3_DP_PCS_EQ_CONFIG1					0x1ddc
+#define USB3_DP_PCS_EQ_CONFIG2					0x1de0
+#define USB3_DP_PCS_EQ_CONFIG3					0x1de4
+#define USB3_DP_PCS_EQ_CONFIG4					0x1de8
+#define USB3_DP_PCS_EQ_CONFIG5					0x1dec
+#define USB3_DP_PCS_USB3_POWER_STATE_CONFIG1			0x1f00
+#define USB3_DP_PCS_USB3_AUTONOMOUS_MODE_STATUS			0x1f04
+#define USB3_DP_PCS_USB3_AUTONOMOUS_MODE_CTRL			0x1f08
+#define USB3_DP_PCS_USB3_AUTONOMOUS_MODE_CTRL2			0x1f0c
+#define USB3_DP_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS		0x1f10
+#define USB3_DP_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR			0x1f14
+#define USB3_DP_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL		0x1f18
+#define USB3_DP_PCS_USB3_LFPS_TX_ECSTART			0x1f1c
+#define USB3_DP_PCS_USB3_LFPS_PER_TIMER_VAL			0x1f20
+#define USB3_DP_PCS_USB3_LFPS_TX_END_CNT_U3_START		0x1f24
+#define USB3_DP_PCS_USB3_RXEQTRAINING_LOCK_TIME			0x1f28
+#define USB3_DP_PCS_USB3_RXEQTRAINING_WAIT_TIME			0x1f2c
+#define USB3_DP_PCS_USB3_RXEQTRAINING_CTLE_TIME			0x1f30
+#define USB3_DP_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2		0x1f34
+#define USB3_DP_PCS_USB3_RXEQTRAINING_DFE_TIME_S2		0x1f38
+#define USB3_DP_PCS_USB3_RCVR_DTCT_DLY_U3_L			0x1f3c
+#define USB3_DP_PCS_USB3_RCVR_DTCT_DLY_U3_H			0x1f40
+#define USB3_DP_PCS_USB3_ARCVR_DTCT_EN_PERIOD			0x1f44
+#define USB3_DP_PCS_USB3_ARCVR_DTCT_CM_DLY			0x1f48
+#define USB3_DP_PCS_USB3_TXONESZEROS_RUN_LENGTH			0x1f4c
+#define USB3_DP_PCS_USB3_ALFPS_DEGLITCH_VAL			0x1f50
+#define USB3_DP_PCS_USB3_SIGDET_STARTUP_TIMER_VAL		0x1f54
+#define USB3_DP_PCS_USB3_TEST_CONTROL				0x1f58
+#define USB3_DP_PCS_USB3_RXTERMINATION_DLY_SEL			0x1f5c
+
+#endif /* _DT_BINDINGS_PHY_QCOM_LITO_QMP_USB_H */
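
The offsets above are plain MMIO register offsets that a QMP PHY driver consumes as base + offset accesses against an ioremap()'d region. A minimal sketch of that pattern, assuming a mapped base; the register values and sequencing here are illustrative only, not taken from the actual driver:

#include <linux/io.h>
#include <dt-bindings/phy/qcom,lito-qmp-usb3.h>

/* Hypothetical init fragment: values and ordering are illustrative. */
static void qmp_combo_phy_start(void __iomem *base)
{
	/* Release the combo block from software reset... */
	writel(0x00, base + USB3_DP_COM_SW_RESET);
	/* ...then power up and start the USB3 PCS. */
	writel(0x01, base + USB3_DP_PCS_POWER_DOWN_CONTROL);
	writel(0x03, base + USB3_DP_PCS_START_CONTROL);
}
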
diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h b/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h
index 65c9644..086c4de 100644
--- a/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h
+++ b/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h
@@ -12,6 +12,7 @@
 #define RPMH_REGULATOR_LEVEL_LOW_SVS	64
 #define RPMH_REGULATOR_LEVEL_SVS	128
 #define RPMH_REGULATOR_LEVEL_SVS_L1	192
+#define RPMH_REGULATOR_LEVEL_SVS_L2	224
 #define RPMH_REGULATOR_LEVEL_NOM	256
 #define RPMH_REGULATOR_LEVEL_NOM_L1	320
 #define RPMH_REGULATOR_LEVEL_NOM_L2	336
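
The new RPMH_REGULATOR_LEVEL_SVS_L2 (224) slots into the ascending corner ladder between SVS_L1 (192) and NOM (256). A hedged sketch of how a consumer might round a requested level up to the nearest defined corner; the table and pick_corner() are illustrative, not part of the patch:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>

static const int rpmh_corners[] = {
	RPMH_REGULATOR_LEVEL_LOW_SVS,
	RPMH_REGULATOR_LEVEL_SVS,
	RPMH_REGULATOR_LEVEL_SVS_L1,
	RPMH_REGULATOR_LEVEL_SVS_L2,	/* new corner: 224 */
	RPMH_REGULATOR_LEVEL_NOM,
	RPMH_REGULATOR_LEVEL_NOM_L1,
	RPMH_REGULATOR_LEVEL_NOM_L2,
};

/* Round a requested level up to the nearest defined corner. */
static int pick_corner(int want)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rpmh_corners); i++)
		if (rpmh_corners[i] >= want)
			return rpmh_corners[i];
	return -EINVAL;	/* above the highest corner in the table */
}
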
diff --git a/include/dt-bindings/sound/qcom,bolero-clk-rsc.h b/include/dt-bindings/sound/qcom,bolero-clk-rsc.h
new file mode 100644
index 0000000..038c066
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,bolero-clk-rsc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __BOLERO_CODEC_CLK_RSC_H
+#define __BOLERO_CODEC_CLK_RSC_H
+
+/* Bolero clock types */
+#define TX_CORE_CLK	0
+#define RX_CORE_CLK	1
+#define WSA_CORE_CLK	2
+#define VA_CORE_CLK	3
+#define TX_NPL_CLK	4
+#define RX_NPL_CLK	5
+#define WSA_NPL_CLK	6
+#define VA_NPL_CLK	7
+#define MAX_CLK	8
+
+#endif /* __BOLERO_CODEC_CLK_RSC_H */
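
These indices are stable identifiers shared between device tree and the Bolero codec clock resource manager, so they double as array subscripts on the driver side. A hypothetical lookup table keyed by them; the clock names are illustrative, not taken from the driver:

#include <dt-bindings/sound/qcom,bolero-clk-rsc.h>

static const char * const bolero_clk_names[MAX_CLK] = {
	[TX_CORE_CLK]	= "tx_core_clk",
	[RX_CORE_CLK]	= "rx_core_clk",
	[WSA_CORE_CLK]	= "wsa_core_clk",
	[VA_CORE_CLK]	= "va_core_clk",
	[TX_NPL_CLK]	= "tx_npl_clk",
	[RX_NPL_CLK]	= "rx_npl_clk",
	[WSA_NPL_CLK]	= "wsa_npl_clk",
	[VA_NPL_CLK]	= "va_npl_clk",
};
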
diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h
index b5e6b00..edd2a15 100644
--- a/include/dt-bindings/thermal/thermal.h
+++ b/include/dt-bindings/thermal/thermal.h
@@ -12,6 +12,7 @@
 
 /* On cooling devices upper and lower limits */
 #define THERMAL_NO_LIMIT		(~0)
+#define THERMAL_MAX_LIMIT		(THERMAL_NO_LIMIT - 1)
 
 #endif
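
THERMAL_NO_LIMIT is the all-ones sentinel, so THERMAL_MAX_LIMIT, defined one below it, is the largest concrete cooling state a binding can request while still being distinguishable from "unbounded". A small illustrative helper, not from the patch:

#include <linux/kernel.h>
#include <dt-bindings/thermal/thermal.h>

/* Treat the sentinel as "unbounded"; cap everything else. */
static unsigned long cap_cooling_state(unsigned long req)
{
	if (req == THERMAL_NO_LIMIT)
		return req;
	return min_t(unsigned long, req, THERMAL_MAX_LIMIT);
}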
 
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index e098cbe..12babe9 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -31,7 +31,7 @@
 struct user_key_payload {
 	struct rcu_head	rcu;		/* RCU destructor */
 	unsigned short	datalen;	/* length of this data */
-	char		data[0];	/* actual data */
+	char		data[0] __aligned(__alignof__(u64)); /* actual data */
 };
 
 extern struct key_type key_type_user;
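
The added __aligned(__alignof__(u64)) guarantees the flexible data[] array starts on an 8-byte boundary; without it, data[] can land directly after the 2-byte datalen, so casting it to a structure with 64-bit members would fault on strict-alignment architectures. A hedged sketch of why a consumer cares; the overlay structure is hypothetical:

#include <linux/types.h>
#include <keys/user-type.h>

/* Hypothetical overlay: safe only because data[] is now u64-aligned. */
struct example_overlay {
	u64 counter;
	u8 secret[32];
};

static u64 read_counter(const struct user_key_payload *payload)
{
	return ((const struct example_overlay *)payload->data)->counter;
}
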
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 9a6bc09..07e02d6 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -190,6 +190,7 @@
 	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
 	struct rb_root cgwb_congested_tree; /* their congested states */
 	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
+	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
 #else
 	struct bdi_writeback_congested *wb_congested;
 #endif
@@ -258,6 +259,14 @@
  */
 static inline void wb_put(struct bdi_writeback *wb)
 {
+	if (WARN_ON_ONCE(!wb->bdi)) {
+		/*
+		 * A driver bug might cause a file to be removed before bdi was
+		 * initialized.
+		 */
+		return;
+	}
+
 	if (wb != &wb->bdi->wb)
 		percpu_ref_put(&wb->refcnt);
 }
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 7cca5f8..f3c4351 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -6,6 +6,7 @@
 
 struct bcma_soc {
 	struct bcma_bus bus;
+	struct device *dev;
 };
 
 int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5137174..58f02ec 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -73,6 +73,9 @@
 
 #define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
 #define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
+#define bio_dun(bio)		((bio)->bi_iter.bi_dun)
+#define bio_duns(bio)		(bio_sectors(bio) >> 3) /* 4KB unit */
+#define bio_end_dun(bio)	(bio_dun(bio) + bio_duns(bio))
 
 /*
  * Return the data direction, READ or WRITE.
@@ -170,6 +173,11 @@
 {
 	iter->bi_sector += bytes >> 9;
 
+#ifdef CONFIG_PFK
+	if (iter->bi_dun)
+		iter->bi_dun += bytes >> 12;
+#endif
+
 	if (bio_no_advance_iter(bio)) {
 		iter->bi_size -= bytes;
 		iter->bi_done += bytes;
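
In the hunk above the data unit number advances by bytes >> 12, i.e. one DUN per 4KB, which lines up with bio_duns() counting 512-byte sectors >> 3. A sanity sketch of that arithmetic, not part of the patch:

#include <linux/types.h>

/* One DUN names one 4KB crypto data unit. */
static u64 dun_after_advance(u64 dun, unsigned int bytes)
{
	return dun + (bytes >> 12);
}
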
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d178dce..d300296 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -187,6 +187,13 @@
 		struct bio_integrity_payload *bi_integrity; /* data integrity */
 #endif
 	};
+#ifdef CONFIG_PFK
+	/* Encryption key to use (NULL if none) */
+	const struct blk_encryption_key	*bi_crypt_key;
+#endif
+#ifdef CONFIG_DM_DEFAULT_KEY
+	int bi_crypt_skip;
+#endif
 
 	unsigned short		bi_vcnt;	/* how many bio_vec's */
 
@@ -201,7 +208,9 @@
 	struct bio_vec		*bi_io_vec;	/* the actual vec list */
 
 	struct bio_set		*bi_pool;
-
+#ifdef CONFIG_PFK
+	struct inode		*bi_dio_inode;
+#endif
 	/*
 	 * We can inline a number of vecs at the end of the bio, to avoid
 	 * double allocations for a small number of bio_vecs. This member
@@ -324,6 +333,11 @@
 
 	__REQ_SORTED = __REQ_RAHEAD, /* elevator knows about this request */
 	__REQ_URGENT,		/* urgent request */
+	/* Android-specific flags */
+	__REQ_NOENCRYPT,	/*
+				 * OK to skip encryption (already
+				 * encrypted at the fs level)
+				 */
 	/* command specific flags for REQ_OP_WRITE_ZEROES: */
 	__REQ_NOUNMAP,		/* do not free blocks when zeroing */
 
@@ -348,6 +362,7 @@
 #define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
 #define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
 #define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
+#define REQ_NOENCRYPT		(1ULL << __REQ_NOENCRYPT)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
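
REQ_NOENCRYPT lets a filesystem that already encrypted its pages tell an inline-crypto-capable block stack to leave the bio alone. A hedged usage sketch, assuming bi_opf, the existing op/flags word of struct bio:

#include <linux/blk_types.h>

static void bio_mark_fs_encrypted(struct bio *bio)
{
	/* Data was encrypted at the fs level; skip inline encryption. */
	bio->bi_opf |= REQ_NOENCRYPT;
}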
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6980014..1b4dbe94 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -161,6 +161,7 @@
 	unsigned int __data_len;	/* total data len */
 	int tag;
 	sector_t __sector;		/* sector cursor */
+	u64 __dun;			/* DUN (data unit number) for UFS */
 
 	struct bio *bio;
 	struct bio *biotail;
@@ -699,6 +700,7 @@
 #define QUEUE_FLAG_SCSI_PASSTHROUGH 27	/* queue supports SCSI commands */
 #define QUEUE_FLAG_QUIESCED    28	/* queue has been quiesced */
 #define QUEUE_FLAG_PREEMPT_ONLY	29	/* only process REQ_PREEMPT requests */
+#define QUEUE_FLAG_INLINECRYPT 30	/* inline encryption support */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
@@ -731,6 +733,8 @@
 #define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 #define blk_queue_scsi_passthrough(q)	\
 	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
+#define blk_queue_inlinecrypt(q) \
+	test_bit(QUEUE_FLAG_INLINECRYPT, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -878,6 +882,15 @@
 	return q->nr_requests;
 }
 
+static inline void queue_flag_set_unlocked(unsigned int flag,
+					   struct request_queue *q)
+{
+	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
+	    kref_read(&q->kobj.kref))
+		lockdep_assert_held(q->queue_lock);
+	__set_bit(flag, &q->queue_flags);
+}
+
 /*
  * q->prep_rq_fn return values
  */
@@ -1043,6 +1056,11 @@
 	return rq->__sector;
 }
 
+static inline sector_t blk_rq_dun(const struct request *rq)
+{
+	return rq->__dun;
+}
+
 static inline unsigned int blk_rq_bytes(const struct request *rq)
 {
 	return rq->__data_len;
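
queue_flag_set_unlocked() only enforces the queue_lock lockdep assertion once the queue is fully initialized and still referenced, so early probe code may set flags lock-free. A hypothetical host-driver fragment using the new inline-crypt flag:

#include <linux/blkdev.h>

static void host_advertise_inline_crypt(struct request_queue *q)
{
	/* Before QUEUE_FLAG_INIT_DONE, no queue_lock is required. */
	queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
	WARN_ON_ONCE(!blk_queue_inlinecrypt(q));
}
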
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1fd6fa8..9139372 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -134,6 +134,7 @@
 	struct bpf_func_state *frame[MAX_CALL_FRAMES];
 	struct bpf_verifier_state *parent;
 	u32 curframe;
+	bool speculative;
 };
 
 /* linked list of verifier states used to prune search */
@@ -142,15 +143,25 @@
 	struct bpf_verifier_state_list *next;
 };
 
+/* Possible states for alu_state member. */
+#define BPF_ALU_SANITIZE_SRC		1U
+#define BPF_ALU_SANITIZE_DST		2U
+#define BPF_ALU_NEG_VALUE		(1U << 2)
+#define BPF_ALU_NON_POINTER		(1U << 3)
+#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
+					 BPF_ALU_SANITIZE_DST)
+
 struct bpf_insn_aux_data {
 	union {
 		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
 		unsigned long map_state;	/* pointer/poison value for maps */
 		s32 call_imm;			/* saved imm field of call insn */
+		u32 alu_limit;			/* limit for add/sub register with pointer */
 	};
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
 	int sanitize_stack_off; /* stack slot to be cleared */
 	bool seen; /* this insn was processed by the verifier */
+	u8 alu_state; /* used in combination with alu_limit */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -186,6 +197,8 @@
  * one verifier_env per bpf_check() call
  */
 struct bpf_verifier_env {
+	u32 insn_idx;
+	u32 prev_insn_idx;
 	struct bpf_prog *prog;		/* eBPF program being verified */
 	const struct bpf_verifier_ops *ops;
 	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
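
alu_state is a small bit-field: the low two bits (BPF_ALU_SANITIZE_SRC/DST) select which operand gets masked against alu_limit, bit 2 records a negative constant, and bit 3 marks an ALU op proven to involve no pointer. An illustrative decode, not taken from the verifier:

#include <linux/types.h>

static bool insn_needs_masking(u8 alu_state)
{
	if (alu_state & BPF_ALU_NON_POINTER)
		return false;	/* proven pointer-free, nothing to mask */
	return (alu_state & BPF_ALU_SANITIZE) != 0;
}
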
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index fe7a22d..543bb5f 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -44,6 +44,7 @@
 
 	unsigned int            bi_bvec_done;	/* number of bytes completed in
 						   current bvec */
+	u64			bi_dun;		/* DUN setting for bio */
 };
 
 /*
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 22254c1..6f9ea86 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -20,6 +20,7 @@
 #include <linux/u64_stats_sync.h>
 #include <linux/workqueue.h>
 #include <linux/bpf-cgroup.h>
+#include <linux/psi_types.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -31,6 +32,7 @@
 struct kernfs_ops;
 struct kernfs_open_file;
 struct seq_file;
+struct poll_table_struct;
 
 #define MAX_CGROUP_TYPE_NAMELEN 32
 #define MAX_CGROUP_ROOT_NAMELEN 64
@@ -436,6 +438,9 @@
 	/* used to schedule release agent */
 	struct work_struct release_agent_work;
 
+	/* used to track pressure stalls */
+	struct psi_group psi;
+
 	/* used to store eBPF programs */
 	struct cgroup_bpf bpf;
 
@@ -569,6 +574,9 @@
 	ssize_t (*write)(struct kernfs_open_file *of,
 			 char *buf, size_t nbytes, loff_t off);
 
+	__poll_t (*poll)(struct kernfs_open_file *of,
+			 struct poll_table_struct *pt);
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lock_class_key	lockdep_key;
 #endif
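
The new ->poll() hook lets a cgroup control file wake pollers on events, as
the PSI pressure files do. A minimal sketch, assuming a hypothetical event
source:

	#include <linux/cgroup.h>
	#include <linux/poll.h>

	static DECLARE_WAIT_QUEUE_HEAD(example_event_wq);
	static bool example_event_pending;	/* set elsewhere, then wake_up() */

	static __poll_t example_cft_poll(struct kernfs_open_file *of,
					 struct poll_table_struct *pt)
	{
		poll_wait(of->file, &example_event_wq, pt);
		return example_event_pending ? EPOLLPRI : 0;
	}

	static struct cftype example_files[] = {
		{
			.name = "example.events",
			.poll = example_cft_poll,
		},
		{ }	/* terminator */
	};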
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 32c5535..6399b32 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -657,6 +657,11 @@
 	pr_cont_kernfs_path(cgrp->kn);
 }
 
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+	return &cgrp->psi;
+}
+
 static inline void cgroup_init_kthreadd(void)
 {
 	/*
@@ -710,6 +715,16 @@
 	return NULL;
 }
 
+static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
+{
+	return NULL;
+}
+
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+	return NULL;
+}
+
 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 					       struct cgroup *ancestor)
 {
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 34d2179..2764fab 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -46,7 +46,7 @@
 					 * hand-off enable_count & prepare_count
 					 * to first consumer that enables clk
 					 */
-#define CLK_IS_MEASURE          BIT(14) /* measure clock */
+#define CLK_IS_MEASURE          BIT(15) /* measure clock */
 
 struct clk;
 struct clk_hw;
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 426c9e9..0294807 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -3,9 +3,8 @@
 #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
 #endif
 
-/* Some compiler specific definitions are overwritten here
- * for Clang compiler
- */
+/* Compiler specific definitions for Clang compiler */
+
 #define uninitialized_var(x) x = *(&(x))
 
 /* same as gcc, this was present in clang-2.6 so we can assume it works
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 4d36b27..a8ff0ca 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -58,10 +58,6 @@
 	(typeof(ptr)) (__ptr + (off));					\
 })
 
-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var)						\
-	__asm__ ("" : "=r" (var) : "0" (var))
-
 /*
  * A trick to suppress uninitialized variable warning without generating any
  * code
@@ -75,7 +71,7 @@
 #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 #endif
 
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
 #define __noretpoline __attribute__((indirect_branch("keep")))
 #endif
 
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 4c7f9be..f1fc60f 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -5,9 +5,7 @@
 
 #ifdef __ECC
 
-/* Some compiler specific definitions are overwritten here
- * for Intel ECC compiler
- */
+/* Compiler specific definitions for Intel ECC compiler */
 
 #include <asm/intrinsics.h>
 
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8e8e85f..81c3744 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -158,7 +158,9 @@
 #endif
 
 #ifndef OPTIMIZER_HIDE_VAR
-#define OPTIMIZER_HIDE_VAR(var) barrier()
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var)						\
+	__asm__ ("" : "=r" (var) : "0" (var))
 #endif
 
 /* Not-quite-unique ID. */
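
With the fallback now using the empty-asm form, OPTIMIZER_HIDE_VAR() keeps a
value opaque to the optimizer on every supported compiler. A sketch of the
intended use:

	static u64 example_sum(const u8 *buf, size_t len)
	{
		u64 sum = 0;
		size_t i;

		for (i = 0; i < len; i++)
			sum += buf[i];

		/* pretend @sum could be anything; blocks constant folding */
		OPTIMIZER_HIDE_VAR(sum);
		return sum;
	}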
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f39488c3..d508950 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -90,6 +90,20 @@
 };
 
 /**
+ * struct coresight_reg_clk - regulators and clocks needed by coresight
+ * @nr_reg:	number of regulators
+ * @nr_clk:	number of clocks
+ * @reg:	regulator list
+ * @clk:	clock list
+ */
+struct coresight_reg_clk {
+	int nr_reg;
+	int nr_clk;
+	struct regulator **reg;
+	struct clk **clk;
+};
+
+/**
  * struct coresight_platform_data - data harvested from the DT specification
  * @cpu:	the CPU a source belongs to. Only applicable for ETM/PTMs.
  * @name:	name of the component as shown under sysfs.
@@ -100,6 +114,8 @@
  * @child_ports:child component port number the current component is
 		connected  to.
  * @nr_outport:	number of output ports for this component.
+ * @clk:	The clock this component is associated to.
+ * @reg_clk:	as defined by @coresight_reg_clk.
  */
 struct coresight_platform_data {
 	int cpu;
@@ -110,6 +126,8 @@
 	const char **child_names;
 	int *child_ports;
 	int nr_outport;
+	struct clk *clk;
+	struct coresight_reg_clk *reg_clk;
 };
 
 /**
@@ -165,6 +183,8 @@
  * @activated:	'true' only if a _sink_ has been activated.  A sink can be
 		activated but not yet enabled.  Enabling for a _sink_
 		happens when a source has been selected for that it.
+ * @abort:     captures sink trace on abort.
+ * @reg_clk:	as defined by @coresight_reg_clk.
  */
 struct coresight_device {
 	struct coresight_connection *conns;
@@ -179,6 +199,7 @@
 	bool orphan;
 	bool enable;	/* true only if configured as part of a path */
 	bool activated;	/* true only if a sink is part of a path */
+	struct coresight_reg_clk *reg_clk;
 };
 
 #define to_coresight_device(d) container_of(d, struct coresight_device, dev)
@@ -275,6 +296,9 @@
 extern void coresight_disable(struct coresight_device *csdev);
 extern int coresight_timeout(void __iomem *addr, u32 offset,
 			     int position, int value);
+extern void coresight_abort(void);
+extern void coresight_disable_reg_clk(struct coresight_device *csdev);
+extern int coresight_enable_reg_clk(struct coresight_device *csdev);
 #else
 static inline struct coresight_device *
 coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -284,6 +308,9 @@
 static inline void coresight_disable(struct coresight_device *csdev) {}
 static inline int coresight_timeout(void __iomem *addr, u32 offset,
 				     int position, int value) { return 1; }
+static inline void coresight_abort(void) {}
+static inline void coresight_disable_reg_clk(struct coresight_device *csdev) {}
+static inline int coresight_enable_reg_clk(struct coresight_device *csdev) {}
 #endif
 
 #ifdef CONFIG_OF
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 1bf10aa..f172204 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -180,12 +180,10 @@
 #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
 extern enum cpuhp_smt_control cpu_smt_control;
 extern void cpu_smt_disable(bool force);
-extern void cpu_smt_check_topology_early(void);
 extern void cpu_smt_check_topology(void);
 #else
 # define cpu_smt_control		(CPU_SMT_ENABLED)
 static inline void cpu_smt_disable(bool force) { }
-static inline void cpu_smt_check_topology_early(void) { }
 static inline void cpu_smt_check_topology(void) { }
 #endif
 
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index 7775dd7..6a35863 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -44,6 +44,14 @@
 struct thermal_cooling_device *
 cpufreq_cooling_register(struct cpufreq_policy *policy);
 
+/**
+ * cpufreq_platform_cooling_register - create cpufreq cooling device with
+ * additional platform specific mitigation function.
+ *
+ * @policy: cpufreq policy
+ * @ops: the platform mitigation functions that will be called instead of
+ * cpufreq, if provided.
+ */
 struct thermal_cooling_device *
 cpufreq_platform_cooling_register(struct cpufreq_policy *policy,
 					struct cpu_cooling_ops *ops);
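
A registration sketch for the platform variant; the callback layout of
struct cpu_cooling_ops is platform specific, so the ops body below is only a
placeholder:

	static struct cpu_cooling_ops example_plat_ops = {
		/* platform mitigation callbacks go here */
	};

	static struct thermal_cooling_device *example_cdev;

	static int example_cooling_init(struct cpufreq_policy *policy)
	{
		example_cdev = cpufreq_platform_cooling_register(policy,
							&example_plat_ops);
		if (IS_ERR(example_cdev))
			return PTR_ERR(example_cdev);

		return 0;
	}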
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index eb7b26f..dae9863 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -254,20 +254,12 @@
 static struct freq_attr _name =			\
 __ATTR(_name, 0200, NULL, store_##_name)
 
-struct global_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct kobject *kobj,
-			struct attribute *attr, char *buf);
-	ssize_t (*store)(struct kobject *a, struct attribute *b,
-			 const char *c, size_t count);
-};
-
 #define define_one_global_ro(_name)		\
-static struct global_attr _name =		\
+static struct kobj_attribute _name =		\
 __ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_one_global_rw(_name)		\
-static struct global_attr _name =		\
+static struct kobj_attribute _name =		\
 __ATTR(_name, 0644, show_##_name, store_##_name)
 
 
@@ -950,6 +942,14 @@
 }
 #endif
 
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+			struct cpufreq_governor *old_gov);
+#else
+static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+			struct cpufreq_governor *old_gov) { }
+#endif
+
 extern void arch_freq_prepare_all(void);
 extern unsigned int arch_freq_get_on_cpu(int cpu);
 
diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h
index 757bf0c..0eb6dc9 100644
--- a/include/linux/cpufreq_times.h
+++ b/include/linux/cpufreq_times.h
@@ -27,7 +27,8 @@
 			    struct pid *pid, struct task_struct *p);
 void cpufreq_acct_update_power(struct task_struct *p, u64 cputime);
 void cpufreq_times_create_policy(struct cpufreq_policy *policy);
-void cpufreq_times_record_transition(struct cpufreq_freqs *freq);
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+                                     unsigned int new_freq);
 void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
 int single_uid_time_in_state_open(struct inode *inode, struct file *file);
 #else
@@ -38,7 +39,7 @@
 					     u64 cputime) {}
 static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
 static inline void cpufreq_times_record_transition(
-	struct cpufreq_freqs *freq) {}
+	struct cpufreq_policy *policy, unsigned int new_freq) {}
 static inline void cpufreq_task_times_remove_uids(uid_t uid_start,
 						  uid_t uid_end) {}
 #endif /* CONFIG_CPU_FREQ_TIMES */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 30491e4..0dd5ffb 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -138,9 +138,9 @@
 	/* Must be the last timer callback */
 	CPUHP_AP_DUMMY_TIMER_STARTING,
 	CPUHP_AP_ARM_XEN_STARTING,
-	CPUHP_AP_ARM_CORESIGHT_STARTING,
 	CPUHP_AP_ARM_SAVE_RESTORE_CORESIGHT4_STARTING,
 	CPUHP_AP_ARM_MM_CORESIGHT4_STARTING,
+	CPUHP_AP_ARM_CORESIGHT_STARTING,
 	CPUHP_AP_ARM64_ISNDEP_STARTING,
 	CPUHP_AP_SMPCFD_DYING,
 	CPUHP_AP_X86_TBOOT_DYING,
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 31c865d..577d1b2 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -57,7 +57,12 @@
 
 	u64 freepages_start;
 	u64 freepages_delay;	/* wait for memory reclaim */
+
+	u64 thrashing_start;
+	u64 thrashing_delay;	/* wait for thrashing page */
+
 	u32 freepages_count;	/* total count of memory reclaim */
+	u32 thrashing_count;	/* total count of thrash waits */
 };
 #endif
 
@@ -76,6 +81,8 @@
 extern __u64 __delayacct_blkio_ticks(struct task_struct *);
 extern void __delayacct_freepages_start(void);
 extern void __delayacct_freepages_end(void);
+extern void __delayacct_thrashing_start(void);
+extern void __delayacct_thrashing_end(void);
 
 static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
 {
@@ -156,6 +163,18 @@
 		__delayacct_freepages_end();
 }
 
+static inline void delayacct_thrashing_start(void)
+{
+	if (current->delays)
+		__delayacct_thrashing_start();
+}
+
+static inline void delayacct_thrashing_end(void)
+{
+	if (current->delays)
+		__delayacct_thrashing_end();
+}
+
 #else
 static inline void delayacct_set_flag(int flag)
 {}
@@ -182,6 +201,10 @@
 {}
 static inline void delayacct_freepages_end(void)
 {}
+static inline void delayacct_thrashing_start(void)
+{}
+static inline void delayacct_thrashing_end(void)
+{}
 
 #endif /* CONFIG_TASK_DELAY_ACCT */
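
The thrashing pair is meant to bracket waits caused by thrashing pages,
mirroring the freepages helpers. An illustrative sketch (the caller is
hypothetical; in the kernel this bracketing sits in the page-wait path):

	#include <linux/delayacct.h>
	#include <linux/pagemap.h>

	static void example_wait_on_thrashing_page(struct page *page)
	{
		delayacct_thrashing_start();
		wait_on_page_locked(page);
		delayacct_thrashing_end();
	}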
 
diff --git a/include/linux/dma-buf-ref.h b/include/linux/dma-buf-ref.h
new file mode 100644
index 0000000..5bdf1f2
--- /dev/null
+++ b/include/linux/dma-buf-ref.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DMA_BUF_REF_H
+#define _DMA_BUF_REF_H
+
+struct dma_buf;
+struct seq_file;
+
+#ifdef CONFIG_DEBUG_DMA_BUF_REF
+void dma_buf_ref_init(struct dma_buf *b);
+void dma_buf_ref_destroy(struct dma_buf *b);
+void dma_buf_ref_mod(struct dma_buf *b, int nr);
+int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf);
+
+#else
+static inline void dma_buf_ref_init(struct dma_buf *b) {}
+static inline void dma_buf_ref_destroy(struct dma_buf *b) {}
+static inline void dma_buf_ref_mod(struct dma_buf *b, int nr) {}
+static inline int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf)
+{
+	return -ENOMEM;
+}
+#endif
+
+
+#endif /* _DMA_BUF_REF_H */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 38ebfdc..2ba99cc 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -31,6 +31,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/fs.h>
 #include <linux/dma-fence.h>
+#include <linux/dma-buf-ref.h>
 #include <linux/wait.h>
 
 struct device;
@@ -381,6 +382,7 @@
  * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
  * @exp_name: name of the exporter; useful for debugging.
  * @name: unique name for the buffer
+ * @ktime: time at which the buffer was created
  * @owner: pointer to exporter module; used for refcounting when exporter is a
  *         kernel module.
  * @list_node: node for dma_buf accounting and debugging.
@@ -409,6 +411,7 @@
 	void *vmap_ptr;
 	const char *exp_name;
 	char *name;
+	ktime_t ktime;
 	struct module *owner;
 	struct list_head list_node;
 	void *priv;
@@ -423,6 +426,8 @@
 
 		__poll_t active;
 	} cb_excl, cb_shared;
+
+	struct list_head refs;
 };
 
 /**
@@ -495,6 +500,7 @@
 static inline void get_dma_buf(struct dma_buf *dmabuf)
 {
 	get_file(dmabuf->file);
+	dma_buf_ref_mod(dmabuf, 1);
 }
 
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index dc2a6c8..9e15527 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -823,6 +823,10 @@
 void dma_release_declared_memory(struct device *dev);
 void *dma_mark_declared_memory_occupied(struct device *dev,
 					dma_addr_t device_addr, size_t size);
+dma_addr_t dma_get_device_base(struct device *dev,
+			       struct dma_coherent_mem *mem);
+unsigned long dma_get_size(struct dma_coherent_mem *mem);
+
 #else
 static inline int
 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
@@ -842,6 +846,17 @@
 {
 	return ERR_PTR(-EBUSY);
 }
+static inline dma_addr_t
+dma_get_device_base(struct device *dev, struct dma_coherent_mem *mem)
+{
+	return 0;
+}
+
+static inline unsigned long dma_get_size(struct dma_coherent_mem *mem)
+{
+	return 0;
+}
+
 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
 #ifdef CONFIG_HAS_DMA
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 55deab2..aa027f7 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -27,7 +27,6 @@
  * em_perf_domain - Performance domain
  * @table:		List of capacity states, in ascending order
  * @nr_cap_states:	Number of capacity states
- * @kobj:		Kobject used to expose the domain in sysfs
  * @cpus:		Cpumask covering the CPUs of the domain
  *
  * A "performance domain" represents a group of CPUs whose performance is
@@ -38,7 +37,6 @@
 struct em_perf_domain {
 	struct em_cap_state *table;
 	int nr_cap_states;
-	struct kobject kobj;
 	unsigned long cpus[0];
 };
 
diff --git a/include/linux/file.h b/include/linux/file.h
index 6b2fb03..97265cf 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -87,6 +87,7 @@
 extern void fd_install(unsigned int fd, struct file *file);
 
 extern void flush_delayed_fput(void);
+extern void flush_delayed_fput_wait(void);
 extern void __fput_sync(struct file *);
 
 #endif /* __LINUX_FILE_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6791a0a..1a39d57 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -53,14 +53,10 @@
 #define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
 #define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */
 
-/* Kernel hidden auxiliary/helper register for hardening step.
- * Only used by eBPF JITs. It's nothing more than a temporary
- * register that JITs use internally, only that here it's part
- * of eBPF instructions that have been rewritten for blinding
- * constants. See JIT pre-step in bpf_jit_blind_constants().
- */
+/* Kernel hidden auxiliary/helper register. */
 #define BPF_REG_AX		MAX_BPF_REG
-#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)
+#define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
+#define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG
 
 /* unused opcode to mark special call to bpf_tail_call() helper */
 #define BPF_TAIL_CALL	0xf0
@@ -665,24 +661,10 @@
 	return size;
 }
 
-static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
-					   u32 size_default)
-{
-	size_default = bpf_ctx_off_adjust_machine(size_default);
-	size_access  = bpf_ctx_off_adjust_machine(size_access);
-
-#ifdef __LITTLE_ENDIAN
-	return (off & (size_default - 1)) == 0;
-#else
-	return (off & (size_default - 1)) + size_access == size_default;
-#endif
-}
-
 static inline bool
 bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 {
-	return bpf_ctx_narrow_align_ok(off, size, size_default) &&
-	       size <= size_default && (size & (size - 1)) == 0;
+	return size <= size_default && (size & (size - 1)) == 0;
 }
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 421ef90..79eed97 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1878,6 +1878,7 @@
 	void *(*clone_mnt_data) (void *);
 	void (*copy_mnt_data) (void *, void *);
 	void (*umount_begin) (struct super_block *);
+	void (*umount_end)(struct super_block *sb, int flags);
 
 	int (*show_options)(struct seq_file *, struct dentry *);
 	int (*show_options2)(struct vfsmount *,struct seq_file *, struct dentry *);
@@ -3096,6 +3097,8 @@
 		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio);
+
 extern void inode_set_flags(struct inode *inode, unsigned int flags,
 			    unsigned int mask);
 
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 952ab97..ddd5b3a3 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -19,6 +19,11 @@
 #define FS_CRYPTO_BLOCK_SIZE		16
 
 struct fscrypt_ctx;
+
+/* iv sector for security/pfe/pfk_fscrypt.c and f2fs */
+#define PG_DUN(i, p)                                            \
+	(((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p)->index & 0xffffffff))
+
 struct fscrypt_info;
 
 struct fscrypt_str {
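
PG_DUN() packs the inode number into the upper and the page index into the
lower 32 bits of a 64-bit DUN. A sketch in the spirit of the filesystem
callers (the helper name is hypothetical):

	static void example_set_bio_dun(struct inode *inode, struct page *page,
					struct bio *bio)
	{
		u64 dun = PG_DUN(inode, page);

		if (fscrypt_using_hardware_encryption(inode))
			fscrypt_set_ice_dun(inode, bio, dun);
	}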
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index ee8b43e..0abc588 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -174,6 +174,21 @@
 	return -EOPNOTSUPP;
 }
 
+/* fscrypt_ice.c */
+static inline int fscrypt_using_hardware_encryption(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline void fscrypt_set_ice_dun(const struct inode *inode,
+		struct bio *bio, u64 dun) {}
+
+static inline bool fscrypt_mergeable_bio(struct bio *bio,
+		sector_t iv_block, bool bio_encrypted)
+{
+	return true;
+}
+
 /* hooks.c */
 
 static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 6456c6b..435fa38 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -30,6 +30,7 @@
 	bool (*dummy_context)(struct inode *);
 	bool (*empty_dir)(struct inode *);
 	unsigned int max_namelen;
+	bool (*is_encrypted)(struct inode *inode);
 };
 
 struct fscrypt_ctx {
@@ -182,6 +183,12 @@
 extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
 				 unsigned int);
 
+/* fscrypt_ice.c */
+extern int fscrypt_using_hardware_encryption(const struct inode *inode);
+extern void fscrypt_set_ice_dun(const struct inode *inode,
+		struct bio *bio, u64 dun);
+extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted);
+
 /* hooks.c */
 extern int fscrypt_file_open(struct inode *inode, struct file *filp);
 extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 25c08c6..f767293 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -129,7 +129,7 @@
 	struct disk_stats dkstats;
 #endif
 	struct percpu_ref ref;
-	struct rcu_head rcu_head;
+	struct rcu_work rcu_work;
 };
 
 #define GENHD_FL_REMOVABLE			1
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index 5972e49..eeae59d 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -191,6 +191,7 @@
 {
 	switch (0) {
 #include GENL_MAGIC_INCLUDE_FILE
+	case 0:
 		;
 	}
 }
@@ -209,6 +210,7 @@
 {
 	switch (0) {
 #include GENL_MAGIC_INCLUDE_FILE
+	case 0:
 		;
 	}
 }
@@ -218,7 +220,8 @@
 static inline void ct_assert_unique_ ## s_name ## _attributes(void)	\
 {									\
 	switch (0) {							\
-		s_fields						\
+	s_fields							\
+	case 0:								\
 			;						\
 	}								\
 }
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 21ddbe4..acc4279 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -142,7 +142,7 @@
 int gpiod_cansleep(const struct gpio_desc *desc);
 
 int gpiod_to_irq(const struct gpio_desc *desc);
-void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
+int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
 
 /* Convert between the old gpio_ and new gpiod_ interfaces */
 struct gpio_desc *gpio_to_desc(unsigned gpio);
@@ -465,10 +465,12 @@
 	return -EINVAL;
 }
 
-static inline void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
+					  const char *name)
 {
 	/* GPIO can never have been requested */
 	WARN_ON(1);
+	return -EINVAL;
 }
 
 static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
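
Since gpiod_set_consumer_name() can now fail (the name is duplicated
internally), callers should propagate the error; a minimal sketch:

	static int example_rename_gpio(struct gpio_desc *desc)
	{
		int ret;

		ret = gpiod_set_consumer_name(desc, "example-reset");
		if (ret)
			pr_err("failed to set consumer name: %d\n", ret);

		return ret;
	}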
diff --git a/include/linux/hdcp_qseecom.h b/include/linux/hdcp_qseecom.h
index 96b24a1..34ffb9f 100644
--- a/include/linux/hdcp_qseecom.h
+++ b/include/linux/hdcp_qseecom.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __HDCP_QSEECOM_H
@@ -11,6 +11,7 @@
 
 enum hdcp2_app_cmd {
 	HDCP2_CMD_START,
+	HDCP2_CMD_START_AUTH,
 	HDCP2_CMD_STOP,
 	HDCP2_CMD_PROCESS_MSG,
 	HDCP2_CMD_TIMEOUT,
@@ -35,6 +36,8 @@
 	switch (cmd) {
 	case HDCP2_CMD_START:
 		return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START);
+	case HDCP2_CMD_START_AUTH:
+		return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START_AUTH);
 	case HDCP2_CMD_STOP:
 		return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_STOP);
 	case HDCP2_CMD_PROCESS_MSG:
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8663f21..2d6100e 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -24,7 +24,10 @@
 
 #ifdef CONFIG_DEBUG_FS
 
+#include <linux/kfifo.h>
+
 #define HID_DEBUG_BUFSIZE 512
+#define HID_DEBUG_FIFOSIZE 512
 
 void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
 void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +40,8 @@
 void hid_debug_exit(void);
 void hid_debug_event(struct hid_device *, char *);
 
-
 struct hid_debug_list {
-	char *hid_debug_buf;
-	int head;
-	int tail;
+	DECLARE_KFIFO_PTR(hid_debug_fifo, char);
 	struct fasync_struct *fasync;
 	struct hid_device *hdev;
 	struct list_head node;
@@ -64,4 +64,3 @@
 #endif
 
 #endif
-
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 4c92e3b..5ec8635 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -499,8 +499,7 @@
  * enough and allocate struct page for it.
  *
  * The device driver can wrap the hmm_devmem struct inside a private device
- * driver struct. The device driver must call hmm_devmem_remove() before the
- * device goes away and before freeing the hmm_devmem struct memory.
+ * driver struct.
  */
 struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 				  struct device *device,
@@ -508,7 +507,6 @@
 struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 					   struct device *device,
 					   struct resource *res);
-void hmm_devmem_remove(struct hmm_devmem *devmem);
 
 /*
  * hmm_devmem_page_set_drvdata - set per-page driver data field
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5185a16..bbde887 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1166,8 +1166,9 @@
 	u32 bytes_avail_towrite;
 };
 
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
-			    struct hv_ring_buffer_debug_info *debug_info);
+
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+				struct hv_ring_buffer_debug_info *debug_info);
 
 /* Vmbus interface */
 #define vmbus_driver_register(driver)	\
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
new file mode 100644
index 0000000..73b0982
--- /dev/null
+++ b/include/linux/i3c/ccc.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_CCC_H
+#define I3C_CCC_H
+
+#include <linux/bitops.h>
+#include <linux/i3c/device.h>
+
+/* I3C CCC (Common Command Codes) related definitions */
+#define I3C_CCC_DIRECT			BIT(7)
+
+#define I3C_CCC_ID(id, broadcast)	\
+	((id) | ((broadcast) ? 0 : I3C_CCC_DIRECT))
+
+/* Commands valid in both broadcast and unicast modes */
+#define I3C_CCC_ENEC(broadcast)		I3C_CCC_ID(0x0, broadcast)
+#define I3C_CCC_DISEC(broadcast)	I3C_CCC_ID(0x1, broadcast)
+#define I3C_CCC_ENTAS(as, broadcast)	I3C_CCC_ID(0x2 + (as), broadcast)
+#define I3C_CCC_RSTDAA(broadcast)	I3C_CCC_ID(0x6, broadcast)
+#define I3C_CCC_SETMWL(broadcast)	I3C_CCC_ID(0x9, broadcast)
+#define I3C_CCC_SETMRL(broadcast)	I3C_CCC_ID(0xa, broadcast)
+#define I3C_CCC_SETXTIME(broadcast)	((broadcast) ? 0x28 : 0x98)
+#define I3C_CCC_VENDOR(id, broadcast)	((id) + ((broadcast) ? 0x61 : 0xe0))
+
+/* Broadcast-only commands */
+#define I3C_CCC_ENTDAA			I3C_CCC_ID(0x7, true)
+#define I3C_CCC_DEFSLVS			I3C_CCC_ID(0x8, true)
+#define I3C_CCC_ENTTM			I3C_CCC_ID(0xb, true)
+#define I3C_CCC_ENTHDR(x)		I3C_CCC_ID(0x20 + (x), true)
+
+/* Unicast-only commands */
+#define I3C_CCC_SETDASA			I3C_CCC_ID(0x7, false)
+#define I3C_CCC_SETNEWDA		I3C_CCC_ID(0x8, false)
+#define I3C_CCC_GETMWL			I3C_CCC_ID(0xb, false)
+#define I3C_CCC_GETMRL			I3C_CCC_ID(0xc, false)
+#define I3C_CCC_GETPID			I3C_CCC_ID(0xd, false)
+#define I3C_CCC_GETBCR			I3C_CCC_ID(0xe, false)
+#define I3C_CCC_GETDCR			I3C_CCC_ID(0xf, false)
+#define I3C_CCC_GETSTATUS		I3C_CCC_ID(0x10, false)
+#define I3C_CCC_GETACCMST		I3C_CCC_ID(0x11, false)
+#define I3C_CCC_SETBRGTGT		I3C_CCC_ID(0x13, false)
+#define I3C_CCC_GETMXDS			I3C_CCC_ID(0x14, false)
+#define I3C_CCC_GETHDRCAP		I3C_CCC_ID(0x15, false)
+#define I3C_CCC_GETXTIME		I3C_CCC_ID(0x19, false)
+
+#define I3C_CCC_EVENT_SIR		BIT(0)
+#define I3C_CCC_EVENT_MR		BIT(1)
+#define I3C_CCC_EVENT_HJ		BIT(3)
+
+/**
+ * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC
+ *
+ * @events: bitmask of I3C_CCC_EVENT_xxx events.
+ *
+ * Depending on the CCC command, the specific events coming from all devices
+ * (broadcast version) or a specific device (unicast version) will be
+ * enabled (ENEC) or disabled (DISEC).
+ */
+struct i3c_ccc_events {
+	u8 events;
+};
+
+/**
+ * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC
+ *
+ * @len: maximum write length in bytes
+ *
+ * The maximum write length is only applicable to SDR private messages or
+ * extended Write CCCs (like SETXTIME).
+ */
+struct i3c_ccc_mwl {
+	__be16 len;
+};
+
+/**
+ * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC
+ *
+ * @read_len: maximum read length in bytes
+ * @ibi_len: maximum IBI payload length
+ *
+ * The maximum read length is only applicable to SDR private messages or
+ * extended Read CCCs (like GETXTIME).
+ * The IBI length is only valid if the I3C slave is IBI capable
+ * (%I3C_BCR_IBI_REQ_CAP is set).
+ */
+struct i3c_ccc_mrl {
+	__be16 read_len;
+	u8 ibi_len;
+} __packed;
+
+/**
+ * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS
+ *
+ * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is
+ *	      describing an I2C slave.
+ * @dcr: DCR value (not applicable to entries describing I2C devices)
+ * @lvr: LVR value (not applicable to entries describing I3C devices)
+ * @bcr: BCR value or 0 if this entry is describing an I2C slave
+ * @static_addr: static address or 0 if the device does not have a static
+ *		 address
+ *
+ * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc
+ * descriptors (one entry per I3C/I2C dev controlled by the master).
+ */
+struct i3c_ccc_dev_desc {
+	u8 dyn_addr;
+	union {
+		u8 dcr;
+		u8 lvr;
+	};
+	u8 bcr;
+	u8 static_addr;
+};
+
+/**
+ * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC
+ *
+ * @count: number of dev descriptors
+ * @master: descriptor describing the current master
+ * @slaves: array of descriptors describing slaves controlled by the
+ *	    current master
+ *
+ * Information passed to the broadcast DEFSLVS to propagate device
+ * information to all masters currently acting as slaves on the bus.
+ * This is only meaningful if you have more than one master.
+ */
+struct i3c_ccc_defslvs {
+	u8 count;
+	struct i3c_ccc_dev_desc master;
+	struct i3c_ccc_dev_desc slaves[0];
+} __packed;
+
+/**
+ * enum i3c_ccc_test_mode - enum listing all available test modes
+ *
+ * @I3C_CCC_EXIT_TEST_MODE: exit test mode
+ * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode
+ */
+enum i3c_ccc_test_mode {
+	I3C_CCC_EXIT_TEST_MODE,
+	I3C_CCC_VENDOR_TEST_MODE,
+};
+
+/**
+ * struct i3c_ccc_enttm - payload passed to ENTTM CCC
+ *
+ * @mode: one of the &enum i3c_ccc_test_mode modes
+ *
+ * Information passed to the ENTTM CCC to instruct an I3C device to enter a
+ * specific test mode.
+ */
+struct i3c_ccc_enttm {
+	u8 mode;
+};
+
+/**
+ * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs
+ *
+ * @addr: dynamic address to assign to an I3C device
+ *
+ * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the
+ * dynamic address of an I3C device.
+ */
+struct i3c_ccc_setda {
+	u8 addr;
+};
+
+/**
+ * struct i3c_ccc_getpid - payload passed to GETPID CCC
+ *
+ * @pid: 48 bits PID in big endian
+ */
+struct i3c_ccc_getpid {
+	u8 pid[6];
+};
+
+/**
+ * struct i3c_ccc_getbcr - payload passed to GETBCR CCC
+ *
+ * @bcr: BCR (Bus Characteristic Register) value
+ */
+struct i3c_ccc_getbcr {
+	u8 bcr;
+};
+
+/**
+ * struct i3c_ccc_getdcr - payload passed to GETDCR CCC
+ *
+ * @dcr: DCR (Device Characteristic Register) value
+ */
+struct i3c_ccc_getdcr {
+	u8 dcr;
+};
+
+#define I3C_CCC_STATUS_PENDING_INT(status)	((status) & GENMASK(3, 0))
+#define I3C_CCC_STATUS_PROTOCOL_ERROR		BIT(5)
+#define I3C_CCC_STATUS_ACTIVITY_MODE(status)	\
+	(((status) & GENMASK(7, 6)) >> 6)
+
+/**
+ * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC
+ *
+ * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more
+ *	    information).
+ */
+struct i3c_ccc_getstatus {
+	__be16 status;
+};
+
+/**
+ * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC
+ *
+ * @newmaster: address of the master taking bus ownership
+ */
+struct i3c_ccc_getaccmst {
+	u8 newmaster;
+};
+
+/**
+ * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor
+ *
+ * @addr: dynamic address of the bridged device
+ * @id: ID of the slave device behind the bridge
+ */
+struct i3c_ccc_bridged_slave_desc {
+	u8 addr;
+	__be16 id;
+} __packed;
+
+/**
+ * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC
+ *
+ * @count: number of bridged slaves
+ * @bslaves: bridged slave descriptors
+ */
+struct i3c_ccc_setbrgtgt {
+	u8 count;
+	struct i3c_ccc_bridged_slave_desc bslaves[0];
+} __packed;
+
+/**
+ * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers
+ */
+enum i3c_sdr_max_data_rate {
+	I3C_SDR0_FSCL_MAX,
+	I3C_SDR1_FSCL_8MHZ,
+	I3C_SDR2_FSCL_6MHZ,
+	I3C_SDR3_FSCL_4MHZ,
+	I3C_SDR4_FSCL_2MHZ,
+};
+
+/**
+ * enum i3c_tsco - clock to data turn-around
+ */
+enum i3c_tsco {
+	I3C_TSCO_8NS,
+	I3C_TSCO_9NS,
+	I3C_TSCO_10NS,
+	I3C_TSCO_11NS,
+	I3C_TSCO_12NS,
+};
+
+#define I3C_CCC_MAX_SDR_FSCL_MASK	GENMASK(2, 0)
+#define I3C_CCC_MAX_SDR_FSCL(x)		((x) & I3C_CCC_MAX_SDR_FSCL_MASK)
+
+/**
+ * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC
+ *
+ * @maxwr: write limitations
+ * @maxrd: read limitations
+ * @maxrdturn: maximum read turn-around expressed in micro-seconds and
+ *	       little-endian formatted
+ */
+struct i3c_ccc_getmxds {
+	u8 maxwr;
+	u8 maxrd;
+	u8 maxrdturn[3];
+} __packed;
+
+#define I3C_CCC_HDR_MODE(mode)		BIT(mode)
+
+/**
+ * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC
+ *
+ * @modes: bitmap of supported HDR modes
+ */
+struct i3c_ccc_gethdrcap {
+	u8 modes;
+} __packed;
+
+/**
+ * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands
+ */
+enum i3c_ccc_setxtime_subcmd {
+	I3C_CCC_SETXTIME_ST = 0x7f,
+	I3C_CCC_SETXTIME_DT = 0xbf,
+	I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf,
+	I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef,
+	I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7,
+	I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb,
+	I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd,
+	I3C_CCC_SETXTIME_TPH = 0x3f,
+	I3C_CCC_SETXTIME_TU = 0x9f,
+	I3C_CCC_SETXTIME_ODR = 0x8f,
+};
+
+/**
+ * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC
+ *
+ * @subcmd: one of the sub-commands defined in &enum i3c_ccc_setxtime_subcmd
+ * @data: sub-command payload. Amount of data is determined by
+ *	  &i3c_ccc_setxtime->subcmd
+ */
+struct i3c_ccc_setxtime {
+	u8 subcmd;
+	u8 data[0];
+} __packed;
+
+#define I3C_CCC_GETXTIME_SYNC_MODE	BIT(0)
+#define I3C_CCC_GETXTIME_ASYNC_MODE(x)	BIT((x) + 1)
+#define I3C_CCC_GETXTIME_OVERFLOW	BIT(7)
+
+/**
+ * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC
+ *
+ * @supported_modes: bitmap describing supported XTIME modes
+ * @state: current status (enabled mode and overflow status)
+ * @frequency: slave's internal oscillator frequency in 500KHz steps
+ * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps
+ */
+struct i3c_ccc_getxtime {
+	u8 supported_modes;
+	u8 state;
+	u8 frequency;
+	u8 inaccuracy;
+} __packed;
+
+/**
+ * struct i3c_ccc_cmd_payload - CCC payload
+ *
+ * @len: payload length
+ * @data: payload data. This buffer must be DMA-able
+ */
+struct i3c_ccc_cmd_payload {
+	u16 len;
+	void *data;
+};
+
+/**
+ * struct i3c_ccc_cmd_dest - CCC command destination
+ *
+ * @addr: can be an I3C device address or the broadcast address if this is a
+ *	  broadcast CCC
+ * @payload: payload to be sent to this device or broadcasted
+ */
+struct i3c_ccc_cmd_dest {
+	u8 addr;
+	struct i3c_ccc_cmd_payload payload;
+};
+
+/**
+ * struct i3c_ccc_cmd - CCC command
+ *
+ * @rnw: true if the CCC should retrieve data from the device. Only valid for
+ *	 unicast commands
+ * @id: CCC command id
+ * @ndests: number of destinations. Should always be one for broadcast commands
+ * @dests: array of destinations and associated payload for this CCC. Most of
+ *	   the time, only one destination is provided
+ * @err: I3C error code
+ */
+struct i3c_ccc_cmd {
+	u8 rnw;
+	u8 id;
+	unsigned int ndests;
+	struct i3c_ccc_cmd_dest *dests;
+	enum i3c_error_code err;
+};
+
+#endif /* I3C_CCC_H */
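
A sketch of how a master-side caller might build a broadcast ENEC command
enabling in-band interrupts; the payload must be DMA-able, hence the
allocation, and handing @cmd to the hardware is left to the controller's
CCC hook:

	#include <linux/slab.h>

	static int example_enable_sir(void)
	{
		struct i3c_ccc_events *events;
		struct i3c_ccc_cmd_dest dest;
		struct i3c_ccc_cmd cmd;

		events = kzalloc(sizeof(*events), GFP_KERNEL);
		if (!events)
			return -ENOMEM;

		events->events = I3C_CCC_EVENT_SIR;

		dest.addr = I3C_BROADCAST_ADDR;	/* 0x7e, from i3c/master.h */
		dest.payload.len = sizeof(*events);
		dest.payload.data = events;

		cmd.rnw = false;
		cmd.id = I3C_CCC_ENEC(true);	/* broadcast variant */
		cmd.ndests = 1;
		cmd.dests = &dest;
		cmd.err = I3C_ERROR_UNKNOWN;

		/* ...send @cmd through the controller here... */

		kfree(events);
		return 0;
	}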
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
new file mode 100644
index 0000000..5ecb055
--- /dev/null
+++ b/include/linux/i3c/device.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_DEV_H
+#define I3C_DEV_H
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kconfig.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+/**
+ * enum i3c_error_code - I3C error codes
+ *
+ * These are the standard error codes as defined by the I3C specification.
+ * When -EIO is returned by the i3c_device_do_priv_xfers() or
+ * i3c_device_send_hdr_cmds() one can check the error code in
+ * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
+ * what went wrong.
+ *
+ * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
+ *		       related
+ * @I3C_ERROR_M0: M0 error
+ * @I3C_ERROR_M1: M1 error
+ * @I3C_ERROR_M2: M2 error
+ */
+enum i3c_error_code {
+	I3C_ERROR_UNKNOWN = 0,
+	I3C_ERROR_M0 = 1,
+	I3C_ERROR_M1,
+	I3C_ERROR_M2,
+};
+
+/**
+ * enum i3c_hdr_mode - HDR mode ids
+ * @I3C_HDR_DDR: DDR mode
+ * @I3C_HDR_TSP: TSP mode
+ * @I3C_HDR_TSL: TSL mode
+ */
+enum i3c_hdr_mode {
+	I3C_HDR_DDR,
+	I3C_HDR_TSP,
+	I3C_HDR_TSL,
+};
+
+/**
+ * struct i3c_priv_xfer - I3C SDR private transfer
+ * @rnw: encodes the transfer direction. true for a read, false for a write
+ * @len: transfer length in bytes of the transfer
+ * @data: input/output buffer
+ * @data.in: input buffer. Must point to a DMA-able buffer
+ * @data.out: output buffer. Must point to a DMA-able buffer
+ * @err: I3C error code
+ */
+struct i3c_priv_xfer {
+	u8 rnw;
+	u16 len;
+	union {
+		void *in;
+		const void *out;
+	} data;
+	enum i3c_error_code err;
+};
+
+/**
+ * enum i3c_dcr - I3C DCR values
+ * @I3C_DCR_GENERIC_DEVICE: generic I3C device
+ */
+enum i3c_dcr {
+	I3C_DCR_GENERIC_DEVICE = 0,
+};
+
+#define I3C_PID_MANUF_ID(pid)		(((pid) & GENMASK_ULL(47, 33)) >> 33)
+#define I3C_PID_RND_LOWER_32BITS(pid)	(!!((pid) & BIT_ULL(32)))
+#define I3C_PID_RND_VAL(pid)		((pid) & GENMASK_ULL(31, 0))
+#define I3C_PID_PART_ID(pid)		(((pid) & GENMASK_ULL(31, 16)) >> 16)
+#define I3C_PID_INSTANCE_ID(pid)	(((pid) & GENMASK_ULL(15, 12)) >> 12)
+#define I3C_PID_EXTRA_INFO(pid)		((pid) & GENMASK_ULL(11, 0))
+
+#define I3C_BCR_DEVICE_ROLE(bcr)	((bcr) & GENMASK(7, 6))
+#define I3C_BCR_I3C_SLAVE		(0 << 6)
+#define I3C_BCR_I3C_MASTER		(1 << 6)
+#define I3C_BCR_HDR_CAP			BIT(5)
+#define I3C_BCR_BRIDGE			BIT(4)
+#define I3C_BCR_OFFLINE_CAP		BIT(3)
+#define I3C_BCR_IBI_PAYLOAD		BIT(2)
+#define I3C_BCR_IBI_REQ_CAP		BIT(1)
+#define I3C_BCR_MAX_DATA_SPEED_LIM	BIT(0)
+
+/**
+ * struct i3c_device_info - I3C device information
+ * @pid: Provisional ID
+ * @bcr: Bus Characteristic Register
+ * @dcr: Device Characteristic Register
+ * @static_addr: static/I2C address
+ * @dyn_addr: dynamic address
+ * @hdr_cap: supported HDR modes
+ * @max_read_ds: max read speed information
+ * @max_write_ds: max write speed information
+ * @max_ibi_len: max IBI payload length
+ * @max_read_turnaround: max read turn-around time in micro-seconds
+ * @max_read_len: max private SDR read length in bytes
+ * @max_write_len: max private SDR write length in bytes
+ *
+ * These are all basic information that should be advertised by an I3C device.
+ * Some of them are optional depending on the device type and device
+ * capabilities.
+ * For each I3C slave attached to a master with
+ * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC command
+ * to retrieve these data.
+ */
+struct i3c_device_info {
+	u64 pid;
+	u8 bcr;
+	u8 dcr;
+	u8 static_addr;
+	u8 dyn_addr;
+	u8 hdr_cap;
+	u8 max_read_ds;
+	u8 max_write_ds;
+	u8 max_ibi_len;
+	u32 max_read_turnaround;
+	u16 max_read_len;
+	u16 max_write_len;
+};
+
+/*
+ * I3C device internals are kept hidden from I3C device users. It's just
+ * simpler to refactor things when everything goes through getter/setters, and
+ * I3C device drivers should not have to worry about internal representation
+ * anyway.
+ */
+struct i3c_device;
+
+/* These macros should be used to fill the i3c_device_id table entries. */
+#define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART)
+
+#define I3C_DEVICE(_manufid, _partid, _drvdata)				\
+	{								\
+		.match_flags = I3C_MATCH_MANUF_AND_PART,		\
+		.manuf_id = _manufid,					\
+		.part_id = _partid,					\
+		.data = _drvdata,					\
+	}
+
+#define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata)	\
+	{								\
+		.match_flags = I3C_MATCH_MANUF_AND_PART |		\
+			       I3C_MATCH_EXTRA_INFO,			\
+		.manuf_id = _manufid,					\
+		.part_id = _partid,					\
+		.extra_info = _info,					\
+		.data = _drvdata,					\
+	}
+
+#define I3C_CLASS(_dcr, _drvdata)					\
+	{								\
+		.match_flags = I3C_MATCH_DCR,				\
+		.dcr = _dcr,						\
+		.data = _drvdata,					\
+	}
+
+/**
+ * struct i3c_driver - I3C device driver
+ * @driver: inherit from device_driver
+ * @probe: I3C device probe method
+ * @remove: I3C device remove method
+ * @id_table: I3C device match table. Will be used by the framework to decide
+ *	      which device to bind to this driver
+ */
+struct i3c_driver {
+	struct device_driver driver;
+	int (*probe)(struct i3c_device *dev);
+	int (*remove)(struct i3c_device *dev);
+	const struct i3c_device_id *id_table;
+};
+
+static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
+{
+	return container_of(drv, struct i3c_driver, driver);
+}
+
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
+struct i3c_device *dev_to_i3cdev(struct device *dev);
+
+static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev,
+				      void *data)
+{
+	struct device *dev = i3cdev_to_dev(i3cdev);
+
+	dev_set_drvdata(dev, data);
+}
+
+static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev)
+{
+	struct device *dev = i3cdev_to_dev(i3cdev);
+
+	return dev_get_drvdata(dev);
+}
+
+int i3c_driver_register_with_owner(struct i3c_driver *drv,
+				   struct module *owner);
+void i3c_driver_unregister(struct i3c_driver *drv);
+
+#define i3c_driver_register(__drv)		\
+	i3c_driver_register_with_owner(__drv, THIS_MODULE)
+
+/**
+ * module_i3c_driver() - Register a module providing an I3C driver
+ * @__drv: the I3C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * driver.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_driver(__drv)		\
+	module_driver(__drv, i3c_driver_register, i3c_driver_unregister)
+
+/**
+ * i3c_i2c_driver_register() - Register an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to register
+ * @i2cdrv: the I2C driver to register
+ *
+ * This function registers both @i2cdrv and @i3cdrv, and fails if one of these
+ * registrations fails. This is mainly useful for devices that support both I2C
+ * and I3C modes.
+ * Note that when CONFIG_I3C is not enabled, this function only registers the
+ * I2C driver.
+ *
+ * Return: 0 if both registrations succeed, a negative error code otherwise.
+ */
+static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
+					  struct i2c_driver *i2cdrv)
+{
+	int ret;
+
+	ret = i2c_add_driver(i2cdrv);
+	if (ret || !IS_ENABLED(CONFIG_I3C))
+		return ret;
+
+	ret = i3c_driver_register(i3cdrv);
+	if (ret)
+		i2c_del_driver(i2cdrv);
+
+	return ret;
+}
+
+/**
+ * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to register
+ * @i2cdrv: the I2C driver to register
+ *
+ * This function unregisters both @i3cdrv and @i2cdrv.
+ * Note that when CONFIG_I3C is not enabled, this function only unregisters the
+ * @i2cdrv.
+ */
+static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
+					     struct i2c_driver *i2cdrv)
+{
+	if (IS_ENABLED(CONFIG_I3C))
+		i3c_driver_unregister(i3cdrv);
+
+	i2c_del_driver(i2cdrv);
+}
+
+/**
+ * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
+ *			     driver
+ * @__i3cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * and an I2C driver.
+ * This macro can be used even if CONFIG_I3C is disabled, in this case, only
+ * the I2C driver will be registered.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv)	\
+	module_driver(__i3cdrv,				\
+		      i3c_i2c_driver_register,		\
+		      i3c_i2c_driver_unregister)
+
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+			     struct i3c_priv_xfer *xfers,
+			     int nxfers);
+
+void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info);
+
+struct i3c_ibi_payload {
+	unsigned int len;
+	const void *data;
+};
+
+/**
+ * struct i3c_ibi_setup - IBI setup object
+ * @max_payload_len: maximum length of the payload associated to an IBI. If one
+ *		     IBI appears to have a payload that is bigger than this
+ *		     number, the IBI will be rejected.
+ * @num_slots: number of pre-allocated IBI slots. This should be chosen so that
+ *	       the system never runs out of IBI slots, otherwise you'll lose
+ *	       IBIs.
+ * @handler: IBI handler, called every time an IBI is received. This handler
+ *	     is called in a workqueue context. It is allowed to sleep and send
+ *	     new messages on the bus, though it's recommended to keep the
+ *	     processing done there as fast as possible to avoid delaying
+ *	     processing of other IBIs queued on the same workqueue.
+ *
+ * Temporary structure used to pass information to i3c_device_request_ibi().
+ * This object can be allocated on the stack since i3c_device_request_ibi()
+ * copies every bit of information and does not use it after
+ * i3c_device_request_ibi() has returned.
+ */
+struct i3c_ibi_setup {
+	unsigned int max_payload_len;
+	unsigned int num_slots;
+	void (*handler)(struct i3c_device *dev,
+			const struct i3c_ibi_payload *payload);
+};
+
+int i3c_device_request_ibi(struct i3c_device *dev,
+			   const struct i3c_ibi_setup *setup);
+void i3c_device_free_ibi(struct i3c_device *dev);
+int i3c_device_enable_ibi(struct i3c_device *dev);
+int i3c_device_disable_ibi(struct i3c_device *dev);
+
+#endif /* I3C_DEV_H */
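
A minimal client driver built on this API, shown for illustration; the
manufacturer/part IDs and all example_* names are hypothetical:

	static void example_ibi_handler(struct i3c_device *dev,
					const struct i3c_ibi_payload *payload)
	{
		dev_info(i3cdev_to_dev(dev), "IBI, %u byte payload\n",
			 payload->len);
	}

	static int example_probe(struct i3c_device *dev)
	{
		struct i3c_ibi_setup ibisetup = {
			.max_payload_len = 2,
			.num_slots = 10,
			.handler = example_ibi_handler,
		};
		int ret;

		ret = i3c_device_request_ibi(dev, &ibisetup);
		if (ret)
			return ret;

		return i3c_device_enable_ibi(dev);
	}

	static int example_remove(struct i3c_device *dev)
	{
		i3c_device_disable_ibi(dev);
		i3c_device_free_ibi(dev);
		return 0;
	}

	static const struct i3c_device_id example_ids[] = {
		I3C_DEVICE(0x123, 0x456, NULL),	/* hypothetical IDs */
		{ /* sentinel */ },
	};

	static struct i3c_driver example_driver = {
		.driver = {
			.name = "example-i3c",
		},
		.probe = example_probe,
		.remove = example_remove,
		.id_table = example_ids,
	};
	module_i3c_driver(example_driver);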
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
new file mode 100644
index 0000000..f13fd8b
--- /dev/null
+++ b/include/linux/i3c/master.h
@@ -0,0 +1,648 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_MASTER_H
+#define I3C_MASTER_H
+
+#include <asm/bitsperlong.h>
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/i3c/ccc.h>
+#include <linux/i3c/device.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define I3C_HOT_JOIN_ADDR		0x2
+#define I3C_BROADCAST_ADDR		0x7e
+#define I3C_MAX_ADDR			GENMASK(6, 0)
+
+struct i3c_master_controller;
+struct i3c_bus;
+struct i2c_device;
+struct i3c_device;
+
+/**
+ * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
+ * @node: node element used to insert the slot into the I2C or I3C device
+ *	  list
+ * @master: I3C master that instantiated this device. Will be used to do
+ *	    I2C/I3C transfers
+ * @master_priv: master private data assigned to the device. Can be used to
+ *		 add master specific information
+ *
+ * This structure is describing common I3C/I2C dev information.
+ */
+struct i3c_i2c_dev_desc {
+	struct list_head node;
+	struct i3c_master_controller *master;
+	void *master_priv;
+};
+
+#define I3C_LVR_I2C_INDEX_MASK		GENMASK(7, 5)
+#define I3C_LVR_I2C_INDEX(x)		((x) << 5)
+#define I3C_LVR_I2C_FM_MODE		BIT(4)
+
+#define I2C_MAX_ADDR			GENMASK(9, 0)
+
+/**
+ * struct i2c_dev_boardinfo - I2C device board information
+ * @node: used to insert the boardinfo object in the I2C boardinfo list
+ * @base: regular I2C board information
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ *	 the I2C device limitations
+ *
+ * This structure is used to attach board-level information to an I2C device.
+ * Each I2C device connected on the I3C bus should have one.
+ */
+struct i2c_dev_boardinfo {
+	struct list_head node;
+	struct i2c_board_info base;
+	u8 lvr;
+};
+
+/**
+ * struct i2c_dev_desc - I2C device descriptor
+ * @common: common part of the I2C device descriptor
+ * @boardinfo: pointer to the boardinfo attached to this I2C device
+ * @dev: I2C device object registered to the I2C framework
+ *
+ * Each I2C device connected on the bus will have an i2c_dev_desc.
+ * This object is created by the core and later attached to the controller
+ * using &struct_i3c_master_controller->ops->attach_i2c_dev().
+ *
+ * &struct_i2c_dev_desc is the internal representation of an I2C device
+ * connected on an I3C bus. This object is also passed to all
+ * &struct_i3c_master_controller_ops hooks.
+ */
+struct i2c_dev_desc {
+	struct i3c_i2c_dev_desc common;
+	const struct i2c_dev_boardinfo *boardinfo;
+	struct i2c_client *dev;
+};
+
+/**
+ * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot
+ * @work: work associated to this slot. The IBI handler will be called from
+ *	  there
+ * @dev: the I3C device that has generated this IBI
+ * @len: length of the payload associated to this IBI
+ * @data: payload buffer
+ *
+ * An IBI slot is an object pre-allocated by the controller and used when an
+ * IBI comes in.
+ * Every time an IBI comes in, the I3C master driver should find a free IBI
+ * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using
+ * i3c_master_queue_ibi().
+ *
+ * How IBI slots are allocated is left to the I3C master driver, though, for
+ * simple kmalloc-based allocation, the generic IBI slot pool can be used.
+ */
+struct i3c_ibi_slot {
+	struct work_struct work;
+	struct i3c_dev_desc *dev;
+	unsigned int len;
+	void *data;
+};
+
+/**
+ * struct i3c_device_ibi_info - IBI information attached to a specific device
+ * @all_ibis_handled: used to be informed when no more IBIs are waiting to be
+ *		      processed. Used by i3c_device_disable_ibi() to wait for
+ *		      all IBIs to be dequeued
+ * @pending_ibis: count the number of pending IBIs. Each pending IBI has its
+ *		  work element queued to the controller workqueue
+ * @max_payload_len: maximum payload length for an IBI coming from this device.
+ *		     this value is specified when calling
+ *		     i3c_device_request_ibi() and should not change at run
+ *		     time. All messages IBIs exceeding this limit should be
+ *		     rejected by the master
+ * @num_slots: number of IBI slots reserved for this device
+ * @enabled: reflect the IBI status
+ * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
+ *	     handler will be called from the controller workqueue, and as such
+ *	     is allowed to sleep (though it is recommended to process the IBI
+ *	     as fast as possible to not stall processing of other IBIs queued
+ *	     on the same workqueue).
+ *	     New I3C messages can be sent from the IBI handler
+ *
+ * The &struct_i3c_device_ibi_info object is allocated when
+ * i3c_device_request_ibi() is called and attached to a specific device. This
+ * object is here to manage IBIs coming from a specific I3C device.
+ *
+ * Note that this structure is the generic view of the IBI management
+ * infrastructure. I3C master drivers may have their own internal
+ * representation which they can associate to the device using
+ * controller-private data.
+ */
+struct i3c_device_ibi_info {
+	struct completion all_ibis_handled;
+	atomic_t pending_ibis;
+	unsigned int max_payload_len;
+	unsigned int num_slots;
+	unsigned int enabled;
+	void (*handler)(struct i3c_device *dev,
+			const struct i3c_ibi_payload *payload);
+};
+
+/**
+ * struct i3c_dev_boardinfo - I3C device board information
+ * @node: used to insert the boardinfo object in the I3C boardinfo list
+ * @init_dyn_addr: initial dynamic address requested by the FW. We provide no
+ *		   guarantee that the device will end up using this address,
+ *		   but try our best to assign this specific address to the
+ *		   device
+ * @static_addr: static address the I3C device listens on before it's been
+ *		 assigned a dynamic address by the master. Will be used during
+ *		 bus initialization to assign it a specific dynamic address
+ *		 before starting DAA (Dynamic Address Assignment)
+ * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ *	 that may be used to attach boardinfo to i3c_dev_desc when the device
+ *	 does not have a static address
+ * @of_node: optional DT node in case the device has been described in the DT
+ *
+ * This structure is used to attach board-level information to an I3C device.
+ * Not all I3C devices connected on the bus will have a boardinfo. It's only
+ * needed if you want to attach extra resources to a device or assign it a
+ * specific dynamic address.
+ */
+struct i3c_dev_boardinfo {
+	struct list_head node;
+	u8 init_dyn_addr;
+	u8 static_addr;
+	u64 pid;
+	struct device_node *of_node;
+};
+
+/**
+ * struct i3c_dev_desc - I3C device descriptor
+ * @common: common part of the I3C device descriptor
+ * @info: I3C device information. Will be automatically filled when you create
+ *	  your device with i3c_master_add_i3c_dev_locked()
+ * @ibi_lock: lock used to protect the &struct_i3c_device->ibi
+ * @ibi: IBI info attached to a device. Should be NULL until
+ *	 i3c_device_request_ibi() is called
+ * @dev: pointer to the I3C device object exposed to I3C device drivers. This
+ *	 should never be accessed from I3C master controller drivers. Only core
+ *	 code should manipulate it in when updating the dev <-> desc link or
+ *	 when propagating IBI events to the driver
+ * @boardinfo: pointer to the boardinfo attached to this I3C device
+ *
+ * Internal representation of an I3C device. This object is only used by the
+ * core and passed to I3C master controller drivers when they're requested to
+ * do some operations on the device.
+ * The core maintains the link between the internal I3C dev descriptor and the
+ * object exposed to the I3C device drivers (&struct_i3c_device).
+ */
+struct i3c_dev_desc {
+	struct i3c_i2c_dev_desc common;
+	struct i3c_device_info info;
+	struct mutex ibi_lock;
+	struct i3c_device_ibi_info *ibi;
+	struct i3c_device *dev;
+	const struct i3c_dev_boardinfo *boardinfo;
+};
+
+/**
+ * struct i3c_device - I3C device object
+ * @dev: device object to register the I3C dev to the device model
+ * @desc: pointer to an i3c device descriptor object. This link is updated
+ *	  every time the I3C device is rediscovered with a different dynamic
+ *	  address assigned
+ * @bus: I3C bus this device is attached to
+ *
+ * I3C device object exposed to I3C device drivers. The core takes care of
+ * linking this object to the relevant &struct_i3c_dev_desc one.
+ * All I3C devs on the I3C bus are represented, including I3C masters. For each
+ * of them, we have an instance of &struct i3c_device.
+ */
+struct i3c_device {
+	struct device dev;
+	struct i3c_dev_desc *desc;
+	struct i3c_bus *bus;
+};
+
+/*
+ * The I3C specification says the maximum number of devices connected on the
+ * bus is 11, but this number depends on external parameters like trace length,
+ * capacitive load per Device, and the types of Devices present on the Bus.
+ * I3C master can also have limitations, so this number is just here as a
+ * reference and should be adjusted on a per-controller/per-board basis.
+ */
+#define I3C_BUS_MAX_DEVS		11
+
+#define I3C_BUS_MAX_I3C_SCL_RATE	12900000
+#define I3C_BUS_TYP_I3C_SCL_RATE	12500000
+#define I3C_BUS_I2C_FM_PLUS_SCL_RATE	1000000
+#define I3C_BUS_I2C_FM_SCL_RATE		400000
+#define I3C_BUS_TLOW_OD_MIN_NS		200
+
+/**
+ * enum i3c_bus_mode - I3C bus mode
+ * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation
+ *		       expected
+ * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on
+ *			     the bus. The only impact in this mode is that the
+ *			     high SCL pulse has to stay below 50ns to trick I2C
+ *			     devices when transmitting I3C frames
+ * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present
+ *			     on the bus
+ */
+enum i3c_bus_mode {
+	I3C_BUS_MODE_PURE,
+	I3C_BUS_MODE_MIXED_FAST,
+	I3C_BUS_MODE_MIXED_SLOW,
+};
+
+/**
+ * enum i3c_addr_slot_status - I3C address slot status
+ * @I3C_ADDR_SLOT_FREE: address is free
+ * @I3C_ADDR_SLOT_RSVD: address is reserved
+ * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
+ * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
+ * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
+ *
+ * On an I3C bus, addresses are assigned dynamically, and we need to know which
+ * addresses are free to use and which ones are already assigned.
+ *
+ * Addresses marked as reserved are those reserved by the I3C protocol
+ * (broadcast address, ...).
+ */
+enum i3c_addr_slot_status {
+	I3C_ADDR_SLOT_FREE,
+	I3C_ADDR_SLOT_RSVD,
+	I3C_ADDR_SLOT_I2C_DEV,
+	I3C_ADDR_SLOT_I3C_DEV,
+	I3C_ADDR_SLOT_STATUS_MASK = 3,
+};
+
+/**
+ * struct i3c_bus - I3C bus object
+ * @cur_master: I3C master currently driving the bus. Since I3C is multi-master
+ *		this can change over time. Will be used to let a master
+ *		know whether it needs to request bus ownership before sending
+ *		a frame or not
+ * @id: bus ID. Assigned by the framework when the bus is registered
+ * @addrslots: a bitmap with 2-bits per-slot to encode the address status and
+ *	       ease the DAA (Dynamic Address Assignment) procedure (see
+ *	       &enum i3c_addr_slot_status)
+ * @mode: bus mode (see &enum i3c_bus_mode)
+ * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv
+ *		  transfers
+ * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers
+ * @scl_rate: SCL signal rate for I3C and I2C mode
+ * @devs.i3c: contains a list of I3C device descriptors representing I3C
+ *	      devices connected on the bus and successfully attached to the
+ *	      I3C master
+ * @devs.i2c: contains a list of I2C device descriptors representing I2C
+ *	      devices connected on the bus and successfully attached to the
+ *	      I3C master
+ * @devs: 2 lists containing all I3C/I2C devices connected to the bus
+ * @lock: read/write lock on the bus. This is needed to protect against
+ *	  operations that have an impact on the whole bus and the devices
+ *	  connected to it. For example, when asking slaves to drop their
+ *	  dynamic address (RSTDAA CCC), we need to make sure no one is trying
+ *	  to send I3C frames to these devices.
+ *	  Note that this lock does not protect against concurrency between
+ *	  devices: several drivers can send different I3C/I2C frames through
+ *	  the same master in parallel. This is the responsibility of the
+ *	  master to guarantee that frames are actually sent sequentially and
+ *	  not interlaced
+ *
+ * The I3C bus is represented with its own object and not implicitly described
+ * by the I3C master to cope with the multi-master functionality, where one bus
+ * can be shared amongst several masters, each of them requesting bus ownership
+ * when they need to.
+ */
+struct i3c_bus {
+	struct i3c_dev_desc *cur_master;
+	int id;
+	unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG];
+	enum i3c_bus_mode mode;
+	struct {
+		unsigned long i3c;
+		unsigned long i2c;
+	} scl_rate;
+	struct {
+		struct list_head i3c;
+		struct list_head i2c;
+	} devs;
+	struct rw_semaphore lock;
+};
+
+/**
+ * struct i3c_master_controller_ops - I3C master methods
+ * @bus_init: hook responsible for the I3C bus initialization. You should at
+ *	      least call i3c_master_set_info() from there and set the bus mode.
+ *	      You can also put controller specific initialization in there.
+ *	      This method is mandatory.
+ * @bus_cleanup: cleanup everything done in
+ *		 &i3c_master_controller_ops->bus_init().
+ *		 This method is optional.
+ * @attach_i3c_dev: called every time an I3C device is attached to the bus. It
+ *		    can be after a DAA or when a device is statically declared
+ *		    by the FW, in which case it will only have a static address
+ *		    and the dynamic address will be 0.
+ *		    When this function is called, device information has not
+ *		    been retrieved yet.
+ *		    This is a good place to attach master controller specific
+ *		    data to I3C devices.
+ *		    This method is optional.
+ * @reattach_i3c_dev: called every time an I3C device has its address
+ *		      changed. It can be because the device has been powered
+ *		      down and has lost its address, or it can happen when a
+ *		      device had a static address and has been assigned a
+ *		      dynamic address with SETDASA.
+ *		      This method is optional.
+ * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually
+ *		    happens when the master device is unregistered.
+ *		    This method is optional.
+ * @do_daa: do a DAA (Dynamic Address Assignment) procedure. This procedure
+ *	    should send an ENTDAA CCC command and then add all devices
+ *	    discovered during the DAA using i3c_master_add_i3c_dev_locked().
+ *	    All devices added with i3c_master_add_i3c_dev_locked() will then
+ *	    be attached or re-attached to the controller.
+ *	    This method is mandatory.
+ * @supports_ccc_cmd: should return true if the CCC command is supported, false
+ *		      otherwise.
+ *		      This method is optional, if not provided the core assumes
+ *		      all CCC commands are supported.
+ * @send_ccc_cmd: send a CCC command
+ *		  This method is mandatory.
+ * @priv_xfers: do one or several private I3C SDR transfers
+ *		This method is mandatory.
+ * @attach_i2c_dev: called every time an I2C device is attached to the bus.
+ *		    This is a good place to attach master controller specific
+ *		    data to I2C devices.
+ *		    This method is optional.
+ * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually
+ *		    happens when the master device is unregistered.
+ *		    This method is optional.
+ * @i2c_xfers: do one or several I2C transfers. Note that, unlike i3c
+ *	       transfers, the core does not guarantee that buffers attached to
+ *	       the transfers are DMA-safe. If drivers want to have DMA-safe
+ *	       buffers, they should use the i2c_get_dma_safe_msg_buf()
+ *	       and i2c_put_dma_safe_msg_buf() helpers provided by the I2C
+ *	       framework.
+ *	       This method is mandatory.
+ * @i2c_funcs: expose the supported I2C functionalities.
+ *	       This method is mandatory.
+ * @request_ibi: attach an IBI handler to an I3C device. This implies defining
+ *		 an IBI handler and the constraints of the IBI (maximum payload
+ *		 length and number of pre-allocated slots).
+ *		 Some controllers support fewer IBI-capable devices than regular
+ *		 devices, so this method might return -%EBUSY if there's no
+ *		 more space for an extra IBI registration
+ *		 This method is optional.
+ * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI
+ *	      should have been disabled with ->disable_ibi() prior to that
+ *	      This method is mandatory only if ->request_ibi is not NULL.
+ * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called
+ *		prior to ->enable_ibi(). The controller should first enable
+ *		the IBI on the controller end (for example, unmask the hardware
+ *		IRQ) and then send the ENEC CCC command (with the IBI flag set)
+ *		to the I3C device.
+ *		This method is mandatory only if ->request_ibi is not NULL.
+ * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI
+ *		 flag set and then deactivate the hardware IRQ on the
+ *		 controller end.
+ *		 This method is mandatory only if ->request_ibi is not NULL.
+ * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been
+ *		      processed by its handler. The IBI slot should be put back
+ *		      in the IBI slot pool so that the controller can re-use it
+ *		      for a future IBI
+ *		      This method is mandatory only if ->request_ibi is not
+ *		      NULL.
+ */
+struct i3c_master_controller_ops {
+	int (*bus_init)(struct i3c_master_controller *master);
+	void (*bus_cleanup)(struct i3c_master_controller *master);
+	int (*attach_i3c_dev)(struct i3c_dev_desc *dev);
+	int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr);
+	void (*detach_i3c_dev)(struct i3c_dev_desc *dev);
+	int (*do_daa)(struct i3c_master_controller *master);
+	bool (*supports_ccc_cmd)(struct i3c_master_controller *master,
+				 const struct i3c_ccc_cmd *cmd);
+	int (*send_ccc_cmd)(struct i3c_master_controller *master,
+			    struct i3c_ccc_cmd *cmd);
+	int (*priv_xfers)(struct i3c_dev_desc *dev,
+			  struct i3c_priv_xfer *xfers,
+			  int nxfers);
+	int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
+	void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
+	int (*i2c_xfers)(struct i2c_dev_desc *dev,
+			 const struct i2c_msg *xfers, int nxfers);
+	u32 (*i2c_funcs)(struct i3c_master_controller *master);
+	int (*request_ibi)(struct i3c_dev_desc *dev,
+			   const struct i3c_ibi_setup *req);
+	void (*free_ibi)(struct i3c_dev_desc *dev);
+	int (*enable_ibi)(struct i3c_dev_desc *dev);
+	int (*disable_ibi)(struct i3c_dev_desc *dev);
+	void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
+				 struct i3c_ibi_slot *slot);
+};
+
+/**
+ * struct i3c_master_controller - I3C master controller object
+ * @dev: device to be registered to the device-model
+ * @this: an I3C device object representing this master. This device will be
+ *	  added to the list of I3C devs available on the bus
+ * @i2c: I2C adapter used for backward compatibility. This adapter is
+ *	 registered to the I2C subsystem to be as transparent as possible to
+ *	 existing I2C drivers
+ * @ops: master operations. See &struct i3c_master_controller_ops
+ * @secondary: true if the master is a secondary master
+ * @init_done: true when the bus initialization is done
+ * @boardinfo.i3c: list of I3C boardinfo objects
+ * @boardinfo.i2c: list of I2C boardinfo objects
+ * @boardinfo: board-level information attached to devices connected on the bus
+ * @bus: I3C bus exposed by this master
+ * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ *	drivers if they need to postpone operations that need to take place
+ *	in a thread context. A typical example is Hot Join processing, which
+ *	requires taking the bus lock in maintenance mode and, in turn, can
+ *	only be done from a sleepable context
+ *
+ * A &struct i3c_master_controller has to be registered to the I3C subsystem
+ * through i3c_master_register(). None of &struct i3c_master_controller fields
+ * should be set manually, just pass appropriate values to
+ * i3c_master_register().
+ */
+struct i3c_master_controller {
+	struct device dev;
+	struct i3c_dev_desc *this;
+	struct i2c_adapter i2c;
+	const struct i3c_master_controller_ops *ops;
+	unsigned int secondary : 1;
+	unsigned int init_done : 1;
+	struct {
+		struct list_head i3c;
+		struct list_head i2c;
+	} boardinfo;
+	struct i3c_bus bus;
+	struct workqueue_struct *wq;
+};
+
+/**
+ * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I2C device descriptor pointer updated to point to the current slot
+ *	 at each iteration of the loop
+ *
+ * Iterate over all I2C devs present on the bus.
+ */
+#define i3c_bus_for_each_i2cdev(bus, dev)				\
+	list_for_each_entry(dev, &(bus)->devs.i2c, common.node)
+
+/**
+ * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I3C device descriptor pointer updated to point to the current slot
+ *	 at each iteration of the loop
+ *
+ * Iterate over all I3C devs present on the bus.
+ */
+#define i3c_bus_for_each_i3cdev(bus, dev)				\
+	list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
+
+int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
+			    const struct i2c_msg *xfers,
+			    int nxfers);
+
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+			    u8 evts);
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+			   u8 evts);
+int i3c_master_entdaa_locked(struct i3c_master_controller *master);
+int i3c_master_defslvs_locked(struct i3c_master_controller *master);
+
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+			     u8 start_addr);
+
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+				  u8 addr);
+int i3c_master_do_daa(struct i3c_master_controller *master);
+
+int i3c_master_set_info(struct i3c_master_controller *master,
+			const struct i3c_device_info *info);
+
+int i3c_master_register(struct i3c_master_controller *master,
+			struct device *parent,
+			const struct i3c_master_controller_ops *ops,
+			bool secondary);
+int i3c_master_unregister(struct i3c_master_controller *master);
+
+/**
+ * i3c_dev_get_master_data() - get master private data attached to an I3C
+ *			       device descriptor
+ * @dev: the I3C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i3c_dev_set_master_data()
+ *	   or NULL if no data has been attached to the device.
+ */
+static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev)
+{
+	return dev->common.master_priv;
+}
+
+/**
+ * i3c_dev_set_master_data() - attach master private data to an I3C device
+ *			       descriptor
+ * @dev: the I3C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i3c_dev_get_master_data().
+ */
+static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev,
+					   void *data)
+{
+	dev->common.master_priv = data;
+}
+
+/**
+ * i2c_dev_get_master_data() - get master private data attached to an I2C
+ *			       device descriptor
+ * @dev: the I2C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i2c_dev_set_master_data()
+ *	   or NULL if no data has been attached to the device.
+ */
+static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev)
+{
+	return dev->common.master_priv;
+}
+
+/**
+ * i2c_dev_set_master_data() - attach master private data to an I2C device
+ *			       descriptor
+ * @dev: the I2C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i2c_dev_get_master_data().
+ */
+static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev,
+					   void *data)
+{
+	dev->common.master_priv = data;
+}
+
+/**
+ * i3c_dev_get_master() - get master used to communicate with a device
+ * @dev: I3C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i3c_dev_get_master(struct i3c_dev_desc *dev)
+{
+	return dev->common.master;
+}
+
+/**
+ * i2c_dev_get_master() - get master used to communicate with a device
+ * @dev: I2C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i2c_dev_get_master(struct i2c_dev_desc *dev)
+{
+	return dev->common.master;
+}
+
+/**
+ * i3c_master_get_bus() - get the bus attached to a master
+ * @master: master object
+ *
+ * Return: the I3C bus @master is connected to
+ */
+static inline struct i3c_bus *
+i3c_master_get_bus(struct i3c_master_controller *master)
+{
+	return &master->bus;
+}
+
+struct i3c_generic_ibi_pool;
+
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+			   const struct i3c_ibi_setup *req);
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool);
+
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool);
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+				  struct i3c_ibi_slot *slot);
+
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
+
+struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
+
+#endif /* I3C_MASTER_H */
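
To see how the pieces of this header fit together, here is a minimal sketch of a
controller driver built on it. All dummy_* names are hypothetical, and only a
subset of the mandatory ops is shown; a real driver must also provide do_daa(),
send_ccc_cmd(), priv_xfers(), i2c_xfers() and i2c_funcs().

	#include <linux/slab.h>
	/* plus this header, at whatever path the tree installs it */

	static int dummy_bus_init(struct i3c_master_controller *m)
	{
		struct i3c_device_info info = { };
		int ret;

		/* Pick a free dynamic address for the master itself. */
		ret = i3c_master_get_free_addr(m, 0);
		if (ret < 0)
			return ret;
		info.dyn_addr = ret;

		/* Mandatory: describe the master before the bus comes up. */
		return i3c_master_set_info(m, &info);
	}

	static int dummy_attach_i3c_dev(struct i3c_dev_desc *dev)
	{
		void *priv = kzalloc(sizeof(unsigned long), GFP_KERNEL);

		if (!priv)
			return -ENOMEM;

		/* Controller-private per-device state, retrievable later. */
		i3c_dev_set_master_data(dev, priv);
		return 0;
	}

	static void dummy_detach_i3c_dev(struct i3c_dev_desc *dev)
	{
		kfree(i3c_dev_get_master_data(dev));
	}

	static const struct i3c_master_controller_ops dummy_ops = {
		.bus_init	= dummy_bus_init,
		.attach_i3c_dev	= dummy_attach_i3c_dev,
		.detach_i3c_dev	= dummy_detach_i3c_dev,
		/* do_daa, send_ccc_cmd, priv_xfers, i2c_xfers, i2c_funcs
		 * omitted from this sketch but mandatory in practice.
		 */
	};

Registration then boils down to i3c_master_register(&my_master, parent_dev,
&dummy_ops, false) from the driver's probe path.
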
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 9c03a7d..da3a837 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -3185,4 +3185,57 @@
 	return true;
 }
 
+struct element {
+	u8 id;
+	u8 datalen;
+	u8 data[];
+};
+
+/* element iteration helpers */
+#define for_each_element(element, _data, _datalen)			\
+	for (element = (void *)(_data);					\
+	     (u8 *)(_data) + (_datalen) - (u8 *)element >=		\
+		sizeof(*element) &&					\
+	     (u8 *)(_data) + (_datalen) - (u8 *)element >=		\
+		sizeof(*element) + element->datalen;			\
+	     element = (void *)(element->data + element->datalen))
+
+#define for_each_element_id(element, _id, data, datalen)		\
+	for_each_element(element, data, datalen)			\
+		if (element->id == (_id))
+
+#define for_each_element_extid(element, extid, data, datalen)		\
+	for_each_element(element, data, datalen)			\
+		if (element->id == WLAN_EID_EXTENSION &&		\
+		    element->datalen > 0 &&				\
+		    element->data[0] == (extid))
+
+#define for_each_subelement(sub, element)				\
+	for_each_element(sub, (element)->data, (element)->datalen)
+
+#define for_each_subelement_id(sub, id, element)			\
+	for_each_element_id(sub, id, (element)->data, (element)->datalen)
+
+#define for_each_subelement_extid(sub, extid, element)			\
+	for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
+
+/**
+ * for_each_element_completed - determine if element parsing consumed all data
+ * @element: element pointer after for_each_element() or friends
+ * @data: same data pointer as passed to for_each_element() or friends
+ * @datalen: same data length as passed to for_each_element() or friends
+ *
+ * This function returns %true if all the data was parsed or considered
+ * while walking the elements. Only use this if your for_each_element()
+ * loop cannot be broken out of, otherwise it always returns %false.
+ *
+ * If some data was malformed, this returns %false since the last parsed
+ * element will not fill the whole remaining data.
+ */
+static inline bool for_each_element_completed(const struct element *element,
+					      const void *data, size_t datalen)
+{
+	return (u8 *)element == (u8 *)data + datalen;
+}
+
 #endif /* LINUX_IEEE80211_H */
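
The element helpers iterate over IEEE 802.11 information elements (one byte of
id, one byte of length, then payload) without ever reading past the buffer. A
short sketch of the intended pattern; count_ssid_elements() is a made-up
wrapper, not part of this patch:

	#include <linux/ieee80211.h>

	/* Hypothetical helper: count SSID elements in an IE buffer. */
	static int count_ssid_elements(const u8 *ies, size_t len)
	{
		const struct element *elem;
		int n = 0;

		for_each_element_id(elem, WLAN_EID_SSID, ies, len)
			n++;

		/* If the loop stopped early, the buffer was malformed. */
		if (!for_each_element_completed(elem, ies, len))
			return -EINVAL;

		return n;
	}
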
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 6756fea..e44746d 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -54,6 +54,7 @@
 	case ARPHRD_IPGRE:
 	case ARPHRD_VOID:
 	case ARPHRD_NONE:
+	case ARPHRD_RAWIP:
 		return false;
 	default:
 		return true;
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 0e644e5..39f8279 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -1178,6 +1178,7 @@
 
 enum ipa_smmu_client_type {
 	IPA_SMMU_WLAN_CLIENT,
+	IPA_SMMU_AP_CLIENT,
 	IPA_SMMU_CLIENT_MAX
 };
 
diff --git a/include/linux/ipa_wdi3.h b/include/linux/ipa_wdi3.h
index aca7fba..3c8a72c 100644
--- a/include/linux/ipa_wdi3.h
+++ b/include/linux/ipa_wdi3.h
@@ -97,10 +97,12 @@
  * @transfer_ring_size:  size of the transfer ring
  * @transfer_ring_doorbell_pa:  physical address of the doorbell that
 	IPA uC will update the tailpointer of the transfer ring
+ * @is_txr_rn_db_pcie_addr: Bool indicating if the txr ring DB is a PCIe address
  * @event_ring_base_pa:  physical address of the base of the event ring
  * @event_ring_size:  event ring size
  * @event_ring_doorbell_pa:  physical address of the doorbell that IPA uC
 	will update the headpointer of the event ring
+ * @is_evt_rn_db_pcie_addr: Bool indicating if the evt ring DB is a PCIe address
  * @num_pkt_buffers:  Number of pkt buffers allocated. The size of the event
 	ring and the transfer ring has to be atleast ( num_pkt_buffers + 1)
  * @pkt_offset: packet offset (wdi header length)
@@ -113,10 +115,12 @@
 	phys_addr_t  transfer_ring_base_pa;
 	u32  transfer_ring_size;
 	phys_addr_t  transfer_ring_doorbell_pa;
+	bool is_txr_rn_db_pcie_addr;
 
 	phys_addr_t  event_ring_base_pa;
 	u32  event_ring_size;
 	phys_addr_t  event_ring_doorbell_pa;
+	bool is_evt_rn_db_pcie_addr;
 	u16  num_pkt_buffers;
 
 	u16 pkt_offset;
@@ -132,10 +136,12 @@
  * @transfer_ring_size:  size of the transfer ring
  * @transfer_ring_doorbell_pa:  physical address of the doorbell that
 	IPA uC will update the tailpointer of the transfer ring
+ * @is_txr_rn_db_pcie_addr: Bool indicating if the txr ring DB is a PCIe address
  * @event_ring_base_pa:  physical address of the base of the event ring
  * @event_ring_size:  event ring size
  * @event_ring_doorbell_pa:  physical address of the doorbell that IPA uC
 	will update the headpointer of the event ring
+ * @is_evt_rn_db_pcie_addr: Bool indicating if the evt ring DB is a PCIe address
  * @num_pkt_buffers:  Number of pkt buffers allocated. The size of the event
 	ring and the transfer ring has to be atleast ( num_pkt_buffers + 1)
  * @pkt_offset: packet offset (wdi header length)
@@ -148,10 +154,12 @@
 	struct sg_table  transfer_ring_base;
 	u32  transfer_ring_size;
 	phys_addr_t  transfer_ring_doorbell_pa;
+	bool is_txr_rn_db_pcie_addr;
 
 	struct sg_table  event_ring_base;
 	u32  event_ring_size;
 	phys_addr_t  event_ring_doorbell_pa;
+	bool is_evt_rn_db_pcie_addr;
 	u16  num_pkt_buffers;
 
 	u16 pkt_offset;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12..c9bffda 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1151,7 +1151,8 @@
 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+				unsigned int *mapped_cpu);
 void irq_matrix_reserve(struct irq_matrix *m);
 void irq_matrix_remove_reserved(struct irq_matrix *m);
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8bdbb5f..3188c0b 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -319,7 +319,7 @@
 #define GITS_TYPER_PLPIS		(1UL << 0)
 #define GITS_TYPER_VLPIS		(1UL << 1)
 #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT	4
-#define GITS_TYPER_ITT_ENTRY_SIZE(r)	((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_ITT_ENTRY_SIZE(r)	((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
 #define GITS_TYPER_IDBITS_SHIFT		8
 #define GITS_TYPER_DEVBITS_SHIFT	13
 #define GITS_TYPER_DEVBITS(r)		((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 814643f..444869d 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -25,6 +25,7 @@
 struct vm_area_struct;
 struct super_block;
 struct file_system_type;
+struct poll_table_struct;
 
 struct kernfs_open_node;
 struct kernfs_iattrs;
@@ -261,6 +262,9 @@
 	ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
 			 loff_t off);
 
+	__poll_t (*poll)(struct kernfs_open_file *of,
+			 struct poll_table_struct *pt);
+
 	int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -350,6 +354,8 @@
 int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
 		     const char *new_name, const void *new_ns);
 int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
+__poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+			     struct poll_table_struct *pt);
 void kernfs_notify(struct kernfs_node *kn);
 
 const void *kernfs_super_ns(struct super_block *sb);
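
With the new hook, a kernfs-backed file can supply its own poll implementation
and still reuse the default kernfs_notify() wakeup behaviour via
kernfs_generic_poll(). A hedged sketch; my_poll and my_kf_ops are invented
names:

	static __poll_t my_poll(struct kernfs_open_file *of,
				struct poll_table_struct *pt)
	{
		/* Default readiness: wakes pollers on kernfs_notify(). */
		__poll_t ret = kernfs_generic_poll(of, pt);

		/* A driver could OR in extra ready bits here, e.g. EPOLLPRI. */
		return ret;
	}

	static const struct kernfs_ops my_kf_ops = {
		.poll = my_poll,
	};
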
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c196176..edf8f86 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -4,7 +4,6 @@
 /* Simple interface for creating and stopping kernel threads without mess. */
 #include <linux/err.h>
 #include <linux/sched.h>
-#include <linux/cgroup.h>
 
 __printf(4, 5)
 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
@@ -198,6 +197,8 @@
 
 void kthread_destroy_worker(struct kthread_worker *worker);
 
+struct cgroup_subsys_state;
+
 #ifdef CONFIG_BLK_CGROUP
 void kthread_associate_blkcg(struct cgroup_subsys_state *css);
 struct cgroup_subsys_state *kthread_blkcg(void);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c926698..a03d5e2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -694,7 +694,8 @@
 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len);
 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			   void *data, int offset, unsigned long len);
+				  void *data, unsigned int offset,
+				  unsigned long len);
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 97a020c..ce16efb 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1516,6 +1516,8 @@
 					size_t *len);
 	int (*inode_create)(struct inode *dir, struct dentry *dentry,
 				umode_t mode);
+	int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
+				umode_t mode);
 	int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
 				struct dentry *new_dentry);
 	int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@@ -1830,6 +1832,7 @@
 	struct hlist_head inode_free_security;
 	struct hlist_head inode_init_security;
 	struct hlist_head inode_create;
+	struct hlist_head inode_post_create;
 	struct hlist_head inode_link;
 	struct hlist_head inode_unlink;
 	struct hlist_head inode_symlink;
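
A security module opts into the new hook exactly like the existing
inode_create one; it runs after the inode has been created, which is useful
for post-creation labeling. Sketch only, with invented my_* names:

	static int my_inode_post_create(struct inode *dir,
					struct dentry *dentry, umode_t mode)
	{
		/* The inode now exists; e.g. attach a security label here. */
		return 0;
	}

	static struct security_hook_list my_hooks[] = {
		LSM_HOOK_INIT(inode_post_create, my_inode_post_create),
	};

	/* Registered from the LSM's init code:
	 * security_add_hooks(my_hooks, ARRAY_SIZE(my_hooks), "my_lsm");
	 */
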
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 837f2f2..bb2c84a 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -281,4 +281,7 @@
 }
 #endif /* mul_u64_u32_div */
 
+#define DIV64_U64_ROUND_UP(ll, d)	\
+	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
+
 #endif /* _LINUX_MATH64_H */
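
The statement expression evaluates the divisor once into _tmp, so the macro
stays safe even with side-effecting arguments. A trivial example of the
rounding behaviour, with placeholder values:

	u64 bytes = 1000, block = 512;

	/* div64_u64(1000 + 511, 512) == 2: rounds up, not down. */
	u64 nblocks = DIV64_U64_ROUND_UP(bytes, block);
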
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index d8b7855..90e2653 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -21,14 +21,16 @@
  * walkers which rely on the fully initialized page->flags and others
  * should use this rather than pfn_valid && pfn_to_page
  */
-#define pfn_to_online_page(pfn)				\
-({							\
-	struct page *___page = NULL;			\
-	unsigned long ___nr = pfn_to_section_nr(pfn);	\
-							\
-	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
-		___page = pfn_to_page(pfn);		\
-	___page;					\
+#define pfn_to_online_page(pfn)					   \
+({								   \
+	struct page *___page = NULL;				   \
+	unsigned long ___pfn = pfn;				   \
+	unsigned long ___nr = pfn_to_section_nr(___pfn);	   \
+								   \
+	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
+	    pfn_valid_within(___pfn))				   \
+		___page = pfn_to_page(___pfn);			   \
+	___page;						   \
 })
 
 /*
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index f91f9e7..a84572c 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -106,6 +106,7 @@
  * @altmap: pre-allocated/reserved memory for vmemmap allocations
  * @res: physical address range covered by @ref
  * @ref: reference count that pins the devm_memremap_pages() mapping
+ * @kill: callback to transition @ref to the dead state
  * @dev: host device of the mapping for debug
  * @data: private data pointer for page_free()
  * @type: memory type: see MEMORY_* in memory_hotplug.h
@@ -117,6 +118,7 @@
 	bool altmap_valid;
 	struct resource res;
 	struct percpu_ref *ref;
+	void (*kill)(struct percpu_ref *ref);
 	struct device *dev;
 	void *data;
 	enum memory_type type;
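
The new @kill callback shifts responsibility for shutting down @ref to the
caller of devm_memremap_pages(). A hedged sketch of how a driver might fill in
the structure; my_ref, my_kill and the surrounding probe code are assumptions,
not part of this patch (percpu_ref_init() of my_ref is elided):

	static struct percpu_ref my_ref;

	static void my_kill(struct percpu_ref *ref)
	{
		percpu_ref_kill(ref);	/* transition @ref to the dead state */
	}

	/* ... in a probe path, with pgmap and dev already set up ... */
	pgmap->ref = &my_ref;
	pgmap->kill = my_kill;
	addr = devm_memremap_pages(dev, pgmap);
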
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 88a041b..bbcfe2e 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1321,7 +1321,7 @@
 static inline const struct cpumask *
 mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
 {
-	return dev->priv.irq_info[vector].mask;
+	return dev->priv.irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
 }
 
 #endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 689fe9b..7fb6028 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1725,11 +1725,15 @@
 
 static inline void mm_inc_nr_puds(struct mm_struct *mm)
 {
+	if (mm_pud_folded(mm))
+		return;
 	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 }
 
 static inline void mm_dec_nr_puds(struct mm_struct *mm)
 {
+	if (mm_pud_folded(mm))
+		return;
 	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 }
 #endif
@@ -1749,11 +1753,15 @@
 
 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
 {
+	if (mm_pmd_folded(mm))
+		return;
 	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
 
 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
 {
+	if (mm_pmd_folded(mm))
+		return;
 	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
 #endif
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 5957349..353bbc9 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -396,6 +396,7 @@
 	struct notifier_block   reboot_notify;
 	enum mmc_pon_type	pon_type;
 	struct mmc_bkops_info bkops;
+	struct workqueue_struct *complete_wq;	/* Private workqueue */
 };
 
 static inline bool mmc_large_sector(struct mmc_card *card)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 58be975..3b6fb43 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -165,6 +165,7 @@
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	WORKINGSET_REFAULT,
 	WORKINGSET_ACTIVATE,
+	WORKINGSET_RESTORE,
 	WORKINGSET_NODERECLAIM,
 	NR_ANON_MAPPED,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 1a852ff..12b42dd 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -448,6 +448,23 @@
 	kernel_ulong_t driver_data;
 };
 
+/* i3c */
+
+#define I3C_MATCH_DCR			0x1
+#define I3C_MATCH_MANUF			0x2
+#define I3C_MATCH_PART			0x4
+#define I3C_MATCH_EXTRA_INFO		0x8
+
+struct i3c_device_id {
+	__u8 match_flags;
+	__u8 dcr;
+	__u16 manuf_id;
+	__u16 part_id;
+	__u16 extra_info;
+
+	const void *data;
+};
+
 /* spi */
 
 #define SPI_NAME_SIZE	32
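
I3C drivers are expected to match on this table much like SPI and I2C drivers
do. A hedged example that matches on manufacturer and part ID; the numeric
values are placeholders:

	static const struct i3c_device_id my_i3c_ids[] = {
		{
			.match_flags = I3C_MATCH_MANUF | I3C_MATCH_PART,
			.manuf_id = 0x11a,	/* placeholder manufacturer */
			.part_id = 0x42,	/* placeholder part */
		},
		{ /* sentinel */ }
	};
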
diff --git a/include/linux/module.h b/include/linux/module.h
index 6b21060..2755a57 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -823,7 +823,7 @@
 static inline void module_bug_cleanup(struct module *mod) {}
 #endif	/* CONFIG_GENERIC_BUG */
 
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
 extern bool retpoline_module_ok(bool has_retpoline);
 #else
 static inline bool retpoline_module_ok(bool has_retpoline)
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 5839d80..be8ec81 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -116,6 +116,8 @@
 	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
 #define for_each_msi_entry(desc, dev)	\
 	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+#define for_each_msi_entry_safe(desc, tmp, dev)	\
+	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
 
 #ifdef CONFIG_PCI_MSI
 #define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
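
The _safe variant mirrors list_for_each_entry_safe(): @tmp caches the next
descriptor so the current one may be unlinked and freed inside the loop, as in
a device-teardown path like this sketch:

	struct msi_desc *desc, *tmp;

	/* Safe against deletion: 'tmp' already points at the next entry. */
	for_each_msi_entry_safe(desc, tmp, dev) {
		list_del(&desc->list);
		free_msi_entry(desc);
	}
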
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index fd39487..6fc8151 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -702,6 +702,20 @@
 };
 
 /**
+* gsi_mhip_channel_scratch - MHI PRIME protocol SW config area of
+* channel scratch
+* @assert_bit_40: Valid only for non-host channels.
+* Set to 1 for MHI prime channels when running over PCIe.
+* @host_channel: Set to 1 for MHIP channel running on host.
+*
+*/
+struct __packed gsi_mhip_channel_scratch {
+	uint32_t assert_bit_40:1;
+	uint32_t host_channel:1;
+	uint32_t resvd1:30;
+};
+
+/**
  * gsi_11ad_rx_channel_scratch - 11AD protocol SW config area of
  * RX channel scratch
  *
@@ -789,6 +803,7 @@
 	struct __packed gsi_11ad_rx_channel_scratch rx_11ad;
 	struct __packed gsi_11ad_tx_channel_scratch tx_11ad;
 	struct __packed gsi_wdi3_channel_scratch wdi3;
+	struct __packed gsi_mhip_channel_scratch mhip;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;
@@ -829,6 +844,22 @@
 };
 
 /**
+* gsi_mhip_evt_scratch - MHI PRIME protocol SW config area of
+* event scratch
+*/
+struct __packed gsi_mhip_evt_scratch {
+	uint32_t rp_mod_threshold:8;
+	uint32_t rp_mod_timer:4;
+	uint32_t rp_mod_counter:8;
+	uint32_t rp_mod_timer_id:4;
+	uint32_t rp_mod_timer_running:1;
+	uint32_t resvd1:7;
+	uint32_t fixed_buffer_sz:16;
+	uint32_t resvd2:16;
+};
+
+
+/**
  * gsi_xdci_evt_scratch - xDCI protocol SW config area of
  * event scratch
  *
@@ -893,6 +924,7 @@
 	struct __packed gsi_wdi_evt_scratch wdi;
 	struct __packed gsi_11ad_evt_scratch w11ad;
 	struct __packed gsi_wdi3_evt_scratch wdi3;
+	struct __packed gsi_mhip_evt_scratch mhip;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 2b2a6dc..4c76fe2 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -11,6 +11,8 @@
 #define _LINUX_NETDEV_FEATURES_H
 
 #include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
 
 typedef u64 netdev_features_t;
 
@@ -154,8 +156,26 @@
 #define NETIF_F_HW_TLS_TX	__NETIF_F(HW_TLS_TX)
 #define NETIF_F_HW_TLS_RX	__NETIF_F(HW_TLS_RX)
 
-#define for_each_netdev_feature(mask_addr, bit)	\
-	for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+/* Finds the next feature with the highest number of the range of start-1 till 0.
+ */
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+{
+	/* like BITMAP_LAST_WORD_MASK() for u64
+	 * this sets the most significant 64 - start bits to 0.
+	 */
+	feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
+
+	return fls64(feature) - 1;
+}
+
+/* This walks the set feature bits from the MSB to the LSB;
+ * mask_addr should be a u64 and bit an int.
+ */
+#define for_each_netdev_feature(mask_addr, bit)				\
+	for ((bit) = find_next_netdev_feature((mask_addr),		\
+					      NETDEV_FEATURE_COUNT);	\
+	     (bit) >= 0;						\
+	     (bit) = find_next_netdev_feature((mask_addr), (bit)))
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
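
The iteration visits every set bit exactly once, from the most- down to the
least-significant, which lets dependent features be handled before the
features they rely on. A short sketch, with dev assumed to be a net_device:

	netdev_features_t features = dev->features;
	int bit;

	/* Walks set bits MSB -> LSB; the loop ends when bit goes negative. */
	for_each_netdev_feature(features, bit)
		pr_debug("feature bit %d is set\n", bit);
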
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e86a358..c765496 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1455,6 +1455,7 @@
  * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
  * @IFF_FAILOVER: device is a failover master device
  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
+ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
  */
 enum netdev_priv_flags {
 	IFF_802_1Q_VLAN			= 1<<0,
@@ -1486,6 +1487,7 @@
 	IFF_NO_RX_HANDLER		= 1<<26,
 	IFF_FAILOVER			= 1<<27,
 	IFF_FAILOVER_SLAVE		= 1<<28,
+	IFF_L3MDEV_RX_HANDLER		= 1<<29,
 };
 
 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
@@ -1516,6 +1518,7 @@
 #define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
 #define IFF_FAILOVER			IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
+#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
 
 /**
  *	struct net_device - The DEVICE structure.
@@ -4465,6 +4468,11 @@
 	return dev->priv_flags & IFF_SUPP_NOFCS;
 }
 
+static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
+}
+
 static inline bool netif_is_l3_master(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_L3MDEV_MASTER;
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 4a520d3..cf09ab3 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -62,18 +62,6 @@
 }
 #endif /* CONFIG_PROVE_LOCKING */
 
-/*
- * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
- *
- * @p: The pointer to read, prior to dereferencing
- * @ss: The nfnetlink subsystem ID
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
- */
-#define nfnl_dereference(p, ss)					\
-	rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
-
 #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
 	MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
 
diff --git a/include/linux/of.h b/include/linux/of.h
index 99b0ebf..40e58b0e 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -138,11 +138,16 @@
 extern struct device_node *of_stdout;
 extern raw_spinlock_t devtree_lock;
 
-/* flag descriptions (need to be visible even when !CONFIG_OF) */
-#define OF_DYNAMIC	1 /* node and properties were allocated via kmalloc */
-#define OF_DETACHED	2 /* node has been detached from the device tree */
-#define OF_POPULATED	3 /* device already created for the node */
-#define OF_POPULATED_BUS	4 /* of_platform_populate recursed to children of this node */
+/*
+ * struct device_node flag descriptions
+ * (need to be visible even when !CONFIG_OF)
+ */
+#define OF_DYNAMIC		1 /* (and properties) allocated via kmalloc */
+#define OF_DETACHED		2 /* detached from the device tree */
+#define OF_POPULATED		3 /* device already created */
+#define OF_POPULATED_BUS	4 /* platform bus created for children */
+#define OF_OVERLAY		5 /* allocated for an overlay */
+#define OF_OVERLAY_FREE_CSET	6 /* in overlay cset being freed */
 
 #define OF_BAD_ADDR	((u64)-1)
 
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1214cab..8634250 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -32,6 +32,7 @@
 }
 #endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
 
+extern int of_irq_domain_map(const struct irq_fwspec *in, struct irq_fwspec *out);
 extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
 extern int of_irq_parse_one(struct device_node *device, int index,
 			  struct of_phandle_args *out_irq);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 74bee8c..4d99504 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -69,13 +69,14 @@
  */
 enum pageflags {
 	PG_locked,		/* Page is locked. Don't touch. */
-	PG_error,
 	PG_referenced,
 	PG_uptodate,
 	PG_dirty,
 	PG_lru,
 	PG_active,
+	PG_workingset,
 	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
+	PG_error,
 	PG_slab,
 	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use*/
 	PG_arch_1,
@@ -280,6 +281,8 @@
 PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
 PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
 	TESTCLEARFLAG(Active, active, PF_HEAD)
+PAGEFLAG(Workingset, workingset, PF_HEAD)
+	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
 __PAGEFLAG(Slab, slab, PF_NO_TAIL)
 __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
 PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4f3baec..c337495 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -449,6 +449,11 @@
 	 * Filter events for PMU-specific reasons.
 	 */
 	int (*filter_match)		(struct perf_event *event); /* optional */
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
 };
 
 enum perf_addr_filter_action_t {
diff --git a/include/linux/pinctrl/qcom-pinctrl.h b/include/linux/pinctrl/qcom-pinctrl.h
new file mode 100644
index 0000000..1ea9a87
--- /dev/null
+++ b/include/linux/pinctrl/qcom-pinctrl.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __LINUX_PINCTRL_MSM_H__
+#define __LINUX_PINCTRL_MSM_H__
+
+/* APIs to access qup_i3c registers */
+int msm_qup_write(u32 mode, u32 val);
+int msm_qup_read(u32 mode);
+
+#endif /* __LINUX_PINCTRL_MSM_H__ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 100247c..dc56925 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -323,6 +323,7 @@
 	POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
 	POWER_SUPPLY_PROP_CC_SOC,
 	POWER_SUPPLY_PROP_BATT_AGE_LEVEL,
+	POWER_SUPPLY_PROP_SCALE_MODE_EN,
 	/* Charge pump properties */
 	POWER_SUPPLY_PROP_CP_STATUS1,
 	POWER_SUPPLY_PROP_CP_STATUS2,
diff --git a/include/linux/psi.h b/include/linux/psi.h
new file mode 100644
index 0000000..af892c2
--- /dev/null
+++ b/include/linux/psi.h
@@ -0,0 +1,62 @@
+#ifndef _LINUX_PSI_H
+#define _LINUX_PSI_H
+
+#include <linux/jump_label.h>
+#include <linux/psi_types.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+
+struct seq_file;
+struct css_set;
+
+#ifdef CONFIG_PSI
+
+extern struct static_key_false psi_disabled;
+
+void psi_init(void);
+
+void psi_task_change(struct task_struct *task, int clear, int set);
+
+void psi_memstall_tick(struct task_struct *task, int cpu);
+void psi_memstall_enter(unsigned long *flags);
+void psi_memstall_leave(unsigned long *flags);
+
+int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
+
+#ifdef CONFIG_CGROUPS
+int psi_cgroup_alloc(struct cgroup *cgrp);
+void psi_cgroup_free(struct cgroup *cgrp);
+void cgroup_move_task(struct task_struct *p, struct css_set *to);
+
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+			char *buf, size_t nbytes, enum psi_res res);
+void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
+
+__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
+			poll_table *wait);
+#endif
+
+#else /* CONFIG_PSI */
+
+static inline void psi_init(void) {}
+
+static inline void psi_memstall_enter(unsigned long *flags) {}
+static inline void psi_memstall_leave(unsigned long *flags) {}
+
+#ifdef CONFIG_CGROUPS
+static inline int psi_cgroup_alloc(struct cgroup *cgrp)
+{
+	return 0;
+}
+static inline void psi_cgroup_free(struct cgroup *cgrp)
+{
+}
+static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
+{
+	rcu_assign_pointer(p->cgroups, to);
+}
+#endif
+
+#endif /* CONFIG_PSI */
+
+#endif /* _LINUX_PSI_H */
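
Kernel paths that stall on memory bracket the stalled section with the
enter/leave pair; the caller-provided flags word lets the pair nest safely.
The canonical usage pattern:

	unsigned long pflags;

	psi_memstall_enter(&pflags);
	/* ... work that stalls on memory, e.g. direct reclaim or swap-in ... */
	psi_memstall_leave(&pflags);
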
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
new file mode 100644
index 0000000..07aaf9b
--- /dev/null
+++ b/include/linux/psi_types.h
@@ -0,0 +1,173 @@
+#ifndef _LINUX_PSI_TYPES_H
+#define _LINUX_PSI_TYPES_H
+
+#include <linux/kthread.h>
+#include <linux/seqlock.h>
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/wait.h>
+
+#ifdef CONFIG_PSI
+
+/* Tracked task states */
+enum psi_task_count {
+	NR_IOWAIT,
+	NR_MEMSTALL,
+	NR_RUNNING,
+	NR_PSI_TASK_COUNTS = 3,
+};
+
+/* Task state bitmasks */
+#define TSK_IOWAIT	(1 << NR_IOWAIT)
+#define TSK_MEMSTALL	(1 << NR_MEMSTALL)
+#define TSK_RUNNING	(1 << NR_RUNNING)
+
+/* Resources that workloads could be stalled on */
+enum psi_res {
+	PSI_IO,
+	PSI_MEM,
+	PSI_CPU,
+	NR_PSI_RESOURCES = 3,
+};
+
+/*
+ * Pressure states for each resource:
+ *
+ * SOME: Stalled tasks & working tasks
+ * FULL: Stalled tasks & no working tasks
+ */
+enum psi_states {
+	PSI_IO_SOME,
+	PSI_IO_FULL,
+	PSI_MEM_SOME,
+	PSI_MEM_FULL,
+	PSI_CPU_SOME,
+	/* Only per-CPU, to weigh the CPU in the global average: */
+	PSI_NONIDLE,
+	NR_PSI_STATES = 6,
+};
+
+enum psi_aggregators {
+	PSI_AVGS = 0,
+	PSI_POLL,
+	NR_PSI_AGGREGATORS,
+};
+
+struct psi_group_cpu {
+	/* 1st cacheline updated by the scheduler */
+
+	/* Aggregator needs to know of concurrent changes */
+	seqcount_t seq ____cacheline_aligned_in_smp;
+
+	/* States of the tasks belonging to this group */
+	unsigned int tasks[NR_PSI_TASK_COUNTS];
+
+	/* Aggregate pressure state derived from the tasks */
+	u32 state_mask;
+
+	/* Period time sampling buckets for each state of interest (ns) */
+	u32 times[NR_PSI_STATES];
+
+	/* Time of last task change in this group (rq_clock) */
+	u64 state_start;
+
+	/* 2nd cacheline updated by the aggregator */
+
+	/* Delta detection against the sampling buckets */
+	u32 times_prev[NR_PSI_AGGREGATORS][NR_PSI_STATES]
+			____cacheline_aligned_in_smp;
+};
+
+/* PSI growth tracking window */
+struct psi_window {
+	/* Window size in ns */
+	u64 size;
+
+	/* Start time of the current window in ns */
+	u64 start_time;
+
+	/* Value at the start of the window */
+	u64 start_value;
+
+	/* Value growth in the previous window */
+	u64 prev_growth;
+};
+
+struct psi_trigger {
+	/* PSI state being monitored by the trigger */
+	enum psi_states state;
+
+	/* User-specified threshold in ns */
+	u64 threshold;
+
+	/* List node inside triggers list */
+	struct list_head node;
+
+	/* Backpointer needed during trigger destruction */
+	struct psi_group *group;
+
+	/* Wait queue for polling */
+	wait_queue_head_t event_wait;
+
+	/* Pending event flag */
+	int event;
+
+	/* Tracking window */
+	struct psi_window win;
+
+	/*
+	 * Time last event was generated. Used for rate-limiting
+	 * events to one per window
+	 */
+	u64 last_event_time;
+
+	/* Refcounting to prevent premature destruction */
+	struct kref refcount;
+};
+
+struct psi_group {
+	/* Protects data used by the aggregator */
+	struct mutex avgs_lock;
+
+	/* Per-cpu task state & time tracking */
+	struct psi_group_cpu __percpu *pcpu;
+
+	/* Running pressure averages */
+	u64 avg_total[NR_PSI_STATES - 1];
+	u64 avg_last_update;
+	u64 avg_next_update;
+
+	/* Aggregator work control */
+	struct delayed_work avgs_work;
+
+	/* Total stall times and sampled pressure averages */
+	u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
+	unsigned long avg[NR_PSI_STATES - 1][3];
+
+	/* Monitor work control */
+	atomic_t poll_scheduled;
+	struct kthread_worker __rcu *poll_kworker;
+	struct kthread_delayed_work poll_work;
+
+	/* Protects data used by the monitor */
+	struct mutex trigger_lock;
+
+	/* Configured polling triggers */
+	struct list_head triggers;
+	u32 nr_triggers[NR_PSI_STATES - 1];
+	u32 poll_states;
+	u64 poll_min_period;
+
+	/* Total stall times at the start of monitor activation */
+	u64 polling_total[NR_PSI_STATES - 1];
+	u64 polling_next_update;
+	u64 polling_until;
+};
+
+#else /* CONFIG_PSI */
+
+struct psi_group { };
+
+#endif /* CONFIG_PSI */
+
+#endif /* _LINUX_PSI_TYPES_H */
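
psi_trigger_create() parses a trigger specification written by userspace.
Assuming the "<some|full> <threshold_us> <window_us>" format from the PSI
monitor interface, a userspace monitor is armed roughly like this sketch
(error handling elided):

	#include <fcntl.h>
	#include <poll.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Wake when memory stalls exceed 150ms in any 1s window. */
		const char trig[] = "some 150000 1000000";
		struct pollfd fds;

		fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
		write(fds.fd, trig, strlen(trig) + 1);
		fds.events = POLLPRI;

		while (poll(&fds, 1, -1) > 0) {
			if (fds.revents & POLLPRI) {
				/* pressure threshold breached; react here */
			}
		}
		return 0;
	}
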
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6894976..186cd8e 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -573,6 +573,8 @@
 		else if (destroy)
 			destroy(ptr);
 
+	if (producer >= size)
+		producer = 0;
 	__ptr_ring_set_size(r, size);
 	r->producer = producer;
 	r->consumer_head = 0;
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 56b803d..bb7faa6 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _LINUX_QCOM_GENI_SE
@@ -31,27 +31,40 @@
 
 /**
  * struct geni_se_rsc - GENI Serial Engine Resource
+ * @ctrl_dev:		Pointer to controller device.
  * @wrapper_dev:	Pointer to the parent QUPv3 core.
  * @se_clk:		Handle to the core serial engine clock.
  * @m_ahb_clk:		Handle to the primary AHB clock.
  * @s_ahb_clk:		Handle to the secondary AHB clock.
 * @ab_list:		List Head of Average bus bandwidth list.
+ * @ab_list_noc:	List Head of Average DDR path bus
+			bandwidth list.
  * @ab:			Average bus bandwidth request value.
+ * @ab_noc:		Average DDR path bus bandwidth request value.
 * @ib_list:		List Head of Instantaneous bus bandwidth list.
+ * @ib_list_noc:	List Head of Instantaneous DDR path bus
+			bandwidth list.
  * @ib:			Instantaneous bus bandwidth request value.
+ * @ib_noc:		Instantaneous DDR path bus bandwidth
+			request value.
  * @geni_pinctrl:	Handle to the pinctrl configuration.
  * @geni_gpio_active:	Handle to the default/active pinctrl state.
 * @geni_gpio_sleep:	Handle to the sleep pinctrl state.
  */
 struct se_geni_rsc {
+	struct device *ctrl_dev;
 	struct device *wrapper_dev;
 	struct clk *se_clk;
 	struct clk *m_ahb_clk;
 	struct clk *s_ahb_clk;
 	struct list_head ab_list;
+	struct list_head ab_list_noc;
 	unsigned long ab;
+	unsigned long ab_noc;
 	struct list_head ib_list;
+	struct list_head ib_list_noc;
 	unsigned long ib;
+	unsigned long ib_noc;
 	struct pinctrl *geni_pinctrl;
 	struct pinctrl_state *geni_gpio_active;
 	struct pinctrl_state *geni_gpio_sleep;
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 59ddf9a..2dd0a9e 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -663,6 +663,37 @@
 static inline void qed_chain_set_prod(struct qed_chain *p_chain,
 				      u32 prod_idx, void *p_prod_elem)
 {
+	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+		u32 cur_prod, page_mask, page_cnt, page_diff;
+
+		cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
+			   p_chain->u.chain32.prod_idx;
+
+		/* Assume the number of elements in a page is a power of 2 */
+		page_mask = ~p_chain->elem_per_page_mask;
+
+		/* Use "cur_prod - 1" and "prod_idx - 1" since producer index
+		 * reaches the first element of next page before the page index
+		 * is incremented. See qed_chain_produce().
+		 * Index wrap around is not a problem because the difference
+		 * between current and given producer indices is always
+		 * positive and lower than the chain's capacity.
+		 */
+		page_diff = (((cur_prod - 1) & page_mask) -
+			     ((prod_idx - 1) & page_mask)) /
+			    p_chain->elem_per_page;
+
+		page_cnt = qed_chain_get_page_cnt(p_chain);
+		if (is_chain_u16(p_chain))
+			p_chain->pbl.c.u16.prod_page_idx =
+				(p_chain->pbl.c.u16.prod_page_idx -
+				 page_diff + page_cnt) % page_cnt;
+		else
+			p_chain->pbl.c.u32.prod_page_idx =
+				(p_chain->pbl.c.u32.prod_page_idx -
+				 page_diff + page_cnt) % page_cnt;
+	}
+
 	if (is_chain_u16(p_chain))
 		p_chain->u.chain16.prod_idx = (u16) prod_idx;
 	else
diff --git a/include/linux/rq_stats.h b/include/linux/rq_stats.h
new file mode 100644
index 0000000..59440af
--- /dev/null
+++ b/include/linux/rq_stats.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2011,2013-2014,2019, The Linux Foundation. All rights reserved.
+ */
+
+struct rq_data {
+	unsigned long def_timer_jiffies;
+	unsigned long def_timer_last_jiffy;
+	int64_t def_start_time;
+	struct attribute_group *attr_group;
+	struct kobject *kobj;
+	struct work_struct def_timer_work;
+	int init;
+};
+
+extern spinlock_t rq_lock;
+extern struct rq_data rq_info;
+extern struct workqueue_struct *rq_wq;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b9d4c5c..8bd1a9b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -872,6 +872,10 @@
 	unsigned			sched_contributes_to_load:1;
 	unsigned			sched_migrated:1;
 	unsigned			sched_remote_wakeup:1;
+#ifdef CONFIG_PSI
+	unsigned			sched_psi_wake_requeue:1;
+#endif
+
 	/* Force alignment to the next boundary: */
 	unsigned			:0;
 
@@ -1129,6 +1133,10 @@
 	siginfo_t			*last_siginfo;
 
 	struct task_io_accounting	ioac;
+#ifdef CONFIG_PSI
+	/* Pressure stall state */
+	unsigned int			psi_flags;
+#endif
 #ifdef CONFIG_TASK_XACCT
 	/* Accumulated RSS usage: */
 	u64				acct_rss_mem1;
@@ -1559,6 +1567,7 @@
 #define PF_WAKE_UP_IDLE         0x01000000	/* TTWU on an idle CPU */
 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
+#define PF_MEMSTALL		0x10000000	/* Stalled due to lack of memory */
 #define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
 #define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ec912d0..ecdc654 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,6 +71,7 @@
 #define MMF_HUGE_ZERO_PAGE	23      /* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
 #define MMF_OOM_VICTIM		25	/* mm is the oom victim */
+#define MMF_OOM_REAP_QUEUED	26	/* mm was queued for oom_reaper */
 #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index 2ffcb31..9615657 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -2,7 +2,6 @@
 #ifndef _LINUX_SCHED_CPUFREQ_H
 #define _LINUX_SCHED_CPUFREQ_H
 
-#include <linux/cpufreq.h>
 #include <linux/types.h>
 
 /*
@@ -35,12 +34,4 @@
 }
 #endif /* CONFIG_CPU_FREQ */
 
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-			struct cpufreq_governor *old_gov);
-#else
-static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-			struct cpufreq_governor *old_gov) { }
-#endif
-
 #endif /* _LINUX_SCHED_CPUFREQ_H */
diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h
index 80bc84b..4859bea 100644
--- a/include/linux/sched/loadavg.h
+++ b/include/linux/sched/loadavg.h
@@ -22,10 +22,26 @@
 #define EXP_5		2014		/* 1/exp(5sec/5min) */
 #define EXP_15		2037		/* 1/exp(5sec/15min) */
 
-#define CALC_LOAD(load,exp,n) \
-	load *= exp; \
-	load += n*(FIXED_1-exp); \
-	load >>= FSHIFT;
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
+static inline unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+	unsigned long newload;
+
+	newload = load * exp + active * (FIXED_1 - exp);
+	if (active >= load)
+		newload += FIXED_1-1;
+
+	return newload / FIXED_1;
+}
+
+extern unsigned long calc_load_n(unsigned long load, unsigned long exp,
+				 unsigned long active, unsigned int n);
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
 
 extern void calc_global_load(unsigned long ticks);
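
calc_load() works in FSHIFT fixed point: @load and @active both carry FIXED_1
scaling, and the added rounding term biases upward while activity is rising so
short spikes are not averaged away to zero. A small sketch of one load-average
fold and its display:

	unsigned long avg = 0;
	unsigned long nr_active = 3;	/* runnable tasks sampled this tick */

	/* Fold the sample into the 1-minute average (EXP_1 decay). */
	avg = calc_load(avg, EXP_1, nr_active * FIXED_1);

	/* Render as "%lu.%02lu", the /proc/loadavg style. */
	printk("load: %lu.%02lu\n", LOAD_INT(avg), LOAD_FRAC(avg));
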
 
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 1a356250..229b7c2 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -33,9 +33,6 @@
 extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
 extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
 #ifdef CONFIG_SCHED_WALT
-extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int sysctl_sched_use_walt_task_util;
-extern unsigned int sysctl_sched_walt_init_task_load_pct;
 extern unsigned int sysctl_sched_cpu_high_irqload;
 extern unsigned int sysctl_sched_boost;
 extern unsigned int sysctl_sched_group_upmigrate_pct;
@@ -129,13 +126,6 @@
 					size_t *lenp, loff_t *ppos);
 #endif
 
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-extern unsigned int sysctl_sched_energy_aware;
-extern int sched_energy_aware_handler(struct ctl_table *table, int write,
-				 void __user *buffer, size_t *lenp,
-				 loff_t *ppos);
-#endif
-
 #define LIB_PATH_LENGTH 512
 extern char sched_lib_name[LIB_PATH_LENGTH];
 extern unsigned int sched_lib_mask_check;
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index abe28d5..3423919 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -13,6 +13,8 @@
 
 extern void sched_clock_register(u64 (*read)(void), int bits,
 				 unsigned long rate);
+extern int sched_clock_suspend(void);
+extern void sched_clock_resume(void);
 #else
 static inline void generic_sched_clock_init(void) { }
 
@@ -20,6 +22,8 @@
 					unsigned long rate)
 {
 }
+static inline int sched_clock_suspend(void) { return 0; }
+static inline void sched_clock_resume(void) { }
 #endif
 
 #endif
diff --git a/include/linux/security.h b/include/linux/security.h
index 75f4156..c115dd2 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -31,6 +31,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/bio.h>
 
 struct linux_binprm;
 struct cred;
@@ -283,6 +284,8 @@
 				     const struct qstr *qstr, const char **name,
 				     void **value, size_t *len);
 int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+					umode_t mode);
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
 			 struct dentry *new_dentry);
 int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -671,6 +674,13 @@
 	return 0;
 }
 
+static inline int security_inode_post_create(struct inode *dir,
+					 struct dentry *dentry,
+					 umode_t mode)
+{
+	return 0;
+}
+
 static inline int security_inode_link(struct dentry *old_dentry,
 				       struct inode *dir,
 				       struct dentry *new_dentry)
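
[Editor's note] security_inode_post_create() mirrors security_inode_create() but fires after the inode exists, so a security module can act on the created object (this tree uses it for per-file encryption setup). A sketch of the intended ordering around a create path; the myfs_* names are hypothetical, and in the real tree the hooks are invoked from VFS/filesystem glue rather than exactly like this:

static int myfs_create(struct inode *dir, struct dentry *dentry,
		       umode_t mode)
{
	int err;

	err = security_inode_create(dir, dentry, mode);	/* may veto */
	if (err)
		return err;

	err = myfs_alloc_inode(dir, dentry, mode);	/* hypothetical */
	if (err)
		return err;

	/* new hook: dentry->d_inode is valid here, LSM may label it */
	return security_inode_post_create(dir, dentry, mode);
}
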
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 60a2e76..a404d47 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2392,7 +2392,7 @@
 
 	if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
 		skb_set_transport_header(skb, keys.control.thoff);
-	else
+	else if (offset_hint >= 0)
 		skb_set_transport_header(skb, offset_hint);
 }
 
@@ -3178,6 +3178,7 @@
  *
  *	This is exactly the same as pskb_trim except that it ensures the
  *	checksum of received packets are still valid after the operation.
+ *	It can change skb pointers.
  */
 
 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
diff --git a/include/linux/soc/qcom/irq.h b/include/linux/soc/qcom/irq.h
new file mode 100644
index 0000000..0010148
--- /dev/null
+++ b/include/linux/soc/qcom/irq.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCOM_IRQ_H
+#define __QCOM_IRQ_H
+
+#include <linux/irqdomain.h>
+
+/**
+ * struct qcom_irq_fwspec - qcom specific irq fwspec wrapper
+ * @fwspec: irq fwspec
+ * @mask: if true, keep the irq masked in the gpio controller
+ *
+ * Use this structure to communicate from the parent irq chip (MPM or PDC)
+ * to the gpio chip (TLMM) that the gpio has been allocated in the parent,
+ * and whether the gpio chip should keep the line masked because the parent
+ * irq chip is handling everything about the irq line.
+ */
+struct qcom_irq_fwspec {
+	struct irq_fwspec fwspec;
+	bool mask;
+};
+
+#endif
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 7ddfc65..4335bd7 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -184,6 +184,7 @@
 	struct clk *pclk;
 	struct clk *clk_ptp_ref;
 	unsigned int clk_ptp_rate;
+	unsigned int clk_ref_rate;
 	struct reset_control *stmmac_rst;
 	struct stmmac_axi *axi;
 	int has_gmac4;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 73e130a..fdb6b31 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -295,9 +295,12 @@
 	struct svc_cacherep *	rq_cacherep;	/* cache info */
 	struct task_struct	*rq_task;	/* service thread */
 	spinlock_t		rq_lock;	/* per-request lock */
+	struct net		*rq_bc_net;	/* pointer to backchannel's
+						 * net namespace
+						 */
 };
 
-#define SVC_NET(svc_rqst)	(svc_rqst->rq_xprt->xpt_net)
+#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
 
 /*
  * Rigorous type checking on sockaddr type conversions
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 336fd1a..f30bf50 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -443,6 +443,11 @@
 	return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
 }
 
+static inline int xprt_close_wait(struct rpc_xprt *xprt)
+{
+	return test_bit(XPRT_CLOSE_WAIT, &xprt->state);
+}
+
 static inline void xprt_set_bound(struct rpc_xprt *xprt)
 {
 	test_and_set_bit(XPRT_BOUND, &xprt->state);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4750036..5523159 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -233,7 +233,6 @@
 	unsigned long	flags;		/* SWP_USED etc: see above */
 	signed short	prio;		/* swap priority of this type */
 	struct plist_node list;		/* entry in swap_active_head */
-	struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
 	signed char	type;		/* strange name for an index */
 	unsigned int	max;		/* extent of the swap_map */
 	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
@@ -276,6 +275,16 @@
 	struct swap_cluster_list discard_clusters; /* discard clusters list */
 	unsigned int write_pending;
 	unsigned int max_writes;
+	struct plist_node avail_lists[0]; /*
+					   * entries in swap_avail_heads, one
+					   * entry per node.
+					   * Must be last as the array length
+					   * is nr_node_ids, which is not a
+					   * fixed value, so it has to be
+					   * allocated dynamically.
+					   * And it has to be an array so that
+					   * plist_for_each_* can work.
+					   */
 };
 
 #ifdef CONFIG_64BIT
@@ -299,7 +308,7 @@
 
 /* linux/mm/workingset.c */
 void *workingset_eviction(struct address_space *mapping, struct page *page);
-bool workingset_refault(void *shadow);
+void workingset_refault(struct page *page, void *shadow);
 void workingset_activation(struct page *page);
 
 /* Do not use directly, use workingset_lookup_update */
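
[Editor's note] Because avail_lists[] is now a trailing variable-length array, the swap_info_struct allocation in swapfile.c has to reserve nr_node_ids entries. A hedged sketch of what that sizing looks like on the allocation side:

	struct swap_info_struct *p;
	unsigned int size = sizeof(*p) +
			    nr_node_ids * sizeof(struct plist_node);
	int i;

	p = kvzalloc(size, GFP_KERNEL);	/* one plist node per NUMA node */
	if (!p)
		return ERR_PTR(-ENOMEM);

	for_each_node(i)
		plist_node_init(&p->avail_lists[i], 0);
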
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index b9626aa..3e2a80c 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -39,12 +39,13 @@
 
 static inline u32 t10_pi_ref_tag(struct request *rq)
 {
+	unsigned int shift = ilog2(queue_logical_block_size(rq->q));
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-	return blk_rq_pos(rq) >>
-		(rq->q->integrity.interval_exp - 9) & 0xffffffff;
-#else
-	return -1U;
+	if (rq->q->integrity.interval_exp)
+		shift = rq->q->integrity.interval_exp;
 #endif
+	return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
 }
 
 extern const struct blk_integrity_profile t10_pi_type1_crc;
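
[Editor's note] The rewritten helper derives the shift from the queue's logical block size via ilog2() and only overrides it when the integrity profile declares its own interval, instead of returning -1 when CONFIG_BLK_DEV_INTEGRITY is off. The reference tag is just the 512-byte sector number rescaled to that interval; worked numbers for a 4K-block device:

	/* 4096-byte logical blocks, no explicit integrity interval */
	unsigned int shift = ilog2(4096);	/* 12 */
	sector_t pos = 262144;			/* blk_rq_pos(): 512B sectors */
	u32 ref_tag = pos >> (shift - SECTOR_SHIFT) & 0xffffffff;
	/* 262144 >> 3 == 32768: the index of the 4K block on disk */
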
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
new file mode 100644
index 0000000..8def143
--- /dev/null
+++ b/include/linux/usb/f_mtp.h
@@ -0,0 +1,53 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_MTP_H
+#define __LINUX_USB_F_MTP_H
+
+#include <uapi/linux/usb/f_mtp.h>
+#include <linux/ioctl.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
+struct __compat_mtp_file_range {
+	compat_int_t	fd;
+	compat_loff_t	offset;
+	int64_t		length;
+	uint16_t	command;
+	uint32_t	transaction_id;
+};
+
+struct __compat_mtp_event {
+	compat_size_t	length;
+	compat_caddr_t	data;
+};
+
+#define COMPAT_MTP_SEND_FILE              _IOW('M', 0, \
+						struct __compat_mtp_file_range)
+#define COMPAT_MTP_RECEIVE_FILE           _IOW('M', 1, \
+						struct __compat_mtp_file_range)
+#define COMPAT_MTP_SEND_EVENT             _IOW('M', 3, \
+						struct __compat_mtp_event)
+#define COMPAT_MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, \
+						struct __compat_mtp_file_range)
+#endif
+#endif
+#endif /* __LINUX_USB_F_MTP_H */
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 7e7fbfb..50c74a7 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -89,6 +89,7 @@
 	enum typec_port_data data;
 	enum typec_role default_role;
 	bool try_role_hw;	/* try.{src,snk} implemented in hardware */
+	bool self_powered;	/* port belongs to a self powered device */
 
 	const struct typec_altmode_desc *alt_modes;
 };
diff --git a/include/linux/usb/usbpd.h b/include/linux/usb/usbpd.h
index 2c7ff09..286c566 100644
--- a/include/linux/usb/usbpd.h
+++ b/include/linux/usb/usbpd.h
@@ -99,6 +99,8 @@
  *         otherwise ORIENTATION_NONE if not attached
  */
 enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd);
+
+void usbpd_vdm_in_suspend(struct usbpd *pd, bool in_suspend);
 #else
 static inline struct usbpd *devm_usbpd_get_by_phandle(struct device *dev,
 		const char *phandle)
@@ -134,6 +136,8 @@
 {
 	return ORIENTATION_NONE;
 }
+
+static inline void usbpd_vdm_in_suspend(struct usbpd *pd, bool in_suspend) { }
 #endif /* IS_ENABLED(CONFIG_USB_PD_POLICY) */
 
 /*
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index cb462f9..e0348cb 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -57,6 +57,25 @@
 
 		if (!skb_partial_csum_set(skb, start, off))
 			return -EINVAL;
+	} else {
+		/* gso packets without NEEDS_CSUM do not set transport_offset.
+		 * probe and drop if it does not match one of the above types.
+		 */
+		if (gso_type && skb->network_header) {
+			if (!skb->protocol)
+				virtio_net_hdr_set_proto(skb, hdr);
+retry:
+			skb_probe_transport_header(skb, -1);
+			if (!skb_transport_header_was_set(skb)) {
+				/* UFO does not specify ipv4 or 6: try both */
+				if (gso_type & SKB_GSO_UDP &&
+				    skb->protocol == htons(ETH_P_IP)) {
+					skb->protocol = htons(ETH_P_IPV6);
+					goto retry;
+				}
+				return -EINVAL;
+			}
+		}
 	}
 
 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
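
[Editor's note] Callers are unchanged: drivers such as tun/tap still pass descriptors through virtio_net_hdr_to_skb(), and the new branch only affects GSO packets that lack VIRTIO_NET_HDR_F_NEEDS_CSUM, which are now probed and rejected when the claimed type cannot be verified. A hedged caller-side sketch, with skb and the iov_iter "from" assumed from the surrounding receive path:

	struct virtio_net_hdr hdr;

	if (copy_from_iter(&hdr, sizeof(hdr), from) != sizeof(hdr))
		return -EFAULT;

	if (virtio_net_hdr_to_skb(skb, &hdr,
				  virtio_legacy_is_little_endian())) {
		/* malformed GSO metadata is rejected here now */
		kfree_skb(skb);
		return -EINVAL;
	}
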
diff --git a/include/media/cec.h b/include/media/cec.h
index 9b7394a..dc4b412 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -155,6 +155,7 @@
 	unsigned int transmit_queue_sz;
 	struct list_head wait_queue;
 	struct cec_data *transmitting;
+	bool transmit_in_progress;
 
 	struct task_struct *kthread_config;
 	struct completion config_completion;
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 3f9aea8..8b7eb46 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -201,6 +201,18 @@
 
 void __ax25_put_route(ax25_route *ax25_rt);
 
+extern rwlock_t ax25_route_lock;
+
+static inline void ax25_route_lock_use(void)
+{
+	read_lock(&ax25_route_lock);
+}
+
+static inline void ax25_route_lock_unuse(void)
+{
+	read_unlock(&ax25_route_lock);
+}
+
 static inline void ax25_put_route(ax25_route *ax25_rt)
 {
 	if (refcount_dec_and_test(&ax25_rt->refcount))
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index ec9d6bc..fabee6d 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -276,7 +276,7 @@
 int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
 int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
 
-void bt_accept_enqueue(struct sock *parent, struct sock *sk);
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
 void bt_accept_unlink(struct sock *sk);
 struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 80e2183..3c56de5a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -6,7 +6,7 @@
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014 Intel Mobile Communications GmbH
  * Copyright 2015-2017	Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -2008,6 +2008,8 @@
  * @signal: signal strength value (type depends on the wiphy's signal_type)
  * @chains: bitmask for filled values in @chain_signal.
  * @chain_signal: per-chain signal strength of last received BSS in dBm.
+ * @bssid_index: index in the multiple BSS set
+ * @max_bssid_indicator: max number of members in the BSS set
  * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
  */
 struct cfg80211_bss {
@@ -2019,6 +2021,8 @@
 	const struct cfg80211_bss_ies __rcu *proberesp_ies;
 
 	struct cfg80211_bss *hidden_beacon_bss;
+	struct cfg80211_bss *transmitted_bss;
+	struct list_head nontrans_list;
 
 	s32 signal;
 
@@ -2029,19 +2033,36 @@
 	u8 chains;
 	s8 chain_signal[IEEE80211_MAX_CHAINS];
 
+	u8 bssid_index;
+	u8 max_bssid_indicator;
+
 	u8 priv[0] __aligned(sizeof(void *));
 };
 
 /**
- * ieee80211_bss_get_ie - find IE with given ID
+ * ieee80211_bss_get_elem - find element with given ID
  * @bss: the bss to search
- * @ie: the IE ID
+ * @id: the element ID
  *
  * Note that the return value is an RCU-protected pointer, so
  * rcu_read_lock() must be held when calling this function.
  * Return: %NULL if not found.
  */
-const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
+const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id);
+
+/**
+ * ieee80211_bss_get_ie - find IE with given ID
+ * @bss: the bss to search
+ * @id: the element ID
+ *
+ * Note that the return value is an RCU-protected pointer, so
+ * rcu_read_lock() must be held when calling this function.
+ * Return: %NULL if not found.
+ */
+static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
+{
+	return (void *)ieee80211_bss_get_elem(bss, id);
+}
 
 
 /**
@@ -3999,6 +4020,12 @@
  * @txq_limit: configuration of internal TX queue frame limit
  * @txq_memory_limit: configuration internal TX queue memory limit
  * @txq_quantum: configuration of internal TX queue scheduler quantum
+ *
+ * @support_mbssid: can HW support association with nontransmitted AP
+ * @support_only_he_mbssid: don't parse MBSSID elements if it is not
+ *	an HE AP, in order to avoid compatibility issues.
+ *	@support_mbssid must be set for this to have any effect.
+ *
  */
 struct wiphy {
 	/* assign these fields before you register the wiphy */
@@ -4137,6 +4164,9 @@
 	u32 txq_memory_limit;
 	u32 txq_quantum;
 
+	u8 support_mbssid:1,
+	   support_only_he_mbssid:1;
+
 	char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -4677,6 +4707,33 @@
 				    struct cfg80211_qos_map *qos_map);
 
 /**
+ * cfg80211_find_elem_match - match information element and byte array in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ * @match: byte array to match
+ * @match_len: number of bytes in the match array
+ * @match_offset: offset in the IE data where the byte array should match.
+ *	Note the difference to cfg80211_find_ie_match() which considers
+ *	the offset to start from the element ID byte, but here we take
+ *	the data portion instead.
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match; otherwise return the
+ * requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data and being large enough for the
+ * byte array to match.
+ */
+const struct element *
+cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len,
+			 const u8 *match, unsigned int match_len,
+			 unsigned int match_offset);
+
+/**
  * cfg80211_find_ie_match - match information element and byte array in data
  *
  * @eid: element ID
@@ -4700,9 +4757,44 @@
  * having to fit into the given data and being large enough for the
  * byte array to match.
  */
-const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
-				 const u8 *match, int match_len,
-				 int match_offset);
+static inline const u8 *
+cfg80211_find_ie_match(u8 eid, const u8 *ies, unsigned int len,
+		       const u8 *match, unsigned int match_len,
+		       unsigned int match_offset)
+{
+	/* match_offset can't be smaller than 2, unless match_len is
+	 * zero, in which case match_offset must be zero as well.
+	 */
+	if (WARN_ON((match_len && match_offset < 2) ||
+		    (!match_len && match_offset)))
+		return NULL;
+
+	return (void *)cfg80211_find_elem_match(eid, ies, len,
+						match, match_len,
+						match_offset ?
+							match_offset - 2 : 0);
+}
+
+/**
+ * cfg80211_find_elem - find information element in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match; otherwise return the
+ * requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const struct element *
+cfg80211_find_elem(u8 eid, const u8 *ies, int len)
+{
+	return cfg80211_find_elem_match(eid, ies, len, NULL, 0, 0);
+}
 
 /**
  * cfg80211_find_ie - find information element in data
@@ -4725,6 +4817,28 @@
 }
 
 /**
+ * cfg80211_find_ext_elem - find information element with EID Extension in data
+ *
+ * @ext_eid: element ID Extension
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the extended element could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match; otherwise return the
+ * requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const struct element *
+cfg80211_find_ext_elem(u8 ext_eid, const u8 *ies, int len)
+{
+	return cfg80211_find_elem_match(WLAN_EID_EXTENSION, ies, len,
+					&ext_eid, 1, 0);
+}
+
+/**
  * cfg80211_find_ext_ie - find information element with EID Extension in data
  *
  * @ext_eid: element ID Extension
@@ -4746,6 +4860,25 @@
 }
 
 /**
+ * cfg80211_find_vendor_elem - find vendor specific information element in data
+ *
+ * @oui: vendor OUI
+ * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the vendor specific element ID could not be found or if the
+ * element is invalid (claims to be longer than the given data); otherwise
+ * return the element structure for the requested element.
+ *
+ * Note: There are no checks on the element length other than having to fit into
+ * the given data.
+ */
+const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type,
+						const u8 *ies,
+						unsigned int len);
+
+/**
  * cfg80211_find_vendor_ie - find vendor specific information element in data
  *
  * @oui: vendor OUI
@@ -4761,8 +4894,12 @@
  * Note: There are no checks on the element length other than having to fit into
  * the given data.
  */
-const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
-				  const u8 *ies, int len);
+static inline const u8 *
+cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
+			const u8 *ies, unsigned int len)
+{
+	return (void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
+}
 
 /**
  * DOC: Regulatory enforcement infrastructure
@@ -4997,6 +5134,29 @@
 }
 
 /**
+ * cfg80211_gen_new_bssid - generate a nontransmitted BSSID for multi-BSSID
+ * @bssid: transmitter BSSID
+ * @max_bssid: max BSSID indicator, taken from Multiple BSSID element
+ * @mbssid_index: BSSID index, taken from Multiple BSSID index element
+ * @new_bssid_addr: address of the resulting BSSID
+ */
+static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
+					  u8 mbssid_index, u8 *new_bssid_addr)
+{
+	u64 bssid_tmp, new_bssid;
+	u64 lsb_n;
+
+	bssid_tmp = ether_addr_to_u64(bssid);
+
+	lsb_n = bssid_tmp & ((1 << max_bssid) - 1);
+	new_bssid = bssid_tmp;
+	new_bssid &= ~((1 << max_bssid) - 1);
+	new_bssid |= (lsb_n + mbssid_index) % (1 << max_bssid);
+
+	u64_to_ether_addr(new_bssid, new_bssid_addr);
+}
+
+/**
  * enum cfg80211_bss_frame_type - frame type that the BSS data came from
  * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is
  *	from a beacon or probe response
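
[Editor's note] cfg80211_gen_new_bssid() implements the 802.11 multiple-BSSID address derivation: only the low max_bssid bits of the transmitter address change, wrapping modulo 2^max_bssid. A worked example:

	u8 tx_bssid[ETH_ALEN] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xf0 };
	u8 new_bssid[ETH_ALEN];

	/* max BSSID indicator 4 => a set of 2^4 = 16 BSSIDs */
	cfg80211_gen_new_bssid(tx_bssid, 4, 5, new_bssid);
	/* low 4 bits become (0x0 + 5) % 16, giving aa:bb:cc:dd:ee:f5 */
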
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index 93cb55d..922e661 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -160,6 +160,7 @@
 extern int cnss_get_platform_cap(struct device *dev,
 				 struct cnss_platform_cap *cap);
 extern struct dma_iommu_mapping *cnss_smmu_get_mapping(struct device *dev);
+extern struct iommu_domain *cnss_smmu_get_domain(struct device *dev);
 extern int cnss_smmu_map(struct device *dev,
 			 phys_addr_t paddr, uint32_t *iova_addr, size_t size);
 extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info);
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 3ef2743..8665bf2 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -22,6 +22,7 @@
 
 #include <net/inet_sock.h>
 #include <net/snmp.h>
+#include <net/ip.h>
 
 struct icmp_err {
   int		errno;
@@ -39,7 +40,13 @@
 struct sk_buff;
 struct net;
 
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+		 const struct ip_options *opt);
+static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+	__icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+}
+
 int icmp_rcv(struct sk_buff *skb);
 void icmp_err(struct sk_buff *skb, u32 info);
 int icmp_init(void);
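
[Editor's note] icmp_send() keeps its old signature as a wrapper, while __icmp_send() lets a caller supply IP options explicitly for paths where IPCB(skb)->opt no longer reflects the received header. A hedged sketch, modeled on re-parsing the options with the new __ip_options_compile() before reporting an error (net and skb assumed from the surrounding receive path):

	struct ip_options opt = { };
	__be32 info = 0;

	/* re-derive the options from the packet itself */
	if (__ip_options_compile(net, &opt, skb, &info))
		__icmp_send(skb, ICMP_PARAMETERPROB, 0, info, &opt);
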
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 00b5e78..74ff688 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -39,6 +39,7 @@
 
 	u32			metrics[RTAX_MAX];
 	u32			rate_tokens;	/* rate limiting for ICMP */
+	u32			n_redirects;
 	unsigned long		rate_last;
 	/*
 	 * Once inet_peer is queued for deletion (refcnt == 0), following field
diff --git a/include/net/ip.h b/include/net/ip.h
index ddaa2bb5..0693b82 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -641,6 +641,8 @@
 }
 
 void ip_options_fragment(struct sk_buff *skb);
+int __ip_options_compile(struct net *net, struct ip_options *opt,
+			 struct sk_buff *skb, __be32 *info);
 int ip_options_compile(struct net *net, struct ip_options *opt,
 		       struct sk_buff *skb);
 int ip_options_get(struct net *net, struct ip_options_rcu **optp,
@@ -690,7 +692,7 @@
 int ip_misc_proc_init(void);
 #endif
 
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
 				struct netlink_ext_ack *extack);
 
 #endif	/* _IP_H */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index c9b7b13..95eed32 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -230,7 +230,7 @@
 		     struct netlink_ext_ack *extack);
 int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
 		   struct netlink_callback *cb);
-int fib_table_flush(struct net *net, struct fib_table *table);
+int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
 struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
 void fib_table_flush_external(struct fib_table *table);
 void fib_free_table(struct fib_table *tb);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index b0d022f..e114235 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -326,6 +326,26 @@
 int ip_tunnel_encap_setup(struct ip_tunnel *t,
 			  struct ip_tunnel_encap *ipencap);
 
+static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+{
+	int nhlen;
+
+	switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_IPV6)
+	case htons(ETH_P_IPV6):
+		nhlen = sizeof(struct ipv6hdr);
+		break;
+#endif
+	case htons(ETH_P_IP):
+		nhlen = sizeof(struct iphdr);
+		break;
+	default:
+		nhlen = 0;
+	}
+
+	return pskb_network_may_pull(skb, nhlen);
+}
+
 static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
 {
 	const struct ip_tunnel_encap_ops *ops;
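
[Editor's note] Tunnel transmit paths are expected to call pskb_inet_may_pull() before dereferencing the inner network header, so an undersized packet is dropped instead of read past its end. A sketch of the call-site shape; the mytun_* names are hypothetical:

static netdev_tx_t mytun_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!pskb_inet_may_pull(skb)) {
		/* inner IPv4/IPv6 header not pullable: drop, don't parse */
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* ip_hdr(skb)/ipv6_hdr(skb) are safe to use from here on */
	return mytun_do_xmit(skb, dev);	/* hypothetical helper */
}
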
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 3832099..1284876 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -142,7 +142,8 @@
 
 	if (netif_is_l3_slave(skb->dev))
 		master = netdev_master_upper_dev_get_rcu(skb->dev);
-	else if (netif_is_l3_master(skb->dev))
+	else if (netif_is_l3_master(skb->dev) ||
+		 netif_has_l3_rx_handler(skb->dev))
 		master = skb->dev;
 
 	if (master && master->l3mdev_ops->l3mdev_l3_rcv)
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 4b2b2ba..f32fc82 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -5,17 +5,10 @@
 
 struct nf_conncount_data;
 
-enum nf_conncount_list_add {
-	NF_CONNCOUNT_ADDED, 	/* list add was ok */
-	NF_CONNCOUNT_ERR,	/* -ENOMEM, must drop skb */
-	NF_CONNCOUNT_SKIP,	/* list is already reclaimed by gc */
-};
-
 struct nf_conncount_list {
 	spinlock_t list_lock;
 	struct list_head head;	/* connections with the same filtering key */
 	unsigned int count;	/* length of list */
-	bool dead;
 };
 
 struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
@@ -29,18 +22,12 @@
 				const struct nf_conntrack_tuple *tuple,
 				const struct nf_conntrack_zone *zone);
 
-void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
-			 const struct nf_conntrack_tuple *tuple,
-			 const struct nf_conntrack_zone *zone,
-			 bool *addit);
+int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
+		     const struct nf_conntrack_tuple *tuple,
+		     const struct nf_conntrack_zone *zone);
 
 void nf_conncount_list_init(struct nf_conncount_list *list);
 
-enum nf_conncount_list_add
-nf_conncount_add(struct nf_conncount_list *list,
-		 const struct nf_conntrack_tuple *tuple,
-		 const struct nf_conntrack_zone *zone);
-
 bool nf_conncount_gc_list(struct net *net,
 			  struct nf_conncount_list *list);
 
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 0e355f4..0a3de10 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -84,7 +84,6 @@
 struct nf_flow_route {
 	struct {
 		struct dst_entry	*dst;
-		int			ifindex;
 	} tuple[FLOW_OFFLOAD_DIR_MAX];
 };
 
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a6d0009..c44da48 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -47,7 +47,10 @@
 struct qdisc_skb_head {
 	struct sk_buff	*head;
 	struct sk_buff	*tail;
-	__u32		qlen;
+	union {
+		u32		qlen;
+		atomic_t	atomic_qlen;
+	};
 	spinlock_t	lock;
 };
 
@@ -384,27 +387,19 @@
 	BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
-{
-	return this_cpu_ptr(q->cpu_qstats)->qlen;
-}
-
 static inline int qdisc_qlen(const struct Qdisc *q)
 {
 	return q->q.qlen;
 }
 
-static inline int qdisc_qlen_sum(const struct Qdisc *q)
+static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
 {
-	__u32 qlen = q->qstats.qlen;
-	int i;
+	u32 qlen = q->qstats.qlen;
 
-	if (q->flags & TCQ_F_NOLOCK) {
-		for_each_possible_cpu(i)
-			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
-	} else {
+	if (q->flags & TCQ_F_NOLOCK)
+		qlen += atomic_read(&q->q.atomic_qlen);
+	else
 		qlen += q->q.qlen;
-	}
 
 	return qlen;
 }
@@ -776,14 +771,14 @@
 	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
 }
 
-static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
 {
-	this_cpu_inc(sch->cpu_qstats->qlen);
+	atomic_inc(&sch->q.atomic_qlen);
 }
 
-static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
 {
-	this_cpu_dec(sch->cpu_qstats->qlen);
+	atomic_dec(&sch->q.atomic_qlen);
 }
 
 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
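
[Editor's note] With the per-cpu qlen counter gone, NOLOCK qdiscs account queue length through a single atomic that shares storage with the locked-path u32 via the union above. A sketch of how the enqueue/dequeue sides pair the helpers (my_* names hypothetical; backlog accounting stays per-cpu):

static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	my_push(sch, skb);			/* hypothetical queue op */
	qdisc_qstats_cpu_backlog_inc(sch, skb);
	qdisc_qstats_atomic_qlen_inc(sch);	/* one shared atomic */
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *my_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = my_pop(sch);	/* hypothetical queue op */

	if (skb) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_qstats_atomic_qlen_dec(sch);
	}
	return skb;
}
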
diff --git a/include/net/sock.h b/include/net/sock.h
index f18dbd6..8f44733 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -298,6 +298,7 @@
   *	@sk_filter: socket filtering instructions
   *	@sk_timer: sock cleanup timer
   *	@sk_stamp: time stamp of last packet received
+  *	@sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
   *	@sk_tsflags: SO_TIMESTAMPING socket options
   *	@sk_tskey: counter to disambiguate concurrent tstamp requests
   *	@sk_zckey: counter to order MSG_ZEROCOPY notifications
@@ -474,6 +475,9 @@
 	const struct cred	*sk_peer_cred;
 	long			sk_rcvtimeo;
 	ktime_t			sk_stamp;
+#if BITS_PER_LONG==32
+	seqlock_t		sk_stamp_seq;
+#endif
 	u16			sk_tsflags;
 	u8			sk_shutdown;
 	u32			sk_tskey;
@@ -2290,6 +2294,34 @@
 	atomic_add(segs, &sk->sk_drops);
 }
 
+static inline ktime_t sock_read_timestamp(struct sock *sk)
+{
+#if BITS_PER_LONG==32
+	unsigned int seq;
+	ktime_t kt;
+
+	do {
+		seq = read_seqbegin(&sk->sk_stamp_seq);
+		kt = sk->sk_stamp;
+	} while (read_seqretry(&sk->sk_stamp_seq, seq));
+
+	return kt;
+#else
+	return sk->sk_stamp;
+#endif
+}
+
+static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
+{
+#if BITS_PER_LONG==32
+	write_seqlock(&sk->sk_stamp_seq);
+	sk->sk_stamp = kt;
+	write_sequnlock(&sk->sk_stamp_seq);
+#else
+	sk->sk_stamp = kt;
+#endif
+}
+
 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
 			   struct sk_buff *skb);
 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2314,7 +2346,7 @@
 	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
 		__sock_recv_timestamp(msg, sk, skb);
 	else
-		sk->sk_stamp = kt;
+		sock_write_timestamp(sk, kt);
 
 	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
 		__sock_recv_wifi_status(msg, sk, skb);
@@ -2335,9 +2367,9 @@
 	if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
 		__sock_recv_ts_and_drops(msg, sk, skb);
 	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
-		sk->sk_stamp = skb->tstamp;
+		sock_write_timestamp(sk, skb->tstamp);
 	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
-		sk->sk_stamp = 0;
+		sock_write_timestamp(sk, 0);
 }
 
 void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
@@ -2489,6 +2521,16 @@
 		return;
 	sk->sk_pacing_shift = val;
 }
+/* SOCKEV Notifier Events */
+#define SOCKEV_SOCKET   0x00
+#define SOCKEV_BIND     0x01
+#define SOCKEV_LISTEN   0x02
+#define SOCKEV_ACCEPT   0x03
+#define SOCKEV_CONNECT  0x04
+#define SOCKEV_SHUTDOWN 0x05
+
+int sockev_register_notify(struct notifier_block *nb);
+int sockev_unregister_notify(struct notifier_block *nb);
 
 /* if a socket is bound to a device, check that the given device
  * index is either the same or that the socket is bound to an L3
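
[Editor's note] On 32-bit architectures a 64-bit ktime_t store takes two word writes, so a reader could observe a torn sk_stamp; the seqlock forces such a read to retry. The pattern reduced to its essentials (standalone sketch, not the sock code itself):

static seqlock_t stamp_seq;	/* seqlock_init() at setup time */
static s64 stamp;		/* 64-bit; 32-bit stores can tear */

static s64 read_stamp(void)
{
	unsigned int seq;
	s64 kt;

	do {
		seq = read_seqbegin(&stamp_seq);
		kt = stamp;		/* may race with a writer */
	} while (read_seqretry(&stamp_seq, seq));	/* torn? retry */

	return kt;
}

static void write_stamp(s64 kt)
{
	write_seqlock(&stamp_seq);
	stamp = kt;
	write_sequnlock(&stamp_seq);
}
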
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 0eb390c..da588de 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1552,6 +1552,7 @@
 		    int (*func)(struct xfrm_state *, int, void*), void *);
 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
 struct xfrm_state *xfrm_state_alloc(struct net *net);
+void xfrm_state_free(struct xfrm_state *x);
 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
 				   const xfrm_address_t *saddr,
 				   const struct flowi *fl,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 4c6289d..fe73e74 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -648,6 +648,9 @@
 	/* The controller does not support WRITE SAME */
 	unsigned no_write_same:1;
 
+	/* Inline encryption support? */
+	unsigned inlinecrypt_support:1;
+
 	unsigned use_blk_mq:1;
 	unsigned use_cmd_list:1;
 
diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h
index 019ac14..f75e538 100644
--- a/include/soc/qcom/qmi_rmnet.h
+++ b/include/soc/qcom/qmi_rmnet.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _QMI_RMNET_H
@@ -20,6 +20,7 @@
 void qmi_rmnet_qmi_exit(void *qmi_pt, void *port);
 void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt);
 void qmi_rmnet_enable_all_flows(struct net_device *dev);
+bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
 #else
 static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
 {
@@ -34,6 +35,12 @@
 qmi_rmnet_enable_all_flows(struct net_device *dev)
 {
 }
+
+static inline bool
+qmi_rmnet_all_flows_enabled(struct net_device *dev)
+{
+	return true;
+}
 #endif
 
 #ifdef CONFIG_QCOM_QMI_DFC
@@ -71,7 +78,7 @@
 void qmi_rmnet_work_init(void *port);
 void qmi_rmnet_work_exit(void *port);
 void qmi_rmnet_work_maybe_restart(void *port);
-void qmi_rmnet_work_restart(void *port);
+void qmi_rmnet_set_dl_msg_active(void *port);
 
 int qmi_rmnet_ps_ind_register(void *port,
 			      struct qmi_rmnet_ps_ind *ps_ind);
@@ -88,18 +95,16 @@
 static inline void qmi_rmnet_work_init(void *port)
 {
 }
-static inline void qmi_rmnet_work_restart(void *port)
-{
-
-}
 static inline void qmi_rmnet_work_exit(void *port)
 {
 }
-
 static inline void qmi_rmnet_work_maybe_restart(void *port)
 {
 
 }
+static inline void qmi_rmnet_set_dl_msg_active(void *port)
+{
+}
 
 static inline int qmi_rmnet_ps_ind_register(struct rmnet_port *port,
 				     struct qmi_rmnet_ps_ind *ps_ind)
diff --git a/include/soc/qcom/qtee_shmbridge.h b/include/soc/qcom/qtee_shmbridge.h
index c023653..4cc332d 100644
--- a/include/soc/qcom/qtee_shmbridge.h
+++ b/include/soc/qcom/qtee_shmbridge.h
@@ -6,6 +6,9 @@
 #ifndef __QTEE_SHMBRIDGE_H__
 #define __QTEE_SHMBRIDGE_H__
 
+/* VMID and permission definitions */
+#include <soc/qcom/secure_buffer.h>
+
 /**
  * struct qtee_shm - info of shared memory allocated from the default bridge
  * @ paddr: physical address of the shm allocated from the default bridge
@@ -28,27 +31,29 @@
 /**
  * Register paddr & size as a bridge, get bridge handle
  *
- * @ paddr: paddr of buffer to be turned into bridge
- * @ size: size of the bridge
- * @ ns_vmid: non-secure vmid, like VMID_HLOS
- * @ ns_vm_perm: NS VM permission, like PERM_READ, PERM_WRITE
- * @ tz_perm: TZ permission
- * @ *handle: output shmbridge handle
+ * @ [IN] paddr: physical address of the buffer to be turned into a bridge
+ * @ [IN] size: size of the bridge
+ * @ [IN] ns_vmid_list: non-secure vmids array
+ * @ [IN] ns_vm_perm_list: NS VM permission array
+ * @ [IN] ns_vmid_num: number of NS VMIDs (at most 4)
+ * @ [IN] tz_perm: TZ permission
+ * @ [OUT] *handle: output shmbridge handle
  *
  * return success or error
  */
 int32_t qtee_shmbridge_register(
 		phys_addr_t paddr,
 		size_t size,
-		uint32_t ns_vmid,
-		uint32_t ns_vm_perm,
+		uint32_t *ns_vmid_list,
+		uint32_t *ns_vm_perm_list,
+		uint32_t ns_vmid_num,
 		uint32_t tz_perm,
 		uint64_t *handle);
 
 /**
  * Deregister bridge
  *
- * @ handle: shmbridge handle
+ * @ [IN] handle: shmbridge handle
  *
  * return success or error
  */
@@ -57,8 +62,8 @@
 /**
  * Sub-allocate from default kernel bridge created by shmb driver
  *
- * @ size: size of the buffer to be sub-allocated from the bridge
- * @ *shm: output qtee_shm structure with buffer paddr, vaddr and
+ * @ [IN] size: size of the buffer to be sub-allocated from the bridge
+ * @ [OUT] *shm: output qtee_shm structure with buffer paddr, vaddr and
  *         size; returns ERR_PTR or NULL otherwise
  *
  * return success or error
@@ -68,7 +73,7 @@
 /*
  * Free buffer that is sub-allocated from default kernel bridge
  *
- * @ shm: qtee_shm structure to be freed
+ * @ [IN] shm: qtee_shm structure to be freed
  *
  */
 void qtee_shmbridge_free_shm(struct qtee_shm *shm);
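
[Editor's note] Registration now takes parallel VMID/permission arrays (up to four NS VMs) instead of a single vmid/perm pair; the VMID_* and PERM_* values come from the secure_buffer.h include added above. A hedged call-site sketch, with paddr and size supplied by the caller:

	uint32_t ns_vmids[] = { VMID_HLOS };
	uint32_t ns_perms[] = { PERM_READ | PERM_WRITE };
	uint64_t handle;
	int32_t ret;

	ret = qtee_shmbridge_register(paddr, size,
				      ns_vmids, ns_perms, 1, /* one NS VM */
				      PERM_READ | PERM_WRITE, /* TZ perm */
				      &handle);
	if (ret)
		return ret;
	/* ... use the bridge, then ... */
	qtee_shmbridge_deregister(handle);
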
diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h
index a5289c8..9096b10 100644
--- a/include/soc/qcom/rmnet_qmi.h
+++ b/include/soc/qcom/rmnet_qmi.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _RMNET_QMI_H
@@ -19,6 +19,7 @@
 void rmnet_reset_qmi_pt(void *port);
 void rmnet_init_qmi_pt(void *port, void *qmi);
 void rmnet_enable_all_flows(void *port);
+bool rmnet_all_flows_enabled(void *port);
 void rmnet_set_powersave_format(void *port);
 void rmnet_clear_powersave_format(void *port);
 void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
@@ -57,6 +58,11 @@
 {
 }
 
+static inline bool rmnet_all_flows_enabled(void *port)
+{
+	return true;
+}
+
 static inline void rmnet_set_port_format(void *port)
 {
 }
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 03f03319..602055a 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -48,6 +48,12 @@
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
 			int dest_nelems);
+
+int try_hyp_assign_table(struct sg_table *table,
+			 u32 *source_vm_list, int source_nelems,
+			 int *dest_vmids, int *dest_perms,
+			 int dest_nelems);
+
 extern int hyp_assign_phys(phys_addr_t addr, u64 size,
 			u32 *source_vmlist, int source_nelems,
 			int *dest_vmids, int *dest_perms, int dest_nelems);
@@ -72,6 +78,14 @@
 	return -EINVAL;
 }
 
+static inline int try_hyp_assign_table(struct sg_table *table,
+				       u32 *source_vm_list, int source_nelems,
+				       int *dest_vmids, int *dest_perms,
+				       int dest_nelems)
+{
+	return -EINVAL;
+}
+
 static inline int hyp_assign_phys(phys_addr_t addr, u64 size,
 			u32 *source_vmlist, int source_nelems,
 			int *dest_vmids, int *dest_perms, int dest_nelems)
diff --git a/include/soc/qcom/subsystem_notif.h b/include/soc/qcom/subsystem_notif.h
index 7d1bbbb..79f8169 100644
--- a/include/soc/qcom/subsystem_notif.h
+++ b/include/soc/qcom/subsystem_notif.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2011, 2013-2014, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2013-2014, 2018-2019, The Linux Foundation. All rights reserved.
  */
 /*
  * Subsystem restart notifier API header
@@ -17,6 +17,7 @@
 	SUBSYS_AFTER_SHUTDOWN,
 	SUBSYS_BEFORE_POWERUP,
 	SUBSYS_AFTER_POWERUP,
+	SUBSYS_BEFORE_AUTH_AND_RESET,
 	SUBSYS_RAMDUMP_NOTIFICATION,
 	SUBSYS_POWERUP_FAILURE,
 	SUBSYS_PROXY_VOTE,
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
index 363928a..81716f3 100644
--- a/include/soc/qcom/subsystem_restart.h
+++ b/include/soc/qcom/subsystem_restart.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SUBSYS_RESTART_H
@@ -157,6 +157,7 @@
 extern enum crash_status subsys_get_crash_status(struct subsys_device *dev);
 void notify_proxy_vote(struct device *device);
 void notify_proxy_unvote(struct device *device);
+void notify_before_auth_and_reset(struct device *device);
 void complete_err_ready(struct subsys_device *subsys);
 void complete_shutdown_ack(struct subsys_device *subsys);
 struct subsys_device *find_subsys_device(const char *str);
@@ -218,6 +219,7 @@
 }
 static inline void notify_proxy_vote(struct device *device) { }
 static inline void notify_proxy_unvote(struct device *device) { }
+static inline void notify_before_auth_and_reset(struct device *device) { }
 static inline int wait_for_shutdown_ack(struct subsys_desc *desc)
 {
 	return -EOPNOTSUPP;
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 8e52178..b52d4a0 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -177,7 +177,11 @@
 	if (snd_BUG_ON(!stream))
 		return;
 
-	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+	if (stream->direction == SND_COMPRESS_PLAYBACK)
+		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+	else
+		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+
 	wake_up(&stream->runtime->sleep);
 }
 
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7a4ee78..2cfd3b4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -602,6 +602,7 @@
 	struct se_node_acl	*se_node_acl;
 	struct se_portal_group *se_tpg;
 	void			*fabric_sess_ptr;
+	struct percpu_ref	cmd_count;
 	struct list_head	sess_list;
 	struct list_head	sess_acl_list;
 	struct list_head	sess_cmd_list;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index f4147b3..eb9d092 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -116,7 +116,7 @@
 				struct se_session *, void *));
 void target_remove_session(struct se_session *);
 
-void transport_init_session(struct se_session *);
+int transport_init_session(struct se_session *se_sess);
 struct se_session *transport_alloc_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
 		unsigned int);
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index 5017a88..a0773a5 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -8,7 +8,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
 	TP_PROTO(unsigned long pfn, const struct page *page,
 		 unsigned int count, unsigned int align),
@@ -61,6 +61,44 @@
 		  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+	TP_PROTO(unsigned int count, unsigned int align),
+
+	TP_ARGS(count, align),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+	),
+
+	TP_fast_assign(
+		__entry->count = count;
+		__entry->align = align;
+	),
+
+	TP_printk("count=%u align=%u",
+		  __entry->count,
+		  __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h
index 4ba0fd4..cb62767 100644
--- a/include/trace/events/dfc.h
+++ b/include/trace/events/dfc.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #undef TRACE_SYSTEM
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 0e31eb1..0dfb174 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -225,6 +225,26 @@
 		  (unsigned long) __entry->ino, __entry->drop)
 );
 
+TRACE_EVENT(ext4_nfs_commit_metadata,
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,	dev			)
+		__field(	ino_t,	ino			)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+	),
+
+	TP_printk("dev %d,%d ino %lu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino)
+);
+
 TRACE_EVENT(ext4_mark_inode_dirty,
 	TP_PROTO(struct inode *inode, unsigned long IP),
 
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 705b33d..ff2d62a 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -70,6 +70,54 @@
 		__print_symbolic(__entry->mode, MIGRATE_MODE),
 		__print_symbolic(__entry->reason, MIGRATE_REASON))
 );
+
+TRACE_EVENT(mm_numa_migrate_ratelimit,
+
+	TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
+
+	TP_ARGS(p, dst_nid, nr_pages),
+
+	TP_STRUCT__entry(
+		__array(char,		comm,	TASK_COMM_LEN)
+		__field(pid_t,		pid)
+		__field(int,		dst_nid)
+		__field(unsigned long,	nr_pages)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid		= p->pid;
+		__entry->dst_nid	= dst_nid;
+		__entry->nr_pages	= nr_pages;
+	),
+
+	TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
+		__entry->comm,
+		__entry->pid,
+		__entry->dst_nid,
+		__entry->nr_pages)
+);
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+	TP_PROTO(enum migrate_mode mode, int reason),
+
+	TP_ARGS(mode, reason),
+
+	TP_STRUCT__entry(
+		__field(enum migrate_mode, mode)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->mode	= mode;
+		__entry->reason	= reason;
+	),
+
+	TP_printk("mode=%s reason=%s",
+		__print_symbolic(__entry->mode, MIGRATE_MODE),
+		__print_symbolic(__entry->reason, MIGRATE_REASON))
+);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index a81cffb..a1675d4 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -88,6 +88,7 @@
 	{1UL << PG_dirty,		"dirty"		},		\
 	{1UL << PG_lru,			"lru"		},		\
 	{1UL << PG_active,		"active"	},		\
+	{1UL << PG_workingset,		"workingset"	},		\
 	{1UL << PG_slab,		"slab"		},		\
 	{1UL << PG_owner_priv_1,	"owner_priv_1"	},		\
 	{1UL << PG_arch_1,		"arch_1"	},		\
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 305576e..872a60f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -205,11 +205,11 @@
 	TP_fast_assign(
 		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
 		__entry->prev_pid	= prev->pid;
-		__entry->prev_prio	= prev->prio;
+		__entry->prev_prio	= prev->prio == -1 ? 150 : prev->prio;
 		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
 		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
 		__entry->next_pid	= next->pid;
-		__entry->next_prio	= next->prio;
+		__entry->next_prio	= next->prio == -1 ? 150 : next->prio;
 		/* XXX SCHED_DEADLINE */
 	),
 
@@ -940,10 +940,7 @@
 );
 
 #ifdef CONFIG_SCHED_WALT
-extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int sysctl_sched_use_walt_task_util;
 extern unsigned int sched_ravg_window;
-extern unsigned int walt_disabled;
 #endif
 
 /*
@@ -973,8 +970,7 @@
 		__entry->util_avg_walt  = div64_ul(cpu_rq(cpu)->prev_runnable_sum,
 					  sched_ravg_window >> SCHED_CAPACITY_SHIFT);
 
-		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
-			__entry->util_avg       = __entry->util_avg_walt;
+		__entry->util_avg       = __entry->util_avg_walt;
 #endif
 	),
 
@@ -1002,8 +998,6 @@
 		__field(	unsigned long,	load			      )
 		__field(	unsigned long,	rbl_load		      )
 		__field(	unsigned long,	util			      )
-		__field(	unsigned long,	util_pelt		      )
-		__field(	u32,		util_walt		      )
 	),
 
 	TP_fast_assign(
@@ -1020,22 +1014,11 @@
 		__entry->load = se->avg.load_avg;
 		__entry->rbl_load = se->avg.runnable_load_avg;
 		__entry->util = se->avg.util_avg;
-		__entry->util_pelt  = __entry->util;
-		__entry->util_walt  = 0;
-#ifdef CONFIG_SCHED_WALT
-		if (!se->my_q) {
-			struct task_struct *p = container_of(se, struct task_struct, se);
-			__entry->util_walt = p->ravg.demand / (sched_ravg_window >> SCHED_CAPACITY_SHIFT);
-			if (!walt_disabled && sysctl_sched_use_walt_task_util)
-				__entry->util = __entry->util_walt;
-		}
-#endif
 	),
 
-	TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu util_pelt=%lu util_walt=%u",
+	TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu",
 		  __entry->cpu, __get_str(path), __entry->comm, __entry->pid,
-		  __entry->load, __entry->rbl_load, __entry->util,
-		  __entry->util_pelt, __entry->util_walt)
+		  __entry->load, __entry->rbl_load, __entry->util)
 );
 
 /*
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index bbb08a3..a2644c4 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -582,7 +582,8 @@
 		__field(u32, vers)
 		__field(u32, proc)
 		__string(service, name)
-		__string(addr, rqst->rq_xprt->xpt_remotebuf)
+		__string(addr, rqst->rq_xprt ?
+			 rqst->rq_xprt->xpt_remotebuf : "(null)")
 	),
 
 	TP_fast_assign(
@@ -590,7 +591,8 @@
 		__entry->vers = rqst->rq_vers;
 		__entry->proc = rqst->rq_proc;
 		__assign_str(service, name);
-		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+		__assign_str(addr, rqst->rq_xprt ?
+			     rqst->rq_xprt->xpt_remotebuf : "(null)");
 	),
 
 	TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u",
diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h
index 2c10a02..c31c67a 100644
--- a/include/trace/events/walt.h
+++ b/include/trace/events/walt.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifdef CONFIG_SCHED_WALT
@@ -491,57 +491,6 @@
 		__entry->affinity, __entry->task_util, __entry->h_load)
 );
 
-DECLARE_EVENT_CLASS(sched_cpu_load,
-
-	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
-
-	TP_ARGS(rq, idle, irqload, power_cost),
-
-	TP_STRUCT__entry(
-		__field(unsigned int, cpu)
-		__field(unsigned int, idle)
-		__field(unsigned int, nr_running)
-		__field(unsigned int, nr_big_tasks)
-		__field(unsigned int, load_scale_factor)
-		__field(unsigned int, capacity)
-		__field(u64,	      cumulative_runnable_avg)
-		__field(u64,	      irqload)
-		__field(unsigned int, max_freq)
-		__field(unsigned int, power_cost)
-		__field(int,	      cstate)
-		__field(int,	      dstate)
-	),
-
-	TP_fast_assign(
-		__entry->cpu			= rq->cpu;
-		__entry->idle			= idle;
-		__entry->nr_running		= rq->nr_running;
-		__entry->nr_big_tasks		= rq->walt_stats.nr_big_tasks;
-		__entry->load_scale_factor	=
-						cpu_load_scale_factor(rq->cpu);
-		__entry->capacity		= cpu_capacity(rq->cpu);
-		__entry->cumulative_runnable_avg =
-				rq->walt_stats.cumulative_runnable_avg_scaled;
-		__entry->irqload		= irqload;
-		__entry->max_freq		= cpu_max_freq(rq->cpu);
-		__entry->power_cost		= power_cost;
-		__entry->cstate			= rq->cstate;
-		__entry->dstate			= rq->cluster->dstate;
-	),
-
-	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d",
-		__entry->cpu, __entry->idle, __entry->nr_running,
-		__entry->nr_big_tasks, __entry->load_scale_factor,
-		__entry->capacity, __entry->cumulative_runnable_avg,
-		__entry->irqload, __entry->max_freq, __entry->power_cost,
-		__entry->cstate, __entry->dstate)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
-	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
-	TP_ARGS(rq, idle, irqload, power_cost)
-);
-
 TRACE_EVENT(sched_load_to_gov,
 
 	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index f5271bc..93333c0 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -119,7 +119,8 @@
 #define  DRM_MODE_FLAG_SUPPORTS_RGB		(1<<23)
 
 #define  DRM_MODE_FLAG_SUPPORTS_YUV		(1<<24)
-
+#define  DRM_MODE_FLAG_VID_MODE_PANEL	(1<<29)
+#define  DRM_MODE_FLAG_CMD_MODE_PANEL	(1<<30)
 #define  DRM_MODE_FLAG_SEAMLESS			(1<<31)
 
 #define  DRM_MODE_FLAG_ALL	(DRM_MODE_FLAG_PHSYNC |		\
@@ -136,6 +137,8 @@
 				 DRM_MODE_FLAG_CLKDIV2 |	\
 				 DRM_MODE_FLAG_SUPPORTS_RGB |	\
 				 DRM_MODE_FLAG_SUPPORTS_YUV |	\
+				 DRM_MODE_FLAG_VID_MODE_PANEL |	\
+				 DRM_MODE_FLAG_CMD_MODE_PANEL |	\
 				 DRM_MODE_FLAG_3D_MASK)
 
 /* DPMS flags */
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index e9c8b3c..90910aa 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -382,6 +382,16 @@
 	__u32 id;      /* out, identifier */
 };
 
+/**
+ * struct drm_msm_power_ctrl: Payload to enable/disable the power vote
+ * @enable: enable/disable the power vote
+ * @flags:  operation control flags, for future use
+ */
+struct drm_msm_power_ctrl {
+	__u32 enable;
+	__u32 flags;
+};
+
 #define DRM_MSM_GET_PARAM              0x00
 /* placeholder:
 #define DRM_MSM_SET_PARAM              0x01
@@ -402,6 +412,7 @@
 #define DRM_MSM_REGISTER_EVENT         0x41
 #define DRM_MSM_DEREGISTER_EVENT       0x42
 #define DRM_MSM_RMFB2                  0x43
+#define DRM_MSM_POWER_CTRL             0x44
 
 /* sde custom events */
 #define DRM_EVENT_HISTOGRAM 0x80000000
@@ -433,6 +444,8 @@
 			DRM_MSM_RMFB2), unsigned int)
 #define DRM_IOCTL_MSM_SUBMITQUEUE_NEW    DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
 #define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE  DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
+#define DRM_IOCTL_MSM_POWER_CTRL DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_POWER_CTRL), struct drm_msm_power_ctrl)
 
 #if defined(__cplusplus)
 }
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index a68420d..2645523 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -468,12 +468,14 @@
 	__u32 factor_out;
 };
 
+#define LTM_FEATURE_DEF 1
 #define LTM_DATA_SIZE_0 32
 #define LTM_DATA_SIZE_1 128
 #define LTM_DATA_SIZE_2 256
 #define LTM_DATA_SIZE_3 33
 #define LTM_BUFFER_SIZE 5
 #define LTM_GUARD_BYTES 255
+#define LTM_BLOCK_SIZE 2
 
 #define LTM_STATS_SAT (1 << 1)
 #define LTM_STATS_MERGE_SAT (1 << 2)
@@ -488,6 +490,18 @@
 	__u32 stats_04[LTM_DATA_SIZE_0];
 	__u32 stats_05[LTM_DATA_SIZE_0];
 	__u32 status_flag;
+	__u32 display_h;
+	__u32 display_v;
+	__u32 init_h[LTM_BLOCK_SIZE];
+	__u32 init_v;
+	__u32 inc_h;
+	__u32 inc_v;
+	__u32 portrait_en;
+	__u32 merge_en;
+	__u32 cfg_param_01;
+	__u32 cfg_param_02;
+	__u32 cfg_param_03;
+	__u32 cfg_param_04;
 };
 
 /*
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 8045240..dc1a320 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -13,8 +13,10 @@
 endif
 
 header-y += nfc/
+header-y += qbt_handler.h
 
 ifneq ($(VSERVICES_SUPPORT), "")
 include include/linux/Kbuild.vservices
 endif
 header-y += okl4-link-shbuf.h
+header-y += sockev.h
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 4a1c285..7d62fcf 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -88,6 +88,16 @@
 	 * scheduling policy from the caller (for synchronous transactions).
 	 */
 	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
+
+#ifdef __KERNEL__
+	/**
+	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
+	 *
+	 * Only when set, causes senders to include their security
+	 * context
+	 */
+	FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
+#endif /* __KERNEL__ */
 };
 
 #ifdef BINDER_IPC_32BIT
@@ -265,6 +275,7 @@
 #define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
 #define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
 #define BINDER_GET_NODE_INFO_FOR_REF	_IOWR('b', 12, struct binder_node_info_for_ref)
+#define BINDER_SET_CONTEXT_MGR_EXT	_IOW('b', 13, struct flat_binder_object)
 
 /*
  * NOTE: Two special error codes you should check for when calling
@@ -323,6 +334,13 @@
 	} data;
 };
 
+#ifdef __KERNEL__
+struct binder_transaction_data_secctx {
+	struct binder_transaction_data transaction_data;
+	binder_uintptr_t secctx;
+};
+#endif /* __KERNEL__ */
+
 struct binder_transaction_data_sg {
 	struct binder_transaction_data transaction_data;
 	binder_size_t buffers_size;
@@ -358,6 +376,13 @@
 	BR_OK = _IO('r', 1),
 	/* No parameters! */
 
+#ifdef __KERNEL__
+	BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
+				      struct binder_transaction_data_secctx),
+	/*
+	 * binder_transaction_data_secctx: the received command.
+	 */
+#endif /* __KERNEL__ */
 	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
 	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
 	/*
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 086e7ee..245f38c 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -283,6 +283,7 @@
 #define FS_ENCRYPTION_MODE_SPECK128_256_XTS	7 /* Removed, do not use. */
 #define FS_ENCRYPTION_MODE_SPECK128_256_CTS	8 /* Removed, do not use. */
 #define FS_ENCRYPTION_MODE_ADIANTUM		9
+#define FS_ENCRYPTION_MODE_PRIVATE		127
 
 struct fscrypt_policy {
 	__u8 version;
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 14565d7..e8baca8 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -137,15 +137,21 @@
 	INET_DIAG_TCLASS,
 	INET_DIAG_SKMEMINFO,
 	INET_DIAG_SHUTDOWN,
-	INET_DIAG_DCTCPINFO,
-	INET_DIAG_PROTOCOL,  /* response attribute only */
+
+	/*
+	 * Next extensions cannot be requested in struct inet_diag_req_v2:
+	 * its field idiag_ext has only 8 bits.
+	 */
+
+	INET_DIAG_DCTCPINFO,	/* request as INET_DIAG_VEGASINFO */
+	INET_DIAG_PROTOCOL,	/* response attribute only */
 	INET_DIAG_SKV6ONLY,
 	INET_DIAG_LOCALS,
 	INET_DIAG_PEERS,
 	INET_DIAG_PAD,
-	INET_DIAG_MARK,
-	INET_DIAG_BBRINFO,
-	INET_DIAG_CLASS_ID,
+	INET_DIAG_MARK,		/* only with CAP_NET_ADMIN */
+	INET_DIAG_BBRINFO,	/* request as INET_DIAG_VEGASINFO */
+	INET_DIAG_CLASS_ID,	/* request as INET_DIAG_TCLASS */
 	INET_DIAG_MD5SIG,
 	__INET_DIAG_MAX,
 };
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 6b634e2..7a03c15 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -744,6 +744,15 @@
 
 #define ABS_MISC		0x28
 
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define here ABS_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define ABS_RESERVED		0x2e
+
 #define ABS_MT_SLOT		0x2f	/* MT slot being modified */
 #define ABS_MT_TOUCH_MAJOR	0x30	/* Major axis of touching ellipse */
 #define ABS_MT_TOUCH_MINOR	0x31	/* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index fb78f6f..f056b2a 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -26,13 +26,17 @@
  */
 
 struct input_event {
-#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL)
+#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
 	struct timeval time;
 #define input_event_sec time.tv_sec
 #define input_event_usec time.tv_usec
 #else
 	__kernel_ulong_t __sec;
+#if defined(__sparc__) && defined(__arch64__)
+	unsigned int __usec;
+#else
 	__kernel_ulong_t __usec;
+#endif
 #define input_event_sec  __sec
 #define input_event_usec __usec
 #endif
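With the y2038-safe layout above, portable readers should go through the input_event_sec/input_event_usec accessors rather than assuming a struct timeval. A minimal sketch of an evdev read loop:

```c
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

/* The accessor macros resolve to either the timeval members or the
 * __sec/__usec pair depending on the ABI in use.
 */
static void dump_events(int evdev_fd)
{
	struct input_event ev;

	while (read(evdev_fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("%lld.%06lld type %u code %u value %d\n",
		       (long long)ev.input_event_sec,
		       (long long)ev.input_event_usec,
		       ev.type, ev.code, ev.value);
}
```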
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
index fb72150..9b9448e 100644
--- a/include/uapi/linux/ipa_qmi_service_v01.h
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -34,22 +34,23 @@
 
 #include <linux/types.h>
 
-#define QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01 2
-#define QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01 2
-#define QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01 2
-#define QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01 2
-#define QMI_IPA_MAX_FILTERS_V01 64
-#define QMI_IPA_MAX_FILTERS_EX_V01 128
-#define QMI_IPA_MAX_PIPES_V01 20
-#define QMI_IPA_MAX_APN_V01 8
-#define QMI_IPA_MAX_PER_CLIENTS_V01 64
 #define QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01 6
-#define QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01 6
+#define QMI_IPA_MAX_FILTERS_EX_V01 128
+#define QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01 2
+#define QMI_IPA_MAX_FILTERS_V01 64
+#define QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01 2
+#define QMI_IPA_ENDP_DESC_NUM_MAX_V01 31
+#define QMI_IPA_MAX_APN_V01 8
 /* Currently max we can use is only 1. But for scalability purpose
  * we are having max value as 8.
  */
 #define QMI_IPA_MAX_CLIENT_DST_PIPES_V01 8
+#define QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01 2
 #define QMI_IPA_MAX_UL_FIREWALL_RULES_V01 64
+#define QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01 6
+#define QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01 2
+#define QMI_IPA_MAX_PIPES_V01 20
+#define QMI_IPA_MAX_PER_CLIENTS_V01 64
 
 /*
  * Indicates presence of newly added member to support HW stats.
@@ -660,6 +661,116 @@
 };  /* Type */
 
 
+struct ipa_filter_rule_req2_type_v01 {
+	uint16_t rule_eq_bitmap;
+	/* 16-bit bitmask indicating which equations are valid in this rule */
+
+	uint8_t pure_ack_eq_present;
+	/*
+	 *  Specifies if a TCP pure ACK check rule is present
+	 */
+
+	uint8_t pure_ack_eq;
+	/* The value to check for the TCP pure ACK equation */
+
+	uint8_t protocol_eq_present;
+	/* Specifies if a protocol check rule is present */
+
+	uint8_t protocol_eq;
+	/* The value to check against the protocol field */
+
+	uint8_t num_ihl_offset_range_16;
+	/*  The number of 16 bit range check rules at the location
+	 *	determined by IP header length plus a given offset
+	 *	in this rule. See the definition of the ipa_filter_range_eq_16
+	 *	for better understanding. The value of this field cannot exceed
+	 *	IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS which is set as 2
+	 */
+
+	struct ipa_ipfltr_range_eq_16_type_v01
+		ihl_offset_range_16[QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01];
+	/*	Array of the registered IP header length offset 16 bit range
+	 *	check rules.
+	 */
+
+	uint8_t num_offset_meq_32;
+	/*  The number of 32 bit masked comparison rules present
+	 *  in this rule
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+		offset_meq_32[QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01];
+	/*  An array of all the possible 32bit masked comparison rules
+	 *	in this rule
+	 */
+
+	uint8_t tc_eq_present;
+	/*  Specifies if the traffic class rule is present in this rule */
+
+	uint8_t tc_eq;
+	/* The value against which the IPV4 traffic class field has to
+	 * be checked
+	 */
+
+	uint8_t flow_eq_present;
+	/* Specifies if the "flow equals" rule is present in this rule */
+
+	uint32_t flow_eq;
+	/* The value against which the IPV6 flow field has to be checked */
+
+	uint8_t ihl_offset_eq_16_present;
+	/*	Specifies if there is a 16 bit comparison required at the
+	 *	location in the packet determined by "Internet Header length
+	 *	+ specified offset"
+	 */
+
+	struct ipa_ipfltr_eq_16_type_v01 ihl_offset_eq_16;
+	/* The 16 bit comparison equation */
+
+	uint8_t ihl_offset_eq_32_present;
+	/*	Specifies if there is a 32 bit comparison required at the
+	 *	location in the packet determined by "Internet Header length
+	 *	+ specified offset"
+	 */
+
+	struct ipa_ipfltr_eq_32_type_v01 ihl_offset_eq_32;
+	/*	The 32 bit comparison equation */
+
+	uint8_t num_ihl_offset_meq_32;
+	/*	The number of 32 bit masked comparison equations in this
+	 *	rule. The location of the packet to be compared is
+	 *	determined by the IP Header length + the given offset
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+		ihl_offset_meq_32[QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01];
+	/*	Array of 32 bit masked comparison equations.
+	 */
+
+	uint8_t num_offset_meq_128;
+	/*	The number of 128 bit comparison equations in this rule */
+
+	struct ipa_ipfltr_mask_eq_128_type_v01
+		offset_meq_128[QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01];
+	/*	Array of 128 bit comparison equations. The location in the
+	 *	packet is determined by the specified offset
+	 */
+
+	uint8_t metadata_meq32_present;
+	/*  Boolean indicating if the 32 bit masked comparison equation
+	 *	is present or not. Comparison is done against the metadata
+	 *	in IPA. Metadata can either be extracted from the packet
+	 *	header or from the "metadata" register.
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+			metadata_meq32;
+	/* The metadata  32 bit masked comparison equation */
+
+	uint8_t ipv4_frag_eq_present;
+	/* Specifies if the IPv4 Fragment equation is present in this rule */
+};  /* Type */
+
 enum ipa_ip_type_enum_v01 {
 	IPA_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
 	/* To force a 32 bit signed enum.  Do not change or use*/
@@ -790,6 +901,55 @@
 	 */
 };  /* Type */
 
+struct ipa_filter_spec_ex2_type_v01 {
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*	This field identifies the IP type for which this rule is
+	 *	applicable. The driver needs to identify the filter table
+	 *	(V6 or V4) and this field is essential for that
+	 */
+
+	struct ipa_filter_rule_req2_type_v01 filter_rule;
+	/*	This field specifies the rules in the filter spec. These rules
+	 *	are the ones that are matched against fields in the packet.
+	 */
+
+	enum ipa_filter_action_enum_v01 filter_action;
+	/*	This field specifies the action to be taken when a filter match
+	 *	occurs. The remote side should install this information into the
+	 *	hardware along with the filter equations.
+	 */
+
+	uint8_t is_routing_table_index_valid;
+	/*	Specifies whether the routing table index is present or not.
+	 *	If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this
+	 *	parameter need not be provided.
+	 */
+
+	uint32_t route_table_index;
+	/*	This is the index in the routing table that should be used
+	 *	to route the packets if the filter rule is hit
+	 */
+
+	uint8_t is_mux_id_valid;
+	/*	Specifies whether the mux_id is valid */
+
+	uint32_t mux_id;
+	/*	This field identifies the QMAP MUX ID. As a part of QMAP
+	 *	protocol, several data calls may be multiplexed over the
+	 *	same physical transport channel. This identifier is used to
+	 *	identify one such data call. The maximum value for this
+	 *	identifier is 255.
+	 */
+
+	uint32_t rule_id;
+	/* Rule Id of the given filter. The Rule Id is populated in the rule
+	 * header when installing the rule in IPA.
+	 */
+
+	uint8_t is_rule_hashable;
+	/* Specifies whether the given rule is hashable */
+};  /* Type */
 
 /*  Request Message; This is the message that is exchanged between the
  *	control point and the service in order to request the installation
@@ -853,7 +1013,7 @@
 	 */
 
 	/* Optional */
-	/*  Extended Filter Specification  */
+	/*  Extended Filter Specification */
 	uint8_t filter_spec_ex_list_valid;
 	/* Must be set to true if filter_spec_ex_list is being passed */
 	uint32_t filter_spec_ex_list_len;
@@ -866,6 +1026,15 @@
 	 *	The driver installing these rules must do so in the same
 	 *	order as specified in this list.
 	 */
+
+	/* Optional */
+	/*  Extended Type 2 Filter Specification */
+	uint8_t filter_spec_ex2_list_valid;
+	/* Must be set to true if filter_spec_ex2_list is being passed */
+	uint32_t filter_spec_ex2_list_len;
+	/* Must be set to # of elements in filter_spec_ex2_list */
+	struct ipa_filter_spec_ex2_type_v01
+		filter_spec_ex2_list[QMI_IPA_MAX_FILTERS_V01];
 };  /* Message */
 
 struct ipa_filter_rule_identifier_to_handle_map_v01 {
@@ -1664,6 +1833,15 @@
 	 * receiver if the PDN is XLAT before installing them on the associated
 	 * IPA consumer pipe.
 	 */
+
+	/* Optional */
+	/* Extended Type 2 Filter Specification */
+	uint8_t filter_spec_ex2_list_valid;
+	/* Must be set to true if filter_spec_ex2_list is being passed */
+	uint32_t filter_spec_ex2_list_len;
+	/* Must be set to # of elements in filter_spec_ex2_list */
+	struct ipa_filter_spec_ex2_type_v01
+		filter_spec_ex2_list[QMI_IPA_MAX_FILTERS_V01];
 };  /* Message */
 
 /* Response Message; Requests installation of filtering rules in the hardware
@@ -2198,6 +2376,175 @@
 };
 #define IPA_MHI_CLEANUP_RESP_MSG_V01_MAX_MSG_LEN 7
 
+enum ipa_ep_desc_type_enum_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	IPA_EP_DESC_TYPE_ENUM_MIN_VAL_V01 = IPA_INT_MIN,
+	DATA_EP_DESC_TYPE_RESERVED_V01 = 0x00,
+	DATA_EP_DESC_TYPE_EMB_CONS_V01 = 0x01,
+	DATA_EP_DESC_TYPE_EMB_PROD_V01 = 0x02,
+	DATA_EP_DESC_TYPE_RSC_PROD_V01 = 0x03,
+	DATA_EP_DESC_TYPE_QDSS_PROD_V01 = 0x04,
+	DATA_EP_DESC_TYPE_DPL_PROD_V01 = 0x05,
+	DATA_EP_DESC_TYPE_TETH_CONS_V01 = 0x06,
+	DATA_EP_DESC_TYPE_TETH_PROD_V01 = 0x07,
+	DATA_EP_DESC_TYPE_TETH_RMNET_CONS_V01 = 0x08,
+	DATA_EP_DESC_TYPE_TETH_RMNET_PROD_V01 = 0x09,
+	IPA_EP_DESC_TYPE_ENUM_MAX_VAL_V01 = IPA_INT_MAX,
+};
+
+enum ipa_ic_type_enum_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	IPA_IC_TYPE_ENUM_MIN_VAL_V01 = IPA_INT_MIN,
+	DATA_IC_TYPE_RESERVED_V01 = 0x00,
+	DATA_IC_TYPE_MHI_V01 = 0x01,
+	DATA_IC_TYPE_MHI_PRIME_V01 = 0x02,
+	DATA_IC_TYPE_USB_V01 = 0x03,
+	DATA_IC_TYPE_AP_V01 = 0x04,
+	DATA_IC_TYPE_Q6_V01 = 0x05,
+	DATA_IC_TYPE_UC_V01 = 0x06,
+	IPA_IC_TYPE_ENUM_MAX_VAL_V01 = IPA_INT_MAX,
+};
+
+enum ipa_ep_status_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	IPA_EP_STATUS_TYPE_MIN_VAL_V01 = IPA_INT_MIN,
+	DATA_EP_STATUS_RESERVED_V01 = 0x00,
+	DATA_EP_STATUS_STATIC_V01 = 0x01,
+	DATA_EP_STATUS_CONNECTED_V01 = 0x02,
+	DATA_EP_STATUS_DISCONNECTED_V01 = 0x03,
+	IPA_EP_STATUS_TYPE_MAX_VAL_V01 = IPA_INT_MAX,
+};
+
+struct ipa_ep_id_type_v01 {
+	/* Interconnect type. See enum ipa_ic_type_enum_v01 */
+	enum ipa_ic_type_enum_v01 ic_type;
+	/* Peripheral end point type */
+	enum ipa_ep_desc_type_enum_v01 ep_type;
+	/* Peripheral interface number */
+	uint32_t ep_id;
+	/* Status of endpoint */
+	enum ipa_ep_status_type_v01 ep_status;
+};
+
+struct ipa_endp_desc_indication_msg_v01 {
+	/* Optional */
+	uint8_t ep_info_valid;
+	/* Must be set to true if type_arr is being passed */
+	uint32_t ep_info_len;
+	/* Must be set to # of elements in type_arr */
+	struct ipa_ep_id_type_v01 ep_info[QMI_IPA_ENDP_DESC_NUM_MAX_V01];
+	/* Optional */
+	uint8_t num_eps_valid;
+	/* Must be set to true if num_eps is being passed */
+	/* Total number of endpoints */
+	uint32_t num_eps;
+}; /* Message */
+#define IPA_ENDP_DESC_INDICATION_MSG_V01_MAX_MSG_LEN 507
+
+enum ipa_aggr_enum_type_v01 {
+	IPA_AGGR_ENUM_TYPE_MIN_VAL_V01 = IPA_INT_MIN,
+	DATA_AGGR_TYPE_RESERVED_V01 = 0x00,
+	DATA_AGGR_TYPE_QMAP_V01 = 0x01,
+	DATA_AGGR_TYPE_QMAPv5_V01 = 0x02,
+	DATA_AGGR_TYPE_INHERITED_V01 = 0x03,
+	IPA_AGGR_ENUM_TYPE_MAX_VAL_V01 = IPA_INT_MAX,
+};
+
+struct ipa_mhi_prime_aggr_info_type_v01 {
+	/* Interconnect type */
+	enum ipa_ic_type_enum_v01 ic_type;
+	/* Peripheral end point type */
+	enum ipa_ep_desc_type_enum_v01 ep_type;
+	/* Byte count in KB */
+	uint32_t bytes_count;
+	/* packet count */
+	uint32_t pkt_count;
+	/* aggr_type */
+	enum ipa_aggr_enum_type_v01 aggr_type;
+}; /* Message */
+#define IPA_MHI_PRIME_AGGR_INFO_REQ_MSG_V01_MAX_MSG_LEN 631
+
+struct ipa_mhi_prime_aggr_info_req_msg_v01 {
+	/* optional */
+	uint8_t aggr_info_valid;
+	/* Aggregation info for MHI prime */
+	/* Must be set to true if aggr_info is being passed */
+	uint32_t aggr_info_len;
+	/* Must be set to # of elements in aggr_info */
+	struct ipa_mhi_prime_aggr_info_type_v01
+		aggr_info[QMI_IPA_ENDP_DESC_NUM_MAX_V01];
+	/* optional */
+	/* Must be set to true if num_eps is being passed */
+	uint8_t num_eps_valid;
+	/* Total number of endpoints */
+	uint32_t num_eps;
+}; /* Message */
+#define IPA_MHI_PRIME_AGGR_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+
+struct ipa_mhi_prime_aggr_info_resp_msg_v01 {
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+}; /* Message */
+
+struct ipa_add_offload_connection_req_msg_v01 {
+	/* optional */
+	/* Must be set to true if num_ipv4_filters is being passed */
+	uint8_t num_ipv4_filters_valid;
+	/* Must be set to # of ipv4_filters */
+	uint32_t num_ipv4_filters;
+	/* optional */
+	/* Must be set to true if num_ipv6_filters is being passed */
+	uint8_t num_ipv6_filters_valid;
+	/* Must be set to # of ipv6_filters */
+	uint32_t num_ipv6_filters;
+	/* optional */
+	uint8_t xlat_filter_indices_list_valid;
+	/* Must be set to true if xlat_filter_indices_list is being passed */
+	uint32_t xlat_filter_indices_list_len;
+	/* Must be set to # of xlat_filter_indices_list */
+	uint32_t xlat_filter_indices_list[QMI_IPA_MAX_FILTERS_V01];
+	/* optional */
+	/* Must be set to true if filter_spec_ex2_list is being passed */
+	uint8_t filter_spec_ex2_list_valid;
+	/* Must be set to # of filter_spec_ex2_list */
+	uint32_t filter_spec_ex2_list_len;
+	struct ipa_filter_spec_ex2_type_v01
+		filter_spec_ex2_list[QMI_IPA_MAX_FILTERS_V01];
+}; /* Message */
+#define IPA_ADD_OFFLOAD_CONNECTION_REQ_MSG_V01_MAX_MSG_LEN 11350
+
+struct ipa_add_offload_connection_resp_msg_v01 {
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* optional */
+	/* Must be set to true if filter_handle_list is being passed */
+	uint8_t filter_handle_list_valid;
+	/* Must be set to # of filter_handle_list */
+	uint32_t filter_handle_list_len;
+	struct ipa_filter_rule_identifier_to_handle_map_v01
+		filter_handle_list[QMI_IPA_MAX_FILTERS_V01];
+}; /* Message */
+#define IPA_ADD_OFFLOAD_CONNECTION_RESP_MSG_V01_MAX_MSG_LEN 523
+
+struct ipa_remove_offload_connection_req_msg_v01 {
+	/* optional */
+	/* Must be set to true if filter_handle_list is being passed */
+	uint8_t filter_handle_list_valid;
+	/* Must be set to # of filter_handle_list */
+	uint32_t filter_handle_list_len;
+	struct ipa_filter_rule_identifier_to_handle_map_v01
+		filter_handle_list[QMI_IPA_MAX_FILTERS_V01];
+}; /* Message */
+#define IPA_REMOVE_OFFLOAD_CONNECTION_REQ_MSG_V01_MAX_MSG_LEN 516
+
+struct ipa_remove_offload_connection_resp_msg_v01 {
+	/* optional */
+	/* Must be set to true if resp is being passed */
+	uint8_t resp_valid;
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+}; /* Message */
+#define IPA_REMOVE_OFFLOAD_CONNECTION_RESP_MSG_V01_MAX_MSG_LEN 7
+
 /*Service Message Definition*/
 #define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
 #define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
@@ -2245,13 +2592,21 @@
 #define QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01 0x003D
 #define QMI_IPA_MHI_CLEANUP_REQ_V01 0x003E
 #define QMI_IPA_MHI_CLEANUP_RESP_V01 0x003E
+#define QMI_IPA_ENDP_DESC_INDICATION_V01 0x003F
+#define QMI_IPA_MHI_PRIME_AGGR_INFO_REQ_V01 0x0040
+#define QMI_IPA_MHI_PRIME_AGGR_INFO_RESP_V01 0x0040
+#define QMI_IPA_ADD_OFFLOAD_CONNECTION_REQ_V01 0x0041
+#define QMI_IPA_ADD_OFFLOAD_CONNECTION_RESP_V01 0x0041
+#define QMI_IPA_REMOVE_OFFLOAD_CONNECTION_REQ_V01 0x0042
+#define QMI_IPA_REMOVE_OFFLOAD_CONNECTION_RESP_V01 0x0042
+
 
 /* add for max length*/
 #define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 162
 #define QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01 25
-#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 8
+#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 12
 #define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
-#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369
+#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 33445
 #define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
 #define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 870
 #define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
@@ -2283,7 +2638,7 @@
 #define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01 4
 #define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01 7
 
-#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 22685
+#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 33761
 #define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523
 
 #define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01 4
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index fbbaa5b..d3c7b7a 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -41,6 +41,11 @@
 #define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
 
 /**
+ * Name for the default value denoting an invalid NAT protocol
+ */
+#define IPAHAL_NAT_INVALID_PROTOCOL   0xFF
+
+/**
  * commands supported by IPA driver
  */
 #define IPA_IOCTL_ADD_HDR                       0
@@ -346,9 +351,17 @@
 
 	/* RESERVED PROD			= 94, */
 	IPA_CLIENT_APPS_WAN_COAL_CONS		= 95,
+
+	IPA_CLIENT_MHI_PRIME_TETH_PROD		= 96,
+	IPA_CLIENT_MHI_PRIME_TETH_CONS		= 97,
+
+	IPA_CLIENT_MHI_PRIME_RMNET_PROD		= 98,
+	IPA_CLIENT_MHI_PRIME_RMNET_CONS		= 99,
+
+	IPA_CLIENT_MHI_PRIME_DPL_PROD		= 100,
 };
 
-#define IPA_CLIENT_MAX (IPA_CLIENT_APPS_WAN_COAL_CONS + 1)
+#define IPA_CLIENT_MAX (IPA_CLIENT_MHI_PRIME_DPL_PROD + 1)
 
 #define IPA_CLIENT_WLAN2_PROD IPA_CLIENT_A5_WLAN_AMPDU_PROD
 #define IPA_CLIENT_Q6_DL_NLO_DATA_PROD IPA_CLIENT_Q6_DL_NLO_DATA_PROD
@@ -362,10 +375,17 @@
 #define IPA_CLIENT_WIGIG2_CONS IPA_CLIENT_WIGIG2_CONS
 #define IPA_CLIENT_WIGIG3_CONS IPA_CLIENT_WIGIG3_CONS
 #define IPA_CLIENT_WIGIG4_CONS IPA_CLIENT_WIGIG4_CONS
+#define IPA_CLIENT_APPS_WAN_COAL_CONS IPA_CLIENT_APPS_WAN_COAL_CONS
+#define IPA_CLIENT_MHI_PRIME_TETH_PROD IPA_CLIENT_MHI_PRIME_TETH_PROD
+#define IPA_CLIENT_MHI_PRIME_TETH_CONS IPA_CLIENT_MHI_PRIME_TETH_CONS
+#define IPA_CLIENT_MHI_PRIME_RMNET_PROD IPA_CLIENT_MHI_PRIME_RMNET_PROD
+#define IPA_CLIENT_MHI_PRIME_RMNET_CONS IPA_CLIENT_MHI_PRIME_RMNET_CONS
+#define IPA_CLIENT_MHI_PRIME_DPL_PROD IPA_CLIENT_MHI_PRIME_DPL_PROD
 
 #define IPA_CLIENT_IS_APPS_CONS(client) \
 	((client) == IPA_CLIENT_APPS_LAN_CONS || \
-	(client) == IPA_CLIENT_APPS_WAN_CONS)
+	(client) == IPA_CLIENT_APPS_WAN_CONS || \
+	(client) == IPA_CLIENT_APPS_WAN_COAL_CONS)
 
 #define IPA_CLIENT_IS_USB_CONS(client) \
 	((client) == IPA_CLIENT_USB_CONS || \
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 97ff3c1..e5b3972 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -155,8 +155,8 @@
 };
 
 struct sock_txtime {
-	clockid_t	clockid;	/* reference clockid */
-	__u32		flags;		/* as defined by enum txtime_flags */
+	__kernel_clockid_t	clockid;/* reference clockid */
+	__u32			flags;	/* as defined by enum txtime_flags */
 };
 
 #endif /* _NET_TIMESTAMPING_H */
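struct sock_txtime is the argument to the SO_TXTIME socket option. A hedged sketch of enabling it, assuming a kernel and libc that expose SO_TXTIME and, for enforcement, an etf qdisc on the egress interface:

```c
#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Enable SO_TXTIME with a monotonic reference clock and error reporting
 * (SOF_TXTIME_REPORT_ERRORS is from enum txtime_flags above).
 */
static int enable_txtime(int sock_fd)
{
	struct sock_txtime cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.clockid = CLOCK_MONOTONIC;
	cfg.flags = SOF_TXTIME_REPORT_ERRORS;
	return setsockopt(sock_fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
}
```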
diff --git a/include/uapi/linux/netfilter/xt_HARDIDLETIMER.h b/include/uapi/linux/netfilter/xt_HARDIDLETIMER.h
new file mode 100644
index 0000000..aa6a5ac
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_HARDIDLETIMER.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/* linux/include/linux/netfilter/xt_HARDIDLETIMER.h
+ *
+ * Header file for Xtables timer target module.
+ *
+ * Copyright (c) 2014, 2017, 2019 The Linux Foundation. All rights reserved.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ *
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and forward-ported to 2.6.34
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ */
+
+#ifndef _XT_HARDIDLETIMER_H
+#define _XT_HARDIDLETIMER_H
+
+#include <linux/types.h>
+
+#define MAX_HARDIDLETIMER_LABEL_SIZE 28
+#define NLMSG_MAX_SIZE 64
+
+#define NL_EVENT_TYPE_INACTIVE 0
+#define NL_EVENT_TYPE_ACTIVE 1
+
+struct hardidletimer_tg_info {
+	__u32 timeout;
+
+	char label[MAX_HARDIDLETIMER_LABEL_SIZE];
+
+	/* Use netlink messages for notification in addition to sysfs */
+	__u8 send_nl_msg;
+
+	/* for kernel module internal use only */
+	struct hardidletimer_tg *timer __attribute__((aligned(8)));
+};
+
+#endif
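A hedged sketch of how the userspace side (normally an xtables plugin in the iptables tree) might populate the target info; the helper name here is illustrative only.

```c
#include <string.h>
#include <linux/netfilter/xt_HARDIDLETIMER.h>

/* Fill the target info as an xtables plugin would before handing the
 * rule to the kernel; the timer pointer stays zero for userspace.
 */
static void fill_info(struct hardidletimer_tg_info *info,
		      unsigned int timeout, const char *label)
{
	memset(info, 0, sizeof(*info));
	info->timeout = timeout;	/* seconds */
	info->send_nl_msg = 1;		/* also notify via netlink */
	strncpy(info->label, label, sizeof(info->label) - 1);
}
```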
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 776bc92..5fa3fcc 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -29,7 +29,7 @@
 #define NETLINK_RDMA		20
 #define NETLINK_CRYPTO		21	/* Crypto layer */
 #define NETLINK_SMC		22	/* SMC monitoring */
-
+#define NETLINK_SOCKEV		23	/* Socket Administrative Events */
 #define NETLINK_INET_DIAG	NETLINK_SOCK_DIAG
 
 #define MAX_LINKS 32		
diff --git a/include/uapi/linux/qbt_handler.h b/include/uapi/linux/qbt_handler.h
new file mode 100644
index 0000000..8ebbf1f
--- /dev/null
+++ b/include/uapi/linux/qbt_handler.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_QBT_HANDLER_H_
+#define _UAPI_QBT_HANDLER_H_
+
+#define MAX_NAME_SIZE 32
+
+#define QBT_IS_WUHB_CONNECTED    100
+#define QBT_SEND_KEY_EVENT       101
+#define QBT_ENABLE_IPC           102
+#define QBT_DISABLE_IPC          103
+#define QBT_ENABLE_FD            104
+#define QBT_DISABLE_FD           105
+
+/*
+ * enum qbt_fw_event -
+ *      enumeration of firmware events
+ * @FW_EVENT_FINGER_DOWN - finger down detected
+ * @FW_EVENT_FINGER_UP - finger up detected
+ * @FW_EVENT_IPC - an IPC from the firmware is pending
+ */
+enum qbt_fw_event {
+	FW_EVENT_FINGER_DOWN = 1,
+	FW_EVENT_FINGER_UP = 2,
+	FW_EVENT_IPC = 3,
+};
+
+/*
+ * struct qbt_wuhb_connected_status -
+ *		used to query whether WUHB INT line is connected
+ * @is_wuhb_connected - if non-zero, WUHB INT line is connected
+ */
+struct qbt_wuhb_connected_status {
+	bool is_wuhb_connected;
+};
+
+/*
+ * struct qbt_key_event -
+ *		used to send key event
+ * @key - the key event to send
+ * @value - value of the key event
+ */
+struct qbt_key_event {
+	int key;
+	int value;
+};
+
+#endif /* _UAPI_QBT_HANDLER_H_ */
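A hedged usage sketch: the /dev/qbt_fd node path is an assumption, and note the command codes above are plain integers rather than _IO() encodings.

```c
#include <stdbool.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/qbt_handler.h>

/* Query whether the WUHB INT line is connected. */
static int wuhb_connected(void)
{
	struct qbt_wuhb_connected_status status = { 0 };
	int fd = open("/dev/qbt_fd", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return -1;
	ioctl(fd, QBT_IS_WUHB_CONNECTED, &status);
	close(fd);
	return status.is_wuhb_connected;
}
```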
diff --git a/include/uapi/linux/qrng.h b/include/uapi/linux/qrng.h
new file mode 100644
index 0000000..b999eee
--- /dev/null
+++ b/include/uapi/linux/qrng.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+#ifndef _UAPI_QRNG_H_
+#define _UAPI_QRNG_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QRNG_IOC_MAGIC    0x100
+
+#define QRNG_IOCTL_RESET_BUS_BANDWIDTH \
+	_IO(QRNG_IOC_MAGIC, 1)
+
+#endif /* _UAPI_QRNG_H_ */
diff --git a/include/uapi/linux/sockev.h b/include/uapi/linux/sockev.h
new file mode 100644
index 0000000..df42986
--- /dev/null
+++ b/include/uapi/linux/sockev.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+
+#ifndef _SOCKEV_H_
+#define _SOCKEV_H_
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+#include <linux/socket.h>
+
+enum sknetlink_groups {
+	SKNLGRP_UNICAST,
+	SKNLGRP_SOCKEV,
+	__SKNLGRP_MAX
+};
+
+#define SOCKEV_STR_MAX 32
+
+/*
+ * Socket operation messages
+ */
+
+struct sknlsockevmsg {
+	__u8 event[SOCKEV_STR_MAX];
+	__u32 pid; /* (struct task_struct*)->pid */
+	__u16 skfamily; /* (struct socket*)->sk->sk_family */
+	__u8 skstate; /* (struct socket*)->sk->sk_state */
+	__u8 skprotocol; /* (struct socket*)->sk->sk_protocol */
+	__u16 sktype; /* (struct socket*)->sk->sk_type */
+	__u64 skflags; /* (struct socket*)->sk->sk_flags */
+};
+
+#endif /* _SOCKEV_H_ */
+
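A hedged sketch of a listener for the new NETLINK_SOCKEV family, subscribing to the SKNLGRP_SOCKEV multicast group defined above:

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sockev.h>

static void listen_sockev(void)
{
	struct sockaddr_nl addr;
	char buf[256];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCKEV);

	if (fd < 0)
		return;
	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	/* legacy group bitmask: group N maps to bit (N - 1) */
	addr.nl_groups = 1 << (SKNLGRP_SOCKEV - 1);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0) {
		ssize_t n = recv(fd, buf, sizeof(buf), 0);

		if (n > (ssize_t)NLMSG_HDRLEN) {
			struct sknlsockevmsg *msg =
				NLMSG_DATA((struct nlmsghdr *)buf);
			printf("event %.32s pid %u\n",
			       (const char *)msg->event, msg->pid);
		}
	}
	close(fd);
}
```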
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index b7aa7bb..5e8ca16 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
  */
 
 
-#define TASKSTATS_VERSION	8
+#define TASKSTATS_VERSION	9
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */
 
@@ -164,6 +164,10 @@
 	/* Delay waiting for memory reclaim */
 	__u64	freepages_count;
 	__u64	freepages_delay_total;
+
+	/* Delay waiting for thrashing page */
+	__u64	thrashing_count;
+	__u64	thrashing_delay_total;
 };
 
 
diff --git a/include/uapi/linux/usb/f_mtp.h b/include/uapi/linux/usb/f_mtp.h
new file mode 100644
index 0000000..5032918
--- /dev/null
+++ b/include/uapi/linux/usb/f_mtp.h
@@ -0,0 +1,61 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_MTP_H
+#define _UAPI_LINUX_USB_F_MTP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct mtp_file_range {
+	/* file descriptor for file to transfer */
+	int			fd;
+	/* offset in file for start of transfer */
+	loff_t		offset;
+	/* number of bytes to transfer */
+	int64_t		length;
+	/* MTP command ID for data header,
+	 * used only for MTP_SEND_FILE_WITH_HEADER
+	 */
+	uint16_t	command;
+	/* MTP transaction ID for data header,
+	 * used only for MTP_SEND_FILE_WITH_HEADER
+	 */
+	uint32_t	transaction_id;
+};
+
+struct mtp_event {
+	/* size of the event */
+	size_t		length;
+	/* event data to send */
+	void		*data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE              _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE           _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT             _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, struct mtp_file_range)
+
+#endif /* _UAPI_LINUX_USB_F_MTP_H */
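A minimal sketch of driving the gadget from userspace; the MTP function's device node path varies by product, so only the ioctl call is shown.

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/usb/f_mtp.h>

/* Ask the MTP gadget to stream len bytes of file_fd to the host. */
static int send_file(int mtp_dev_fd, int file_fd, int64_t len)
{
	struct mtp_file_range mfr = {
		.fd = file_fd,
		.offset = 0,
		.length = len,
	};

	return ioctl(mtp_dev_fd, MTP_SEND_FILE, &mfr);
}
```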
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 702b20f..1496008 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -962,6 +962,9 @@
 #define V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE \
 	(V4L2_CID_MPEG_MSM_VIDC_BASE + 119)
 
+#define V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 131)
+
 #define V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER \
 	(V4L2_CID_MPEG_MSM_VIDC_BASE + 120)
 enum v4l2_mpeg_vidc_video_hevc_max_hier_coding_layer {
@@ -976,6 +979,11 @@
 
 #define V4L2_CID_MPEG_VIDC_VENC_CVP_DISABLE \
 	(V4L2_CID_MPEG_MSM_VIDC_BASE + 121)
+#define V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 122)
+
+#define V4L2_CID_MPEG_VIDC_VENC_RC_TIMESTAMP_DISABLE \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 123)
 
 /*  Camera class control IDs */
 
diff --git a/include/uapi/linux/wil6210_uapi.h b/include/uapi/linux/wil6210_uapi.h
new file mode 100644
index 0000000..ecc9599
--- /dev/null
+++ b/include/uapi/linux/wil6210_uapi.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __WIL6210_UAPI_H__
+#define __WIL6210_UAPI_H__
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#include <linux/sockios.h>
+
+/* Numbers SIOCDEVPRIVATE and SIOCDEVPRIVATE + 1
+ * are used by Android devices to implement PNO (preferred network offload).
+ * Although that is a temporary solution, use different numbers to avoid conflicts
+ */
+
+/**
+ * Perform 32-bit I/O operation to the card memory
+ *
+ * User code should arrange data in memory like this:
+ *
+ *	struct wil_memio io;
+ *	struct ifreq ifr = {
+ *		.ifr_data = &io,
+ *	};
+ */
+#define WIL_IOCTL_MEMIO (SIOCDEVPRIVATE + 2)
+
+/**
+ * Perform block I/O operation to the card memory
+ *
+ * User code should arrange data in memory like this:
+ *
+ *	void *buf;
+ *	struct wil_memio_block io = {
+ *		.block = buf,
+ *	};
+ *	struct ifreq ifr = {
+ *		.ifr_data = &io,
+ *	};
+ */
+#define WIL_IOCTL_MEMIO_BLOCK (SIOCDEVPRIVATE + 3)
+
+/** operation to perform */
+#define WIL_MMIO_READ 0
+#define WIL_MMIO_WRITE 1
+#define WIL_MMIO_OP_MASK 0xff
+
+/** addressing mode to use */
+#define WIL_MMIO_ADDR_LINKER (0 << 8)
+#define WIL_MMIO_ADDR_AHB (1 << 8)
+#define WIL_MMIO_ADDR_BAR (2 << 8)
+#define WIL_MMIO_ADDR_MASK 0xff00
+
+struct wil_memio {
+	uint32_t op; /* enum wil_memio_op */
+	uint32_t addr; /* should be 32-bit aligned */
+	uint32_t val;
+};
+
+struct wil_memio_block {
+	uint32_t op; /* enum wil_memio_op */
+	uint32_t addr; /* should be 32-bit aligned */
+	uint32_t size; /* should be multiple of 4 */
+	uint64_t __user block; /* block address */
+};
+
+#endif /* __WIL6210_UAPI_H__ */
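Following the arrangement the header comments describe, a sketch of a 32-bit register read; sock_fd is any open AF_INET socket, and the interface name passed in is an assumption.

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/wil6210_uapi.h>

static int wil_read32(int sock_fd, const char *ifname, uint32_t addr,
		      uint32_t *val)
{
	struct wil_memio io;
	struct ifreq ifr;

	memset(&io, 0, sizeof(io));
	io.op = WIL_MMIO_READ | WIL_MMIO_ADDR_LINKER;
	io.addr = addr;		/* should be 32-bit aligned */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&io;

	if (ioctl(sock_fd, WIL_IOCTL_MEMIO, &ifr) < 0)
		return -1;
	*val = io.val;
	return 0;
}
```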
diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h
index 306450e..bba8eeb 100644
--- a/include/uapi/media/cam_isp.h
+++ b/include/uapi/media/cam_isp.h
@@ -96,6 +96,7 @@
 #define CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG   4
 #define CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG           5
 #define CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2      6
+#define CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG     7
 
 /* Query devices */
 /**
@@ -435,6 +436,37 @@
 /* Acquire Device/HW v2 */
 
 /**
+ * struct cam_isp_core_config - ISP core registers configuration
+ *
+ * @version:                    Version info
+ * @vid_ds16_r2pd:              Enables Y and C merging PD output for video DS16
+ * @vid_ds4_r2pd:               Enables Y and C merging PD output for video DS4
+ * @disp_ds16_r2pd:             Enables Y and C merging PD output for disp DS16
+ * @disp_ds4_r2pd:              Enables Y and C merging PD output for disp DS4
+ * @dsp_streaming_tap_point:    This selects source for DSP streaming interface
+ * @ihist_src_sel:              Selects input for IHIST module
+ * @hdr_be_src_sel:             Selects input for HDR BE module
+ * @hdr_bhist_src_sel:          Selects input for HDR BHIST module
+ * @input_mux_sel_pdaf:         Selects input for PDAF
+ * @input_mux_sel_pp:           Selects input for Pixel Pipe
+ * @reserved:                   Reserved
+ */
+struct cam_isp_core_config {
+	uint32_t     version;
+	uint32_t     vid_ds16_r2pd;
+	uint32_t     vid_ds4_r2pd;
+	uint32_t     disp_ds16_r2pd;
+	uint32_t     disp_ds4_r2pd;
+	uint32_t     dsp_streaming_tap_point;
+	uint32_t     ihist_src_sel;
+	uint32_t     hdr_be_src_sel;
+	uint32_t     hdr_bhist_src_sel;
+	uint32_t     input_mux_sel_pdaf;
+	uint32_t     input_mux_sel_pp;
+	uint32_t     reserved;
+} __attribute__((packed));
+
+/**
  * struct cam_isp_acquire_hw_info - ISP acquire HW params
  *
  * @common_info_version  : Version of common info struct used
diff --git a/include/uapi/media/msm_cvp_private.h b/include/uapi/media/msm_cvp_private.h
index 2be53e6..1200c5c 100644
--- a/include/uapi/media/msm_cvp_private.h
+++ b/include/uapi/media/msm_cvp_private.h
@@ -8,84 +8,107 @@
 #include <linux/videodev2.h>
 
 #define MAX_DFS_HFI_PARAMS 20
+#define HFI_MAX_PLANES 4
 
 /* VIDIOC private cvp command */
 #define VIDIOC_CVP_CMD \
-		_IOWR('V', BASE_VIDIOC_PRIVATE_CVP, struct msm_cvp_arg)
+		_IOWR('V', BASE_VIDIOC_PRIVATE_CVP, struct cvp_kmd_arg)
 
 /* Commands type */
-#define MSM_VIDC_CMD_START		0x10000000
-#define MSM_CVP_CMD_START		(MSM_VIDC_CMD_START + 0x1000)
+#define CVP_KMD_CMD_BASE		0x10000000
+#define CVP_KMD_CMD_START		(CVP_KMD_CMD_BASE + 0x1000)
 
 /*
  * userspace clients pass one of the below arguments type
- * in struct msm_cvp_arg (@type field).
+ * in struct cvp_kmd_arg (@type field).
  */
 
 /*
- * MSM_CVP_GET_SESSION_INFO - this argument type is used to
+ * CVP_KMD_GET_SESSION_INFO - this argument type is used to
  *          get the session information from driver. it passes
- *          struct msm_cvp_session_info {}
+ *          struct cvp_kmd_session_info {}
  */
-#define MSM_CVP_GET_SESSION_INFO	(MSM_CVP_CMD_START + 1)
+#define CVP_KMD_GET_SESSION_INFO	(CVP_KMD_CMD_START + 1)
 
 /*
- * MSM_CVP_REQUEST_POWER - this argument type is used to
+ * CVP_KMD_REQUEST_POWER - this argument type is used to
  *          set the power required to driver. it passes
- *          struct msm_cvp_request_power {}
+ *          struct cvp_kmd_request_power {}
  */
-#define MSM_CVP_REQUEST_POWER		(MSM_CVP_CMD_START + 2)
+#define CVP_KMD_REQUEST_POWER		(CVP_KMD_CMD_START + 2)
 
 /*
- * MSM_CVP_REGISTER_BUFFER - this argument type is used to
+ * CVP_KMD_REGISTER_BUFFER - this argument type is used to
  *          register the buffer to driver. it passes
- *          struct msm_cvp_buffer {}
+ *          struct cvp_kmd_buffer {}
  */
-#define MSM_CVP_REGISTER_BUFFER		(MSM_CVP_CMD_START + 3)
+#define CVP_KMD_REGISTER_BUFFER		(CVP_KMD_CMD_START + 3)
 
 /*
- * MSM_CVP_REGISTER_BUFFER - this argument type is used to
+ * CVP_KMD_UNREGISTER_BUFFER - this argument type is used to
  *          unregister the buffer to driver. it passes
- *          struct msm_cvp_buffer {}
+ *          struct cvp_kmd_buffer {}
  */
-#define MSM_CVP_UNREGISTER_BUFFER	(MSM_CVP_CMD_START + 4)
+#define CVP_KMD_UNREGISTER_BUFFER	(CVP_KMD_CMD_START + 4)
 
-#define MSM_CVP_HFI_SEND_CMD        (MSM_CVP_CMD_START + 5)
+#define CVP_KMD_HFI_SEND_CMD        (CVP_KMD_CMD_START + 5)
 
-#define MSM_CVP_HFI_DFS_CONFIG_CMD  (MSM_CVP_CMD_START + 6)
+#define CVP_KMD_HFI_DFS_CONFIG_CMD  (CVP_KMD_CMD_START + 6)
 
-#define MSM_CVP_HFI_DFS_FRAME_CMD  (MSM_CVP_CMD_START + 7)
+#define CVP_KMD_HFI_DFS_FRAME_CMD  (CVP_KMD_CMD_START + 7)
 
-#define MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE  (MSM_CVP_CMD_START + 8)
+#define CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE  (CVP_KMD_CMD_START + 8)
+
+#define CVP_KMD_HFI_DME_CONFIG_CMD  (CVP_KMD_CMD_START + 9)
+
+#define CVP_KMD_HFI_DME_FRAME_CMD  (CVP_KMD_CMD_START + 10)
+
+#define CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE  (CVP_KMD_CMD_START + 11)
+
+#define CVP_KMD_HFI_PERSIST_CMD  (CVP_KMD_CMD_START + 12)
+
+#define CVP_KMD_HFI_PERSIST_CMD_RESPONSE  (CVP_KMD_CMD_START + 13)
+
+#define CVP_KMD_HFI_DME_FRAME_FENCE_CMD  (CVP_KMD_CMD_START + 14)
+
+#define CVP_KMD_SEND_CMD_PKT	(CVP_KMD_CMD_START + 64)
+
+#define CVP_KMD_RECEIVE_MSG_PKT	 (CVP_KMD_CMD_START + 65)
+
+#define CVP_KMD_SET_SYS_PROPERTY	(CVP_KMD_CMD_START + 66)
+
+#define CVP_KMD_GET_SYS_PROPERTY	(CVP_KMD_CMD_START + 67)
+
+#define CVP_KMD_SESSION_CONTROL		(CVP_KMD_CMD_START + 68)
 
 /* flags */
-#define MSM_CVP_FLAG_UNSECURE			0x00000000
-#define MSM_CVP_FLAG_SECURE			0x00000001
+#define CVP_KMD_FLAG_UNSECURE			0x00000000
+#define CVP_KMD_FLAG_SECURE			0x00000001
 
 /* buffer type */
-#define MSM_CVP_BUFTYPE_INPUT			0x00000001
-#define MSM_CVP_BUFTYPE_OUTPUT			0x00000002
-#define MSM_CVP_BUFTYPE_INTERNAL_1		0x00000003
-#define MSM_CVP_BUFTYPE_INTERNAL_2		0x00000004
+#define CVP_KMD_BUFTYPE_INPUT			0x00000001
+#define CVP_KMD_BUFTYPE_OUTPUT			0x00000002
+#define CVP_KMD_BUFTYPE_INTERNAL_1		0x00000003
+#define CVP_KMD_BUFTYPE_INTERNAL_2		0x00000004
 
 
 /**
- * struct msm_cvp_session_info - session information
+ * struct cvp_kmd_session_info - session information
  * @session_id:    current session id
  */
-struct msm_cvp_session_info {
+struct cvp_kmd_session_info {
 	unsigned int session_id;
 	unsigned int reserved[10];
 };
 
 /**
- * struct msm_cvp_request_power - power / clock data information
+ * struct cvp_kmd_request_power - power / clock data information
  * @clock_cycles_a:  clock cycles per second required for hardware_a
  * @clock_cycles_b:  clock cycles per second required for hardware_b
  * @ddr_bw:        bandwidth required for ddr in bps
  * @sys_cache_bw:  bandwidth required for system cache in bps
  */
-struct msm_cvp_request_power {
+struct cvp_kmd_request_power {
 	unsigned int clock_cycles_a;
 	unsigned int clock_cycles_b;
 	unsigned int ddr_bw;
@@ -94,7 +117,7 @@
 };
 
 /**
- * struct msm_cvp_buffer - buffer information to be registered
+ * struct cvp_kmd_buffer - buffer information to be registered
  * @index:         index of buffer
  * @type:          buffer type
  * @fd:            file descriptor of buffer
@@ -103,7 +126,7 @@
  * @pixelformat:   fourcc format
  * @flags:         buffer flags
  */
-struct msm_cvp_buffer {
+struct cvp_kmd_buffer {
 	unsigned int index;
 	unsigned int type;
 	unsigned int fd;
@@ -115,144 +138,134 @@
 };
 
 /**
- * struct msm_cvp_send_cmd - sending generic HFI command
+ * struct cvp_kmd_send_cmd - sending generic HFI command
  * @cmd_address_fd:   file descriptor of cmd_address
  * @cmd_size:         allocated size of buffer
  */
-struct msm_cvp_send_cmd {
+struct cvp_kmd_send_cmd {
 	unsigned int cmd_address_fd;
 	unsigned int cmd_size;
 	unsigned int reserved[10];
 };
 
 /**
- * enum HFI_COLOR_PLANE_TYPE - define the type of plane
- */
-enum HFI_COLOR_PLANE_TYPE {
-	HFI_COLOR_PLANE_METADATA,
-	HFI_COLOR_PLANE_PICDATA,
-	HFI_MAX_PLANES
-};
-
-/**
- * struct msm_cvp_color_plane_info - color plane info
+ * struct cvp_kmd_color_plane_info - color plane info
  * @stride:      stride of plane
  * @buf_size:    size of plane
  */
-struct msm_cvp_color_plane_info {
+struct cvp_kmd_color_plane_info {
 	int stride[HFI_MAX_PLANES];
 	unsigned int buf_size[HFI_MAX_PLANES];
 };
 
 /**
- * struct msm_cvp_client_data - store generic client
+ * struct cvp_kmd_client_data - store generic client
  *                              data
  * @transactionid:  transaction id
  * @client_data1:   client data to be used during callback
  * @client_data2:   client data to be used during callback
  */
-struct msm_cvp_client_data {
+struct cvp_kmd_client_data {
 	unsigned int transactionid;
 	unsigned int client_data1;
 	unsigned int client_data2;
 };
 
-/**
- * struct msm_cvp_dfsconfig - dfs config packet
- * @cmd_size:               command size in bytes
- * @cmd_address:            command address
- * @size:                   packet size in bytes
- * @packet_type:            HFI_CMD_SESSION_CVP_DFS
- * @session_id:             id value associated with a session
- * @srcbuffer_format:       buffer format of source imagesize
- * @left_plane_info:        left view buffer plane info
- * @right_plane_info:       right view buffer plane info
- * @width:                  image width
- * @height:                 image height
- * @occlusionmask_enable:   0: disable, 1: enable
- * @occlusioncost:          occlusion cost threshold
- * @occlusionbound:         occlusion bound
- * @occlusionshift:         occlusion shift
- * @maxdisparity:           max disparitymap in integer precision
- * @disparityoffset:        disparity offset
- * @medianfilter_enable:    enable median filter on disparity map
- * @occlusionfilling_enable:0: disable, 1: enable
- * @occlusionmaskdump:      0: disable, 1: enable
- * @clientdata:             client data for mapping command
- *                          and message pairs
- */
-struct msm_cvp_dfsconfig {
-	unsigned int cmd_size;
-	unsigned int cmd_address;
-	unsigned int size;
-	unsigned int packet_type;
-	unsigned int session_id;
-	unsigned int srcbuffer_format;
-	struct msm_cvp_color_plane_info left_plane_info;
-	struct msm_cvp_color_plane_info right_plane_info;
-	unsigned int width;
-	unsigned int height;
-	unsigned int occlusionmask_enable;
-	unsigned int occlusioncost;
-	unsigned int occlusionbound;
-	unsigned int occlusionshift;
-	unsigned int maxdisparity;
-	unsigned int disparityoffset;
-	unsigned int medianfilter_enable;
-	unsigned int occlusionfilling_enable;
-	unsigned int occlusionmaskdump;
-	struct msm_cvp_client_data clientdata;
-	unsigned int reserved[MAX_DFS_HFI_PARAMS];
+#define CVP_COLOR_PLANE_INFO_SIZE \
+	sizeof(struct cvp_kmd_color_plane_info)
+#define CVP_CLIENT_DATA_SIZE	sizeof(struct cvp_kmd_client_data)
+#define CVP_DFS_CONFIG_CMD_SIZE   38
+#define CVP_DFS_FRAME_CMD_SIZE 16
+#define CVP_DFS_FRAME_BUFFERS_OFFSET 8
+
+#define CVP_DME_CONFIG_CMD_SIZE   194
+#define CVP_DME_FRAME_CMD_SIZE 28
+#define CVP_DME_FRAME_BUFFERS_OFFSET 12
+#define CVP_DME_BUF_NUM	8
+
+#define CVP_PERSIST_CMD_SIZE 11
+#define CVP_PERSIST_BUFFERS_OFFSET 7
+#define CVP_PSRSIST_BUF_NUM	2
+
+struct cvp_kmd_dfs_config {
+	unsigned int cvp_dfs_config[CVP_DFS_CONFIG_CMD_SIZE];
 };
 
-/**
- * struct msm_cvp_dfsframe - dfs frame packet
- * @cmd_size:                command size in bytes
- * @cmd_address:             command address
- * @size:                    packet size in bytes
- * @packet_type:             HFI_CMD_SESSION_CVP_DFS
- * @session_id:              id value associated with a session
- * @left_buffer_index:       left buffer index
- * @right_buffer_index:      right buffer index
- * @disparitymap_buffer_idx: disparity map buffer index
- * @occlusionmask_buffer_idx:occlusion mask buffer index
- */
-struct msm_cvp_dfsframe {
-	unsigned int cmd_size;
-	unsigned int cmd_address;
-	unsigned int size;
-	unsigned int packet_type;
-	unsigned int session_id;
-	unsigned int left_buffer_index;
-	unsigned int right_buffer_index;
-	unsigned int disparitymap_buffer_idx;
-	unsigned int occlusionmask_buffer_idx;
-	struct msm_cvp_client_data clientdata;
+struct cvp_kmd_dfs_frame {
+	unsigned int frame_data[CVP_DFS_FRAME_CMD_SIZE];
 };
 
+struct cvp_kmd_dme_config {
+	unsigned int cvp_dme_config[CVP_DME_CONFIG_CMD_SIZE];
+};
+
+struct cvp_kmd_dme_frame {
+	unsigned int frame_data[CVP_DME_FRAME_CMD_SIZE];
+};
+
+struct cvp_kmd_persist_buf {
+	unsigned int persist_data[CVP_PERSIST_CMD_SIZE];
+};
+
+#define	MAX_HFI_PKT_SIZE	250
+
+struct cvp_kmd_hfi_packet {
+	unsigned int pkt_data[MAX_HFI_PKT_SIZE];
+};
+
+struct cvp_kmd_sys_property {
+	unsigned int prop_type;
+	unsigned int data;
+};
+
+struct cvp_kmd_sys_properties {
+	unsigned int prop_num;
+	struct cvp_kmd_sys_property prop_data;
+};
+
+#define MAX_HFI_FENCE_SIZE        16
+#define	MAX_HFI_FENCE_OFFSET	(MAX_HFI_PKT_SIZE-MAX_HFI_FENCE_SIZE)
+struct cvp_kmd_hfi_fence_packet {
+	unsigned int pkt_data[MAX_HFI_FENCE_OFFSET];
+	unsigned int fence_data[MAX_HFI_FENCE_SIZE];
+};
+
+
 /**
- * struct msm_cvp_arg - argument passed with VIDIOC_CVP_CMD
+ * struct cvp_kmd_arg - argument passed with VIDIOC_CVP_CMD
+ * To be deprecated
  * @type:          command type
+ * @buf_offset:    offset to buffer list in the command
+ * @buf_num:       number of buffers in the command
  * @session:       session information
  * @req_power:     power information
  * @regbuf:        buffer to be registered
  * @unregbuf:      buffer to be unregistered
  * @send_cmd:      sending generic HFI command
- * @dfsconfig:     sending DFS config command
- * @dfsframe:      sending DFS frame command
+ * @dfs_config:    sending DFS config command
+ * @dfs_frame:     sending DFS frame command
+ * @hfi_pkt:       HFI packet created by user library
+ * @sys_properties: System properties read or set by user library
+ * @hfi_fence_pkt: HFI fence packet created by user library
  */
-struct msm_cvp_arg {
+struct cvp_kmd_arg {
 	unsigned int type;
-	union data_t {
-		struct msm_cvp_session_info session;
-		struct msm_cvp_request_power req_power;
-		struct msm_cvp_buffer regbuf;
-		struct msm_cvp_buffer unregbuf;
-		struct msm_cvp_send_cmd send_cmd;
-		struct msm_cvp_dfsconfig dfsconfig;
-		struct msm_cvp_dfsframe dfsframe;
+	unsigned int buf_offset;
+	unsigned int buf_num;
+	union cvp_data_t {
+		struct cvp_kmd_session_info session;
+		struct cvp_kmd_request_power req_power;
+		struct cvp_kmd_buffer regbuf;
+		struct cvp_kmd_buffer unregbuf;
+		struct cvp_kmd_send_cmd send_cmd;
+		struct cvp_kmd_dfs_config dfs_config;
+		struct cvp_kmd_dfs_frame dfs_frame;
+		struct cvp_kmd_dme_config dme_config;
+		struct cvp_kmd_dme_frame dme_frame;
+		struct cvp_kmd_persist_buf pbuf_cmd;
+		struct cvp_kmd_hfi_packet hfi_pkt;
+		struct cvp_kmd_sys_properties sys_properties;
+		struct cvp_kmd_hfi_fence_packet hfi_fence_pkt;
 	} data;
-	unsigned int reserved[12];
 };
-
 #endif
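A hedged sketch of the renamed UAPI in use, querying the session id; cvp_fd is assumed to be an open handle on the CVP video device node.

```c
#include <string.h>
#include <sys/ioctl.h>
#include <media/msm_cvp_private.h>

static int get_session_id(int cvp_fd, unsigned int *session_id)
{
	struct cvp_kmd_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.type = CVP_KMD_GET_SESSION_INFO;
	if (ioctl(cvp_fd, VIDIOC_CVP_CMD, &arg) < 0)
		return -1;
	*session_id = arg.data.session.session_id;
	return 0;
}
```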
diff --git a/include/uapi/media/msm_vidc_utils.h b/include/uapi/media/msm_vidc_utils.h
index 6316cb0..14ee584 100644
--- a/include/uapi/media/msm_vidc_utils.h
+++ b/include/uapi/media/msm_vidc_utils.h
@@ -268,6 +268,11 @@
 	__u32 chroma_sample_loc_type_bottom_field;
 };
 
+#define  MSM_VIDC_EXTRADATA_HDR_HIST 0x7F100008
+struct msm_vidc_extradata_hdr_hist_payload {
+	__u32 value_count[1024];
+};
+
 #define MSM_VIDC_EXTRADATA_MPEG2_SEQDISP 0x0000000D
 struct msm_vidc_mpeg2_seqdisp_payload {
 	__u32 video_format;
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index d13fd49..6e73f02 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -78,6 +78,7 @@
 	PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
 	PVRDMA_WR_BIND_MW,
 	PVRDMA_WR_REG_SIG_MR,
+	PVRDMA_WR_ERROR,
 };
 
 enum pvrdma_wc_status {
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index 3abd327..7d09e54 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -36,12 +36,9 @@
 	struct usb_device *udev;
 	struct fb_info *info;
 	struct urb_list urbs;
-	struct kref kref;
 	char *backing_buffer;
 	int fb_count;
 	bool virtualized; /* true when physical usb device not present */
-	struct delayed_work init_framebuffer_work;
-	struct delayed_work free_framebuffer_work;
 	atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
 	atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
 	char *edid; /* null until we read edid from hw or get from sysfs */
diff --git a/init/Kconfig b/init/Kconfig
index 6e3059a..0729071 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -504,6 +504,34 @@
 
 	  Say N if unsure.
 
+config PSI
+	bool "Pressure stall information tracking"
+	help
+	  Collect metrics that indicate how overcommitted the CPU, memory,
+	  and IO capacity are in the system.
+
+	  If you say Y here, the kernel will create /proc/pressure/ with the
+	  pressure statistics files cpu, memory, and io. These will indicate
+	  the share of walltime in which some or all tasks in the system are
+	  delayed due to contention of the respective resource.
+
+	  In kernels with cgroup support, cgroups (cgroup2 only) will
+	  have cpu.pressure, memory.pressure, and io.pressure files,
+	  which aggregate pressure stalls for the grouped tasks only.
+
+	  For more details see Documentation/accounting/psi.txt.
+
+	  Say N if unsure.
+
+config PSI_DEFAULT_DISABLED
+	bool "Require boot parameter to enable pressure stall information tracking"
+	default n
+	depends on PSI
+	help
+	  If set, pressure stall information tracking will be disabled
+	  per default but can be enabled through passing psi_enable=1
+	  on the kernel commandline during boot.
+
 endmenu # "CPU/Task time and stats accounting"
 
 config CPU_ISOLATION
@@ -1159,6 +1187,7 @@
 	bool "Dead code and data elimination (EXPERIMENTAL)"
 	depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
 	depends on EXPERT
+	depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
 	depends on $(cc-option,-ffunction-sections -fdata-sections)
 	depends on $(ld-option,--gc-sections)
 	help
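The PSI help text above describes the /proc/pressure/ files; a minimal sketch of consuming one of them (format per Documentation/accounting/psi.txt):

```c
#include <stdio.h>

static void print_memory_pressure(void)
{
	char line[256];
	FILE *f = fopen("/proc/pressure/memory", "r");

	if (!f)
		return;	/* kernel built without CONFIG_PSI, or psi_enable=0 */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "some avg10=... total=..." lines */
	fclose(f);
}
```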
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 3f5bf1a..474525e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -52,6 +52,7 @@
 #define DST	regs[insn->dst_reg]
 #define SRC	regs[insn->src_reg]
 #define FP	regs[BPF_REG_FP]
+#define AX	regs[BPF_REG_AX]
 #define ARG1	regs[BPF_REG_ARG1]
 #define CTX	regs[BPF_REG_CTX]
 #define IMM	insn->imm
@@ -642,6 +643,26 @@
 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 
+	/* Constraints on AX register:
+	 *
+	 * AX register is inaccessible from user space. It is mapped in
+	 * all JITs, and used here for constant blinding rewrites. It is
+	 * typically "stateless" meaning its contents are only valid within
+	 * the executed instruction, but not across several instructions.
+	 * There are a few exceptions however which are further detailed
+	 * below.
+	 *
+	 * Constant blinding is only used by JITs, not in the interpreter.
+	 * The interpreter uses AX in some occasions as a local temporary
+	 * register e.g. in DIV or MOD instructions.
+	 *
+	 * In restricted circumstances, the verifier can also use the AX
+	 * register for rewrites as long as they do not interfere with
+	 * the above cases!
+	 */
+	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
+		goto out;
+
 	if (from->imm == 0 &&
 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
@@ -971,7 +992,6 @@
  */
 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
-	u64 tmp;
 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
 	static const void *jumptable[256] = {
@@ -1045,36 +1065,36 @@
 		(*(s64 *) &DST) >>= IMM;
 		CONT;
 	ALU64_MOD_X:
-		div64_u64_rem(DST, SRC, &tmp);
-		DST = tmp;
+		div64_u64_rem(DST, SRC, &AX);
+		DST = AX;
 		CONT;
 	ALU_MOD_X:
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) SRC);
+		AX = (u32) DST;
+		DST = do_div(AX, (u32) SRC);
 		CONT;
 	ALU64_MOD_K:
-		div64_u64_rem(DST, IMM, &tmp);
-		DST = tmp;
+		div64_u64_rem(DST, IMM, &AX);
+		DST = AX;
 		CONT;
 	ALU_MOD_K:
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) IMM);
+		AX = (u32) DST;
+		DST = do_div(AX, (u32) IMM);
 		CONT;
 	ALU64_DIV_X:
 		DST = div64_u64(DST, SRC);
 		CONT;
 	ALU_DIV_X:
-		tmp = (u32) DST;
-		do_div(tmp, (u32) SRC);
-		DST = (u32) tmp;
+		AX = (u32) DST;
+		do_div(AX, (u32) SRC);
+		DST = (u32) AX;
 		CONT;
 	ALU64_DIV_K:
 		DST = div64_u64(DST, IMM);
 		CONT;
 	ALU_DIV_K:
-		tmp = (u32) DST;
-		do_div(tmp, (u32) IMM);
-		DST = (u32) tmp;
+		AX = (u32) DST;
+		do_div(AX, (u32) IMM);
+		DST = (u32) AX;
 		CONT;
 	ALU_END_TO_BE:
 		switch (IMM) {
@@ -1330,7 +1350,7 @@
 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
 { \
 	u64 stack[stack_size / sizeof(u64)]; \
-	u64 regs[MAX_BPF_REG]; \
+	u64 regs[MAX_BPF_EXT_REG]; \
 \
 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
 	ARG1 = (u64) (unsigned long) ctx; \
@@ -1343,7 +1363,7 @@
 				      const struct bpf_insn *insn) \
 { \
 	u64 stack[stack_size / sizeof(u64)]; \
-	u64 regs[MAX_BPF_REG]; \
+	u64 regs[MAX_BPF_EXT_REG]; \
 \
 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
 	BPF_R1 = r1; \
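To make the AX constraint concrete, a condensed sketch of the BPF_ALU64 | BPF_MOV | BPF_K case of the blinding rewrite performed by bpf_jit_blind_insn(): the real immediate never appears in clear text in the JITed image, and AX is dead after the third instruction.

```c
#include <linux/filter.h>

/* imm_rnd is a per-rewrite random value; `to` has room for 3 insns. */
static void blind_mov_imm(struct bpf_insn *to, const struct bpf_insn *from,
			  u32 imm_rnd)
{
	to[0] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
	to[1] = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
	to[2] = BPF_ALU64_REG(BPF_MOV, from->dst_reg, BPF_REG_AX);
}
```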
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 03cc59e..cebadd6 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -677,7 +677,7 @@
 	}
 
 	if (htab_is_prealloc(htab)) {
-		pcpu_freelist_push(&htab->freelist, &l->fnode);
+		__pcpu_freelist_push(&htab->freelist, &l->fnode);
 	} else {
 		atomic_dec(&htab->count);
 		l->htab = htab;
@@ -739,7 +739,7 @@
 		} else {
 			struct pcpu_freelist_node *l;
 
-			l = pcpu_freelist_pop(&htab->freelist);
+			l = __pcpu_freelist_pop(&htab->freelist);
 			if (!l)
 				return ERR_PTR(-E2BIG);
 			l_new = container_of(l, struct htab_elem, fnode);
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 3bfbf44..9670ee5 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -12,6 +12,7 @@
 struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 {
 	struct bpf_map *inner_map, *inner_map_meta;
+	u32 inner_map_meta_size;
 	struct fd f;
 
 	f = fdget(inner_map_ufd);
@@ -35,7 +36,12 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
+	inner_map_meta_size = sizeof(*inner_map_meta);
+	/* In some cases verifier needs to access beyond just base map. */
+	if (inner_map->ops == &array_map_ops)
+		inner_map_meta_size = sizeof(struct bpf_array);
+
+	inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
 	if (!inner_map_meta) {
 		fdput(f);
 		return ERR_PTR(-ENOMEM);
@@ -45,9 +51,16 @@
 	inner_map_meta->key_size = inner_map->key_size;
 	inner_map_meta->value_size = inner_map->value_size;
 	inner_map_meta->map_flags = inner_map->map_flags;
-	inner_map_meta->ops = inner_map->ops;
 	inner_map_meta->max_entries = inner_map->max_entries;
 
+	/* Misc members not needed in bpf_map_meta_equal() check. */
+	inner_map_meta->ops = inner_map->ops;
+	if (inner_map->ops == &array_map_ops) {
+		inner_map_meta->unpriv_array = inner_map->unpriv_array;
+		container_of(inner_map_meta, struct bpf_array, map)->index_mask =
+		     container_of(inner_map, struct bpf_array, map)->index_mask;
+	}
+
 	fdput(f);
 	return inner_map_meta;
 }
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 673fa6f..0c1b4ba 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -28,8 +28,8 @@
 	free_percpu(s->freelist);
 }
 
-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
-					struct pcpu_freelist_node *node)
+static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
+					 struct pcpu_freelist_node *node)
 {
 	raw_spin_lock(&head->lock);
 	node->next = head->first;
@@ -37,12 +37,22 @@
 	raw_spin_unlock(&head->lock);
 }
 
-void pcpu_freelist_push(struct pcpu_freelist *s,
+void __pcpu_freelist_push(struct pcpu_freelist *s,
 			struct pcpu_freelist_node *node)
 {
 	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
 
-	__pcpu_freelist_push(head, node);
+	___pcpu_freelist_push(head, node);
+}
+
+void pcpu_freelist_push(struct pcpu_freelist *s,
+			struct pcpu_freelist_node *node)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__pcpu_freelist_push(s, node);
+	local_irq_restore(flags);
 }
 
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@
 	for_each_possible_cpu(cpu) {
 again:
 		head = per_cpu_ptr(s->freelist, cpu);
-		__pcpu_freelist_push(head, buf);
+		___pcpu_freelist_push(head, buf);
 		i++;
 		buf += elem_size;
 		if (i == nr_elems)
@@ -74,14 +84,12 @@
 	local_irq_restore(flags);
 }
 
-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
 {
 	struct pcpu_freelist_head *head;
 	struct pcpu_freelist_node *node;
-	unsigned long flags;
 	int orig_cpu, cpu;
 
-	local_irq_save(flags);
 	orig_cpu = cpu = raw_smp_processor_id();
 	while (1) {
 		head = per_cpu_ptr(s->freelist, cpu);
@@ -89,16 +97,25 @@
 		node = head->first;
 		if (node) {
 			head->first = node->next;
-			raw_spin_unlock_irqrestore(&head->lock, flags);
+			raw_spin_unlock(&head->lock);
 			return node;
 		}
 		raw_spin_unlock(&head->lock);
 		cpu = cpumask_next(cpu, cpu_possible_mask);
 		if (cpu >= nr_cpu_ids)
 			cpu = 0;
-		if (cpu == orig_cpu) {
-			local_irq_restore(flags);
+		if (cpu == orig_cpu)
 			return NULL;
-		}
 	}
 }
+
+struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+	struct pcpu_freelist_node *ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = __pcpu_freelist_pop(s);
+	local_irq_restore(flags);
+	return ret;
+}
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
index 3049aae..c396011 100644
--- a/kernel/bpf/percpu_freelist.h
+++ b/kernel/bpf/percpu_freelist.h
@@ -22,8 +22,12 @@
 	struct pcpu_freelist_node *next;
 };
 
+/* pcpu_freelist_* do spin_lock_irqsave. */
 void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
 struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
+/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
+void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 			    u32 nr_elems);
 int pcpu_freelist_init(struct pcpu_freelist *);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 8061a43..6a32933 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -180,11 +180,14 @@
 
 		if (nhdr->n_type == BPF_BUILD_ID &&
 		    nhdr->n_namesz == sizeof("GNU") &&
-		    nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
+		    nhdr->n_descsz > 0 &&
+		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
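+			/* Copy only the advertised note size and zero-fill
+			 * the remainder of the fixed-size build id buffer.
+			 */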
 			memcpy(build_id,
 			       note_start + note_offs +
 			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
-			       BPF_BUILD_ID_SIZE);
+			       nhdr->n_descsz);
+			memset(build_id + nhdr->n_descsz, 0,
+			       BPF_BUILD_ID_SIZE - nhdr->n_descsz);
 			return 0;
 		}
 		new_offs = note_offs + sizeof(Elf32_Nhdr) +
@@ -260,7 +263,7 @@
 		return -EFAULT;	/* page not mapped */
 
 	ret = -EINVAL;
-	page_addr = page_address(page);
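+	/* The page may live in highmem; map it instead of assuming a
+	 * lowmem linear address.
+	 */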
+	page_addr = kmap_atomic(page);
 	ehdr = (Elf32_Ehdr *)page_addr;
 
 	/* compare magic \x7f "ELF" */
@@ -276,6 +279,7 @@
 	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
 		ret = stack_map_get_build_id_64(page_addr, build_id);
 out:
+	kunmap_atomic(page_addr);
 	put_page(page);
 	return ret;
 }
@@ -310,6 +314,7 @@
 		for (i = 0; i < trace_nr; i++) {
 			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
 			id_offs[i].ip = ips[i];
+			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
 		}
 		return;
 	}
@@ -320,6 +325,7 @@
 			/* per entry fall back to ips */
 			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
 			id_offs[i].ip = ips[i];
+			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
 			continue;
 		}
 		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 382c09d..cc40b8b 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -701,8 +701,13 @@
 
 	if (bpf_map_is_dev_bound(map)) {
 		err = bpf_map_offload_lookup_elem(map, key, value);
-	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+		goto done;
+	}
+
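+	/* Bump bpf_prog_active to keep BPF programs from touching this
+	 * map on this CPU while the value is copied out.
+	 */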
+	preempt_disable();
+	this_cpu_inc(bpf_prog_active);
+	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_copy(map, key, value);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_copy(map, key, value);
@@ -722,7 +727,10 @@
 		rcu_read_unlock();
 		err = ptr ? 0 : -ENOENT;
 	}
+	this_cpu_dec(bpf_prog_active);
+	preempt_enable();
 
+done:
 	if (err)
 		goto free_value;
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2954e4b..bcb42aa 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -156,6 +156,7 @@
 
 #define BPF_COMPLEXITY_LIMIT_INSNS	131072
 #define BPF_COMPLEXITY_LIMIT_STACK	1024
+#define BPF_COMPLEXITY_LIMIT_STATES	64
 
 #define BPF_MAP_PTR_UNPRIV	1UL
 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
@@ -465,6 +466,7 @@
 		free_func_state(dst_state->frame[i]);
 		dst_state->frame[i] = NULL;
 	}
+	dst_state->speculative = src->speculative;
 	dst_state->curframe = src->curframe;
 	dst_state->parent = src->parent;
 	for (i = 0; i <= src->curframe; i++) {
@@ -510,7 +512,8 @@
 }
 
 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
-					     int insn_idx, int prev_insn_idx)
+					     int insn_idx, int prev_insn_idx,
+					     bool speculative)
 {
 	struct bpf_verifier_state *cur = env->cur_state;
 	struct bpf_verifier_stack_elem *elem;
@@ -528,6 +531,7 @@
 	err = copy_verifier_state(&elem->st, cur);
 	if (err)
 		goto err;
+	elem->st.speculative |= speculative;
 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
 		verbose(env, "BPF program is too complex\n");
 		goto err;
@@ -1237,6 +1241,31 @@
 	}
 }
 
+static int check_stack_access(struct bpf_verifier_env *env,
+			      const struct bpf_reg_state *reg,
+			      int off, int size)
+{
+	/* Stack accesses must be at a fixed offset, so that we
+	 * can determine what type of data was returned. See
+	 * check_stack_read().
+	 */
+	if (!tnum_is_const(reg->var_off)) {
+		char tn_buf[48];
+
+		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+		verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
+			tn_buf, off, size);
+		return -EACCES;
+	}
+
+	if (off >= 0 || off < -MAX_BPF_STACK) {
+		verbose(env, "invalid stack off=%d size=%d\n", off, size);
+		return -EACCES;
+	}
+
+	return 0;
+}
+
 /* check read/write into map element returned by bpf_map_lookup_elem() */
 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
 			      int size, bool zero_size_allowed)
@@ -1268,13 +1297,17 @@
 	 */
 	if (env->log.level)
 		print_verifier_state(env, state);
+
 	/* The minimum value is only important with signed
 	 * comparisons where we can't assume the floor of a
 	 * value is 0.  If we are using signed variables for our
 	 * indexes we need to make sure that whatever we use
 	 * will have a set floor within our range.
 	 */
-	if (reg->smin_value < 0) {
+	if (reg->smin_value < 0 &&
+	    (reg->smin_value == S64_MIN ||
+	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
+	      reg->smin_value + off < 0)) {
 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
 			regno);
 		return -EACCES;
@@ -1735,24 +1768,10 @@
 		}
 
 	} else if (reg->type == PTR_TO_STACK) {
-		/* stack accesses must be at a fixed offset, so that we can
-		 * determine what type of data were returned.
-		 * See check_stack_read().
-		 */
-		if (!tnum_is_const(reg->var_off)) {
-			char tn_buf[48];
-
-			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
-			verbose(env, "variable stack access var_off=%s off=%d size=%d",
-				tn_buf, off, size);
-			return -EACCES;
-		}
 		off += reg->var_off.value;
-		if (off >= 0 || off < -MAX_BPF_STACK) {
-			verbose(env, "invalid stack off=%d size=%d\n", off,
-				size);
-			return -EACCES;
-		}
+		err = check_stack_access(env, reg, off, size);
+		if (err)
+			return err;
 
 		state = func(env, reg);
 		err = update_stack_depth(env, state, off);
@@ -2682,6 +2701,125 @@
 	return true;
 }
 
+static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
+{
+	return &env->insn_aux_data[env->insn_idx];
+}
+
+static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+			      u32 *ptr_limit, u8 opcode, bool off_is_neg)
+{
+	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+			    (opcode == BPF_SUB && !off_is_neg);
+	u32 off;
+
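+	/* ptr_limit is how many bytes the offset register may move the
+	 * pointer in the unsafe direction before masking has to force
+	 * it to zero.
+	 */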
+	switch (ptr_reg->type) {
+	case PTR_TO_STACK:
+		off = ptr_reg->off + ptr_reg->var_off.value;
+		if (mask_to_left)
+			*ptr_limit = MAX_BPF_STACK + off;
+		else
+			*ptr_limit = -off;
+		return 0;
+	case PTR_TO_MAP_VALUE:
+		if (mask_to_left) {
+			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+		} else {
+			off = ptr_reg->smin_value + ptr_reg->off;
+			*ptr_limit = ptr_reg->map_ptr->value_size - off;
+		}
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
+				    const struct bpf_insn *insn)
+{
+	return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
+}
+
+static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
+				       u32 alu_state, u32 alu_limit)
+{
+	/* If we arrived here from different branches with different
+	 * state or limits to sanitize, then this won't work.
+	 */
+	if (aux->alu_state &&
+	    (aux->alu_state != alu_state ||
+	     aux->alu_limit != alu_limit))
+		return -EACCES;
+
+	/* Corresponding fixup done in fixup_bpf_calls(). */
+	aux->alu_state = alu_state;
+	aux->alu_limit = alu_limit;
+	return 0;
+}
+
+static int sanitize_val_alu(struct bpf_verifier_env *env,
+			    struct bpf_insn *insn)
+{
+	struct bpf_insn_aux_data *aux = cur_aux(env);
+
+	if (can_skip_alu_sanitation(env, insn))
+		return 0;
+
+	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+}
+
+static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+			    struct bpf_insn *insn,
+			    const struct bpf_reg_state *ptr_reg,
+			    struct bpf_reg_state *dst_reg,
+			    bool off_is_neg)
+{
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_insn_aux_data *aux = cur_aux(env);
+	bool ptr_is_dst_reg = ptr_reg == dst_reg;
+	u8 opcode = BPF_OP(insn->code);
+	u32 alu_state, alu_limit;
+	struct bpf_reg_state tmp;
+	bool ret;
+
+	if (can_skip_alu_sanitation(env, insn))
+		return 0;
+
+	/* We already marked aux for masking from non-speculative
+	 * paths, thus we got here in the first place. We only care
+	 * to explore bad access from here.
+	 */
+	if (vstate->speculative)
+		goto do_sim;
+
+	alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+	alu_state |= ptr_is_dst_reg ?
+		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+
+	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
+		return 0;
+	if (update_alu_sanitation_state(aux, alu_state, alu_limit))
+		return -EACCES;
+do_sim:
+	/* Simulate and find potential out-of-bounds access under
+	 * speculative execution from truncation as a result of
+	 * masking when off was not within expected range. If off
+	 * sits in dst, then we temporarily need to move ptr there
+	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
+	 * for cases where we use K-based arithmetic in one direction
+	 * and truncated reg-based in the other in order to explore
+	 * bad access.
+	 */
+	if (!ptr_is_dst_reg) {
+		tmp = *dst_reg;
+		*dst_reg = *ptr_reg;
+	}
+	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+	if (!ptr_is_dst_reg)
+		*dst_reg = tmp;
+	return !ret ? -EFAULT : 0;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
@@ -2700,8 +2838,9 @@
 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
+	u32 dst = insn->dst_reg, src = insn->src_reg;
 	u8 opcode = BPF_OP(insn->code);
-	u32 dst = insn->dst_reg;
+	int ret;
 
 	dst_reg = &regs[dst];
 
@@ -2737,6 +2876,12 @@
 			dst);
 		return -EACCES;
 	}
+	if (ptr_reg->type == PTR_TO_MAP_VALUE &&
+	    !env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
+		verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
+			off_reg == dst_reg ? dst : src);
+		return -EACCES;
+	}
 
 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
 	 * The id may be overwritten later if we create a new variable offset.
@@ -2750,6 +2895,11 @@
 
 	switch (opcode) {
 	case BPF_ADD:
+		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
+		if (ret < 0) {
+			verbose(env, "R%d tried to add from different maps or paths\n", dst);
+			return ret;
+		}
 		/* We can take a fixed offset as long as it doesn't overflow
 		 * the s32 'off' field
 		 */
@@ -2800,6 +2950,11 @@
 		}
 		break;
 	case BPF_SUB:
+		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
+		if (ret < 0) {
+			verbose(env, "R%d tried to sub from different maps or paths\n", dst);
+			return ret;
+		}
 		if (dst_reg == off_reg) {
 			/* scalar -= pointer.  Creates an unknown scalar */
 			verbose(env, "R%d tried to subtract pointer from scalar\n",
@@ -2879,6 +3034,25 @@
 	__update_reg_bounds(dst_reg);
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
+
+	/* For unprivileged we require that resulting offset must be in bounds
+	 * in order to be able to sanitize access later on.
+	 */
+	if (!env->allow_ptr_leaks) {
+		if (dst_reg->type == PTR_TO_MAP_VALUE &&
+		    check_map_access(env, dst, dst_reg->off, 1, false)) {
+			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
+				"prohibited for !root\n", dst);
+			return -EACCES;
+		} else if (dst_reg->type == PTR_TO_STACK &&
+			   check_stack_access(env, dst_reg, dst_reg->off +
+					      dst_reg->var_off.value, 1)) {
+			verbose(env, "R%d stack pointer arithmetic goes out of range, "
+				"prohibited for !root\n", dst);
+			return -EACCES;
+		}
+	}
+
 	return 0;
 }
 
@@ -2897,6 +3071,8 @@
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+	u32 dst = insn->dst_reg;
+	int ret;
 
 	if (insn_bitness == 32) {
 		/* Relevant for 32-bit RSH: Information can propagate towards
@@ -2931,6 +3107,11 @@
 
 	switch (opcode) {
 	case BPF_ADD:
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0) {
+			verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
+			return ret;
+		}
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
 			dst_reg->smin_value = S64_MIN;
@@ -2950,6 +3131,11 @@
 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
 		break;
 	case BPF_SUB:
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0) {
+			verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
+			return ret;
+		}
 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
 			/* Overflow possible, we know nothing */
@@ -3285,12 +3471,15 @@
 			return err;
 
 		if (BPF_SRC(insn->code) == BPF_X) {
+			struct bpf_reg_state *src_reg = regs + insn->src_reg;
+			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
+
 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
 				/* case: R1 = R2
 				 * copy register state to dest reg
 				 */
-				regs[insn->dst_reg] = regs[insn->src_reg];
-				regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
+				*dst_reg = *src_reg;
+				dst_reg->live |= REG_LIVE_WRITTEN;
 			} else {
 				/* R1 = (u32) R2 */
 				if (is_pointer_value(env, insn->src_reg)) {
@@ -3298,9 +3487,14 @@
 						"R%d partial copy of pointer\n",
 						insn->src_reg);
 					return -EACCES;
+				} else if (src_reg->type == SCALAR_VALUE) {
+					*dst_reg = *src_reg;
+					dst_reg->live |= REG_LIVE_WRITTEN;
+				} else {
+					mark_reg_unknown(env, regs,
+							 insn->dst_reg);
 				}
-				mark_reg_unknown(env, regs, insn->dst_reg);
-				coerce_reg_to_size(&regs[insn->dst_reg], 4);
+				coerce_reg_to_size(dst_reg, 4);
 			}
 		} else {
 			/* case: R = imm
@@ -3467,6 +3661,79 @@
 	}
 }
 
+/* compute branch direction of the expression "if (reg opcode val) goto target;"
+ * and return:
+ *  1 - branch will be taken and "goto target" will be executed
+ *  0 - branch will not be taken and fall-through to next insn
+ * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's value range is [0,10]
+ */
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
+{
+	if (__is_pointer_value(false, reg))
+		return -1;
+
+	switch (opcode) {
+	case BPF_JEQ:
+		if (tnum_is_const(reg->var_off))
+			return !!tnum_equals_const(reg->var_off, val);
+		break;
+	case BPF_JNE:
+		if (tnum_is_const(reg->var_off))
+			return !tnum_equals_const(reg->var_off, val);
+		break;
+	case BPF_JGT:
+		if (reg->umin_value > val)
+			return 1;
+		else if (reg->umax_value <= val)
+			return 0;
+		break;
+	case BPF_JSGT:
+		if (reg->smin_value > (s64)val)
+			return 1;
+		else if (reg->smax_value < (s64)val)
+			return 0;
+		break;
+	case BPF_JLT:
+		if (reg->umax_value < val)
+			return 1;
+		else if (reg->umin_value >= val)
+			return 0;
+		break;
+	case BPF_JSLT:
+		if (reg->smax_value < (s64)val)
+			return 1;
+		else if (reg->smin_value >= (s64)val)
+			return 0;
+		break;
+	case BPF_JGE:
+		if (reg->umin_value >= val)
+			return 1;
+		else if (reg->umax_value < val)
+			return 0;
+		break;
+	case BPF_JSGE:
+		if (reg->smin_value >= (s64)val)
+			return 1;
+		else if (reg->smax_value < (s64)val)
+			return 0;
+		break;
+	case BPF_JLE:
+		if (reg->umax_value <= val)
+			return 1;
+		else if (reg->umin_value > val)
+			return 0;
+		break;
+	case BPF_JSLE:
+		if (reg->smax_value <= (s64)val)
+			return 1;
+		else if (reg->smin_value > (s64)val)
+			return 0;
+		break;
+	}
+
+	return -1;
+}
+
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -3860,28 +4127,23 @@
 
 	dst_reg = &regs[insn->dst_reg];
 
-	/* detect if R == 0 where R was initialized to zero earlier */
-	if (BPF_SRC(insn->code) == BPF_K &&
-	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
-	    dst_reg->type == SCALAR_VALUE &&
-	    tnum_is_const(dst_reg->var_off)) {
-		if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
-		    (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
-			/* if (imm == imm) goto pc+off;
-			 * only follow the goto, ignore fall-through
-			 */
+	if (BPF_SRC(insn->code) == BPF_K) {
+		int pred = is_branch_taken(dst_reg, insn->imm, opcode);
+
+		if (pred == 1) {
+			 /* only follow the goto, ignore fall-through */
 			*insn_idx += insn->off;
 			return 0;
-		} else {
-			/* if (imm != imm) goto pc+off;
-			 * only follow fall-through branch, since
+		} else if (pred == 0) {
+			/* only follow fall-through branch, since
 			 * that's where the program will go
 			 */
 			return 0;
 		}
 	}
 
-	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
+	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
+				  false);
 	if (!other_branch)
 		return -EFAULT;
 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
@@ -4596,6 +4858,12 @@
 	if (old->curframe != cur->curframe)
 		return false;
 
+	/* Verification state from speculative execution simulation
+	 * must never prune a non-speculative execution one.
+	 */
+	if (old->speculative && !cur->speculative)
+		return false;
+
 	/* for states to be equal callsites have to be the same
 	 * and all frame states need to be equivalent
 	 */
@@ -4660,7 +4928,7 @@
 	struct bpf_verifier_state_list *new_sl;
 	struct bpf_verifier_state_list *sl;
 	struct bpf_verifier_state *cur = env->cur_state;
-	int i, j, err;
+	int i, j, err, states_cnt = 0;
 
 	sl = env->explored_states[insn_idx];
 	if (!sl)
@@ -4687,8 +4955,12 @@
 			return 1;
 		}
 		sl = sl->next;
+		states_cnt++;
 	}
 
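+	/* For unprivileged programs, stop recording new states for this
+	 * insn once the list grows past the limit; further paths are
+	 * simply re-verified instead of stored.
+	 */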
+	if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
+		return 0;
+
 	/* there were no equivalent states, remember current one.
 	 * technically the current state is not proven to be safe yet,
 	 * but it will either reach outer most bpf_exit (which means it's safe)
@@ -4736,7 +5008,6 @@
 	struct bpf_insn *insns = env->prog->insnsi;
 	struct bpf_reg_state *regs;
 	int insn_cnt = env->prog->len, i;
-	int insn_idx, prev_insn_idx = 0;
 	int insn_processed = 0;
 	bool do_print_state = false;
 
@@ -4744,7 +5015,7 @@
 	if (!state)
 		return -ENOMEM;
 	state->curframe = 0;
-	state->parent = NULL;
+	state->speculative = false;
 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
 	if (!state->frame[0]) {
 		kfree(state);
@@ -4755,19 +5026,19 @@
 			BPF_MAIN_FUNC /* callsite */,
 			0 /* frameno */,
 			0 /* subprogno, zero == main subprog */);
-	insn_idx = 0;
+
 	for (;;) {
 		struct bpf_insn *insn;
 		u8 class;
 		int err;
 
-		if (insn_idx >= insn_cnt) {
+		if (env->insn_idx >= insn_cnt) {
 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
-				insn_idx, insn_cnt);
+				env->insn_idx, insn_cnt);
 			return -EFAULT;
 		}
 
-		insn = &insns[insn_idx];
+		insn = &insns[env->insn_idx];
 		class = BPF_CLASS(insn->code);
 
 		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
@@ -4777,17 +5048,19 @@
 			return -E2BIG;
 		}
 
-		err = is_state_visited(env, insn_idx);
+		err = is_state_visited(env, env->insn_idx);
 		if (err < 0)
 			return err;
 		if (err == 1) {
 			/* found equivalent state, can prune the search */
 			if (env->log.level) {
 				if (do_print_state)
-					verbose(env, "\nfrom %d to %d: safe\n",
-						prev_insn_idx, insn_idx);
+					verbose(env, "\nfrom %d to %d%s: safe\n",
+						env->prev_insn_idx, env->insn_idx,
+						env->cur_state->speculative ?
+						" (speculative execution)" : "");
 				else
-					verbose(env, "%d: safe\n", insn_idx);
+					verbose(env, "%d: safe\n", env->insn_idx);
 			}
 			goto process_bpf_exit;
 		}
@@ -4800,10 +5073,12 @@
 
 		if (env->log.level > 1 || (env->log.level && do_print_state)) {
 			if (env->log.level > 1)
-				verbose(env, "%d:", insn_idx);
+				verbose(env, "%d:", env->insn_idx);
 			else
-				verbose(env, "\nfrom %d to %d:",
-					prev_insn_idx, insn_idx);
+				verbose(env, "\nfrom %d to %d%s:",
+					env->prev_insn_idx, env->insn_idx,
+					env->cur_state->speculative ?
+					" (speculative execution)" : "");
 			print_verifier_state(env, state->frame[state->curframe]);
 			do_print_state = false;
 		}
@@ -4814,19 +5089,20 @@
 				.private_data	= env,
 			};
 
-			verbose(env, "%d: ", insn_idx);
+			verbose(env, "%d: ", env->insn_idx);
 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
 		}
 
 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
-			err = bpf_prog_offload_verify_insn(env, insn_idx,
-							   prev_insn_idx);
+			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
+							   env->prev_insn_idx);
 			if (err)
 				return err;
 		}
 
 		regs = cur_regs(env);
-		env->insn_aux_data[insn_idx].seen = true;
+		env->insn_aux_data[env->insn_idx].seen = true;
+
 		if (class == BPF_ALU || class == BPF_ALU64) {
 			err = check_alu_op(env, insn);
 			if (err)
@@ -4851,13 +5127,13 @@
 			/* check that memory (src_reg + off) is readable,
 			 * the state of dst_reg will be updated by this func
 			 */
-			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
-					       BPF_SIZE(insn->code), BPF_READ,
-					       insn->dst_reg, false);
+			err = check_mem_access(env, env->insn_idx, insn->src_reg,
+					       insn->off, BPF_SIZE(insn->code),
+					       BPF_READ, insn->dst_reg, false);
 			if (err)
 				return err;
 
-			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
+			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
 
 			if (*prev_src_type == NOT_INIT) {
 				/* saw a valid insn
@@ -4884,10 +5160,10 @@
 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
 
 			if (BPF_MODE(insn->code) == BPF_XADD) {
-				err = check_xadd(env, insn_idx, insn);
+				err = check_xadd(env, env->insn_idx, insn);
 				if (err)
 					return err;
-				insn_idx++;
+				env->insn_idx++;
 				continue;
 			}
 
@@ -4903,13 +5179,13 @@
 			dst_reg_type = regs[insn->dst_reg].type;
 
 			/* check that memory (dst_reg + off) is writeable */
-			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-					       BPF_SIZE(insn->code), BPF_WRITE,
-					       insn->src_reg, false);
+			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
+					       insn->off, BPF_SIZE(insn->code),
+					       BPF_WRITE, insn->src_reg, false);
 			if (err)
 				return err;
 
-			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
+			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
 
 			if (*prev_dst_type == NOT_INIT) {
 				*prev_dst_type = dst_reg_type;
@@ -4938,9 +5214,9 @@
 			}
 
 			/* check that memory (dst_reg + off) is writeable */
-			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-					       BPF_SIZE(insn->code), BPF_WRITE,
-					       -1, false);
+			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
+					       insn->off, BPF_SIZE(insn->code),
+					       BPF_WRITE, -1, false);
 			if (err)
 				return err;
 
@@ -4958,9 +5234,9 @@
 				}
 
 				if (insn->src_reg == BPF_PSEUDO_CALL)
-					err = check_func_call(env, insn, &insn_idx);
+					err = check_func_call(env, insn, &env->insn_idx);
 				else
-					err = check_helper_call(env, insn->imm, insn_idx);
+					err = check_helper_call(env, insn->imm, env->insn_idx);
 				if (err)
 					return err;
 
@@ -4973,7 +5249,7 @@
 					return -EINVAL;
 				}
 
-				insn_idx += insn->off + 1;
+				env->insn_idx += insn->off + 1;
 				continue;
 
 			} else if (opcode == BPF_EXIT) {
@@ -4987,8 +5263,8 @@
 
 				if (state->curframe) {
 					/* exit from nested function */
-					prev_insn_idx = insn_idx;
-					err = prepare_func_exit(env, &insn_idx);
+					env->prev_insn_idx = env->insn_idx;
+					err = prepare_func_exit(env, &env->insn_idx);
 					if (err)
 						return err;
 					do_print_state = true;
@@ -5014,7 +5290,8 @@
 				if (err)
 					return err;
 process_bpf_exit:
-				err = pop_stack(env, &prev_insn_idx, &insn_idx);
+				err = pop_stack(env, &env->prev_insn_idx,
+						&env->insn_idx);
 				if (err < 0) {
 					if (err != -ENOENT)
 						return err;
@@ -5024,7 +5301,7 @@
 					continue;
 				}
 			} else {
-				err = check_cond_jmp_op(env, insn, &insn_idx);
+				err = check_cond_jmp_op(env, insn, &env->insn_idx);
 				if (err)
 					return err;
 			}
@@ -5041,8 +5318,8 @@
 				if (err)
 					return err;
 
-				insn_idx++;
-				env->insn_aux_data[insn_idx].seen = true;
+				env->insn_idx++;
+				env->insn_aux_data[env->insn_idx].seen = true;
 			} else {
 				verbose(env, "invalid BPF_LD mode\n");
 				return -EINVAL;
@@ -5052,7 +5329,7 @@
 			return -EINVAL;
 		}
 
-		insn_idx++;
+		env->insn_idx++;
 	}
 
 	verbose(env, "processed %d insns (limit %d), stack depth ",
@@ -5341,10 +5618,10 @@
 	int i, cnt, size, ctx_field_size, delta = 0;
 	const int insn_cnt = env->prog->len;
 	struct bpf_insn insn_buf[16], *insn;
+	u32 target_size, size_default, off;
 	struct bpf_prog *new_prog;
 	enum bpf_access_type type;
 	bool is_narrower_load;
-	u32 target_size;
 
 	if (ops->gen_prologue) {
 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
@@ -5421,9 +5698,9 @@
 		 * we will apply proper mask to the result.
 		 */
 		is_narrower_load = size < ctx_field_size;
+		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
+		off = insn->off;
 		if (is_narrower_load) {
-			u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
-			u32 off = insn->off;
 			u8 size_code;
 
 			if (type == BPF_WRITE) {
@@ -5451,12 +5728,23 @@
 		}
 
 		if (is_narrower_load && size < target_size) {
-			if (ctx_field_size <= 4)
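+			/* A narrow load that does not start at the low end
+			 * of the field needs its bytes shifted down before
+			 * the width mask is applied.
+			 */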
+			u8 shift = (off & (size_default - 1)) * 8;
+
+			if (ctx_field_size <= 4) {
+				if (shift)
+					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
+									insn->dst_reg,
+									shift);
 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
 								(1 << size * 8) - 1);
-			else
+			} else {
+				if (shift)
+					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
+									insn->dst_reg,
+									shift);
 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
 								(1 << size * 8) - 1);
+			}
 		}
 
 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
@@ -5737,6 +6025,58 @@
 			continue;
 		}
 
+		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
+		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
+			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
+			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
+			struct bpf_insn insn_buf[16];
+			struct bpf_insn *patch = &insn_buf[0];
+			bool issrc, isneg;
+			u32 off_reg;
+
+			aux = &env->insn_aux_data[i + delta];
+			if (!aux->alu_state ||
+			    aux->alu_state == BPF_ALU_NON_POINTER)
+				continue;
+
+			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
+			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
+				BPF_ALU_SANITIZE_SRC;
+
+			off_reg = issrc ? insn->src_reg : insn->dst_reg;
+			if (isneg)
+				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
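+			/* AX = (limit - 1) - off; OR-ing off back in sets the
+			 * sign bit iff off is negative or exceeds limit - 1.
+			 * NEG plus arithmetic right shift by 63 turns that
+			 * into all-ones for in-range offsets and all-zeros
+			 * otherwise, which the AND below applies to off.
+			 */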
+			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
+			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+			if (issrc) {
+				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
+							 off_reg);
+				insn->src_reg = BPF_REG_AX;
+			} else {
+				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
+							 BPF_REG_AX);
+			}
+			if (isneg)
+				insn->code = insn->code == code_add ?
+					     code_sub : code_add;
+			*patch++ = *insn;
+			if (issrc && isneg)
+				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+			cnt = patch - insn_buf;
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta    += cnt - 1;
+			env->prog = prog = new_prog;
+			insn      = new_prog->insnsi + i + delta;
+			continue;
+		}
+
 		if (insn->code != (BPF_JMP | BPF_CALL))
 			continue;
 		if (insn->src_reg == BPF_PSEUDO_CALL)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 4a3dae2..eba5cab 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -55,6 +55,7 @@
 #include <linux/nsproxy.h>
 #include <linux/file.h>
 #include <linux/sched/cputime.h>
+#include <linux/psi.h>
 #include <net/sock.h>
 
 #define CREATE_TRACE_POINTS
@@ -832,7 +833,7 @@
 		 */
 		WARN_ON_ONCE(task->flags & PF_EXITING);
 
-		rcu_assign_pointer(task->cgroups, to_cset);
+		cgroup_move_task(task, to_cset);
 		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
 							     &to_cset->tasks);
 	}
@@ -1743,7 +1744,7 @@
 
 	*root_flags = 0;
 
-	if (!data)
+	if (!data || *data == '\0')
 		return 0;
 
 	while ((token = strsep(&data, ",")) != NULL) {
@@ -3416,6 +3417,79 @@
 	return ret;
 }
 
+#ifdef CONFIG_PSI
+static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
+{
+	return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_IO);
+}
+static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
+{
+	return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_MEM);
+}
+static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
+{
+	return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_CPU);
+}
+
+static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
+					  size_t nbytes, enum psi_res res)
+{
+	struct psi_trigger *new;
+	struct cgroup *cgrp;
+
+	cgrp = cgroup_kn_lock_live(of->kn, false);
+	if (!cgrp)
+		return -ENODEV;
+
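+	/* Pin the cgroup so it stays valid after the kernfs lock is
+	 * dropped for the trigger creation below.
+	 */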
+	cgroup_get(cgrp);
+	cgroup_kn_unlock(of->kn);
+
+	new = psi_trigger_create(&cgrp->psi, buf, nbytes, res);
+	if (IS_ERR(new)) {
+		cgroup_put(cgrp);
+		return PTR_ERR(new);
+	}
+
+	psi_trigger_replace(&of->priv, new);
+
+	cgroup_put(cgrp);
+
+	return nbytes;
+}
+
+static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_IO);
+}
+
+static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_MEM);
+}
+
+static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_CPU);
+}
+
+static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
+					  poll_table *pt)
+{
+	return psi_trigger_poll(&of->priv, of->file, pt);
+}
+
+static void cgroup_pressure_release(struct kernfs_open_file *of)
+{
+	psi_trigger_replace(&of->priv, NULL);
+}
+#endif /* CONFIG_PSI */
+
 static int cgroup_file_open(struct kernfs_open_file *of)
 {
 	struct cftype *cft = of->kn->priv;
@@ -3483,6 +3557,16 @@
 	return ret ?: nbytes;
 }
 
+static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
+{
+	struct cftype *cft = of->kn->priv;
+
+	if (cft->poll)
+		return cft->poll(of, pt);
+
+	return kernfs_generic_poll(of, pt);
+}
+
 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
 {
 	return seq_cft(seq)->seq_start(seq, ppos);
@@ -3521,6 +3605,7 @@
 	.open			= cgroup_file_open,
 	.release		= cgroup_file_release,
 	.write			= cgroup_file_write,
+	.poll			= cgroup_file_poll,
 	.seq_show		= cgroup_seqfile_show,
 };
 
@@ -3529,6 +3614,7 @@
 	.open			= cgroup_file_open,
 	.release		= cgroup_file_release,
 	.write			= cgroup_file_write,
+	.poll			= cgroup_file_poll,
 	.seq_start		= cgroup_seqfile_start,
 	.seq_next		= cgroup_seqfile_next,
 	.seq_stop		= cgroup_seqfile_stop,
@@ -4186,20 +4272,25 @@
 
 	lockdep_assert_held(&css_set_lock);
 repeat:
-	/*
-	 * Advance iterator to find next entry.  cset->tasks is consumed
-	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
-	 * next cset.
-	 */
-	next = it->task_pos->next;
+	if (it->task_pos) {
+		/*
+		 * Advance iterator to find next entry.  cset->tasks is
+		 * consumed first and then ->mg_tasks.  After ->mg_tasks,
+		 * we move onto the next cset.
+		 */
+		next = it->task_pos->next;
 
-	if (next == it->tasks_head)
-		next = it->mg_tasks_head->next;
+		if (next == it->tasks_head)
+			next = it->mg_tasks_head->next;
 
-	if (next == it->mg_tasks_head)
+		if (next == it->mg_tasks_head)
+			css_task_iter_advance_css_set(it);
+		else
+			it->task_pos = next;
+	} else {
+		/* called from start, proceed to the first cset */
 		css_task_iter_advance_css_set(it);
-	else
-		it->task_pos = next;
+	}
 
 	/* if PROCS, skip over tasks which aren't group leaders */
 	if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
@@ -4239,7 +4330,7 @@
 
 	it->cset_head = it->cset_pos;
 
-	css_task_iter_advance_css_set(it);
+	css_task_iter_advance(it);
 
 	spin_unlock_irq(&css_set_lock);
 }
@@ -4546,6 +4637,32 @@
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = cpu_stat_show,
 	},
+#ifdef CONFIG_PSI
+	{
+		.name = "io.pressure",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_io_pressure_show,
+		.write = cgroup_io_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
+	},
+	{
+		.name = "memory.pressure",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_memory_pressure_show,
+		.write = cgroup_memory_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
+	},
+	{
+		.name = "cpu.pressure",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_cpu_pressure_show,
+		.write = cgroup_cpu_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
+	},
+#endif /* CONFIG_PSI */
 	{ }	/* terminate */
 };
 
@@ -4606,6 +4723,7 @@
 			 */
 			cgroup_put(cgroup_parent(cgrp));
 			kernfs_put(cgrp->kn);
+			psi_cgroup_free(cgrp);
 			if (cgroup_on_dfl(cgrp))
 				cgroup_rstat_exit(cgrp);
 			kfree(cgrp);
@@ -4862,10 +4980,15 @@
 	cgrp->self.parent = &parent->self;
 	cgrp->root = root;
 	cgrp->level = level;
-	ret = cgroup_bpf_inherit(cgrp);
+
+	ret = psi_cgroup_alloc(cgrp);
 	if (ret)
 		goto out_idr_free;
 
+	ret = cgroup_bpf_inherit(cgrp);
+	if (ret)
+		goto out_psi_free;
+
 	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
 		cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
 
@@ -4903,6 +5026,8 @@
 
 	return cgrp;
 
+out_psi_free:
+	psi_cgroup_free(cgrp);
 out_idr_free:
 	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 out_stat_exit:
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fae0fb4..c40e62a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -358,9 +358,6 @@
 
 #ifdef CONFIG_HOTPLUG_SMT
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
-EXPORT_SYMBOL_GPL(cpu_smt_control);
-
-static bool cpu_smt_available __read_mostly;
 
 void __init cpu_smt_disable(bool force)
 {
@@ -378,25 +375,11 @@
 
 /*
  * The decision whether SMT is supported can only be done after the full
- * CPU identification. Called from architecture code before non boot CPUs
- * are brought up.
- */
-void __init cpu_smt_check_topology_early(void)
-{
-	if (!topology_smt_supported())
-		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
-}
-
-/*
- * If SMT was disabled by BIOS, detect it here, after the CPUs have been
- * brought online. This ensures the smt/l1tf sysfs entries are consistent
- * with reality. cpu_smt_available is set to true during the bringup of non
- * boot CPUs when a SMT sibling is detected. Note, this may overwrite
- * cpu_smt_control's previous setting.
+ * CPU identification. Called from architecture code.
  */
 void __init cpu_smt_check_topology(void)
 {
-	if (!cpu_smt_available)
+	if (!topology_smt_supported())
 		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
 }
 
@@ -409,18 +392,10 @@
 
 static inline bool cpu_smt_allowed(unsigned int cpu)
 {
-	if (topology_is_primary_thread(cpu))
+	if (cpu_smt_control == CPU_SMT_ENABLED)
 		return true;
 
-	/*
-	 * If the CPU is not a 'primary' thread and the booted_once bit is
-	 * set then the processor has SMT support. Store this information
-	 * for the late check of SMT support in cpu_smt_check_topology().
-	 */
-	if (per_cpu(cpuhp_state, cpu).booted_once)
-		cpu_smt_available = true;
-
-	if (cpu_smt_control == CPU_SMT_ENABLED)
+	if (topology_is_primary_thread(cpu))
 		return true;
 
 	/*
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 67b02e1..da3b611 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -22,6 +22,8 @@
 #include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
 
+bool from_suspend;
+
 static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
 
 static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
@@ -181,6 +183,7 @@
 {
 	int ret;
 
+	from_suspend = true;
 	ret = cpu_pm_enter();
 	if (ret)
 		return ret;
@@ -191,6 +194,7 @@
 
 static void cpu_pm_resume(void)
 {
+	from_suspend = false;
 	cpu_cluster_pm_exit();
 	cpu_pm_exit();
 }
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 65c0f13..94aa9ae 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -535,6 +535,8 @@
 				arch_kgdb_ops.correct_hw_break();
 			if (trace_on)
 				tracing_on();
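+			/* Clear the per-cpu debugger state on the way out so
+			 * a later session never sees stale pointers.
+			 */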
+			kgdb_info[cpu].debuggerinfo = NULL;
+			kgdb_info[cpu].task = NULL;
 			kgdb_info[cpu].exception_state &=
 				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
 			kgdb_info[cpu].enter_kgdb--;
@@ -667,6 +669,8 @@
 	if (trace_on)
 		tracing_on();
 
+	kgdb_info[cpu].debuggerinfo = NULL;
+	kgdb_info[cpu].task = NULL;
 	kgdb_info[cpu].exception_state &=
 		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
 	kgdb_info[cpu].enter_kgdb--;
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
index 7921ae4..7e2379a 100644
--- a/kernel/debug/kdb/kdb_bt.c
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -186,7 +186,16 @@
 		kdb_printf("btc: cpu status: ");
 		kdb_parse("cpu\n");
 		for_each_online_cpu(cpu) {
-			sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
+			void *kdb_tsk = KDB_TSK(cpu);
+
+			/* If a CPU failed to round up we could be here */
+			if (!kdb_tsk) {
+				kdb_printf("WARNING: no task for cpu %ld\n",
+					   cpu);
+				continue;
+			}
+
+			sprintf(buf, "btt 0x%px\n", kdb_tsk);
 			kdb_parse(buf);
 			touch_nmi_watchdog();
 		}
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 15e1a7a..53a0df6 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -118,13 +118,6 @@
 	kdb_bp_remove();
 	KDB_STATE_CLEAR(DOING_SS);
 	KDB_STATE_SET(PAGER);
-	/* zero out any offline cpu data */
-	for_each_present_cpu(i) {
-		if (!cpu_online(i)) {
-			kgdb_info[i].debuggerinfo = NULL;
-			kgdb_info[i].task = NULL;
-		}
-	}
 	if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
 		ks->pass_exception = 1;
 		KDB_FLAG_SET(CATASTROPHIC);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index f338d23..9592420 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2556,16 +2556,11 @@
 	}
 	kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);
 
-	/* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */
-
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
 	kdb_printf("load avg   %ld.%02ld %ld.%02ld %ld.%02ld\n",
 		LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),
 		LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),
 		LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));
-#undef LOAD_INT
-#undef LOAD_FRAC
+
 	/* Display in kilobytes */
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 	kdb_printf("\nMemTotal:       %8lu kB\nMemFree:        %8lu kB\n"
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index ca8ac28..2a12b98 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -135,9 +135,12 @@
 	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
 	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
 	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
+	tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
+	d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
 	d->blkio_count += tsk->delays->blkio_count;
 	d->swapin_count += tsk->delays->swapin_count;
 	d->freepages_count += tsk->delays->freepages_count;
+	d->thrashing_count += tsk->delays->thrashing_count;
 	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
 
 	return 0;
@@ -169,3 +172,15 @@
 		&current->delays->freepages_count);
 }
 
+void __delayacct_thrashing_start(void)
+{
+	current->delays->thrashing_start = ktime_get_ns();
+}
+
+void __delayacct_thrashing_end(void)
+{
+	delayacct_end(&current->delays->lock,
+		      &current->delays->thrashing_start,
+		      &current->delays->thrashing_delay,
+		      &current->delays->thrashing_count);
+}
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 597d408..b75667d 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -29,14 +29,21 @@
 	return NULL;
 }
 
-static inline dma_addr_t dma_get_device_base(struct device *dev,
-					     struct dma_coherent_mem * mem)
+dma_addr_t dma_get_device_base(struct device *dev,
+			       struct dma_coherent_mem *mem)
 {
 	if (mem->use_dev_dma_pfn_offset)
 		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
 	else
 		return mem->device_base;
 }
+EXPORT_SYMBOL(dma_get_device_base);
+
+unsigned long dma_get_size(struct dma_coherent_mem *mem)
+{
+	return mem->size << PAGE_SHIFT;
+}
+EXPORT_SYMBOL(dma_get_size);
 
 static int dma_init_coherent_memory(
 	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index de87b02..1d2f147 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -168,7 +168,12 @@
 int dma_direct_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_ZONE_DMA
-	if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
+	/*
+	 * This check needs to be against the actual bit mask value, so
+	 * use __phys_to_dma() here so that the SME encryption mask isn't
+	 * part of the check.
+	 */
+	if (mask < __phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
 		return 0;
 #else
 	/*
@@ -176,8 +181,12 @@
 	 * to be able to satisfy them - either by not supporting more physical
 	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
 	 * architecture needs to use an IOMMU instead of the direct mapping.
+	 *
+	 * This check needs to be against the actual bit mask value, so
+	 * use __phys_to_dma() here so that the SME encryption mask isn't
+	 * part of the check.
 	 */
-	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
+	if (mask < __phys_to_dma(dev, DMA_BIT_MASK(32)))
 		return 0;
 #endif
 	/*
diff --git a/kernel/dma/removed.c b/kernel/dma/removed.c
index fe3fd70..6a1c87f 100644
--- a/kernel/dma/removed.c
+++ b/kernel/dma/removed.c
@@ -75,7 +75,7 @@
 	bool skip_zeroing = attrs & DMA_ATTR_SKIP_ZEROING;
 	int pageno;
 	unsigned long order;
-	void *addr = NULL;
+	void __iomem *addr = NULL;
 	struct removed_region *dma_mem = dev->removed_mem;
 	int nbits;
 	unsigned int align;
@@ -108,7 +108,7 @@
 			goto out;
 		}
 
-		addr = ioremap(base, size);
+		addr = ioremap_wc(base, size);
 		if (WARN_ON(!addr)) {
 			bitmap_clear(dma_mem->bitmap, pageno, nbits);
 		} else {
@@ -202,10 +202,10 @@
 {
 }
 
-void *removed_remap(struct device *dev, void *cpu_addr, dma_addr_t handle,
-			size_t size, unsigned long attrs)
+static void __iomem *removed_remap(struct device *dev, void *cpu_addr,
+			dma_addr_t handle, size_t size, unsigned long attrs)
 {
-	return ioremap(handle, size);
+	return ioremap_wc(handle, size);
 }
 
 void removed_unremap(struct device *dev, void *remapped_address, size_t size)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6f257fa..c89f8ea 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -469,18 +469,18 @@
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
-	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-
-	if (ret || !write)
-		return ret;
-
+	int ret;
+	int perf_cpu = sysctl_perf_cpu_time_max_percent;
 	/*
 	 * If throttling is disabled don't allow the write:
 	 */
-	if (sysctl_perf_cpu_time_max_percent == 100 ||
-	    sysctl_perf_cpu_time_max_percent == 0)
+	if (write && (perf_cpu == 100 || perf_cpu == 0))
 		return -EINVAL;
 
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret || !write)
+		return ret;
+
 	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
 	update_perf_cpu_limits();
@@ -5136,6 +5136,11 @@
 	}
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+	return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	u64 value;
@@ -5152,6 +5157,9 @@
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
+	if (perf_event_check_period(event, value))
+		return -EINVAL;
+
 	event_function_call(event, __perf_event_period, &value);
 
 	return 0;
@@ -9539,6 +9547,11 @@
 	return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+	return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9839,6 +9852,9 @@
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->check_period)
+		pmu->check_period = perf_event_nop_int;
+
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;
 
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 5d3cf40..5631af9 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -724,6 +724,9 @@
 	size = sizeof(struct ring_buffer);
 	size += nr_pages * sizeof(void *);
 
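+	/* Refuse sizes beyond the largest order the page allocator can
+	 * satisfy in a single allocation.
+	 */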
+	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
+		goto fail;
+
 	rb = kzalloc(size, GFP_KERNEL);
 	if (!rb)
 		goto fail;
diff --git a/kernel/exit.c b/kernel/exit.c
index 8dd63b6..ddd2aa9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -307,7 +307,7 @@
 	 *        MB (A)	      MB (B)
 	 *    [L] cond		  [L] tsk
 	 */
-	smp_rmb(); /* (B) */
+	smp_mb(); /* (B) */
 
 	/*
 	 * Avoid using task_rcu_dereference() magic as long as we are careful,
@@ -558,12 +558,14 @@
 	return NULL;
 }
 
-static struct task_struct *find_child_reaper(struct task_struct *father)
+static struct task_struct *find_child_reaper(struct task_struct *father,
+						struct list_head *dead)
 	__releases(&tasklist_lock)
 	__acquires(&tasklist_lock)
 {
 	struct pid_namespace *pid_ns = task_active_pid_ns(father);
 	struct task_struct *reaper = pid_ns->child_reaper;
+	struct task_struct *p, *n;
 
 	if (likely(reaper != father))
 		return reaper;
@@ -579,6 +581,12 @@
 		panic("Attempted to kill init! exitcode=0x%08x\n",
 			father->signal->group_exit_code ?: father->exit_code);
 	}
+
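+	/* Release the dead children collected so far first: they can only
+	 * be reaped by us, and zap_pid_ns_processes() would otherwise wait
+	 * on them indefinitely.
+	 */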
+	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+		list_del_init(&p->ptrace_entry);
+		release_task(p);
+	}
+
 	zap_pid_ns_processes(pid_ns);
 	write_lock_irq(&tasklist_lock);
 
@@ -668,7 +676,7 @@
 		exit_ptrace(father, dead);
 
 	/* Can drop and reacquire tasklist_lock */
-	reaper = find_child_reaper(father);
+	reaper = find_child_reaper(father, dead);
 	if (list_empty(&father->children))
 		return;
 
diff --git a/kernel/fork.c b/kernel/fork.c
index b1e82e3..f70f668 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1784,13 +1784,15 @@
 
 	p->default_timer_slack_ns = current->timer_slack_ns;
 
+#ifdef CONFIG_PSI
+	p->psi_flags = 0;
+#endif
+
 	task_io_accounting_init(&p->ioac);
 	acct_clear_integrals(p);
 
 	posix_cpu_timers_init(p);
 
-	p->start_time = ktime_get_ns();
-	p->real_start_time = ktime_get_boot_ns();
 	p->io_context = NULL;
 	audit_set_context(p, NULL);
 	cgroup_fork(p);
@@ -1955,6 +1957,17 @@
 		goto bad_fork_free_pid;
 
 	/*
+	 * From this point on we must avoid any synchronous user-space
+	 * communication until we take the tasklist-lock. In particular, we do
+	 * not want user-space to be able to predict the process start-time by
+	 * stalling fork(2) after we recorded the start_time but before it is
+	 * visible to the system.
+	 */
+
+	p->start_time = ktime_get_ns();
+	p->real_start_time = ktime_get_boot_ns();
+
+	/*
 	 * Make it visible to the rest of the system, but don't wake it up yet.
 	 * Need tasklist lock for parent etc handling!
 	 */
diff --git a/kernel/futex.c b/kernel/futex.c
index 11fc3bb..c5fca74 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1148,11 +1148,65 @@
 	return ret;
 }
 
+static int handle_exit_race(u32 __user *uaddr, u32 uval,
+			    struct task_struct *tsk)
+{
+	u32 uval2;
+
+	/*
+	 * If PF_EXITPIDONE is not yet set, then try again.
+	 */
+	if (tsk && !(tsk->flags & PF_EXITPIDONE))
+		return -EAGAIN;
+
+	/*
+	 * Reread the user space value to handle the following situation:
+	 *
+	 * CPU0				CPU1
+	 *
+	 * sys_exit()			sys_futex()
+	 *  do_exit()			 futex_lock_pi()
+	 *                                futex_lock_pi_atomic()
+	 *   exit_signals(tsk)		    No waiters:
+	 *    tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
+	 *  mm_release(tsk)		    Set waiter bit
+	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
+	 *      Set owner died		    attach_to_pi_owner() {
+	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
+	 *   }				     if (!tsk->flags & PF_EXITING) {
+	 *  ...				       attach();
+	 *  tsk->flags |= PF_EXITPIDONE;     } else {
+	 *				       if (!(tsk->flags & PF_EXITPIDONE))
+	 *				         return -EAGAIN;
+	 *				       return -ESRCH; <--- FAIL
+	 *				     }
+	 *
+	 * Returning ESRCH unconditionally is wrong here because the
+	 * user space value has been changed by the exiting task.
+	 *
+	 * The same logic applies to the case where the exiting task is
+	 * already gone.
+	 */
+	if (get_futex_value_locked(&uval2, uaddr))
+		return -EFAULT;
+
+	/* If the user space value has changed, try again. */
+	if (uval2 != uval)
+		return -EAGAIN;
+
+	/*
+	 * The exiting task did not have a robust list, the robust list was
+	 * corrupted or the user space value in *uaddr is simply bogus.
+	 * Give up and tell user space.
+	 */
+	return -ESRCH;
+}
+
 /*
  * Lookup the task for the TID provided from user space and attach to
  * it after doing proper sanity checks.
  */
-static int attach_to_pi_owner(u32 uval, union futex_key *key,
+static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
 			      struct futex_pi_state **ps)
 {
 	pid_t pid = uval & FUTEX_TID_MASK;
@@ -1162,12 +1216,15 @@
 	/*
 	 * We are the first waiter - try to look up the real owner and attach
 	 * the new pi_state to it, but bail out when TID = 0 [1]
+	 *
+	 * The !pid check is paranoid. None of the call sites should end up
+	 * with pid == 0, but better safe than sorry. Let the caller retry.
 	 */
 	if (!pid)
-		return -ESRCH;
+		return -EAGAIN;
 	p = find_get_task_by_vpid(pid);
 	if (!p)
-		return -ESRCH;
+		return handle_exit_race(uaddr, uval, NULL);
 
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		put_task_struct(p);
@@ -1187,7 +1244,7 @@
 		 * set, we know that the task has finished the
 		 * cleanup:
 		 */
-		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
+		int ret = handle_exit_race(uaddr, uval, p);
 
 		raw_spin_unlock_irq(&p->pi_lock);
 		put_task_struct(p);
@@ -1244,7 +1301,7 @@
 	 * We are the first waiter - try to look up the owner based on
 	 * @uval and attach to it.
 	 */
-	return attach_to_pi_owner(uval, key, ps);
+	return attach_to_pi_owner(uaddr, uval, key, ps);
 }
 
 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
@@ -1352,7 +1409,7 @@
 	 * attach to the owner. If that fails, no harm done, we only
 	 * set the FUTEX_WAITERS bit in the user space variable.
 	 */
-	return attach_to_pi_owner(uval, key, ps);
+	return attach_to_pi_owner(uaddr, newval, key, ps);
 }
 
 /**
@@ -1387,11 +1444,7 @@
 	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
 		return;
 
-	/*
-	 * Queue the task for later wakeup for after we've released
-	 * the hb->lock. wake_q_add() grabs reference to p.
-	 */
-	wake_q_add(wake_q, p);
+	get_task_struct(p);
 	__unqueue_futex(q);
 	/*
 	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
@@ -1401,6 +1454,13 @@
 	 * plist_del in __unqueue_futex().
 	 */
 	smp_store_release(&q->lock_ptr, NULL);
+
+	/*
+	 * Queue the task for later wakeup for after we've released
+	 * the hb->lock. wake_q_add() grabs reference to p.
+	 */
+	wake_q_add(wake_q, p);
+	put_task_struct(p);
 }
 
 /*
@@ -2793,35 +2853,39 @@
 	 * and BUG when futex_unlock_pi() interleaves with this.
 	 *
 	 * Therefore acquire wait_lock while holding hb->lock, but drop the
-	 * latter before calling rt_mutex_start_proxy_lock(). This still fully
-	 * serializes against futex_unlock_pi() as that does the exact same
-	 * lock handoff sequence.
+	 * latter before calling __rt_mutex_start_proxy_lock(). This
+	 * interleaves with futex_unlock_pi() -- which does a similar lock
+	 * handoff -- such that the latter can observe the futex_q::pi_state
+	 * before __rt_mutex_start_proxy_lock() is done.
 	 */
 	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
 	spin_unlock(q.lock_ptr);
+	/*
+	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
+	 * it sees the futex_q::pi_state.
+	 */
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
 	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
 
 	if (ret) {
 		if (ret == 1)
 			ret = 0;
-
-		spin_lock(q.lock_ptr);
-		goto no_block;
+		goto cleanup;
 	}
 
-
 	if (unlikely(to))
 		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
 
 	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
 
+cleanup:
 	spin_lock(q.lock_ptr);
 	/*
-	 * If we failed to acquire the lock (signal/timeout), we must
+	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
 	 * first acquire the hb->lock before removing the lock from the
-	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
-	 * wait lists consistent.
+	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
+	 * lists consistent.
 	 *
 	 * In particular; it is important that futex_unlock_pi() can not
 	 * observe this inconsistency.
@@ -2945,6 +3009,10 @@
 		 * there is no point where we hold neither; and therefore
 		 * wake_futex_pi() must observe a state consistent with what we
 		 * observed.
+		 *
+		 * In particular; this forces __rt_mutex_start_proxy_lock() to
+		 * complete such that we're guaranteed to observe the
+		 * rt_waiter. Also see the WARN in wake_futex_pi().
 		 */
 		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 		spin_unlock(&hb->lock);
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index b9132d1..9eca237 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -33,7 +33,7 @@
  * is disabled during the critical section. It also controls the size of
  * the RCU grace period. So it needs to be upper-bound.
  */
-#define HUNG_TASK_BATCHING 1024
+#define HUNG_TASK_LOCK_BREAK (HZ / 10)
 
 /*
  * Zero means infinite timeout - no checking done:
@@ -111,8 +111,11 @@
 
 	trace_sched_process_hang(t);
 
-	if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
-		return;
+	if (sysctl_hung_task_panic) {
+		console_verbose();
+		hung_task_show_lock = true;
+		hung_task_call_panic = true;
+	}
 
 	/*
 	 * Ok, the task did not get scheduled for more than 2 minutes,
@@ -134,11 +137,6 @@
 	}
 
 	touch_nmi_watchdog();
-
-	if (sysctl_hung_task_panic) {
-		hung_task_show_lock = true;
-		hung_task_call_panic = true;
-	}
 }
 
 /*
@@ -172,7 +170,7 @@
 static void check_hung_uninterruptible_tasks(unsigned long timeout)
 {
 	int max_count = sysctl_hung_task_check_count;
-	int batch_count = HUNG_TASK_BATCHING;
+	unsigned long last_break = jiffies;
 	struct task_struct *g, *t;
 
 	/*
@@ -187,10 +185,10 @@
 	for_each_process_thread(g, t) {
 		if (!max_count--)
 			goto unlock;
-		if (!--batch_count) {
-			batch_count = HUNG_TASK_BATCHING;
+		if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
 			if (!rcu_lock_break(g, t))
 				goto unlock;
+			last_break = jiffies;
 		}
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
 		if (t->state == TASK_UNINTERRUPTIBLE)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index f4f29b9..e12cdf6 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -117,12 +117,11 @@
 	 */
 	if (numvecs <= nodes) {
 		for_each_node_mask(n, nodemsk) {
-			cpumask_copy(masks + curvec, node_to_cpumask[n]);
-			if (++done == numvecs)
-				break;
+			cpumask_or(masks + curvec, masks + curvec, node_to_cpumask[n]);
 			if (++curvec == last_affv)
 				curvec = affd->pre_vectors;
 		}
+		done = numvecs;
 		goto out;
 	}
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a2b3d9d..e521950 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1376,6 +1376,10 @@
 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
 {
 	data = data->parent_data;
+
+	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+		return 0;
+
 	if (data->chip->irq_set_wake)
 		return data->chip->irq_set_wake(data, on);
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6dfdb4d..eb584ad 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -396,6 +396,9 @@
 	}
 
 	cpumask_and(&mask, cpu_online_mask, set);
+	if (cpumask_empty(&mask))
+		cpumask_copy(&mask, cpu_online_mask);
+
 	if (node != NUMA_NO_NODE) {
 		const struct cpumask *nodemask = cpumask_of_node(node);
 
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5092494..9233770 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -14,6 +14,7 @@
 	unsigned int		available;
 	unsigned int		allocated;
 	unsigned int		managed;
+	unsigned int		managed_allocated;
 	bool			initialized;
 	bool			online;
 	unsigned long		alloc_map[IRQ_MATRIX_SIZE];
@@ -124,6 +125,48 @@
 	return area;
 }
 
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+					const struct cpumask *msk)
+{
+	unsigned int cpu, best_cpu, maxavl = 0;
+	struct cpumap *cm;
+
+	best_cpu = UINT_MAX;
+
+	for_each_cpu(cpu, msk) {
+		cm = per_cpu_ptr(m->maps, cpu);
+
+		if (!cm->online || cm->available <= maxavl)
+			continue;
+
+		best_cpu = cpu;
+		maxavl = cm->available;
+	}
+	return best_cpu;
+}
+
+/* Find the best CPU which has the lowest number of managed IRQs allocated */
+static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
+						const struct cpumask *msk)
+{
+	unsigned int cpu, best_cpu, allocated = UINT_MAX;
+	struct cpumap *cm;
+
+	best_cpu = UINT_MAX;
+
+	for_each_cpu(cpu, msk) {
+		cm = per_cpu_ptr(m->maps, cpu);
+
+		if (!cm->online || cm->managed_allocated > allocated)
+			continue;
+
+		best_cpu = cpu;
+		allocated = cm->managed_allocated;
+	}
+	return best_cpu;
+}
+
 /**
  * irq_matrix_assign_system - Assign system wide entry in the matrix
  * @m:		Matrix pointer
@@ -239,11 +282,21 @@
  * @m:		Matrix pointer
  * @cpu:	On which CPU the interrupt should be allocated
  */
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+			     unsigned int *mapped_cpu)
 {
-	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-	unsigned int bit, end = m->alloc_end;
+	unsigned int bit, cpu, end = m->alloc_end;
+	struct cpumap *cm;
 
+	if (cpumask_empty(msk))
+		return -EINVAL;
+
+	cpu = matrix_find_best_cpu_managed(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
+
+	cm = per_cpu_ptr(m->maps, cpu);
+	end = m->alloc_end;
 	/* Get the managed bits which are not allocated */
 	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
 	bit = find_first_bit(m->scratch_map, end);
@@ -251,7 +304,9 @@
 		return -ENOSPC;
 	set_bit(bit, cm->alloc_map);
 	cm->allocated++;
+	cm->managed_allocated++;
 	m->total_allocated++;
+	*mapped_cpu = cpu;
 	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
 	return bit;
 }
@@ -322,37 +377,27 @@
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
 		     bool reserved, unsigned int *mapped_cpu)
 {
-	unsigned int cpu, best_cpu, maxavl = 0;
+	unsigned int cpu, bit;
 	struct cpumap *cm;
-	unsigned int bit;
 
-	best_cpu = UINT_MAX;
-	for_each_cpu(cpu, msk) {
-		cm = per_cpu_ptr(m->maps, cpu);
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
 
-		if (!cm->online || cm->available <= maxavl)
-			continue;
+	cm = per_cpu_ptr(m->maps, cpu);
+	bit = matrix_alloc_area(m, cm, 1, false);
+	if (bit >= m->alloc_end)
+		return -ENOSPC;
+	cm->allocated++;
+	cm->available--;
+	m->total_allocated++;
+	m->global_available--;
+	if (reserved)
+		m->global_reserved--;
+	*mapped_cpu = cpu;
+	trace_irq_matrix_alloc(bit, cpu, m, cm);
+	return bit;
 
-		best_cpu = cpu;
-		maxavl = cm->available;
-	}
-
-	if (maxavl) {
-		cm = per_cpu_ptr(m->maps, best_cpu);
-		bit = matrix_alloc_area(m, cm, 1, false);
-		if (bit < m->alloc_end) {
-			cm->allocated++;
-			cm->available--;
-			m->total_allocated++;
-			m->global_available--;
-			if (reserved)
-				m->global_reserved--;
-			*mapped_cpu = best_cpu;
-			trace_irq_matrix_alloc(bit, best_cpu, m, cm);
-			return bit;
-		}
-	}
-	return -ENOSPC;
 }
 
 /**
@@ -373,6 +418,8 @@
 
 	clear_bit(bit, cm->alloc_map);
 	cm->allocated--;
+	if (managed)
+		cm->managed_allocated--;
 
 	if (cm->online)
 		m->total_allocated--;
@@ -442,13 +489,14 @@
 	seq_printf(sf, "Total allocated:  %6u\n", m->total_allocated);
 	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
 		   m->system_map);
-	seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
+	seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
 	cpus_read_lock();
 	for_each_online_cpu(cpu) {
 		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
 
-		seq_printf(sf, "%*s %4d  %4u  %4u  %4u  %*pbl\n", ind, " ",
-			   cpu, cm->available, cm->managed, cm->allocated,
+		seq_printf(sf, "%*s %4d  %4u  %4u  %4u %4u  %*pbl\n", ind, " ",
+			   cpu, cm->available, cm->managed,
+			   cm->managed_allocated, cm->allocated,
 			   m->matrix_bits, cm->alloc_map);
 	}
 	cpus_read_unlock();
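Both new helpers are the same greedy scan with different selection keys:
matrix_find_best_cpu() keeps the online CPU with the most available vectors,
while matrix_find_best_cpu_managed() keeps the one with the fewest managed
allocations, which is what lets irq_matrix_alloc_managed() spread managed
interrupts across the mask instead of piling them onto one CPU. A
freestanding sketch of the two policies over a plain array (the struct
fields and the UINT_MAX "no CPU" sentinel follow the patch; the user-space
scaffolding is hypothetical):

/*
 * Sketch of the two best-CPU selection policies, mirroring
 * matrix_find_best_cpu{,_managed}() above.
 */
#include <limits.h>
#include <stdbool.h>

struct cpumap {
	bool online;
	unsigned int available;		/* free vectors */
	unsigned int managed_allocated;	/* allocated managed vectors */
};

/* Highest number of available vectors wins. */
unsigned int find_best_cpu(const struct cpumap *maps, unsigned int nr_cpus)
{
	unsigned int cpu, best_cpu = UINT_MAX, maxavl = 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (!maps[cpu].online || maps[cpu].available <= maxavl)
			continue;
		best_cpu = cpu;
		maxavl = maps[cpu].available;
	}
	return best_cpu;
}

/* Lowest number of allocated managed vectors wins. */
unsigned int find_best_cpu_managed(const struct cpumap *maps,
				   unsigned int nr_cpus)
{
	unsigned int cpu, best_cpu = UINT_MAX, allocated = UINT_MAX;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (!maps[cpu].online ||
		    maps[cpu].managed_allocated > allocated)
			continue;
		best_cpu = cpu;
		allocated = maps[cpu].managed_allocated;
	}
	return best_cpu;
}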
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 97959d7..c2277db 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -112,7 +112,7 @@
 EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
 
 #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
-static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
+static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
 {
 	struct task_struct *t;
 	u64 *area;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d..e3dfad2 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -11,6 +11,7 @@
 #include <linux/kthread.h>
 #include <linux/completion.h>
 #include <linux/err.h>
+#include <linux/cgroup.h>
 #include <linux/cpuset.h>
 #include <linux/unistd.h>
 #include <linux/file.h>
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2823d41..9562aaa 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1726,12 +1726,33 @@
 	rt_mutex_set_owner(lock, NULL);
 }
 
+/**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock:		the rt_mutex to take
+ * @waiter:		the pre-initialized rt_mutex_waiter
+ * @task:		the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: does _NOT_ remove the @waiter on failure; must either call
+ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
+ *
+ * Returns:
+ *  0 - task blocked on lock
+ *  1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
 int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 			      struct rt_mutex_waiter *waiter,
 			      struct task_struct *task)
 {
 	int ret;
 
+	lockdep_assert_held(&lock->wait_lock);
+
 	if (try_to_take_rt_mutex(lock, task, NULL))
 		return 1;
 
@@ -1749,9 +1770,6 @@
 		ret = 0;
 	}
 
-	if (unlikely(ret))
-		remove_waiter(lock, waiter);
-
 	debug_rt_mutex_print_deadlock(waiter);
 
 	return ret;
@@ -1763,12 +1781,18 @@
  * @waiter:		the pre-initialized rt_mutex_waiter
  * @task:		the task to prepare
  *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
+ * on failure.
+ *
  * Returns:
  *  0 - task blocked on lock
  *  1 - acquired the lock for task, caller should wake it up
  * <0 - error
  *
- * Special API call for FUTEX_REQUEUE_PI support.
+ * Special API call for PI-futex support.
  */
 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 			      struct rt_mutex_waiter *waiter,
@@ -1778,6 +1802,8 @@
 
 	raw_spin_lock_irq(&lock->wait_lock);
 	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+	if (unlikely(ret))
+		remove_waiter(lock, waiter);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -1845,7 +1871,8 @@
  * @lock:		the rt_mutex we were woken on
  * @waiter:		the pre-initialized rt_mutex_waiter
  *
- * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
+ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
+ * rt_mutex_wait_proxy_lock().
  *
  * Unless we acquired the lock; we're still enqueued on the wait-list and can
  * in fact still be granted ownership until we're removed. Therefore we can
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 3064c50..ef90935 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -198,15 +198,22 @@
 		woken++;
 		tsk = waiter->task;
 
-		wake_q_add(wake_q, tsk);
+		get_task_struct(tsk);
 		list_del(&waiter->list);
 		/*
-		 * Ensure that the last operation is setting the reader
+		 * Ensure calling get_task_struct() before setting the reader
 		 * waiter to nil such that rwsem_down_read_failed() cannot
 		 * race with do_exit() by always holding a reference count
 		 * to the task to wakeup.
 		 */
 		smp_store_release(&waiter->task, NULL);
+		/*
+		 * Ensure issuing the wakeup (either by us or someone else)
+		 * after setting the reader waiter to nil.
+		 */
+		wake_q_add(wake_q, tsk);
+		/* wake_q_add() already takes the task ref */
+		put_task_struct(tsk);
 	}
 
 	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
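The reordering matters because smp_store_release(&waiter->task, NULL) is the
publish that lets the woken reader return and potentially exit; taking the
task reference first, and issuing the wakeup only after the publish, closes
the use-after-free window against do_exit(). A C11-atomics analogue of the
pattern (hypothetical user-space names, not the kernel API):

/*
 * Pin the task before the release-store that publishes "woken"
 * (after which the task may exit), then wake and unpin.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct task {
	atomic_int refcount;
};

struct waiter {
	_Atomic(struct task *) task;	/* NULL once woken */
};

static void get_task(struct task *t) { atomic_fetch_add(&t->refcount, 1); }

static void put_task(struct task *t)
{
	if (atomic_fetch_sub(&t->refcount, 1) == 1)
		free(t);
}

static void wake_task(struct task *t) { (void)t; /* e.g. condvar/futex wake */ }

void wake_one(struct waiter *w)
{
	struct task *t = atomic_load(&w->task);

	get_task(t);					/* 1: pin first */
	/* 2: publish; the waiter may now observe NULL and exit */
	atomic_store_explicit(&w->task, NULL, memory_order_release);
	wake_task(t);					/* 3: wake after publish */
	put_task(t);					/* drop our pin */
}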
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 5b8600d..7c5fb8a 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -122,23 +122,25 @@
 	resource_size_t align_start, align_size;
 	unsigned long pfn;
 
+	pgmap->kill(pgmap->ref);
 	for_each_device_pfn(pfn, pgmap)
 		put_page(pfn_to_page(pfn));
 
-	if (percpu_ref_tryget_live(pgmap->ref)) {
-		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
-		percpu_ref_put(pgmap->ref);
-	}
-
 	/* pages are dead and unused, undo the arch mapping */
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
 		- align_start;
 
 	mem_hotplug_begin();
-	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
-			&pgmap->altmap : NULL);
-	kasan_remove_zero_shadow(__va(align_start), align_size);
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		pfn = align_start >> PAGE_SHIFT;
+		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+				align_size >> PAGE_SHIFT, NULL);
+	} else {
+		arch_remove_memory(align_start, align_size,
+				pgmap->altmap_valid ? &pgmap->altmap : NULL);
+		kasan_remove_zero_shadow(__va(align_start), align_size);
+	}
 	mem_hotplug_done();
 
 	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
@@ -150,7 +152,7 @@
 /**
  * devm_memremap_pages - remap and provide memmap backing for the given resource
  * @dev: hosting device for @res
- * @pgmap: pointer to a struct dev_pgmap
+ * @pgmap: pointer to a struct dev_pagemap
  *
  * Notes:
  * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
@@ -159,11 +161,8 @@
  * 2/ The altmap field may optionally be initialized, in which case altmap_valid
  *    must be set to true
  *
- * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
- *    time (or devm release event). The expected order of events is that ref has
- *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
- *    wait for the completion of all references being dropped and
- *    percpu_ref_exit() must occur after devm_memremap_pages_release().
+ * 3/ pgmap->ref must be 'live' on entry and will be killed at
+ *    devm_memremap_pages_release() time, or if this routine fails.
  *
  * 4/ res is expected to be a host memory range that could feasibly be
  *    treated as a "System RAM" range, i.e. not a device mmio range, but
@@ -180,6 +179,9 @@
 	int error, nid, is_ram;
 	struct dev_pagemap *conflict_pgmap;
 
+	if (!pgmap->ref || !pgmap->kill)
+		return ERR_PTR(-EINVAL);
+
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
 		- align_start;
@@ -202,18 +204,13 @@
 	is_ram = region_intersects(align_start, align_size,
 		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
 
-	if (is_ram == REGION_MIXED) {
-		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
-				__func__, res);
-		return ERR_PTR(-ENXIO);
+	if (is_ram != REGION_DISJOINT) {
+		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
+				is_ram == REGION_MIXED ? "mixed" : "ram", res);
+		error = -ENXIO;
+		goto err_array;
 	}
 
-	if (is_ram == REGION_INTERSECTS)
-		return __va(res->start);
-
-	if (!pgmap->ref)
-		return ERR_PTR(-EINVAL);
-
 	pgmap->dev = dev;
 
 	mutex_lock(&pgmap_lock);
@@ -241,17 +238,40 @@
 		goto err_pfn_remap;
 
 	mem_hotplug_begin();
-	error = kasan_add_zero_shadow(__va(align_start), align_size);
-	if (error) {
-		mem_hotplug_done();
-		goto err_kasan;
+
+	/*
+	 * For device private memory we call add_pages() as we only need to
+	 * allocate and initialize struct page for the device memory.
+	 * Moreover, the device memory is inaccessible, thus we do not want to
+	 * create a linear mapping for the memory like arch_add_memory()
+	 * would do.
+	 *
+	 * For all other device memory types, which are accessible by
+	 * the CPU, we do want the linear mapping and thus use
+	 * arch_add_memory().
+	 */
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		error = add_pages(nid, align_start >> PAGE_SHIFT,
+				align_size >> PAGE_SHIFT, NULL, false);
+	} else {
+		error = kasan_add_zero_shadow(__va(align_start), align_size);
+		if (error) {
+			mem_hotplug_done();
+			goto err_kasan;
+		}
+
+		error = arch_add_memory(nid, align_start, align_size, altmap,
+				false);
 	}
 
-	error = arch_add_memory(nid, align_start, align_size, altmap, false);
-	if (!error)
-		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-					align_start >> PAGE_SHIFT,
-					align_size >> PAGE_SHIFT, altmap);
+	if (!error) {
+		struct zone *zone;
+
+		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
+		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
+				align_size >> PAGE_SHIFT, altmap);
+	}
+
 	mem_hotplug_done();
 	if (error)
 		goto err_add_memory;
@@ -270,7 +290,10 @@
 		percpu_ref_get(pgmap->ref);
 	}
 
-	devm_add_action(dev, devm_memremap_pages_release, pgmap);
+	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
+			pgmap);
+	if (error)
+		return ERR_PTR(error);
 
 	return __va(res->start);
 
@@ -281,9 +304,11 @@
  err_pfn_remap:
  err_radix:
 	pgmap_radix_release(res, pgoff);
+ err_array:
+	pgmap->kill(pgmap->ref);
 	return ERR_PTR(error);
 }
-EXPORT_SYMBOL(devm_memremap_pages);
+EXPORT_SYMBOL_GPL(devm_memremap_pages);
 
 unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
 {
diff --git a/kernel/module.c b/kernel/module.c
index f896873..e179303 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1207,8 +1207,10 @@
 			    struct module_kobject *mk,
 			    const char *buffer, size_t count)
 {
-	kobject_synth_uevent(&mk->kobj, buffer, count);
-	return count;
+	int rc;
+
+	rc = kobject_synth_uevent(&mk->kobj, buffer, count);
+	return rc ? rc : count;
 }
 
 struct module_attribute module_uevent =
diff --git a/kernel/panic.c b/kernel/panic.c
index 21d1336..11d71ac 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -14,6 +14,7 @@
 #include <linux/kmsg_dump.h>
 #include <linux/kallsyms.h>
 #include <linux/notifier.h>
+#include <linux/vt_kern.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/ftrace.h>
@@ -235,7 +236,10 @@
 	if (_crash_kexec_post_notifiers)
 		__crash_kexec(NULL);
 
-	bust_spinlocks(0);
+#ifdef CONFIG_VT
+	unblank_screen();
+#endif
+	console_unblank();
 
 	/*
 	 * We may have ended up stopping the CPU holding the lock (in
diff --git a/kernel/pid.c b/kernel/pid.c
index cdf63e5..b88fe5e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -233,8 +233,10 @@
 
 out_free:
 	spin_lock_irq(&pidmap_lock);
-	while (++i <= ns->level)
-		idr_remove(&ns->idr, (pid->numbers + i)->nr);
+	while (++i <= ns->level) {
+		upid = pid->numbers + i;
+		idr_remove(&upid->ns->idr, upid->nr);
+	}
 
 	/* On failure to allocate the first pid, reset the state */
 	if (ns->pid_allocated == PIDNS_ADDING)
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index 5ec376d..7d66ee6 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -10,6 +10,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/debugfs.h>
 #include <linux/energy_model.h>
 #include <linux/sched/topology.h>
 #include <linux/slab.h>
@@ -23,82 +24,60 @@
  */
 static DEFINE_MUTEX(em_pd_mutex);
 
-static struct kobject *em_kobject;
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *rootdir;
 
-/* Getters for the attributes of em_perf_domain objects */
-struct em_pd_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct em_perf_domain *pd, char *buf);
-	ssize_t (*store)(struct em_perf_domain *pd, const char *buf, size_t s);
-};
-
-#define EM_ATTR_LEN 13
-#define show_table_attr(_attr) \
-static ssize_t show_##_attr(struct em_perf_domain *pd, char *buf) \
-{ \
-	ssize_t cnt = 0; \
-	int i; \
-	for (i = 0; i < pd->nr_cap_states; i++) { \
-		if (cnt >= (ssize_t) (PAGE_SIZE / sizeof(char) \
-				      - (EM_ATTR_LEN + 2))) \
-			goto out; \
-		cnt += scnprintf(&buf[cnt], EM_ATTR_LEN + 1, "%lu ", \
-				 pd->table[i]._attr); \
-	} \
-out: \
-	cnt += sprintf(&buf[cnt], "\n"); \
-	return cnt; \
-}
-
-show_table_attr(power);
-show_table_attr(frequency);
-show_table_attr(cost);
-
-static ssize_t show_cpus(struct em_perf_domain *pd, char *buf)
+static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd)
 {
-	return sprintf(buf, "%*pbl\n", cpumask_pr_args(to_cpumask(pd->cpus)));
+	struct dentry *d;
+	char name[24];
+
+	snprintf(name, sizeof(name), "cs:%lu", cs->frequency);
+
+	/* Create per-cs directory */
+	d = debugfs_create_dir(name, pd);
+	debugfs_create_ulong("frequency", 0444, d, &cs->frequency);
+	debugfs_create_ulong("power", 0444, d, &cs->power);
+	debugfs_create_ulong("cost", 0444, d, &cs->cost);
 }
 
-#define pd_attr(_name) em_pd_##_name##_attr
-#define define_pd_attr(_name) static struct em_pd_attr pd_attr(_name) = \
-		__ATTR(_name, 0444, show_##_name, NULL)
-
-define_pd_attr(power);
-define_pd_attr(frequency);
-define_pd_attr(cost);
-define_pd_attr(cpus);
-
-static struct attribute *em_pd_default_attrs[] = {
-	&pd_attr(power).attr,
-	&pd_attr(frequency).attr,
-	&pd_attr(cost).attr,
-	&pd_attr(cpus).attr,
-	NULL
-};
-
-#define to_pd(k) container_of(k, struct em_perf_domain, kobj)
-#define to_pd_attr(a) container_of(a, struct em_pd_attr, attr)
-
-static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+static int em_debug_cpus_show(struct seq_file *s, void *unused)
 {
-	struct em_perf_domain *pd = to_pd(kobj);
-	struct em_pd_attr *pd_attr = to_pd_attr(attr);
-	ssize_t ret;
+	seq_printf(s, "%*pbl\n", cpumask_pr_args(to_cpumask(s->private)));
 
-	ret = pd_attr->show(pd, buf);
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
 
-	return ret;
+static void em_debug_create_pd(struct em_perf_domain *pd, int cpu)
+{
+	struct dentry *d;
+	char name[8];
+	int i;
+
+	snprintf(name, sizeof(name), "pd%d", cpu);
+
+	/* Create the directory of the performance domain */
+	d = debugfs_create_dir(name, rootdir);
+
+	debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops);
+
+	/* Create a sub-directory for each capacity state */
+	for (i = 0; i < pd->nr_cap_states; i++)
+		em_debug_create_cs(&pd->table[i], d);
 }
 
-static const struct sysfs_ops em_pd_sysfs_ops = {
-	.show	= show,
-};
+static int __init em_debug_init(void)
+{
+	/* Create /sys/kernel/debug/energy_model directory */
+	rootdir = debugfs_create_dir("energy_model", NULL);
 
-static struct kobj_type ktype_em_pd = {
-	.sysfs_ops	= &em_pd_sysfs_ops,
-	.default_attrs	= em_pd_default_attrs,
-};
-
+	return 0;
+}
+core_initcall(em_debug_init);
+#else /* CONFIG_DEBUG_FS */
+static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
+#endif
 static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
 						struct em_data_callback *cb)
 {
@@ -178,10 +157,7 @@
 	pd->nr_cap_states = nr_states;
 	cpumask_copy(to_cpumask(pd->cpus), span);
 
-	ret = kobject_init_and_add(&pd->kobj, &ktype_em_pd, em_kobject,
-				   "pd%u", cpu);
-	if (ret)
-		pr_err("pd%d: failed kobject_init_and_add(): %d\n", cpu, ret);
+	em_debug_create_pd(pd, cpu);
 
 	return pd;
 
@@ -236,15 +212,6 @@
 	 */
 	mutex_lock(&em_pd_mutex);
 
-	if (!em_kobject) {
-		em_kobject = kobject_create_and_add("energy_model",
-						&cpu_subsys.dev_root->kobj);
-		if (!em_kobject) {
-			ret = -ENODEV;
-			goto unlock;
-		}
-	}
-
 	for_each_cpu(cpu, span) {
 		/* Make sure we don't register an existing domain again. */
 		if (READ_ONCE(per_cpu(em_data, cpu))) {
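With the kobject/sysfs interface gone, the per-domain data lives under
/sys/kernel/debug/energy_model/pd<cpu>/, with one cs:<freq> subdirectory per
capacity state holding frequency, power and cost. A small user-space sketch
of consuming the new layout (assumes debugfs is mounted at the usual path
and sufficient privileges; error handling is minimal):

/*
 * Walk /sys/kernel/debug/energy_model and print the cpus file of
 * each performance domain, following the layout created above.
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *root = "/sys/kernel/debug/energy_model";
	struct dirent *de;
	DIR *dir = opendir(root);

	if (!dir)
		return 1;
	while ((de = readdir(dir)) != NULL) {
		char path[512], cpus[64];
		FILE *f;

		if (strncmp(de->d_name, "pd", 2) != 0)
			continue;
		snprintf(path, sizeof(path), "%s/%s/cpus", root, de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(cpus, sizeof(cpus), f))
			printf("%s: cpus %s", de->d_name, cpus);
		fclose(f);
	}
	closedir(dir);
	return 0;
}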
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 3d913f4..cfd3f4c 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -45,6 +45,7 @@
 #include <linux/seq_file.h>
 #include <linux/irq.h>
 #include <linux/irqdesc.h>
+#include <linux/cpumask.h>
 
 #include <linux/uaccess.h>
 #include <linux/export.h>
@@ -447,6 +448,9 @@
 
 int pm_qos_request_for_cpu(int pm_qos_class, int cpu)
 {
+	if (cpu_isolated(cpu))
+		return INT_MAX;
+
 	return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu];
 }
 EXPORT_SYMBOL(pm_qos_request_for_cpu);
@@ -469,6 +473,7 @@
 	val = c->default_value;
 
 	for_each_cpu(cpu, mask) {
+
 		switch (c->type) {
 		case PM_QOS_MIN:
 			if (c->target_per_cpu[cpu] < val)
@@ -728,12 +733,22 @@
 		/* silent return to keep pcm code cleaner */
 
 	if (!pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+		WARN(1, "%s called for unknown object\n", __func__);
 		return;
 	}
 
 	cancel_delayed_work_sync(&req->work);
 
+#ifdef CONFIG_SMP
+	if (req->type == PM_QOS_REQ_AFFINE_IRQ) {
+		int ret = 0;
+		/* Get the current affinity */
+		ret = irq_set_affinity_notifier(req->irq, NULL);
+		if (ret)
+			WARN(1, "IRQ affinity notify set failed\n");
+	}
+#endif
+
 	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
 			     &req->node, PM_QOS_REMOVE_REQ,
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 6c9866a..1ff17e2 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -448,10 +448,12 @@
 
 	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
 	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
+	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
 				       rcu_seq_snap(&sp->srcu_gp_seq));
+	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
 	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
 	rcu_seq_start(&sp->srcu_gp_seq);
 	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
diff --git a/kernel/relay.c b/kernel/relay.c
index 04f2486..9e0f523 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -428,6 +428,8 @@
 	dentry = chan->cb->create_buf_file(tmpname, chan->parent,
 					   S_IRUSR, buf,
 					   &chan->is_global);
+	if (IS_ERR(dentry))
+		dentry = NULL;
 
 	kfree(tmpname);
 
@@ -461,7 +463,7 @@
 		dentry = chan->cb->create_buf_file(NULL, NULL,
 						   S_IRUSR, buf,
 						   &chan->is_global);
-		if (WARN_ON(dentry))
+		if (IS_ERR_OR_NULL(dentry))
 			goto free_buf;
 	}
 
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 846af4d..9512fd7 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -31,3 +31,4 @@
 obj-$(CONFIG_MEMBARRIER) += membarrier.o
 obj-$(CONFIG_CPU_ISOLATION) += isolation.o
 obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
+obj-$(CONFIG_PSI) += psi.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 550bb21..fcfe2b3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -408,10 +408,11 @@
 	 * its already queued (either by us or someone else) and will get the
 	 * wakeup due to that.
 	 *
-	 * This cmpxchg() executes a full barrier, which pairs with the full
-	 * barrier executed by the wakeup in wake_up_q().
+	 * In order to ensure that a pending wakeup will observe our pending
+	 * state, even in the failed case, an explicit smp_mb() must be used.
 	 */
-	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+	smp_mb__before_atomic();
+	if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
 		return;
 
 	head->count++;
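The point of smp_mb__before_atomic() plus cmpxchg_relaxed() is that the
ordering guarantee now holds even when the cmpxchg fails; a bare cmpxchg()
only implies full ordering on success. A C11 sketch of the same idiom, with
an explicit fence before a relaxed compare-and-swap (WAKE_Q_TAIL mirrors the
kernel sentinel; the rest is hypothetical):

/*
 * An explicit full fence before a relaxed CAS orders the caller's
 * prior stores whether or not the CAS succeeds.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define WAKE_Q_TAIL ((void *)0x1)

bool wake_q_try_enqueue(_Atomic(void *) *next)
{
	void *expected = NULL;

	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__before_atomic() */
	return atomic_compare_exchange_strong_explicit(next, &expected,
						       WAKE_Q_TAIL,
						       memory_order_relaxed,
						       memory_order_relaxed);
}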
@@ -729,8 +730,10 @@
 	if (!(flags & ENQUEUE_NOCLOCK))
 		update_rq_clock(rq);
 
-	if (!(flags & ENQUEUE_RESTORE))
+	if (!(flags & ENQUEUE_RESTORE)) {
 		sched_info_queued(rq, p);
+		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
+	}
 
 	p->sched_class->enqueue_task(rq, p, flags);
 	walt_update_last_enqueue(p);
@@ -742,8 +745,10 @@
 	if (!(flags & DEQUEUE_NOCLOCK))
 		update_rq_clock(rq);
 
-	if (!(flags & DEQUEUE_SAVE))
+	if (!(flags & DEQUEUE_SAVE)) {
 		sched_info_dequeued(rq, p);
+		psi_dequeue(p, flags & DEQUEUE_SLEEP);
+	}
 
 	p->sched_class->dequeue_task(rq, p, flags);
 #ifdef CONFIG_SCHED_WALT
@@ -2124,6 +2129,7 @@
 			     sibling_count_hint);
 	if (task_cpu(p) != cpu) {
 		wake_flags |= WF_MIGRATED;
+		psi_ttwu_dequeue(p);
 		set_task_cpu(p, cpu);
 	}
 
@@ -3169,6 +3175,7 @@
 	curr->sched_class->task_tick(rq, curr, 0);
 	cpu_load_update_active(rq);
 	calc_global_load_tick(rq);
+	psi_task_tick(rq);
 
 	early_notif = early_detection_notify(rq, wallclock);
 	if (early_notif)
@@ -5195,9 +5202,7 @@
 	struct rq_flags rf;
 	struct rq *rq;
 
-	local_irq_disable();
-	rq = this_rq();
-	rq_lock(rq, &rf);
+	rq = this_rq_lock_irq(&rf);
 
 	schedstat_inc(rq->yld_count);
 	current->sched_class->yield_task(rq);
@@ -5983,8 +5988,6 @@
 	struct rq *rq = cpu_rq(cpu);
 	struct rq_flags rf;
 
-	watchdog_disable(cpu);
-
 	local_irq_disable();
 
 	irq_migrate_all_off_this_cpu();
@@ -6131,7 +6134,10 @@
 	smp_call_function_any(&avail_cpus, hrtimer_quiesce_cpu, &cpu, 1);
 	smp_call_function_any(&avail_cpus, timer_quiesce_cpu, &cpu, 1);
 
+	watchdog_disable(cpu);
+	irq_lock_sparse();
 	stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
+	irq_unlock_sparse();
 
 	calc_load_migrate(rq);
 	update_max_interval();
@@ -6174,7 +6180,7 @@
 		stop_cpus(cpumask_of(cpu), do_unisolation_work_cpu_stop, 0);
 
 		/* Kick CPU to immediately do load balancing */
-		if (!atomic_fetch_or(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+		if (!atomic_fetch_or(NOHZ_KICK_MASK, nohz_flags(cpu)))
 			smp_send_reschedule(cpu);
 	}
 
@@ -6633,6 +6639,8 @@
 
 	init_schedstats();
 
+	psi_init();
+
 	scheduler_running = 1;
 }
 
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5031645..46c1b5e 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -124,7 +124,8 @@
 	const struct sched_dl_entity *dl_se = &p->dl;
 
 	if (later_mask &&
-	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
+	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed) &&
+	    cpumask_and(later_mask, later_mask, cpu_active_mask)) {
 		return 1;
 	} else {
 		int best_cpu = cpudl_maximum(cp);
@@ -132,6 +133,7 @@
 		WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
 		if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&
+		    cpumask_test_cpu(best_cpu, cpu_active_mask) &&
 		    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
 			if (later_mask)
 				cpumask_set_cpu(best_cpu, later_mask);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d9e7f8d..3a84a1a 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -127,7 +127,7 @@
 static inline bool use_pelt(void)
 {
 #ifdef CONFIG_SCHED_WALT
-	return (!sysctl_sched_use_walt_cpu_util || walt_disabled);
+	return false;
 #else
 	return true;
 #endif
@@ -180,7 +180,7 @@
 {
 	u64 delta_ns, cycles;
 
-	if (unlikely(!sysctl_sched_use_walt_cpu_util))
+	if (use_pelt())
 		return;
 
 	/* Track cycles in current window */
@@ -198,7 +198,7 @@
 	u64 last_ws = sg_policy->last_ws;
 	unsigned int avg_freq;
 
-	if (unlikely(!sysctl_sched_use_walt_cpu_util))
+	if (use_pelt())
 		return;
 
 	BUG_ON(curr_ws < last_ws);
@@ -236,7 +236,7 @@
 
 	policy->cur = next_freq;
 	for_each_cpu(cpu, policy->cpus)
-		trace_cpu_frequency(next_freq, smp_processor_id());
+		trace_cpu_frequency(next_freq, cpu);
 }
 
 static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
@@ -317,8 +317,8 @@
 unsigned long schedutil_freq_util(int cpu, unsigned long util,
 				  unsigned long max, enum schedutil_type type)
 {
+	unsigned long dl_util, irq;
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long irq;
 
 	if (sched_feat(SUGOV_RT_MAX_FREQ) && type == FREQUENCY_UTIL &&
 						rt_rq_is_runnable(&rq->rt))
@@ -339,29 +339,26 @@
 	 * to be dealt with. The exact way of doing things depends on the calling
 	 * context.
 	 */
-	if (type == FREQUENCY_UTIL) {
-		/*
-		 * For frequency selection we do not make cpu_util_dl() a
-		 * permanent part of this sum because we want to use
-		 * cpu_bw_dl() later on, but we need to check if the
-		 * CFS+RT+DL sum is saturated (ie. no idle time) such
-		 * that we select f_max when there is no idle time.
-		 *
-		 * NOTE: numerical errors or stop class might cause us
-		 * to not quite hit saturation when we should --
-		 * something for later.
-		 */
-		if ((util + cpu_util_dl(rq)) >= max)
-			return max;
-	} else {
-		/*
-		 * OTOH, for energy computation we need the estimated
-		 * running time, so include util_dl and ignore dl_bw.
-		 */
-		util += cpu_util_dl(rq);
-		if (util >= max)
-			return max;
-	}
+	dl_util = cpu_util_dl(rq);
+
+	/*
+	 * For frequency selection we do not make cpu_util_dl() a permanent part
+	 * of this sum because we want to use cpu_bw_dl() later on, but we need
+	 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
+	 * that we select f_max when there is no idle time.
+	 *
+	 * NOTE: numerical errors or stop class might cause us to not quite hit
+	 * saturation when we should -- something for later.
+	 */
+	if (util + dl_util >= max)
+		return max;
+
+	/*
+	 * OTOH, for energy computation we need the estimated running time, so
+	 * include util_dl and ignore dl_bw.
+	 */
+	if (type == ENERGY_UTIL)
+		util += dl_util;
 
 	/*
 	 * There is still idle time; further improve the number by using the
@@ -375,21 +372,18 @@
 	util = scale_irq_capacity(util, irq, max);
 	util += irq;
 
-	if (type == FREQUENCY_UTIL) {
-		/*
-		 * Bandwidth required by DEADLINE must always be granted
-		 * while, for FAIR and RT, we use blocked utilization of
-		 * IDLE CPUs as a mechanism to gracefully reduce the
-		 * frequency when no tasks show up for longer periods of
-		 * time.
-		 *
-		 * Ideally we would like to set bw_dl as min/guaranteed
-		 * freq and util + bw_dl as requested freq. However,
-		 * cpufreq is not yet ready for such an interface. So,
-		 * we only do the latter for now.
-		 */
+	/*
+	 * Bandwidth required by DEADLINE must always be granted while, for
+	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
+	 * to gracefully reduce the frequency when no tasks show up for longer
+	 * periods of time.
+	 *
+	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
+	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
+	 * an interface. So, we only do the latter for now.
+	 */
+	if (type == FREQUENCY_UTIL)
 		util += cpu_bw_dl(rq);
-	}
 
 	return min(max, util);
 }
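After the restructuring, both callers share the saturation check and the IRQ
scaling, and differ only in which DL term they add: measured utilization for
ENERGY_UTIL, reserved bandwidth for FREQUENCY_UTIL. A standalone sketch of
the resulting flow with all inputs passed in as plain numbers
(scale_irq_capacity() is inlined as its util * (max - irq) / max definition;
freq_util() and min_ul() are stand-in names):

/*
 * Standalone sketch of the restructured schedutil_freq_util() flow.
 */
enum util_type { FREQUENCY_UTIL, ENERGY_UTIL };

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

unsigned long freq_util(unsigned long util, unsigned long dl_util,
			unsigned long dl_bw, unsigned long irq,
			unsigned long max, enum util_type type)
{
	/* No idle time left: request f_max regardless of the caller. */
	if (util + dl_util >= max)
		return max;

	/* Energy estimates need DL's measured running time. */
	if (type == ENERGY_UTIL)
		util += dl_util;

	/* Scale by non-IRQ capacity, then add the IRQ pressure itself. */
	util = util * (max - irq) / max;
	util += irq;

	/* Frequency selection must always grant DL its bandwidth. */
	if (type == FREQUENCY_UTIL)
		util += dl_bw;

	return min_ul(max, util);
}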
@@ -577,7 +571,7 @@
 	unsigned long cpu_util = sg_cpu->util;
 	bool is_hiload;
 
-	if (unlikely(!sysctl_sched_use_walt_cpu_util))
+	if (use_pelt())
 		return;
 
 	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
@@ -974,7 +968,7 @@
 
 /********************** cpufreq governor interface *********************/
 
-struct cpufreq_governor schedutil_gov;
+static struct cpufreq_governor schedutil_gov;
 
 static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
 {
@@ -997,20 +991,7 @@
 static int sugov_kthread_create(struct sugov_policy *sg_policy)
 {
 	struct task_struct *thread;
-	struct sched_attr attr = {
-		.size		= sizeof(struct sched_attr),
-		.sched_policy	= SCHED_DEADLINE,
-		.sched_flags	= SCHED_FLAG_SUGOV,
-		.sched_nice	= 0,
-		.sched_priority	= 0,
-		/*
-		 * Fake (unused) bandwidth; workaround to "fix"
-		 * priority inheritance.
-		 */
-		.sched_runtime	=  1000000,
-		.sched_deadline = 10000000,
-		.sched_period	= 10000000,
-	};
+	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
 	struct cpufreq_policy *policy = sg_policy->policy;
 	int ret;
 
@@ -1028,10 +1009,10 @@
 		return PTR_ERR(thread);
 	}
 
-	ret = sched_setattr_nocheck(thread, &attr);
+	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
 	if (ret) {
 		kthread_stop(thread);
-		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
 		return ret;
 	}
 
@@ -1291,7 +1272,7 @@
 	sg_policy->need_freq_update = true;
 }
 
-struct cpufreq_governor schedutil_gov = {
+static struct cpufreq_governor schedutil_gov = {
 	.name			= "schedutil",
 	.owner			= THIS_MODULE,
 	.dynamic_switching	= true,
@@ -1314,36 +1295,3 @@
 	return cpufreq_register_governor(&schedutil_gov);
 }
 fs_initcall(sugov_register);
-
-#ifdef CONFIG_ENERGY_MODEL
-extern bool sched_energy_update;
-extern struct mutex sched_energy_mutex;
-
-static void rebuild_sd_workfn(struct work_struct *work)
-{
-	mutex_lock(&sched_energy_mutex);
-	sched_energy_update = true;
-	rebuild_sched_domains();
-	sched_energy_update = false;
-	mutex_unlock(&sched_energy_mutex);
-}
-static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
-
-/*
- * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
- * on governor changes to make sure the scheduler knows about it.
- */
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-				  struct cpufreq_governor *old_gov)
-{
-	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
-		/*
-		 * When called from the cpufreq_register_driver() path, the
-		 * cpu_hotplug_lock is already held, so use a work item to
-		 * avoid nested locking in rebuild_sched_domains().
-		 */
-		schedule_work(&rebuild_sd_work);
-	}
-
-}
-#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fdf7436..6a45c8a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -135,8 +135,6 @@
 DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);
 
 #ifdef CONFIG_SCHED_WALT
-unsigned int sysctl_sched_use_walt_cpu_util = 1;
-unsigned int sysctl_sched_use_walt_task_util = 1;
 __read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload =
 						(10 * NSEC_PER_MSEC);
 #endif
@@ -424,10 +422,9 @@
 	}
 }
 
-/* Iterate thr' all leaf cfs_rq's on a runqueue */
-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
-	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
-				 leaf_cfs_rq_list)
+/* Iterate through all leaf cfs_rq's on a runqueue: */
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
 static inline struct cfs_rq *
@@ -519,8 +516,8 @@
 {
 }
 
-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
-		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
+#define for_each_leaf_cfs_rq(rq, cfs_rq)	\
+		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
 {
@@ -3709,8 +3706,7 @@
 static inline unsigned long task_util_est(struct task_struct *p)
 {
 #ifdef CONFIG_SCHED_WALT
-	if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
-		return p->ravg.demand_scaled;
+	return p->ravg.demand_scaled;
 #endif
 	return max(task_util(p), _task_util_est(p));
 }
@@ -3846,8 +3842,9 @@
 	if (capacity == max_capacity)
 		return true;
 
-	if (task_boost_policy(p) == SCHED_BOOST_ON_BIG
-				&& is_min_capacity_cpu(cpu))
+	if ((task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
+			schedtune_task_boost(p) > 0) &&
+			is_min_capacity_cpu(cpu))
 		return false;
 
 	return task_fits_capacity(p, capacity, cpu);
@@ -6295,6 +6292,7 @@
 
 #ifdef CONFIG_SCHED_SMT
 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+EXPORT_SYMBOL_GPL(sched_smt_present);
 
 static inline void set_idle_cores(int cpu, int val)
 {
@@ -6553,8 +6551,7 @@
 	 * utilization from cpu utilization. Instead just use
 	 * cpu_util for this case.
 	 */
-	if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util) &&
-						p->state == TASK_WAKING)
+	if (p->state == TASK_WAKING)
 		return cpu_util(cpu);
 #endif
 
@@ -7436,8 +7433,7 @@
 	}
 
 #ifdef CONFIG_SCHED_WALT
-	if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
-						 p->state == TASK_WAKING)
+	if (p->state == TASK_WAKING)
 		delta = task_util(p);
 #endif
 	if (task_placement_boost_enabled(p) || need_idle || boosted ||
@@ -8400,11 +8396,13 @@
 
 	p->on_rq = TASK_ON_RQ_MIGRATING;
 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
+	lockdep_off();
 	double_lock_balance(env->src_rq, env->dst_rq);
 	if (!(env->src_rq->clock_update_flags & RQCF_UPDATED))
 		update_rq_clock(env->src_rq);
 	set_task_cpu(p, env->dst_cpu);
 	double_unlock_balance(env->src_rq, env->dst_rq);
+	lockdep_on();
 }
 
 /*
@@ -8638,27 +8636,10 @@
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load.weight)
-		return false;
-
-	if (cfs_rq->avg.load_sum)
-		return false;
-
-	if (cfs_rq->avg.util_sum)
-		return false;
-
-	if (cfs_rq->avg.runnable_load_sum)
-		return false;
-
-	return true;
-}
-
 static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct cfs_rq *cfs_rq, *pos;
+	struct cfs_rq *cfs_rq;
 	const struct sched_class *curr_class;
 	struct rq_flags rf;
 	bool done = true;
@@ -8670,7 +8651,7 @@
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
 	 */
-	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
+	for_each_leaf_cfs_rq(rq, cfs_rq) {
 		struct sched_entity *se;
 
 		/* throttled entities do not contribute to load */
@@ -8685,13 +8666,6 @@
 		if (se && !skip_blocked_update(se))
 			update_load_avg(cfs_rq_of(se), se, 0);
 
-		/*
-		 * There can be a lot of idle CPU cgroups.  Don't let fully
-		 * decayed cfs_rqs linger on the list.
-		 */
-		if (cfs_rq_is_decayed(cfs_rq))
-			list_del_leaf_cfs_rq(cfs_rq);
-
 		/* Don't need periodic decay once load/util_avg are null */
 		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
@@ -11899,10 +11873,10 @@
 #ifdef CONFIG_SCHED_DEBUG
 void print_cfs_stats(struct seq_file *m, int cpu)
 {
-	struct cfs_rq *cfs_rq, *pos;
+	struct cfs_rq *cfs_rq;
 
 	rcu_read_lock();
-	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
+	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
 		print_cfs_rq(m, cpu, cfs_rq);
 	rcu_read_unlock();
 }
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index a171c12..28a5165 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -91,19 +91,73 @@
 	return delta;
 }
 
-/*
- * a1 = a0 * e + a * (1 - e)
+/**
+ * fixed_power_int - compute: x^n, in O(log n) time
+ *
+ * @x:         base of the power
+ * @frac_bits: fractional bits of @x
+ * @n:         power to raise @x to.
+ *
+ * By exploiting the relation between the definition of the natural power
+ * function: x^n := x*x*...*x (x multiplied by itself for n times), and
+ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
+ * (where: n_i \elem {0, 1}, the binary vector representing n),
+ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
+ * of course trivially computable in O(log_2 n), the length of our binary
+ * vector.
  */
 static unsigned long
-calc_load(unsigned long load, unsigned long exp, unsigned long active)
+fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
 {
-	unsigned long newload;
+	unsigned long result = 1UL << frac_bits;
 
-	newload = load * exp + active * (FIXED_1 - exp);
-	if (active >= load)
-		newload += FIXED_1-1;
+	if (n) {
+		for (;;) {
+			if (n & 1) {
+				result *= x;
+				result += 1UL << (frac_bits - 1);
+				result >>= frac_bits;
+			}
+			n >>= 1;
+			if (!n)
+				break;
+			x *= x;
+			x += 1UL << (frac_bits - 1);
+			x >>= frac_bits;
+		}
+	}
 
-	return newload / FIXED_1;
+	return result;
+}
+
+/*
+ * a1 = a0 * e + a * (1 - e)
+ *
+ * a2 = a1 * e + a * (1 - e)
+ *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
+ *    = a0 * e^2 + a * (1 - e) * (1 + e)
+ *
+ * a3 = a2 * e + a * (1 - e)
+ *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
+ *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
+ *
+ *  ...
+ *
+ * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
+ *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
+ *    = a0 * e^n + a * (1 - e^n)
+ *
+ * [1] application of the geometric series:
+ *
+ *              n         1 - x^(n+1)
+ *     S_n := \Sum x^i = -------------
+ *             i=0          1 - x
+ */
+unsigned long
+calc_load_n(unsigned long load, unsigned long exp,
+	    unsigned long active, unsigned int n)
+{
+	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -225,75 +279,6 @@
 	return delta;
 }
 
-/**
- * fixed_power_int - compute: x^n, in O(log n) time
- *
- * @x:         base of the power
- * @frac_bits: fractional bits of @x
- * @n:         power to raise @x to.
- *
- * By exploiting the relation between the definition of the natural power
- * function: x^n := x*x*...*x (x multiplied by itself for n times), and
- * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
- * (where: n_i \elem {0, 1}, the binary vector representing n),
- * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
- * of course trivially computable in O(log_2 n), the length of our binary
- * vector.
- */
-static unsigned long
-fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
-{
-	unsigned long result = 1UL << frac_bits;
-
-	if (n) {
-		for (;;) {
-			if (n & 1) {
-				result *= x;
-				result += 1UL << (frac_bits - 1);
-				result >>= frac_bits;
-			}
-			n >>= 1;
-			if (!n)
-				break;
-			x *= x;
-			x += 1UL << (frac_bits - 1);
-			x >>= frac_bits;
-		}
-	}
-
-	return result;
-}
-
-/*
- * a1 = a0 * e + a * (1 - e)
- *
- * a2 = a1 * e + a * (1 - e)
- *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
- *    = a0 * e^2 + a * (1 - e) * (1 + e)
- *
- * a3 = a2 * e + a * (1 - e)
- *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
- *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
- *
- *  ...
- *
- * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
- *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
- *    = a0 * e^n + a * (1 - e^n)
- *
- * [1] application of the geometric series:
- *
- *              n         1 - x^(n+1)
- *     S_n := \Sum x^i = -------------
- *             i=0          1 - x
- */
-static unsigned long
-calc_load_n(unsigned long load, unsigned long exp,
-	    unsigned long active, unsigned int n)
-{
-	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
-}
-
 /*
  * NO_HZ can leave us missing all per-CPU ticks calling
  * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into
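Hoisting fixed_power_int() and calc_load_n() out of the NO_HZ-only section
(and dropping static from calc_load_n()) makes them usable by the PSI code
below, which folds many missed periods into its running averages in one
step. The closed form a_n = a_0 * e^n + a * (1 - e^n) can be sanity-checked
against n iterative steps; a self-contained sketch using the kernel's 11-bit
fixed point (the test harness and the EXP_1 constant choice are
illustrative):

/*
 * Check that calc_load_n() (folding n periods at once via e^n,
 * computed in O(log n) fixed point) tracks n iterative calc_load()
 * steps, up to fixed-point rounding.
 */
#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)
#define EXP_1	1884UL	/* 1/exp(5s/1min) as fixed point */

static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
	unsigned long newload = load * exp + active * (FIXED_1 - exp);

	if (active >= load)
		newload += FIXED_1 - 1;
	return newload / FIXED_1;
}

/* x^n in fixed point via exponentiation by squaring: O(log n). */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	while (n) {
		if (n & 1)
			result = (result * x + (1UL << (frac_bits - 1)))
					>> frac_bits;
		n >>= 1;
		if (n)
			x = (x * x + (1UL << (frac_bits - 1))) >> frac_bits;
	}
	return result;
}

static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
	    unsigned long active, unsigned int n)
{
	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}

int main(void)
{
	unsigned long iter = 3 * FIXED_1, active = FIXED_1;
	unsigned int i, n = 16;

	for (i = 0; i < n; i++)
		iter = calc_load(iter, EXP_1, active);
	printf("iterative=%lu one-shot=%lu\n", iter,
	       calc_load_n(3 * FIXED_1, EXP_1, active, n));
	return 0;
}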
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
new file mode 100644
index 0000000..e88918e
--- /dev/null
+++ b/kernel/sched/psi.c
@@ -0,0 +1,1280 @@
+/*
+ * Pressure stall information for CPU, memory and IO
+ *
+ * Copyright (c) 2018 Facebook, Inc.
+ * Author: Johannes Weiner <hannes@cmpxchg.org>
+ *
+ * Polling support by Suren Baghdasaryan <surenb@google.com>
+ * Copyright (c) 2018 Google, Inc.
+ *
+ * When CPU, memory and IO are contended, tasks experience delays that
+ * reduce throughput and introduce latencies into the workload. Memory
+ * and IO contention, in addition, can cause a full loss of forward
+ * progress in which the CPU goes idle.
+ *
+ * This code aggregates individual task delays into resource pressure
+ * metrics that indicate problems with both workload health and
+ * resource utilization.
+ *
+ *			Model
+ *
+ * The time in which a task can execute on a CPU is our baseline for
+ * productivity. Pressure expresses the amount of time in which this
+ * potential cannot be realized due to resource contention.
+ *
+ * This concept of productivity has two components: the workload and
+ * the CPU. To measure the impact of pressure on both, we define two
+ * contention states for a resource: SOME and FULL.
+ *
+ * In the SOME state of a given resource, one or more tasks are
+ * delayed on that resource. This affects the workload's ability to
+ * perform work, but the CPU may still be executing other tasks.
+ *
+ * In the FULL state of a given resource, all non-idle tasks are
+ * delayed on that resource such that nobody is advancing and the CPU
+ * goes idle. This leaves both workload and CPU unproductive.
+ *
+ * (Naturally, the FULL state doesn't exist for the CPU resource.)
+ *
+ *	SOME = nr_delayed_tasks != 0
+ *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
+ *
+ * The percentage of wallclock time spent in those compound stall
+ * states gives pressure numbers between 0 and 100 for each resource,
+ * where the SOME percentage indicates workload slowdowns and the FULL
+ * percentage indicates reduced CPU utilization:
+ *
+ *	%SOME = time(SOME) / period
+ *	%FULL = time(FULL) / period
+ *
+ *			Multiple CPUs
+ *
+ * The more tasks and available CPUs there are, the more work can be
+ * performed concurrently. This means that the potential that can go
+ * unrealized due to resource contention *also* scales with non-idle
+ * tasks and CPUs.
+ *
+ * Consider a scenario where 257 number crunching tasks are trying to
+ * run concurrently on 256 CPUs. If we simply aggregated the task
+ * states, we would have to conclude a CPU SOME pressure number of
+ * 100%, since *somebody* is waiting on a runqueue at all
+ * times. However, that is clearly not the amount of contention the
+ * workload is experiencing: only one out of 256 possible execution
+ * threads will be contended at any given time, or about 0.4%.
+ *
+ * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
+ * given time *one* of the tasks is delayed due to a lack of memory.
+ * Again, looking purely at the task state would yield a memory FULL
+ * pressure number of 0%, since *somebody* is always making forward
+ * progress. But again this wouldn't capture the amount of execution
+ * potential lost, which is 1 out of 4 CPUs, or 25%.
+ *
+ * To calculate wasted potential (pressure) with multiple processors,
+ * we have to base our calculation on the number of non-idle tasks in
+ * conjunction with the number of available CPUs, which is the number
+ * of potential execution threads. SOME becomes then the proportion of
+ * delayed tasks to possible threads, and FULL is the share of possible
+ * threads that are unproductive due to delays:
+ *
+ *	threads = min(nr_nonidle_tasks, nr_cpus)
+ *	   SOME = min(nr_delayed_tasks / threads, 1)
+ *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
+ *
+ * For the 257 number crunchers on 256 CPUs, this yields:
+ *
+ *	threads = min(257, 256)
+ *	   SOME = min(1 / 256, 1)             = 0.4%
+ *	   FULL = (256 - min(257, 256)) / 256 = 0%
+ *
+ * For the 1 out of 4 memory-delayed tasks, this yields:
+ *
+ *	threads = min(4, 4)
+ *	   SOME = min(1 / 4, 1)               = 25%
+ *	   FULL = (4 - min(3, 4)) / 4         = 25%
+ *
+ * [ Substitute nr_cpus with 1, and you can see that it's a natural
+ *   extension of the single-CPU model. ]
+ *
+ *			Implementation
+ *
+ * To assess the precise time spent in each such state, we would have
+ * to freeze the system on task changes and start/stop the state
+ * clocks accordingly. Obviously that doesn't scale in practice.
+ *
+ * Because the scheduler aims to distribute the compute load evenly
+ * among the available CPUs, we can track task state locally to each
+ * CPU and, at much lower frequency, extrapolate the global state for
+ * the cumulative stall times and the running averages.
+ *
+ * For each runqueue, we track:
+ *
+ *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
+ *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
+ *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
+ *
+ * and then periodically aggregate:
+ *
+ *	tNONIDLE = sum(tNONIDLE[i])
+ *
+ *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
+ *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
+ *
+ *	   %SOME = tSOME / period
+ *	   %FULL = tFULL / period
+ *
+ * This gives us an approximation of pressure that is practical
+ * cost-wise, yet way more sensitive and accurate than periodic
+ * sampling of the aggregate task states would be.
+ */
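The two worked examples in the comment follow directly from the SOME/FULL
formulas. A toy reproduction in floating point for readability (the kernel
itself tracks these as per-CPU time integrals in fixed point, not as
instantaneous ratios):

/*
 * Reproduce the 257-on-256 and 1-of-4 examples from the model above.
 */
#include <stdio.h>

static double min_d(double a, double b) { return a < b ? a : b; }

static void pressure(double nr_delayed, double nr_running,
		     double nr_nonidle, double nr_cpus)
{
	double threads = min_d(nr_nonidle, nr_cpus);
	double some = min_d(nr_delayed / threads, 1.0);
	double full = (threads - min_d(nr_running, threads)) / threads;

	printf("SOME=%.1f%% FULL=%.1f%%\n", some * 100, full * 100);
}

int main(void)
{
	pressure(1, 257, 257, 256);	/* 257 crunchers on 256 CPUs */
	pressure(1, 3, 4, 4);		/* 1 of 4 tasks delayed on memory */
	return 0;
}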
+
+#include "../workqueue_internal.h"
+#include <linux/sched/loadavg.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/seqlock.h>
+#include <linux/uaccess.h>
+#include <linux/cgroup.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/ctype.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/psi.h>
+#include "sched.h"
+
+static int psi_bug __read_mostly;
+
+DEFINE_STATIC_KEY_FALSE(psi_disabled);
+
+#ifdef CONFIG_PSI_DEFAULT_DISABLED
+static bool psi_enable;
+#else
+static bool psi_enable = true;
+#endif
+static int __init setup_psi(char *str)
+{
+	return kstrtobool(str, &psi_enable) == 0;
+}
+__setup("psi=", setup_psi);
+
+/* Running averages - we need to be higher-res than loadavg */
+#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
+#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
+#define EXP_60s		1981		/* 1/exp(2s/60s) */
+#define EXP_300s	2034		/* 1/exp(2s/300s) */
+
+/* PSI trigger definitions */
+#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
+#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
+#define UPDATES_PER_WINDOW 10	/* 10 updates per window */
+
+/* Sampling frequency in nanoseconds */
+static u64 psi_period __read_mostly;
+
+/* System-level pressure and stall tracking */
+static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
+static struct psi_group psi_system = {
+	.pcpu = &system_group_pcpu,
+};
+
+static void psi_avgs_work(struct work_struct *work);
+
+static void group_init(struct psi_group *group)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
+	group->avg_next_update = sched_clock() + psi_period;
+	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+	mutex_init(&group->avgs_lock);
+	/* Init trigger-related members */
+	atomic_set(&group->poll_scheduled, 0);
+	mutex_init(&group->trigger_lock);
+	INIT_LIST_HEAD(&group->triggers);
+	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
+	group->poll_states = 0;
+	group->poll_min_period = U32_MAX;
+	memset(group->polling_total, 0, sizeof(group->polling_total));
+	group->polling_next_update = ULLONG_MAX;
+	group->polling_until = 0;
+	rcu_assign_pointer(group->poll_kworker, NULL);
+}
+
+void __init psi_init(void)
+{
+	if (!psi_enable) {
+		static_branch_enable(&psi_disabled);
+		return;
+	}
+
+	psi_period = jiffies_to_nsecs(PSI_FREQ);
+	group_init(&psi_system);
+}
+
+static bool test_state(unsigned int *tasks, enum psi_states state)
+{
+	switch (state) {
+	case PSI_IO_SOME:
+		return tasks[NR_IOWAIT];
+	case PSI_IO_FULL:
+		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
+	case PSI_MEM_SOME:
+		return tasks[NR_MEMSTALL];
+	case PSI_MEM_FULL:
+		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
+	case PSI_CPU_SOME:
+		return tasks[NR_RUNNING] > 1;
+	case PSI_NONIDLE:
+		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
+			tasks[NR_RUNNING];
+	default:
+		return false;
+	}
+}
+
+static void get_recent_times(struct psi_group *group, int cpu,
+			     enum psi_aggregators aggregator, u32 *times,
+			     u32 *pchanged_states)
+{
+	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
+	u64 now, state_start;
+	enum psi_states s;
+	unsigned int seq;
+	u32 state_mask;
+
+	*pchanged_states = 0;
+
+	/* Snapshot a coherent view of the CPU state */
+	do {
+		seq = read_seqcount_begin(&groupc->seq);
+		now = cpu_clock(cpu);
+		memcpy(times, groupc->times, sizeof(groupc->times));
+		state_mask = groupc->state_mask;
+		state_start = groupc->state_start;
+	} while (read_seqcount_retry(&groupc->seq, seq));
+
+	/* Calculate state time deltas against the previous snapshot */
+	for (s = 0; s < NR_PSI_STATES; s++) {
+		u32 delta;
+		/*
+		 * In addition to already concluded states, we also
+		 * incorporate currently active states on the CPU,
+		 * since states may last for many sampling periods.
+		 *
+		 * This way we keep our delta sampling buckets small
+		 * (u32) and our reported pressure close to what's
+		 * actually happening.
+		 */
+		if (state_mask & (1 << s))
+			times[s] += now - state_start;
+
+		delta = times[s] - groupc->times_prev[aggregator][s];
+		groupc->times_prev[aggregator][s] = times[s];
+
+		times[s] = delta;
+		if (delta)
+			*pchanged_states |= (1 << s);
+	}
+}
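The snapshot loop above is a standard seqcount read side: retry until an
even, unchanged sequence number brackets the copy. A user-space analogue
with hypothetical names (note that the plain memcpy of shared data is
formally a data race under the C11 model, which kernel seqcounts sidestep
with their own barrier primitives):

/*
 * Seqcount-style reader: spin while the writer holds an odd sequence,
 * copy, then retry if the sequence changed during the copy.
 */
#include <stdatomic.h>
#include <string.h>

#define NR_TIMES 8

struct snapshot {
	atomic_uint seq;		/* odd while a writer is updating */
	unsigned int times[NR_TIMES];
};

void read_times(struct snapshot *s, unsigned int out[NR_TIMES])
{
	unsigned int seq;

	do {
		do {			/* wait for the writer to finish */
			seq = atomic_load_explicit(&s->seq,
						   memory_order_acquire);
		} while (seq & 1);
		memcpy(out, s->times, sizeof(s->times));
		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&s->seq,
				      memory_order_relaxed) != seq);
}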
+
+static void calc_avgs(unsigned long avg[3], int missed_periods,
+		      u64 time, u64 period)
+{
+	unsigned long pct;
+
+	/* Fill in zeroes for periods of no activity */
+	if (missed_periods) {
+		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
+		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
+		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
+	}
+
+	/* Sample the most recent active period */
+	pct = div_u64(time * 100, period);
+	pct *= FIXED_1;
+	avg[0] = calc_load(avg[0], EXP_10s, pct);
+	avg[1] = calc_load(avg[1], EXP_60s, pct);
+	avg[2] = calc_load(avg[2], EXP_300s, pct);
+}
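The EXP_* constants defined earlier are FIXED_1 / e^(period/window) in the
same 11-bit fixed point as the load average, using the 2-second PSI_FREQ
sampling period. They can be regenerated to confirm the values 1677, 1981
and 2034:

/*
 * Regenerate EXP_10s/EXP_60s/EXP_300s. Compile with -lm.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	const double fixed_1 = 2048.0;	/* 1 << 11 */
	const int window_s[] = { 10, 60, 300 };
	int i;

	for (i = 0; i < 3; i++)
		printf("EXP_%ds = %.0f\n", window_s[i],
		       fixed_1 / exp(2.0 / window_s[i]));
	return 0;
}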
+
+static void collect_percpu_times(struct psi_group *group,
+				 enum psi_aggregators aggregator,
+				 u32 *pchanged_states)
+{
+	u64 deltas[NR_PSI_STATES - 1] = { 0, };
+	unsigned long nonidle_total = 0;
+	u32 changed_states = 0;
+	int cpu;
+	int s;
+
+	/*
+	 * Collect the per-cpu time buckets and average them into a
+	 * single time sample that is normalized to wallclock time.
+	 *
+	 * For averaging, each CPU is weighted by its non-idle time in
+	 * the sampling period. This eliminates artifacts from uneven
+	 * loading, or even entirely idle CPUs.
+	 */
+	for_each_possible_cpu(cpu) {
+		u32 times[NR_PSI_STATES];
+		u32 nonidle;
+		u32 cpu_changed_states;
+
+		get_recent_times(group, cpu, aggregator, times,
+				&cpu_changed_states);
+		changed_states |= cpu_changed_states;
+
+		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
+		nonidle_total += nonidle;
+
+		for (s = 0; s < PSI_NONIDLE; s++)
+			deltas[s] += (u64)times[s] * nonidle;
+	}
+
+	/*
+	 * Integrate the sample into the running statistics that are
+	 * reported to userspace: the cumulative stall times and the
+	 * decaying averages.
+	 *
+	 * Pressure percentages are sampled at PSI_FREQ. We might be
+	 * called more often when the user polls more frequently than
+	 * that; we might be called less often when there is no task
+	 * activity, thus no data, and clock ticks are sporadic. The
+	 * below handles both.
+	 */
+
+	/* total= */
+	for (s = 0; s < NR_PSI_STATES - 1; s++)
+		group->total[aggregator][s] +=
+				div_u64(deltas[s], max(nonidle_total, 1UL));
+
+	if (pchanged_states)
+		*pchanged_states = changed_states;
+}
+
+static u64 update_averages(struct psi_group *group, u64 now)
+{
+	unsigned long missed_periods = 0;
+	u64 expires, period;
+	u64 avg_next_update;
+	int s;
+
+	/* avgX= */
+	expires = group->avg_next_update;
+	if (now - expires >= psi_period)
+		missed_periods = div_u64(now - expires, psi_period);
+
+	/*
+	 * The periodic clock tick can get delayed for various
+	 * reasons, especially on loaded systems. To avoid clock
+	 * drift, we schedule the clock in fixed psi_period intervals.
+	 * But the deltas we sample out of the per-cpu buckets above
+	 * are based on the actual time elapsing between clock ticks.
+	 */
+	avg_next_update = expires + ((1 + missed_periods) * psi_period);
+	period = now - (group->avg_last_update + (missed_periods * psi_period));
+	group->avg_last_update = now;
+
+	for (s = 0; s < NR_PSI_STATES - 1; s++) {
+		u32 sample;
+
+		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
+		/*
+		 * Due to the lockless sampling of the time buckets,
+		 * recorded time deltas can slip into the next period,
+		 * which under full pressure can result in samples in
+		 * excess of the period length.
+		 *
+		 * We don't want to report nonsensical pressures in
+		 * excess of 100%, nor do we want to drop such events
+		 * on the floor. Instead we punt any overage into the
+		 * future until pressure subsides. By doing this we
+		 * don't underreport the occurring pressure curve, we
+		 * just report it delayed by one period length.
+		 *
+		 * The error isn't cumulative. As soon as another
+		 * delta slips from a period P to P+1, by definition
+		 * it frees up its time T in P.
+		 */
+		if (sample > period)
+			sample = period;
+		group->avg_total[s] += sample;
+		calc_avgs(group->avg[s], missed_periods, sample, period);
+	}
+
+	return avg_next_update;
+}
+
+static void psi_avgs_work(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct psi_group *group;
+	u32 changed_states;
+	bool nonidle;
+	u64 now;
+
+	dwork = to_delayed_work(work);
+	group = container_of(dwork, struct psi_group, avgs_work);
+
+	mutex_lock(&group->avgs_lock);
+
+	now = sched_clock();
+
+	collect_percpu_times(group, PSI_AVGS, &changed_states);
+	nonidle = changed_states & (1 << PSI_NONIDLE);
+	/*
+	 * If there is task activity, periodically fold the per-cpu
+	 * times and feed samples into the running averages. If things
+	 * are idle and there is no data to process, stop the clock.
+	 * Once restarted, we'll catch up the running averages in one
+	 * go - see calc_avgs() and missed_periods.
+	 */
+	if (now >= group->avg_next_update)
+		group->avg_next_update = update_averages(group, now);
+
+	if (nonidle) {
+		schedule_delayed_work(dwork, nsecs_to_jiffies(
+				group->avg_next_update - now) + 1);
+	}
+
+	mutex_unlock(&group->avgs_lock);
+}
+
+/* Trigger tracking window manipulations */
+static void window_reset(struct psi_window *win, u64 now, u64 value,
+			 u64 prev_growth)
+{
+	win->start_time = now;
+	win->start_value = value;
+	win->prev_growth = prev_growth;
+}
+
+/*
+ * PSI growth tracking window update and growth calculation routine.
+ *
+ * This approximates a sliding tracking window by interpolating
+ * partially elapsed windows using historical growth data from the
+ * previous intervals. This minimizes memory requirements (by not storing
+ * all the intermediate values in the previous window) and simplifies
+ * the calculations. It works well because the PSI signal changes only in
+ * the positive direction, and over relatively small window sizes the growth
+ * is close to linear.
+ */
+static u64 window_update(struct psi_window *win, u64 now, u64 value)
+{
+	u64 elapsed;
+	u64 growth;
+
+	elapsed = now - win->start_time;
+	growth = value - win->start_value;
+	/*
+	 * After each tracking window passes win->start_value and
+	 * win->start_time get reset and win->prev_growth stores
+	 * the average per-window growth of the previous window.
+	 * win->prev_growth is then used to interpolate additional
+	 * growth from the previous window assuming it was linear.
+	 */
+	if (elapsed > win->size)
+		window_reset(win, now, value, growth);
+	else {
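+		/*
+		 * Window still open: add the previous window's growth,
+		 * scaled by the time remaining in this window.
+		 */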
+		u32 remaining;
+
+		remaining = win->size - elapsed;
+		growth += div_u64(win->prev_growth * remaining, win->size);
+	}
+
+	return growth;
+}
+
+static void init_triggers(struct psi_group *group, u64 now)
+{
+	struct psi_trigger *t;
+
+	list_for_each_entry(t, &group->triggers, node)
+		window_reset(&t->win, now,
+				group->total[PSI_POLL][t->state], 0);
+	memcpy(group->polling_total, group->total[PSI_POLL],
+		   sizeof(group->polling_total));
+	group->polling_next_update = now + group->poll_min_period;
+}
+
+static u64 update_triggers(struct psi_group *group, u64 now)
+{
+	struct psi_trigger *t;
+	bool new_stall = false;
+	u64 *total = group->total[PSI_POLL];
+
+	/*
+	 * On subsequent updates, calculate growth deltas and let
+	 * watchers know when their specified thresholds are exceeded.
+	 */
+	list_for_each_entry(t, &group->triggers, node) {
+		u64 growth;
+
+		/* Check for stall activity */
+		if (group->polling_total[t->state] == total[t->state])
+			continue;
+
+		/*
+		 * Multiple triggers might be looking at the same state,
+		 * remember to update group->polling_total[] once we've
+		 * been through all of them. Also remember to extend the
+		 * polling time if we see new stall activity.
+		 */
+		new_stall = true;
+
+		/* Calculate growth since last update */
+		growth = window_update(&t->win, now, total[t->state]);
+		if (growth < t->threshold)
+			continue;
+
+		/* Limit event signaling to once per window */
+		if (now < t->last_event_time + t->win.size)
+			continue;
+
+		/* Generate an event */
+		if (cmpxchg(&t->event, 0, 1) == 0)
+			wake_up_interruptible(&t->event_wait);
+		t->last_event_time = now;
+	}
+
+	if (new_stall)
+		memcpy(group->polling_total, total,
+				sizeof(group->polling_total));
+
+	return now + group->poll_min_period;
+}
+
+/*
+ * Schedule polling if it's not already scheduled. This is safe to call
+ * even from the hotpath: although kthread_queue_delayed_work() takes
+ * worker->lock, that spinlock is never contended because the
+ * poll_scheduled atomic prevents such competition.
+ */
+static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
+{
+	struct kthread_worker *kworker;
+
+	/* Do not reschedule if already scheduled */
+	if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
+		return;
+
+	rcu_read_lock();
+
+	kworker = rcu_dereference(group->poll_kworker);
+	/*
+	 * kworker might be NULL in case psi_trigger_destroy races with
+	 * psi_task_change (hotpath) which can't use locks
+	 */
+	if (likely(kworker))
+		kthread_queue_delayed_work(kworker, &group->poll_work, delay);
+	else
+		atomic_set(&group->poll_scheduled, 0);
+
+	rcu_read_unlock();
+}
+
+static void psi_poll_work(struct kthread_work *work)
+{
+	struct kthread_delayed_work *dwork;
+	struct psi_group *group;
+	u32 changed_states;
+	u64 now;
+
+	dwork = container_of(work, struct kthread_delayed_work, work);
+	group = container_of(dwork, struct psi_group, poll_work);
+
+	atomic_set(&group->poll_scheduled, 0);
+
+	mutex_lock(&group->trigger_lock);
+
+	now = sched_clock();
+
+	collect_percpu_times(group, PSI_POLL, &changed_states);
+
+	if (changed_states & group->poll_states) {
+		/* Initialize trigger windows when entering polling mode */
+		if (now > group->polling_until)
+			init_triggers(group, now);
+
+		/*
+		 * Keep the monitor active for at least the duration of the
+		 * minimum tracking window as long as monitor states are
+		 * changing.
+		 */
+		group->polling_until = now +
+			group->poll_min_period * UPDATES_PER_WINDOW;
+	}
+
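+	/*
+	 * No state changes within a full tracking window: stop polling
+	 * until psi_task_change() sees a monitored state again and
+	 * reschedules this work.
+	 */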
+	if (now > group->polling_until) {
+		group->polling_next_update = ULLONG_MAX;
+		goto out;
+	}
+
+	if (now >= group->polling_next_update)
+		group->polling_next_update = update_triggers(group, now);
+
+	psi_schedule_poll_work(group,
+		nsecs_to_jiffies(group->polling_next_update - now) + 1);
+
+out:
+	mutex_unlock(&group->trigger_lock);
+}
+
+static void record_times(struct psi_group_cpu *groupc, int cpu,
+			 bool memstall_tick)
+{
+	u32 delta;
+	u64 now;
+
+	now = cpu_clock(cpu);
+	delta = now - groupc->state_start;
+	groupc->state_start = now;
+
+	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
+		groupc->times[PSI_IO_SOME] += delta;
+		if (groupc->state_mask & (1 << PSI_IO_FULL))
+			groupc->times[PSI_IO_FULL] += delta;
+	}
+
+	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
+		groupc->times[PSI_MEM_SOME] += delta;
+		if (groupc->state_mask & (1 << PSI_MEM_FULL))
+			groupc->times[PSI_MEM_FULL] += delta;
+		else if (memstall_tick) {
+			u32 sample;
+			/*
+			 * Since we care about lost potential, a
+			 * memstall is FULL when there are no other
+			 * working tasks, but also when the CPU is
+			 * actively reclaiming and nothing productive
+			 * could run even if it were runnable.
+			 *
+			 * When the timer tick sees a reclaiming CPU,
+			 * regardless of runnable tasks, sample a FULL
+			 * tick (or less if it hasn't been a full tick
+			 * since the last state change).
+			 */
+			sample = min(delta, (u32)jiffies_to_nsecs(1));
+			groupc->times[PSI_MEM_FULL] += sample;
+		}
+	}
+
+	if (groupc->state_mask & (1 << PSI_CPU_SOME))
+		groupc->times[PSI_CPU_SOME] += delta;
+
+	if (groupc->state_mask & (1 << PSI_NONIDLE))
+		groupc->times[PSI_NONIDLE] += delta;
+}
+
+static u32 psi_group_change(struct psi_group *group, int cpu,
+			    unsigned int clear, unsigned int set)
+{
+	struct psi_group_cpu *groupc;
+	unsigned int t, m;
+	enum psi_states s;
+	u32 state_mask = 0;
+
+	groupc = per_cpu_ptr(group->pcpu, cpu);
+
+	/*
+	 * First we assess the aggregate resource states this CPU's
+	 * tasks have been in since the last change, and account any
+	 * SOME and FULL time these may have resulted in.
+	 *
+	 * Then we update the task counts according to the state
+	 * change requested through the @clear and @set bits.
+	 */
+	write_seqcount_begin(&groupc->seq);
+
+	record_times(groupc, cpu, false);
+
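+	/* Walk the @clear bits and decrement the matching task counts. */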
+	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
+		if (!(m & (1 << t)))
+			continue;
+		if (groupc->tasks[t] == 0 && !psi_bug) {
+			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
+					cpu, t, groupc->tasks[0],
+					groupc->tasks[1], groupc->tasks[2],
+					clear, set);
+			psi_bug = 1;
+		}
+		groupc->tasks[t]--;
+	}
+
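+	/* Walk the @set bits and increment the matching task counts. */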
+	for (t = 0; set; set &= ~(1 << t), t++)
+		if (set & (1 << t))
+			groupc->tasks[t]++;
+
+	/* Calculate state mask representing active states */
+	for (s = 0; s < NR_PSI_STATES; s++) {
+		if (test_state(groupc->tasks, s))
+			state_mask |= (1 << s);
+	}
+	groupc->state_mask = state_mask;
+
+	write_seqcount_end(&groupc->seq);
+
+	return state_mask;
+}
+
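+/*
+ * Walk from the task's own cgroup up through its ancestors, finishing
+ * with the system-wide psi group. The root cgroup is skipped because
+ * psi_system already accounts for it.
+ */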
+static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
+{
+#ifdef CONFIG_CGROUPS
+	struct cgroup *cgroup = NULL;
+
+	if (!*iter)
+		cgroup = task->cgroups->dfl_cgrp;
+	else if (*iter == &psi_system)
+		return NULL;
+	else
+		cgroup = cgroup_parent(*iter);
+
+	if (cgroup && cgroup_parent(cgroup)) {
+		*iter = cgroup;
+		return cgroup_psi(cgroup);
+	}
+#else
+	if (*iter)
+		return NULL;
+#endif
+	*iter = &psi_system;
+	return &psi_system;
+}
+
+void psi_task_change(struct task_struct *task, int clear, int set)
+{
+	int cpu = task_cpu(task);
+	struct psi_group *group;
+	bool wake_clock = true;
+	void *iter = NULL;
+
+	if (!task->pid)
+		return;
+
+	if (((task->psi_flags & set) ||
+	     (task->psi_flags & clear) != clear) &&
+	    !psi_bug) {
+		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
+				task->pid, task->comm, cpu,
+				task->psi_flags, clear, set);
+		psi_bug = 1;
+	}
+
+	task->psi_flags &= ~clear;
+	task->psi_flags |= set;
+
+	/*
+	 * Periodic aggregation shuts off if there is a period of no
+	 * task changes, so we wake it back up if necessary. However,
+	 * don't do this if the task change is the aggregation worker
+	 * itself going to sleep, or we'll ping-pong forever.
+	 */
+	if (unlikely((clear & TSK_RUNNING) &&
+		     (task->flags & PF_WQ_WORKER) &&
+		     wq_worker_last_func(task) == psi_avgs_work))
+		wake_clock = false;
+
+	while ((group = iterate_groups(task, &iter))) {
+		u32 state_mask = psi_group_change(group, cpu, clear, set);
+
+		if (state_mask & group->poll_states)
+			psi_schedule_poll_work(group, 1);
+
+		if (wake_clock && !delayed_work_pending(&group->avgs_work))
+			schedule_delayed_work(&group->avgs_work, PSI_FREQ);
+	}
+}
+
+void psi_memstall_tick(struct task_struct *task, int cpu)
+{
+	struct psi_group *group;
+	void *iter = NULL;
+
+	while ((group = iterate_groups(task, &iter))) {
+		struct psi_group_cpu *groupc;
+
+		groupc = per_cpu_ptr(group->pcpu, cpu);
+		write_seqcount_begin(&groupc->seq);
+		record_times(groupc, cpu, true);
+		write_seqcount_end(&groupc->seq);
+	}
+}
+
+/**
+ * psi_memstall_enter - mark the beginning of a memory stall section
+ * @flags: flags to handle nested sections
+ *
+ * Marks the calling task as being stalled due to a lack of memory,
+ * such as waiting for a refault or performing reclaim.
+ */
+void psi_memstall_enter(unsigned long *flags)
+{
+	struct rq_flags rf;
+	struct rq *rq;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	*flags = current->flags & PF_MEMSTALL;
+	if (*flags)
+		return;
+	/*
+	 * PF_MEMSTALL setting & accounting needs to be atomic wrt
+	 * changes to the task's scheduling state, otherwise we can
+	 * race with CPU migration.
+	 */
+	rq = this_rq_lock_irq(&rf);
+
+	current->flags |= PF_MEMSTALL;
+	psi_task_change(current, 0, TSK_MEMSTALL);
+
+	rq_unlock_irq(rq, &rf);
+}
+
+/**
+ * psi_memstall_leave - mark the end of a memory stall section
+ * @flags: flags to handle nested memdelay sections
+ *
+ * Marks the calling task as no longer stalled due to lack of memory.
+ */
+void psi_memstall_leave(unsigned long *flags)
+{
+	struct rq_flags rf;
+	struct rq *rq;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	if (*flags)
+		return;
+	/*
+	 * PF_MEMSTALL clearing & accounting needs to be atomic wrt
+	 * changes to the task's scheduling state, otherwise we could
+	 * race with CPU migration.
+	 */
+	rq = this_rq_lock_irq(&rf);
+
+	current->flags &= ~PF_MEMSTALL;
+	psi_task_change(current, TSK_MEMSTALL, 0);
+
+	rq_unlock_irq(rq, &rf);
+}
+
+#ifdef CONFIG_CGROUPS
+int psi_cgroup_alloc(struct cgroup *cgroup)
+{
+	if (static_branch_likely(&psi_disabled))
+		return 0;
+
+	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
+	if (!cgroup->psi.pcpu)
+		return -ENOMEM;
+	group_init(&cgroup->psi);
+	return 0;
+}
+
+void psi_cgroup_free(struct cgroup *cgroup)
+{
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
+	free_percpu(cgroup->psi.pcpu);
+	/* All triggers must be removed by now */
+	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
+}
+
+/**
+ * cgroup_move_task - move task to a different cgroup
+ * @task: the task
+ * @to: the target css_set
+ *
+ * Move task to a new cgroup and safely migrate its associated stall
+ * state between the different groups.
+ *
+ * This function acquires the task's rq lock to lock out concurrent
+ * changes to the task's scheduling state and - in case the task is
+ * running - concurrent changes to its stall state.
+ */
+void cgroup_move_task(struct task_struct *task, struct css_set *to)
+{
+	unsigned int task_flags = 0;
+	struct rq_flags rf;
+	struct rq *rq;
+
+	if (static_branch_likely(&psi_disabled)) {
+		/*
+		 * Lame to do this here, but the scheduler cannot be locked
+		 * from the outside, so we move cgroups from inside sched/.
+		 */
+		rcu_assign_pointer(task->cgroups, to);
+		return;
+	}
+
+	rq = task_rq_lock(task, &rf);
+
+	if (task_on_rq_queued(task))
+		task_flags = TSK_RUNNING;
+	else if (task->in_iowait)
+		task_flags = TSK_IOWAIT;
+
+	if (task->flags & PF_MEMSTALL)
+		task_flags |= TSK_MEMSTALL;
+
+	if (task_flags)
+		psi_task_change(task, task_flags, 0);
+
+	/* See comment above */
+	rcu_assign_pointer(task->cgroups, to);
+
+	if (task_flags)
+		psi_task_change(task, 0, task_flags);
+
+	task_rq_unlock(rq, task, &rf);
+}
+#endif /* CONFIG_CGROUPS */
+
+int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
+{
+	int full;
+	u64 now;
+
+	if (static_branch_likely(&psi_disabled))
+		return -EOPNOTSUPP;
+
+	/* Update averages before reporting them */
+	mutex_lock(&group->avgs_lock);
+	now = sched_clock();
+	collect_percpu_times(group, PSI_AVGS, NULL);
+	if (now >= group->avg_next_update)
+		group->avg_next_update = update_averages(group, now);
+	mutex_unlock(&group->avgs_lock);
+
+	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
+		unsigned long avg[3];
+		u64 total;
+		int w;
+
+		for (w = 0; w < 3; w++)
+			avg[w] = group->avg[res * 2 + full][w];
+		total = div_u64(group->total[PSI_AVGS][res * 2 + full],
+				NSEC_PER_USEC);
+
+		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
+			   full ? "full" : "some",
+			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
+			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
+			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
+			   total);
+	}
+
+	return 0;
+}
+
+static int psi_io_show(struct seq_file *m, void *v)
+{
+	return psi_show(m, &psi_system, PSI_IO);
+}
+
+static int psi_memory_show(struct seq_file *m, void *v)
+{
+	return psi_show(m, &psi_system, PSI_MEM);
+}
+
+static int psi_cpu_show(struct seq_file *m, void *v)
+{
+	return psi_show(m, &psi_system, PSI_CPU);
+}
+
+static int psi_io_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, psi_io_show, NULL);
+}
+
+static int psi_memory_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, psi_memory_show, NULL);
+}
+
+static int psi_cpu_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, psi_cpu_show, NULL);
+}
+
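+/*
+ * Parse a trigger description of the form "some <threshold_us> <window_us>"
+ * or "full <threshold_us> <window_us>". For example, writing
+ * "some 150000 1000000" to /proc/pressure/memory registers a trigger that
+ * fires when memory stall time exceeds 150ms within any 1s window.
+ */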
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+			char *buf, size_t nbytes, enum psi_res res)
+{
+	struct psi_trigger *t;
+	enum psi_states state;
+	u32 threshold_us;
+	u32 window_us;
+
+	if (static_branch_likely(&psi_disabled))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
+		state = PSI_IO_SOME + res * 2;
+	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
+		state = PSI_IO_FULL + res * 2;
+	else
+		return ERR_PTR(-EINVAL);
+
+	if (state >= PSI_NONIDLE)
+		return ERR_PTR(-EINVAL);
+
+	if (window_us < WINDOW_MIN_US ||
+	    window_us > WINDOW_MAX_US)
+		return ERR_PTR(-EINVAL);
+
+	/* Check threshold */
+	if (threshold_us == 0 || threshold_us > window_us)
+		return ERR_PTR(-EINVAL);
+
+	t = kmalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		return ERR_PTR(-ENOMEM);
+
+	t->group = group;
+	t->state = state;
+	t->threshold = threshold_us * NSEC_PER_USEC;
+	t->win.size = window_us * NSEC_PER_USEC;
+	window_reset(&t->win, 0, 0, 0);
+
+	t->event = 0;
+	t->last_event_time = 0;
+	init_waitqueue_head(&t->event_wait);
+	kref_init(&t->refcount);
+
+	mutex_lock(&group->trigger_lock);
+
+	if (!rcu_access_pointer(group->poll_kworker)) {
+		struct sched_param param = {
+			.sched_priority = MAX_RT_PRIO - 1,
+		};
+		struct kthread_worker *kworker;
+
+		kworker = kthread_create_worker(0, "psimon");
+		if (IS_ERR(kworker)) {
+			kfree(t);
+			mutex_unlock(&group->trigger_lock);
+			return ERR_CAST(kworker);
+		}
+		sched_setscheduler(kworker->task, SCHED_FIFO, &param);
+		kthread_init_delayed_work(&group->poll_work,
+				psi_poll_work);
+		rcu_assign_pointer(group->poll_kworker, kworker);
+	}
+
+	list_add(&t->node, &group->triggers);
+	group->poll_min_period = min(group->poll_min_period,
+		div_u64(t->win.size, UPDATES_PER_WINDOW));
+	group->nr_triggers[t->state]++;
+	group->poll_states |= (1 << t->state);
+
+	mutex_unlock(&group->trigger_lock);
+
+	return t;
+}
+
+static void psi_trigger_destroy(struct kref *ref)
+{
+	struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
+	struct psi_group *group = t->group;
+	struct kthread_worker *kworker_to_destroy = NULL;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	/*
+	 * Wake up waiters to stop polling. This can happen if a cgroup is
+	 * deleted from under a polling process.
+	 */
+	wake_up_interruptible(&t->event_wait);
+
+	mutex_lock(&group->trigger_lock);
+
+	if (!list_empty(&t->node)) {
+		struct psi_trigger *tmp;
+		u64 period = ULLONG_MAX;
+
+		list_del(&t->node);
+		group->nr_triggers[t->state]--;
+		if (!group->nr_triggers[t->state])
+			group->poll_states &= ~(1 << t->state);
+		/* reset min update period for the remaining triggers */
+		list_for_each_entry(tmp, &group->triggers, node)
+			period = min(period, div_u64(tmp->win.size,
+					UPDATES_PER_WINDOW));
+		group->poll_min_period = period;
+		/* Destroy poll_kworker when the last trigger is destroyed */
+		if (group->poll_states == 0) {
+			group->polling_until = 0;
+			kworker_to_destroy = rcu_dereference_protected(
+					group->poll_kworker,
+					lockdep_is_held(&group->trigger_lock));
+			rcu_assign_pointer(group->poll_kworker, NULL);
+		}
+	}
+
+	mutex_unlock(&group->trigger_lock);
+
+	/*
+	 * Wait for the RCU read-side critical sections on both *trigger_ptr
+	 * (from psi_trigger_replace()) and poll_kworker to complete before
+	 * destroying the trigger and, optionally, the poll_kworker.
+	 */
+	synchronize_rcu();
+	/*
+	 * Destroy the kworker after releasing trigger_lock to prevent a
+	 * deadlock while waiting for psi_poll_work to acquire trigger_lock.
+	 */
+	if (kworker_to_destroy) {
+		kthread_cancel_delayed_work_sync(&group->poll_work);
+		kthread_destroy_worker(kworker_to_destroy);
+	}
+	kfree(t);
+}
+
+void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
+{
+	struct psi_trigger *old = *trigger_ptr;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	rcu_assign_pointer(*trigger_ptr, new);
+	if (old)
+		kref_put(&old->refcount, psi_trigger_destroy);
+}
+
+__poll_t psi_trigger_poll(void **trigger_ptr,
+				struct file *file, poll_table *wait)
+{
+	__poll_t ret = DEFAULT_POLLMASK;
+	struct psi_trigger *t;
+
+	if (static_branch_likely(&psi_disabled))
+		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
+
+	rcu_read_lock();
+
+	t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
+	if (!t) {
+		rcu_read_unlock();
+		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
+	}
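+	/*
+	 * Pin the trigger so a concurrent psi_trigger_replace() cannot
+	 * free it once we drop the RCU read lock.
+	 */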
+	kref_get(&t->refcount);
+
+	rcu_read_unlock();
+
+	poll_wait(file, &t->event_wait, wait);
+
+	if (cmpxchg(&t->event, 1, 0) == 1)
+		ret |= EPOLLPRI;
+
+	kref_put(&t->refcount, psi_trigger_destroy);
+
+	return ret;
+}
+
+static ssize_t psi_write(struct file *file, const char __user *user_buf,
+			 size_t nbytes, enum psi_res res)
+{
+	char buf[32];
+	size_t buf_size;
+	struct seq_file *seq;
+	struct psi_trigger *new;
+
+	if (static_branch_likely(&psi_disabled))
+		return -EOPNOTSUPP;
+
+	/* An empty write would index buf[-1] below; reject it */
+	if (!nbytes)
+		return -EINVAL;
+
+	buf_size = min(nbytes, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size - 1] = '\0';
+
+	new = psi_trigger_create(&psi_system, buf, nbytes, res);
+	if (IS_ERR(new))
+		return PTR_ERR(new);
+
+	seq = file->private_data;
+	/* Take seq->lock to protect seq->private from concurrent writes */
+	mutex_lock(&seq->lock);
+	psi_trigger_replace(&seq->private, new);
+	mutex_unlock(&seq->lock);
+
+	return nbytes;
+}
+
+static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
+			    size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_IO);
+}
+
+static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
+				size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_MEM);
+}
+
+static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
+			     size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_CPU);
+}
+
+static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
+{
+	struct seq_file *seq = file->private_data;
+
+	return psi_trigger_poll(&seq->private, file, wait);
+}
+
+static int psi_fop_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+
+	psi_trigger_replace(&seq->private, NULL);
+	return single_release(inode, file);
+}
+
+static const struct file_operations psi_io_fops = {
+	.open           = psi_io_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.write          = psi_io_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
+};
+
+static const struct file_operations psi_memory_fops = {
+	.open           = psi_memory_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.write          = psi_memory_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
+};
+
+static const struct file_operations psi_cpu_fops = {
+	.open           = psi_cpu_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.write          = psi_cpu_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
+};
+
+static int __init psi_proc_init(void)
+{
+	proc_mkdir("pressure", NULL);
+	proc_create("pressure/io", 0, NULL, &psi_io_fops);
+	proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
+	proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
+	return 0;
+}
+module_init(psi_proc_init);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b0ed07e..1516804 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -56,6 +56,7 @@
 #include <linux/proc_fs.h>
 #include <linux/prefetch.h>
 #include <linux/profile.h>
+#include <linux/psi.h>
 #include <linux/rcupdate_wait.h>
 #include <linux/security.h>
 #include <linux/stackprotector.h>
@@ -141,17 +142,9 @@
 	unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
 	unsigned int max_possible_freq;
 	bool freq_init_done;
-	int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
-	unsigned int static_cluster_pwr_cost;
-	int notifier_sent;
-	bool wake_up_idle;
 	u64 aggr_grp_load;
 	u64 coloc_boost_load;
 };
-
-extern unsigned int sched_disable_window_stats;
-
-extern struct timer_list sched_grp_timer;
 #endif /* CONFIG_SCHED_WALT */
 
 /* task_struct::on_rq states: */
@@ -393,6 +386,7 @@
 #ifdef CONFIG_CGROUP_SCHED
 
 #include <linux/cgroup.h>
+#include <linux/psi.h>
 
 struct cfs_rq;
 struct rt_rq;
@@ -1102,6 +1096,8 @@
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		raw_cpu_ptr(&runqueues)
 
+extern void update_rq_clock(struct rq *rq);
+
 static inline u64 __rq_clock_broken(struct rq *rq)
 {
 	return READ_ONCE(rq->clock);
@@ -1220,6 +1216,98 @@
 #endif
 }
 
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(rq->lock);
+
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock);
+
+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+}
+
+static inline void
+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irqsave(&rq->lock, rf->flags);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irq(&rq->lock);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(&rq->lock);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_relock(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(&rq->lock);
+	rq_repin_lock(rq, rf);
+}
+
+static inline void
+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
+}
+
+static inline void
+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock_irq(&rq->lock);
+}
+
+static inline void
+rq_unlock(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline struct rq *
+this_rq_lock_irq(struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	local_irq_disable();
+	rq = this_rq();
+	rq_lock(rq, rf);
+	return rq;
+}
+
 #ifdef CONFIG_NUMA
 enum numa_topology_type {
 	NUMA_DIRECT,
@@ -1901,8 +1989,6 @@
 	sched_update_tick_dependency(rq);
 }
 
-extern void update_rq_clock(struct rq *rq);
-
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
 
@@ -1975,14 +2061,10 @@
 	return cpu_rq(cpu)->cpu_capacity_orig;
 }
 
-extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int walt_disabled;
-
 static inline unsigned long task_util(struct task_struct *p)
 {
 #ifdef CONFIG_SCHED_WALT
-	if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
-		return p->ravg.demand_scaled;
+	return p->ravg.demand_scaled;
 #endif
 	return READ_ONCE(p->se.avg.util_avg);
 }
@@ -2031,13 +2113,10 @@
 	unsigned int util;
 
 #ifdef CONFIG_SCHED_WALT
-	if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util)) {
-		u64 walt_cpu_util =
-			cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled;
+	u64 walt_cpu_util =
+		cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled;
 
-		return min_t(unsigned long, walt_cpu_util,
-						capacity_orig_of(cpu));
-	}
+	return min_t(unsigned long, walt_cpu_util, capacity_orig_of(cpu));
 #endif
 
 	cfs_rq = &cpu_rq(cpu)->cfs;
@@ -2062,8 +2141,7 @@
 	unsigned long capacity = capacity_orig_of(cpu);
 
 #ifdef CONFIG_SCHED_WALT
-	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
-		util = cpu_rq(cpu)->cum_window_demand_scaled;
+	util = cpu_rq(cpu)->cum_window_demand_scaled;
 #endif
 	delta += util;
 	if (delta < 0)
@@ -2090,9 +2168,6 @@
 	unsigned long capacity = capacity_orig_of(cpu);
 	int boost;
 
-	if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
-		return cpu_util(cpu);
-
 	boost = per_cpu(sched_load_boost, cpu);
 	util_unboosted = util = freq_policy_load(rq);
 	util = div64_u64(util * (100 + boost),
@@ -2137,7 +2212,6 @@
 }
 
 #define sched_ravg_window TICK_NSEC
-#define sysctl_sched_use_walt_cpu_util 0
 
 #endif /* CONFIG_SCHED_WALT */
 
@@ -2155,86 +2229,6 @@
 
 #endif
 
-struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-	__acquires(rq->lock);
-
-struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock);
-
-static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-}
-
-static inline void
-rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock_irqsave(&rq->lock, rf->flags);
-	rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock_irq(&rq->lock);
-	rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_lock(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock(&rq->lock);
-	rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_relock(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock(&rq->lock);
-	rq_repin_lock(rq, rf);
-}
-
-static inline void
-rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
-}
-
-static inline void
-rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock_irq(&rq->lock);
-}
-
-static inline void
-rq_unlock(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock(&rq->lock);
-}
-
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
@@ -2665,7 +2659,7 @@
 }
 #endif
 
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+#ifdef CONFIG_ENERGY_MODEL
 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
 #else
 #define perf_domain_span(pd) NULL
@@ -2704,26 +2698,12 @@
 	u64 last_update;
 };
 
-extern struct list_head cluster_head;
 extern struct sched_cluster *sched_cluster[NR_CPUS];
 
-#define for_each_sched_cluster(cluster) \
-	list_for_each_entry_rcu(cluster, &cluster_head, list)
-
-#define WINDOW_STATS_RECENT		0
-#define WINDOW_STATS_MAX		1
-#define WINDOW_STATS_MAX_RECENT_AVG	2
-#define WINDOW_STATS_AVG		3
-#define WINDOW_STATS_INVALID_POLICY	4
-
-#define SCHED_UPMIGRATE_MIN_NICE 15
-#define EXITING_TASK_MARKER	0xdeaddead
-
 #define UP_MIGRATION		1
 #define DOWN_MIGRATION		2
 #define IRQLOAD_MIGRATION	3
 
-extern struct mutex policy_mutex;
 extern unsigned int sched_disable_window_stats;
 extern unsigned int max_possible_freq;
 extern unsigned int min_max_freq;
@@ -2731,23 +2711,10 @@
 extern unsigned int min_possible_efficiency;
 extern unsigned int max_capacity;
 extern unsigned int min_capacity;
-extern unsigned int max_load_scale_factor;
 extern unsigned int max_possible_capacity;
 extern unsigned int min_max_possible_capacity;
 extern unsigned int max_power_cost;
 extern unsigned int __read_mostly sched_init_task_load_windows;
-extern unsigned int up_down_migrate_scale_factor;
-extern unsigned int sysctl_sched_restrict_cluster_spill;
-extern unsigned int sched_pred_alert_load;
-extern struct sched_cluster init_cluster;
-extern unsigned int  __read_mostly sched_short_sleep_task_threshold;
-extern unsigned int  __read_mostly sched_long_cpu_selection_threshold;
-extern unsigned int  __read_mostly sched_big_waker_task_load;
-extern unsigned int  __read_mostly sched_small_wakee_task_load;
-extern unsigned int  __read_mostly sched_spill_load;
-extern unsigned int  __read_mostly sched_upmigrate;
-extern unsigned int  __read_mostly sched_downmigrate;
-extern unsigned int  __read_mostly sysctl_sched_spill_nr_run;
 extern unsigned int  __read_mostly sched_load_granule;
 
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
@@ -2780,16 +2747,6 @@
 	return cpu_rq(cpu)->cluster->load_scale_factor;
 }
 
-static inline int cpu_efficiency(int cpu)
-{
-	return cpu_rq(cpu)->cluster->efficiency;
-}
-
-static inline unsigned int cpu_min_freq(int cpu)
-{
-	return cpu_rq(cpu)->cluster->min_freq;
-}
-
 static inline unsigned int cluster_max_freq(struct sched_cluster *cluster)
 {
 	/*
@@ -2869,16 +2826,6 @@
 	return load_scale;
 }
 
-static inline int cpu_max_power_cost(int cpu)
-{
-	return cpu_rq(cpu)->cluster->max_power_cost;
-}
-
-static inline int cpu_min_power_cost(int cpu)
-{
-	return cpu_rq(cpu)->cluster->min_power_cost;
-}
-
 static inline bool hmp_capable(void)
 {
 	return max_possible_capacity != min_max_possible_capacity;
@@ -3232,11 +3179,6 @@
 static inline void clear_reserved(int cpu) { }
 static inline int alloc_related_thread_groups(void) { return 0; }
 
-#define trace_sched_cpu_load(...)
-#define trace_sched_cpu_load_lb(...)
-#define trace_sched_cpu_load_cgroup(...)
-#define trace_sched_cpu_load_wakeup(...)
-
 static inline void walt_fixup_cum_window_demand(struct rq *rq,
 						s64 scaled_delta) { }
 
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 80e2e38..41e6e2d 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012, 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 /*
@@ -173,11 +173,8 @@
 	capacity = capacity_orig_of(cpu);
 
 #ifdef CONFIG_SCHED_WALT
-	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
-		util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
-		util = div64_u64(util,
-				 sched_ravg_window >> SCHED_CAPACITY_SHIFT);
-	}
+	util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
+	util = div64_u64(util, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
 #endif
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 8aea199..aa0de24 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -55,6 +55,92 @@
 # define   schedstat_val_or_zero(var)	0
 #endif /* CONFIG_SCHEDSTATS */
 
+#ifdef CONFIG_PSI
+/*
+ * PSI tracks state that persists across sleeps, such as iowaits and
+ * memory stalls. As a result, it has to distinguish between sleeps,
+ * where a task's runnable state changes, and requeues, where a task
+ * and its state are being moved between CPUs and runqueues.
+ */
+static inline void psi_enqueue(struct task_struct *p, bool wakeup)
+{
+	int clear = 0, set = TSK_RUNNING;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	if (!wakeup || p->sched_psi_wake_requeue) {
+		if (p->flags & PF_MEMSTALL)
+			set |= TSK_MEMSTALL;
+		if (p->sched_psi_wake_requeue)
+			p->sched_psi_wake_requeue = 0;
+	} else {
+		if (p->in_iowait)
+			clear |= TSK_IOWAIT;
+	}
+
+	psi_task_change(p, clear, set);
+}
+
+static inline void psi_dequeue(struct task_struct *p, bool sleep)
+{
+	int clear = TSK_RUNNING, set = 0;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	if (!sleep) {
+		if (p->flags & PF_MEMSTALL)
+			clear |= TSK_MEMSTALL;
+	} else {
+		if (p->in_iowait)
+			set |= TSK_IOWAIT;
+	}
+
+	psi_task_change(p, clear, set);
+}
+
+static inline void psi_ttwu_dequeue(struct task_struct *p)
+{
+	if (static_branch_likely(&psi_disabled))
+		return;
+	/*
+	 * Is the task being migrated during a wakeup? Make sure to
+	 * deregister its sleep-persistent psi states from the old
+	 * queue, and let psi_enqueue() know it has to requeue.
+	 */
+	if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) {
+		struct rq_flags rf;
+		struct rq *rq;
+		int clear = 0;
+
+		if (p->in_iowait)
+			clear |= TSK_IOWAIT;
+		if (p->flags & PF_MEMSTALL)
+			clear |= TSK_MEMSTALL;
+
+		rq = __task_rq_lock(p, &rf);
+		psi_task_change(p, clear, 0);
+		p->sched_psi_wake_requeue = 1;
+		__task_rq_unlock(rq, &rf);
+	}
+}
+
+static inline void psi_task_tick(struct rq *rq)
+{
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	if (unlikely(rq->curr->flags & PF_MEMSTALL))
+		psi_memstall_tick(rq->curr, cpu_of(rq));
+}
+#else /* CONFIG_PSI */
+static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
+static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
+static inline void psi_ttwu_dequeue(struct task_struct *p) {}
+static inline void psi_task_tick(struct rq *rq) {}
+#endif /* CONFIG_PSI */
+
 #ifdef CONFIG_SCHED_INFO
 static inline void sched_info_reset_dequeued(struct task_struct *t)
 {
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 74a2e9c..9c392dd 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -202,35 +202,7 @@
 }
 
 DEFINE_STATIC_KEY_FALSE(sched_energy_present);
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-unsigned int sysctl_sched_energy_aware = 1;
-DEFINE_MUTEX(sched_energy_mutex);
-bool sched_energy_update;
-
-#ifdef CONFIG_PROC_SYSCTL
-int sched_energy_aware_handler(struct ctl_table *table, int write,
-			 void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	int ret, state;
-
-	if (write && !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (!ret && write) {
-		state = static_branch_unlikely(&sched_energy_present);
-		if (state != sysctl_sched_energy_aware) {
-			mutex_lock(&sched_energy_mutex);
-			sched_energy_update = 1;
-			rebuild_sched_domains();
-			sched_energy_update = 0;
-			mutex_unlock(&sched_energy_mutex);
-		}
-	}
-
-	return ret;
-}
-#endif
+#ifdef CONFIG_ENERGY_MODEL
 
 static void free_pd(struct perf_domain *pd)
 {
@@ -279,7 +251,7 @@
 	if (!sched_debug() || !pd)
 		return;
 
-	printk(KERN_DEBUG "root_domain %*pbl: ", cpumask_pr_args(cpu_map));
+	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
 
 	while (pd) {
 		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }",
@@ -300,29 +272,13 @@
 	free_pd(pd);
 }
 
-static void sched_energy_start(int ndoms_new, cpumask_var_t doms_new[])
+static void sched_energy_set(bool has_eas)
 {
-	/*
-	 * The conditions for EAS to start are checked during the creation of
-	 * root domains. If one of them meets all conditions, it will have a
-	 * non-null list of performance domains.
-	 */
-	while (ndoms_new) {
-		if (cpu_rq(cpumask_first(doms_new[ndoms_new - 1]))->rd->pd)
-			goto enable;
-		ndoms_new--;
-	}
-
-	if (static_branch_unlikely(&sched_energy_present)) {
+	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
 		if (sched_debug())
 			pr_info("%s: stopping EAS\n", __func__);
 		static_branch_disable_cpuslocked(&sched_energy_present);
-	}
-
-	return;
-
-enable:
-	if (!static_branch_unlikely(&sched_energy_present)) {
+	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
 		if (sched_debug())
 			pr_info("%s: starting EAS\n", __func__);
 		static_branch_enable_cpuslocked(&sched_energy_present);
@@ -334,7 +290,6 @@
  *    1. an Energy Model (EM) is available;
  *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy.
  *    3. the EM complexity is low enough to keep scheduling overheads low;
- *    4. schedutil is driving the frequency of all CPUs of the rd;
  *
  * The complexity of the Energy Model is defined as:
  *
@@ -354,18 +309,12 @@
  */
 #define EM_MAX_COMPLEXITY 2048
 
-extern struct cpufreq_governor schedutil_gov;
-static void build_perf_domains(const struct cpumask *cpu_map)
+static bool build_perf_domains(const struct cpumask *cpu_map)
 {
 	int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
 	struct perf_domain *pd = NULL, *tmp;
 	int cpu = cpumask_first(cpu_map);
 	struct root_domain *rd = cpu_rq(cpu)->rd;
-	struct cpufreq_policy *policy;
-	struct cpufreq_governor *gov;
-
-	if (!sysctl_sched_energy_aware)
-		goto free;
 
 	/* EAS is enabled for asymmetric CPU capacity topologies. */
 	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
@@ -381,19 +330,6 @@
 		if (find_pd(pd, i))
 			continue;
 
-		/* Do not attempt EAS if schedutil is not being used. */
-		policy = cpufreq_cpu_get(i);
-		if (!policy)
-			goto free;
-		gov = policy->governor;
-		cpufreq_cpu_put(policy);
-		if (gov != &schedutil_gov) {
-			if (rd->pd)
-				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
-						cpumask_pr_args(cpu_map));
-			goto free;
-		}
-
 		/* Create the new pd and add it to the local list. */
 		tmp = pd_init(i);
 		if (!tmp)
@@ -424,7 +360,7 @@
 	if (tmp)
 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
 
-	return;
+	return !!pd;
 
 free:
 	free_pd(pd);
@@ -432,10 +368,12 @@
 	rcu_assign_pointer(rd->pd, NULL);
 	if (tmp)
 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
+
+	return false;
 }
 #else
 static void free_pd(struct perf_domain *pd) { }
-#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL*/
+#endif /* CONFIG_ENERGY_MODEL */
 
 static void free_rootdomain(struct rcu_head *rcu)
 {
@@ -2198,6 +2136,7 @@
 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			     struct sched_domain_attr *dattr_new)
 {
+	bool __maybe_unused has_eas = false;
 	int i, j, n;
 	int new_topology;
 
@@ -2256,20 +2195,22 @@
 		;
 	}
 
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+#ifdef CONFIG_ENERGY_MODEL
 	/* Build perf. domains: */
 	for (i = 0; i < ndoms_new; i++) {
-		for (j = 0; j < n && !sched_energy_update; j++) {
+		for (j = 0; j < n; j++) {
 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
-			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd)
+			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
+				has_eas = true;
 				goto match3;
+			}
 		}
 		/* No match - add perf. domains for a new rd */
-		build_perf_domains(doms_new[i]);
+		has_eas |= build_perf_domains(doms_new[i]);
 match3:
 		;
 	}
-	sched_energy_start(ndoms_new, doms_new);
+	sched_energy_set(has_eas);
 #endif
 
 	/* Remember the new sched domains: */
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index d5bdf1b..b731208 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -242,9 +242,15 @@
 
 bool task_sched_boost(struct task_struct *p)
 {
-	struct schedtune *st = task_schedtune(p);
+	struct schedtune *st;
+	bool sched_boost_enabled;
 
-	return st->sched_boost_enabled;
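+	/*
+	 * task_schedtune() dereferences the task's css; hold the RCU
+	 * read lock so the schedtune group cannot be freed under us.
+	 */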
+	rcu_read_lock();
+	st = task_schedtune(p);
+	sched_boost_enabled = st->sched_boost_enabled;
+	rcu_read_unlock();
+
+	return sched_boost_enabled;
 }
 
 static u64
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 7554136..f015033 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -73,10 +73,16 @@
 				     unsigned long *flags)
 {
 	int cpu;
+	int level = 0;
 
 	local_irq_save(*flags);
-	for_each_cpu(cpu, cpus)
-		raw_spin_lock(&cpu_rq(cpu)->lock);
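+	/*
+	 * Take the first rq lock normally and the rest with increasing
+	 * lockdep subclasses, so holding several rq locks at once does
+	 * not trigger a false lockdep report.
+	 */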
+	for_each_cpu(cpu, cpus) {
+		if (level == 0)
+			raw_spin_lock(&cpu_rq(cpu)->lock);
+		else
+			raw_spin_lock_nested(&cpu_rq(cpu)->lock, level);
+		level++;
+	}
 }
 
 static void release_rq_locks_irqrestore(const cpumask_t *cpus,
@@ -103,9 +109,6 @@
 /* Max window size (in ns) = 1s */
 #define MAX_SCHED_RAVG_WINDOW 1000000000
 
-/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
-unsigned int __read_mostly walt_disabled = 0;
-
 __read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
 
 unsigned int sysctl_sched_walt_rotate_big_tasks;
@@ -2131,9 +2134,6 @@
 	cluster->max_mitigated_freq	=	UINT_MAX;
 	cluster->min_freq		=	1;
 	cluster->max_possible_freq	=	1;
-	cluster->dstate			=	0;
-	cluster->dstate_wakeup_energy	=	0;
-	cluster->dstate_wakeup_latency	=	0;
 	cluster->freq_init_done		=	false;
 
 	raw_spin_lock_init(&cluster->load_lock);
@@ -2145,7 +2145,6 @@
 	if (cluster->efficiency < min_possible_efficiency)
 		min_possible_efficiency = cluster->efficiency;
 
-	cluster->notifier_sent = 0;
 	return cluster;
 }
 
@@ -2317,12 +2316,7 @@
 	.max_mitigated_freq	=	UINT_MAX,
 	.min_freq		=	1,
 	.max_possible_freq	=	1,
-	.dstate			=	0,
-	.dstate_wakeup_energy	=	0,
-	.dstate_wakeup_latency	=	0,
 	.exec_scale_factor	=	1024,
-	.notifier_sent		=	0,
-	.wake_up_idle		=	0,
 	.aggr_grp_load		=	0,
 	.coloc_boost_load	=	0,
 };
@@ -2638,8 +2632,6 @@
 	return 0;
 }
 
-DEFINE_MUTEX(policy_mutex);
-
 #define pct_to_real(tunable)	\
 		(div64_u64((u64)tunable * (u64)max_task_load(), 100))
 
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 44d1277..fe971da 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __WALT_H
@@ -31,8 +31,6 @@
 extern unsigned int max_possible_efficiency;
 extern unsigned int min_possible_efficiency;
 extern unsigned int max_possible_freq;
-extern unsigned int sched_major_task_runtime;
-extern unsigned int __read_mostly sched_init_task_load_windows;
 extern unsigned int __read_mostly sched_load_granule;
 
 extern struct mutex cluster_lock;
diff --git a/kernel/signal.c b/kernel/signal.c
index 03f4fa9..e3bb6c1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -683,6 +683,48 @@
 	return signr;
 }
 
+static int dequeue_synchronous_signal(siginfo_t *info)
+{
+	struct task_struct *tsk = current;
+	struct sigpending *pending = &tsk->pending;
+	struct sigqueue *q, *sync = NULL;
+
+	/*
+	 * Might a synchronous signal be in the queue?
+	 */
+	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
+		return 0;
+
+	/*
+	 * Return the first synchronous signal in the queue.
+	 */
+	list_for_each_entry(q, &pending->list, list) {
+		/* Synchronous signals have a postive si_code */
+		if ((q->info.si_code > SI_USER) &&
+		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
+			sync = q;
+			goto next;
+		}
+	}
+	return 0;
+next:
+	/*
+	 * Check if there is another siginfo for the same signal.
+	 */
+	list_for_each_entry_continue(q, &pending->list, list) {
+		if (q->info.si_signo == sync->info.si_signo)
+			goto still_pending;
+	}
+
+	sigdelset(&pending->signal, sync->info.si_signo);
+	recalc_sigpending();
+still_pending:
+	list_del_init(&sync->list);
+	copy_siginfo(info, &sync->info);
+	__sigqueue_free(sync);
+	return info->si_signo;
+}
+
 /*
  * Tell a process that it has a new active signal..
  *
@@ -2395,6 +2437,14 @@
 		goto relock;
 	}
 
+	/* Has this task already been marked for death? */
+	if (signal_group_exit(signal)) {
+		ksig->info.si_signo = signr = SIGKILL;
+		sigdelset(&current->pending.signal, SIGKILL);
+		recalc_sigpending();
+		goto fatal;
+	}
+
 	for (;;) {
 		struct k_sigaction *ka;
 
@@ -2408,7 +2458,15 @@
 			goto relock;
 		}
 
-		signr = dequeue_signal(current, &current->blocked, &ksig->info);
+		/*
+		 * Signals generated by the execution of an instruction
+		 * need to be delivered before any other pending signals
+		 * so that the instruction pointer in the signal stack
+		 * frame points to the faulting instruction.
+		 */
+		signr = dequeue_synchronous_signal(&ksig->info);
+		if (!signr)
+			signr = dequeue_signal(current, &current->blocked, &ksig->info);
 
 		if (!signr)
 			break; /* will return 0 */
@@ -2490,6 +2548,7 @@
 			continue;
 		}
 
+	fatal:
 		spin_unlock_irq(&sighand->siglock);
 
 		/*
diff --git a/kernel/smp.c b/kernel/smp.c
index bfde5a3..6e5cb7e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/sched/idle.h>
 #include <linux/hypervisor.h>
+#include <linux/suspend.h>
 
 #include "smpboot.h"
 
@@ -615,8 +616,6 @@
 		num_nodes, (num_nodes > 1 ? "s" : ""),
 		num_cpus,  (num_cpus  > 1 ? "s" : ""));
 
-	/* Final decision about SMT support */
-	cpu_smt_check_topology();
 	/* Any cleanup work */
 	smp_cpus_done(setup_max_cpus);
 }
@@ -771,8 +770,9 @@
 	for_each_online_cpu(cpu) {
 		if (cpu == smp_processor_id())
 			continue;
-
-		wake_up_if_idle(cpu);
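+		/*
+		 * Skip isolated CPUs unless we are entering s2idle, in
+		 * which case every CPU must be woken.
+		 */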
+		if (s2idle_state == S2IDLE_STATE_ENTER ||
+		    !cpu_isolated(cpu))
+			wake_up_if_idle(cpu);
 	}
 	preempt_enable();
 }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index eaa6c18..2b331b5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -599,17 +599,6 @@
 		.extra1		= &one,
 	},
 #endif
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-	{
-		.procname	= "sched_energy_aware",
-		.data		= &sysctl_sched_energy_aware,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_energy_aware_handler,
-		.extra1		= &zero,
-		.extra2		= &one,
-	},
-#endif
 	{
 		.procname	= "sched_lib_name",
 		.data		= sched_lib_name,
@@ -2993,6 +2982,8 @@
 			bool neg;
 
 			left -= proc_skip_spaces(&p);
+			if (!left)
+				break;
 
 			err = proc_get_long(&p, &left, &val, &neg,
 					     proc_wspace_sep,
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 901aee2..4d506f6 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -499,6 +499,12 @@
 	struct hrtimer_clock_base *base;
 	ktime_t expires;
 
+	/*
+	 * Skip initializing cpu_base->next_timer to NULL, as next_timer is
+	 * not updated in the loop below when the timer is being excluded.
+	 */
+	if (!exclude)
+		cpu_base->next_timer = NULL;
 	for_each_active_base(base, cpu_base, active) {
 		struct timerqueue_node *next;
 		struct hrtimer *timer;
@@ -1893,12 +1899,6 @@
 	unsigned long flags;
 	int i;
 
-	/*
-	 * this BH disable ensures that raise_softirq_irqoff() does
-	 * not wakeup ksoftirqd (and acquire the pi-lock) while
-	 * holding the cpu_base lock
-	 */
-	local_bh_disable();
 	local_irq_save(flags);
 	old_base = &per_cpu(hrtimer_bases, scpu);
 	new_base = this_cpu_ptr(&hrtimer_bases);
@@ -1926,7 +1926,6 @@
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
 	local_irq_restore(flags);
-	local_bh_enable();
 }
 
 int hrtimers_dead_cpu(unsigned int scpu)
@@ -1934,7 +1933,14 @@
 	BUG_ON(cpu_online(scpu));
 	tick_cancel_sched_timer(scpu);
 
+	/*
+	 * This BH disable ensures that raise_softirq_irqoff() does
+	 * not wake up ksoftirqd (and acquire the pi-lock) while
+	 * holding the cpu_base lock.
+	 */
+	local_bh_disable();
 	__migrate_hrtimers(scpu, true);
+	local_bh_enable();
 	return 0;
 }
 
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index ce32cf7..76801b9 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -685,6 +685,7 @@
 	 * set up the signal and overrun bookkeeping.
 	 */
 	timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
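+	/* Mirror the interval into the generic posix-timer field. */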
+	timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
 
 	/*
 	 * This acts as a modification timestamp for the timer,
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 4b9127e..5a01c4f 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -289,9 +289,6 @@
 {
 	struct hrtimer *timer = &timr->it.real.timer;
 
-	if (!timr->it_interval)
-		return;
-
 	timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
 					    timr->it_interval);
 	hrtimer_restart(timer);
@@ -317,7 +314,7 @@
 	if (!timr)
 		return;
 
-	if (timr->it_requeue_pending == info->si_sys_private) {
+	if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
 		timr->kclock->timer_rearm(timr);
 
 		timr->it_active = 1;
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 5446510..145e0c7 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -278,7 +278,7 @@
 	return cd.read_data[seq & 1].epoch_cyc;
 }
 
-static int sched_clock_suspend(void)
+int sched_clock_suspend(void)
 {
 	struct clock_read_data *rd = &cd.read_data[0];
 
@@ -294,7 +294,7 @@
 	return 0;
 }
 
-static void sched_clock_resume(void)
+void sched_clock_resume(void)
 {
 	struct clock_read_data *rd = &cd.read_data[0];
 
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 14de372..de7ebe5 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
+#include <linux/sched_clock.h>
 #include <linux/module.h>
 #include <trace/events/power.h>
 
@@ -491,6 +492,7 @@
 		trace_suspend_resume(TPS("timekeeping_freeze"),
 				     smp_processor_id(), true);
 		system_state = SYSTEM_SUSPEND;
+		sched_clock_suspend();
 		timekeeping_suspend();
 	} else {
 		tick_suspend_local();
@@ -515,6 +517,7 @@
 	if (tick_freeze_depth == num_online_cpus()) {
 		timekeeping_resume();
 		system_state = SYSTEM_RUNNING;
+		sched_clock_resume();
 		trace_suspend_resume(TPS("timekeeping_freeze"),
 				     smp_processor_id(), false);
 	} else {
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6a492f7..778ce93 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -29,6 +29,7 @@
 #include <linux/timer.h>
 #include <linux/context_tracking.h>
 #include <linux/mm.h>
+#include <linux/rq_stats.h>
 
 #include <asm/irq_regs.h>
 
@@ -36,6 +37,10 @@
 
 #include <trace/events/timer.h>
 
+struct rq_data rq_info;
+struct workqueue_struct *rq_wq;
+spinlock_t rq_lock;
+
 /*
  * Per-CPU nohz control structure
  */
@@ -1259,6 +1264,17 @@
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
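+/*
+ * Queue the deferred rq-stats work at most once every
+ * rq_info.def_timer_jiffies, driven from the timekeeping CPU's tick.
+ */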
+static void wakeup_user(void)
+{
+	unsigned long jiffy_gap;
+
+	jiffy_gap = jiffies - rq_info.def_timer_last_jiffy;
+	if (jiffy_gap >= rq_info.def_timer_jiffies) {
+		rq_info.def_timer_last_jiffy = jiffies;
+		queue_work(rq_wq, &rq_info.def_timer_work);
+	}
+}
+
 /*
  * We rearm the timer until we get disabled by the idle code.
  * Called with interrupts disabled.
@@ -1276,8 +1292,16 @@
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs)
+	if (regs) {
 		tick_sched_handle(ts, regs);
+		if (rq_info.init == 1 &&
+				tick_do_timer_cpu == smp_processor_id()) {
+			/*
+			 * wake up user if needed
+			 */
+			wakeup_user();
+		}
+	}
 	else
 		ts->next_tick = 0;
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f3b22f4..7846ce2 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -50,7 +50,9 @@
 static struct {
 	seqcount_t		seq;
 	struct timekeeper	timekeeper;
-} tk_core ____cacheline_aligned;
+} tk_core ____cacheline_aligned = {
+	.seq = SEQCNT_ZERO(tk_core.seq),
+};
 
 static DEFINE_RAW_SPINLOCK(timekeeper_lock);
 static struct timekeeper shadow_timekeeper;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9864a35..6c28d51 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1158,22 +1158,12 @@
 
 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
 {
-	int err;
-
-	mutex_lock(&bpf_event_mutex);
-	err = __bpf_probe_register(btp, prog);
-	mutex_unlock(&bpf_event_mutex);
-	return err;
+	return __bpf_probe_register(btp, prog);
 }
 
 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
 {
-	int err;
-
-	mutex_lock(&bpf_event_mutex);
-	err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
-	mutex_unlock(&bpf_event_mutex);
-	return err;
+	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
 }
 
 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c98179f..c2ed6a7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3386,6 +3386,8 @@
 	const char tgid_space[] = "          ";
 	const char space[] = "  ";
 
+	print_event_info(buf, m);
+
 	seq_printf(m, "#                          %s  _-----=> irqs-off\n",
 		   tgid ? tgid_space : space);
 	seq_printf(m, "#                          %s / _----=> need-resched\n",
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 5574e86..5a1c64a 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1301,7 +1301,7 @@
 		/* go past the last quote */
 		i++;
 
-	} else if (isdigit(str[i])) {
+	} else if (isdigit(str[i]) || str[i] == '-') {
 
 		/* Make sure the field is not a string */
 		if (is_string_field(field)) {
@@ -1314,6 +1314,9 @@
 			goto err_free;
 		}
 
+		if (str[i] == '-')
+			i++;
+
 		/* We allow 0xDEADBEEF */
 		while (isalnum(str[i]))
 			i++;
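Allowing a leading '-' means numeric predicates can now take negative literals. A small userspace model of the scan step, showing why the sign must be consumed before the isalnum() loop (which still accepts hex such as 0xDEADBEEF):

#include <ctype.h>
#include <stdio.h>

static int scan_number(const char *str)
{
	int i = 0;

	if (!(isdigit(str[i]) || str[i] == '-'))
		return -1;		/* not a number */
	if (str[i] == '-')
		i++;			/* skip the sign, as the patch does */
	while (isalnum(str[i]))
		i++;
	return i;			/* characters consumed */
}

int main(void)
{
	printf("%d\n", scan_number("-123"));	/* 4: now accepted */
	printf("%d\n", scan_number("0xff"));	/* 4: hex still works */
	return 0;
}

With this, a filter string such as "common_pid == -1" parses instead of erroring out.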
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index f6c10b6..d681cf3 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -629,13 +629,17 @@
 {
 	unsigned int pc = preempt_count();
 #ifdef CONFIG_PREEMPTIRQ_EVENTS
-	struct irqsoff_store *is = &per_cpu(the_irqsoff,
-						raw_smp_processor_id());
-	u64 delta = sched_clock() - is->ts;
+	struct irqsoff_store *is;
+	u64 delta;
+
+	lockdep_off();
+	is = &per_cpu(the_irqsoff, raw_smp_processor_id());
+	delta = sched_clock() - is->ts;
 
 	if (delta > sysctl_irqsoff_tracing_threshold_ns)
 		trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
 						is->caddr[2], is->caddr[3]);
+	lockdep_on();
 #endif /* CONFIG_PREEMPTIRQ_EVENTS */
 
 	if (!preempt_trace(pc) && irq_trace())
@@ -646,14 +650,16 @@
 {
 	unsigned int pc = preempt_count();
 #ifdef CONFIG_PREEMPTIRQ_EVENTS
-	struct irqsoff_store *is = &per_cpu(the_irqsoff,
-						raw_smp_processor_id());
+	struct irqsoff_store *is;
 
+	lockdep_off();
+	is = &per_cpu(the_irqsoff, raw_smp_processor_id());
 	is->ts = sched_clock();
 	is->caddr[0] = CALLER_ADDR0;
 	is->caddr[1] = CALLER_ADDR1;
 	is->caddr[2] = CALLER_ADDR2;
 	is->caddr[3] = CALLER_ADDR3;
+	lockdep_on();
 #endif /* CONFIG_PREEMPTIRQ_EVENTS */
 
 	if (!preempt_trace(pc) && irq_trace())
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 3061901..1df8e38 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -5,7 +5,7 @@
  * Copyright (C) IBM Corporation, 2010-2012
  * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
  */
-#define pr_fmt(fmt)	"trace_kprobe: " fmt
+#define pr_fmt(fmt)	"trace_uprobe: " fmt
 
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -141,7 +141,14 @@
 
 	ret = strncpy_from_user(dst, src, maxlen);
 	if (ret == maxlen)
-		dst[--ret] = '\0';
+		dst[ret - 1] = '\0';
+	else if (ret >= 0)
+		/*
+		 * Include the terminating null byte. In this case it
+		 * was copied by strncpy_from_user but not accounted
+		 * for in ret.
+		 */
+		ret++;
 
 	if (ret < 0) {	/* Failed to fetch string */
 		((u8 *)get_rloc_data(dest))[0] = '\0';
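The fix makes the returned length include the terminating NUL, which strncpy_from_user() copies but does not count; the truncation branch already accounted for it. A userspace walk-through of the accounting, with a stand-in for strncpy_from_user():

#include <stdio.h>

/* userspace stand-in: returns the string length on success (NUL copied
 * but not counted), or maxlen if the buffer was exhausted with no NUL
 */
static long copy_string(char *dst, const char *src, long maxlen)
{
	long i;

	for (i = 0; i < maxlen; i++) {
		dst[i] = src[i];
		if (!src[i])
			return i;
	}
	return maxlen;
}

static long fetch(char *dst, const char *src, long maxlen)
{
	long ret = copy_string(dst, src, maxlen);

	if (ret == maxlen)
		dst[ret - 1] = '\0';	/* truncated: force termination */
	else if (ret >= 0)
		ret++;			/* count the copied NUL byte */
	return ret;
}

int main(void)
{
	char buf[8];

	printf("%ld\n", fetch(buf, "abc", sizeof(buf)));	/* 4 */
	printf("%ld\n", fetch(buf, "abcdefgh", sizeof(buf)));	/* 8 */
	return 0;
}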
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 9699e3e9..dff0e01 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -512,14 +512,12 @@
 
 void watchdog_disable(unsigned int cpu)
 {
-	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
-	unsigned int *enabled = this_cpu_ptr(&watchdog_en);
+	struct hrtimer *hrtimer = per_cpu_ptr(&watchdog_hrtimer, cpu);
+	unsigned int *enabled = per_cpu_ptr(&watchdog_en, cpu);
 
 	if (!*enabled)
 		return;
 
-	WARN_ON_ONCE(cpu != smp_processor_id());
-
 	/*
 	 * Disable the perf event first. That prevents that a large delay
 	 * between disabling the timer and disabling the perf event causes
@@ -527,7 +525,7 @@
 	 */
 	watchdog_nmi_disable(cpu);
 	hrtimer_cancel(hrtimer);
-	wait_for_completion(this_cpu_ptr(&softlockup_completion));
+	wait_for_completion(per_cpu_ptr(&softlockup_completion, cpu));
 
 	/*
 	 * No need for barrier here since disabling the watchdog is
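The switch from this_cpu_ptr() to per_cpu_ptr(..., cpu) is what lets watchdog_disable() act on behalf of another CPU, which is why the WARN_ON_ONCE cross-check can go. A userspace analogy of the two accessors, with per-CPU data modeled as an array:

#include <stdio.h>

#define NR_CPUS 4

static unsigned int watchdog_en[NR_CPUS];	/* one instance per CPU */
static int current_cpu;				/* smp_processor_id() stand-in */

static unsigned int *this_cpu(unsigned int *base)
{
	return &base[current_cpu];	/* only the executing CPU's copy */
}

static unsigned int *per_cpu(unsigned int *base, int cpu)
{
	return &base[cpu];		/* any CPU's copy */
}

int main(void)
{
	*per_cpu(watchdog_en, 2) = 1;	/* works even though we run on CPU 0 */
	printf("this_cpu sees: %u\n", *this_cpu(watchdog_en));		/* 0 */
	printf("per_cpu(2) sees: %u\n", *per_cpu(watchdog_en, 2));	/* 1 */
	return 0;
}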
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3c76867..7d8ae47 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -912,6 +912,36 @@
 }
 
 /**
+ * wq_worker_last_func - retrieve worker's last work function
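+ * @task: Task to retrieve last work function of.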
+ *
+ * Determine the last function a worker executed. This is called from
+ * the scheduler to get a worker's last known identity.
+ *
+ * CONTEXT:
+ * spin_lock_irq(rq->lock)
+ *
+ * This function is called during schedule() when a kworker is going
+ * to sleep. It's used by psi to identify aggregation workers during
+ * dequeuing, to allow periodic aggregation to shut off when that
+ * worker is the last task in the system or cgroup to go to sleep.
+ *
+ * As this function doesn't involve any workqueue-related locking, it
+ * only returns stable values when called from inside the scheduler's
+ * queuing and dequeuing paths, when @task, which must be a kworker,
+ * is guaranteed not to be processing any work.
+ *
+ * Return:
+ * The last work function %current executed as a worker, NULL if it
+ * hasn't executed any work yet.
+ */
+work_func_t wq_worker_last_func(struct task_struct *task)
+{
+	struct worker *worker = kthread_data(task);
+
+	return worker->last_func;
+}
+
+/**
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
  * @flags: flags to set
@@ -2192,6 +2222,9 @@
 	if (unlikely(cpu_intensive))
 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
+	/* tag the worker for identification in schedule() */
+	worker->last_func = worker->current_func;
+
 	/* we're done with it, release */
 	hash_del(&worker->hentry);
 	worker->current_work = NULL;
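A sketch of the intended consumer, per the comment above: from the scheduler's dequeue path, psi can ask whether a sleeping kworker was the one that last ran its aggregation work. Illustrative only; the real call site lives in the psi code, and aggregation_fn stands for whatever work function the caller wants to recognize:

/* valid only from the scheduler's queuing/dequeuing paths, where a
 * kworker is guaranteed not to be executing a work item
 */
static bool last_ran(struct task_struct *task, work_func_t aggregation_fn)
{
	return (task->flags & PF_WQ_WORKER) &&
	       wq_worker_last_func(task) == aggregation_fn;
}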
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 66fbb5a..cb68b03 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -53,6 +53,9 @@
 
 	/* used only by rescuers to point to the target workqueue */
 	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
+
+	/* used by the scheduler to determine a worker's last known identity */
+	work_func_t		last_func;
 };
 
 /**
@@ -67,9 +70,10 @@
 
 /*
  * Scheduler hooks for concurrency managed workqueue.  Only to be used from
- * sched/core.c and workqueue.c.
+ * sched/ and workqueue.c.
  */
 void wq_worker_waking_up(struct task_struct *task, int cpu);
 struct task_struct *wq_worker_sleeping(struct task_struct *task);
+work_func_t wq_worker_last_func(struct task_struct *task);
 
 #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3eba254..2cf6663 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1114,7 +1114,6 @@
 	select DEBUG_MUTEXES
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
-	select DEBUG_WW_MUTEX_SLOWPATH
 	select DEBUG_LOCK_ALLOC
 	select TRACE_IRQFLAGS
 	default n
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 14436f4..30e0f97 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -52,7 +52,7 @@
 	if (x <= ULONG_MAX)
 		return int_sqrt((unsigned long) x);
 
-	m = 1ULL << (fls64(x) & ~1ULL);
+	m = 1ULL << ((fls64(x) - 1) & ~1ULL);
 	while (m != 0) {
 		b = y + m;
 		y >>= 1;
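The old expression could overshoot: the starting bit m must be the largest power of two with an even exponent not above x, i.e. an even exponent no larger than fls64(x) - 1. Without the - 1, inputs like 8 start one power of four too high, and anything with bit 63 set shifts by 64, which is undefined. A quick check of the exponents only (avoiding the undefined shift):

#include <stdio.h>

/* userspace stand-in for the kernel's fls64(): 1-based index of the
 * most significant set bit (fls64(1) == 1, fls64(8) == 4)
 */
static int fls64(unsigned long long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long long x = 8;

	printf("x=8:    old %d, new %d\n",
	       fls64(x) & ~1, (fls64(x) - 1) & ~1);	/* 4 (too high), 2 */
	x = 1ULL << 63;
	printf("x=2^63: old %d, new %d\n",
	       fls64(x) & ~1, (fls64(x) - 1) & ~1);	/* 64 (UB shift), 62 */
	return 0;
}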
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 2f8b61d..7ed43ea 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -18,6 +18,21 @@
 
 ifeq ($(CONFIG_ALTIVEC),y)
 altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
+
+ifdef CONFIG_CC_IS_CLANG
+# clang ppc port does not yet support -maltivec when -msoft-float is
+# enabled. A future release of clang will resolve this
+# https://bugs.llvm.org/show_bug.cgi?id=31177
+CFLAGS_REMOVE_altivec1.o  += -msoft-float
+CFLAGS_REMOVE_altivec2.o  += -msoft-float
+CFLAGS_REMOVE_altivec4.o  += -msoft-float
+CFLAGS_REMOVE_altivec8.o  += -msoft-float
+CFLAGS_REMOVE_vpermxor1.o += -msoft-float
+CFLAGS_REMOVE_vpermxor2.o += -msoft-float
+CFLAGS_REMOVE_vpermxor4.o += -msoft-float
+CFLAGS_REMOVE_vpermxor8.o += -msoft-float
+endif
 endif
 
 # The GCC option -ffreestanding is required in order to compile code containing
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 11f2ae0..6aabb60 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -144,9 +144,13 @@
 
 	WARN_ON(s->size == 0);
 
+	/* Add 1 to len for the trailing null byte which must be there */
+	len += 1;
+
 	if (seq_buf_can_fit(s, len)) {
 		memcpy(s->buffer + s->len, str, len);
-		s->len += len;
+		/* Don't count the trailing null byte against the capacity */
+		s->len += len - 1;
 		return 0;
 	}
 	seq_buf_set_overflow(s);
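After the change, seq_buf_puts() only succeeds if the string plus its NUL fits, but the NUL is not charged to s->len, so the next write overwrites it and the buffer stays terminated throughout. A minimal userspace model of the fixed accounting:

#include <stdio.h>
#include <string.h>

struct seq_buf { char buffer[8]; size_t len, size; };

static void puts_model(struct seq_buf *s, const char *str)
{
	size_t len = strlen(str) + 1;		/* include the trailing NUL */

	if (s->len + len <= s->size) {		/* seq_buf_can_fit() */
		memcpy(s->buffer + s->len, str, len);
		s->len += len - 1;		/* NUL not counted */
	}
}

int main(void)
{
	struct seq_buf s = { .len = 0, .size = sizeof(s.buffer) };

	puts_model(&s, "abc");
	puts_model(&s, "de");	/* lands on top of the previous NUL */
	printf("\"%s\" len=%zu\n", s.buffer, s.len);	/* "abcde" len=5 */
	return 0;
}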
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
index d5a06ad..bf864c7 100644
--- a/lib/test_debug_virtual.c
+++ b/lib/test_debug_virtual.c
@@ -5,6 +5,7 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/sizes.h>
+#include <linux/io.h>
 
 #include <asm/page.h>
 #ifdef CONFIG_MIPS
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index d82d022..9cf7762 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -632,7 +632,7 @@
 	config->test_driver = NULL;
 
 	kfree_const(config->test_fs);
-	config->test_driver = NULL;
+	config->test_fs = NULL;
 }
 
 static void kmod_config_free(struct kmod_test_device *test_dev)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 82ac39c..aecc099 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -541,38 +541,45 @@
 static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
 				  int cnt, bool slow)
 {
-	struct rhltable rhlt;
+	struct rhltable *rhlt;
 	unsigned int i, ret;
 	const char *key;
 	int err = 0;
 
-	err = rhltable_init(&rhlt, &test_rht_params_dup);
-	if (WARN_ON(err))
+	rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
+	if (WARN_ON(!rhlt))
+		return -EINVAL;
+
+	err = rhltable_init(rhlt, &test_rht_params_dup);
+	if (WARN_ON(err)) {
+		kfree(rhlt);
 		return err;
+	}
 
 	for (i = 0; i < cnt; i++) {
 		rhl_test_objects[i].value.tid = i;
-		key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+		key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
 		key += test_rht_params_dup.key_offset;
 
 		if (slow) {
-			err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+			err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
 							     &rhl_test_objects[i].list_node.rhead));
 			if (err == -EAGAIN)
 				err = 0;
 		} else
-			err = rhltable_insert(&rhlt,
+			err = rhltable_insert(rhlt,
 					      &rhl_test_objects[i].list_node,
 					      test_rht_params_dup);
 		if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
 			goto skip_print;
 	}
 
-	ret = print_ht(&rhlt);
+	ret = print_ht(rhlt);
 	WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
 
 skip_print:
-	rhltable_destroy(&rhlt);
+	rhltable_destroy(rhlt);
+	kfree(rhlt);
 
 	return 0;
 }
diff --git a/mm/Kconfig b/mm/Kconfig
index 7319a3e..6975182 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -651,6 +651,15 @@
 
 	  A sane initial value is 80 MB.
 
+config BALANCE_ANON_FILE_RECLAIM
+	bool "During reclaim treat anon and file backed pages equally"
+	depends on SWAP
+	help
+	  When performing memory reclaim, treat anonymous and file backed
+	  pages equally. Swapping anonymous pages out (e.g. to a compressed,
+	  memory-backed swap device such as zram) can be efficient enough
+	  to justify treating the two kinds of pages equally.
+
 config DEFERRED_STRUCT_PAGE_INIT
 	bool "Defer initialisation of struct pages to kthreads"
 	default n
@@ -772,6 +781,28 @@
 	  information includes global and per chunk statistics, which can
 	  be used to help understand percpu memory usage.
 
+config ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
+       def_bool n
+
+config SPECULATIVE_PAGE_FAULT
+       bool "Speculative page faults"
+       default y
+       depends on ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
+       depends on MMU && SMP
+       help
+         Try to handle user space page faults without holding the mmap_sem.
+
+	 This should allow better concurrency for massively threaded processes
+	 since the page fault handler will not wait for other threads' memory
+	 layout changes to be done, assuming that such a change is done in
+	 another part of the process's memory space. This type of page fault
+	 is named a speculative page fault.
+
+	 If the speculative page fault fails, because concurrent access was
+	 detected or because the underlying PMD or PTE tables were not yet
+	 allocated, processing falls back to a classic page fault.
+
 config GUP_BENCHMARK
 	bool "Enable infrastructure for get_user_pages_fast() benchmarking"
 	default n
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8a8bb87..72e6d0c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -689,6 +689,7 @@
 	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
 	bdi->cgwb_congested_tree = RB_ROOT;
 	mutex_init(&bdi->cgwb_release_mutex);
+	init_rwsem(&bdi->wb_switch_rwsem);
 
 	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
 	if (!ret) {
diff --git a/mm/cma.c b/mm/cma.c
index 3c79c67..d8cc98f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -453,6 +453,8 @@
 	if (!count)
 		return NULL;
 
+	trace_cma_alloc_start(count, align);
+
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -520,6 +522,8 @@
 
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
+
+		trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
 		/* try again with a bit different memory target */
 		start = bitmap_no + mask + 1;
 	}
diff --git a/mm/compaction.c b/mm/compaction.c
index faca45e..7c60747 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -22,6 +22,7 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/page_owner.h>
+#include <linux/psi.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -2068,11 +2069,15 @@
 	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
 
 	while (!kthread_should_stop()) {
+		unsigned long pflags;
+
 		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
 		wait_event_freezable(pgdat->kcompactd_wait,
 				kcompactd_work_requested(pgdat));
 
+		psi_memstall_enter(&pflags);
 		kcompactd_do_work(pgdat);
+		psi_memstall_leave(&pflags);
 	}
 
 	return 0;
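This enter/leave pair is the same bracketing this series applies at every stall site it touches (direct reclaim and compaction in page_alloc.c, kswapd and memcg reclaim in vmscan.c, thrashing waits in filemap.c): work that only happens because memory is short is wrapped so psi can attribute the time to memory pressure. The recurring shape, as used throughout the diff:

	unsigned long pflags;

	psi_memstall_enter(&pflags);
	/* work done solely to relieve memory pressure, e.g. compaction,
	 * direct reclaim, or waiting on a thrashing page
	 */
	psi_memstall_leave(&pflags);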
diff --git a/mm/filemap.c b/mm/filemap.c
index 843429e..cbb202b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -36,6 +36,8 @@
 #include <linux/cleancache.h>
 #include <linux/shmem_fs.h>
 #include <linux/rmap.h>
+#include <linux/delayacct.h>
+#include <linux/psi.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -919,12 +921,9 @@
 		 * data from the working set, only to cache data that will
 		 * get overwritten with something else, is a waste of memory.
 		 */
-		if (!(gfp_mask & __GFP_WRITE) &&
-		    shadow && workingset_refault(shadow)) {
-			SetPageActive(page);
-			workingset_activation(page);
-		} else
-			ClearPageActive(page);
+		WARN_ON_ONCE(PageActive(page));
+		if (!(gfp_mask & __GFP_WRITE) && shadow)
+			workingset_refault(page, shadow);
 		lru_cache_add(page);
 	}
 	return ret;
@@ -1080,8 +1079,18 @@
 {
 	struct wait_page_queue wait_page;
 	wait_queue_entry_t *wait = &wait_page.wait;
+	bool thrashing = false;
+	unsigned long pflags;
 	int ret = 0;
 
+	if (bit_nr == PG_locked &&
+	    !PageUptodate(page) && PageWorkingset(page)) {
+		if (!PageSwapBacked(page))
+			delayacct_thrashing_start();
+		psi_memstall_enter(&pflags);
+		thrashing = true;
+	}
+
 	init_wait(wait);
 	wait->flags = lock ? WQ_FLAG_EXCLUSIVE : 0;
 	wait->func = wake_page_function;
@@ -1120,6 +1129,12 @@
 
 	finish_wait(q, wait);
 
+	if (thrashing) {
+		if (!PageSwapBacked(page))
+			delayacct_thrashing_end();
+		psi_memstall_leave(&pflags);
+	}
+
 	/*
 	 * A signal could leave PageWaiters set. Clearing it here if
 	 * !waitqueue_active would be possible (by open-coding finish_wait),
diff --git a/mm/hmm.c b/mm/hmm.c
index 90193a7..57f0d2a 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -945,7 +945,6 @@
 
 	devmem = container_of(ref, struct hmm_devmem, ref);
 	percpu_ref_exit(ref);
-	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
 }
 
 static void hmm_devmem_ref_kill(void *data)
@@ -956,7 +955,6 @@
 	devmem = container_of(ref, struct hmm_devmem, ref);
 	percpu_ref_kill(ref);
 	wait_for_completion(&devmem->completion);
-	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
 }
 
 static int hmm_devmem_fault(struct vm_area_struct *vma,
@@ -994,7 +992,7 @@
 	mutex_unlock(&hmm_devmem_lock);
 }
 
-static void hmm_devmem_release(struct device *dev, void *data)
+static void hmm_devmem_release(void *data)
 {
 	struct hmm_devmem *devmem = data;
 	struct resource *resource = devmem->resource;
@@ -1002,11 +1000,6 @@
 	struct zone *zone;
 	struct page *page;
 
-	if (percpu_ref_tryget_live(&devmem->ref)) {
-		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
-		percpu_ref_put(&devmem->ref);
-	}
-
 	/* pages are dead and unused, undo the arch mapping */
 	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
 	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
@@ -1130,19 +1123,6 @@
 	return ret;
 }
 
-static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
-{
-	struct hmm_devmem *devmem = data;
-
-	return devmem->resource == match_data;
-}
-
-static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
-{
-	devres_release(devmem->device, &hmm_devmem_release,
-		       &hmm_devmem_match, devmem->resource);
-}
-
 /*
  * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
  *
@@ -1170,8 +1150,7 @@
 
 	dev_pagemap_get_ops();
 
-	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
-				   GFP_KERNEL, dev_to_node(device));
+	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
 	if (!devmem)
 		return ERR_PTR(-ENOMEM);
 
@@ -1185,11 +1164,11 @@
 	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
 			      0, GFP_KERNEL);
 	if (ret)
-		goto error_percpu_ref;
+		return ERR_PTR(ret);
 
-	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
+	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
 	if (ret)
-		goto error_devm_add_action;
+		return ERR_PTR(ret);
 
 	size = ALIGN(size, PA_SECTION_SIZE);
 	addr = min((unsigned long)iomem_resource.end,
@@ -1209,16 +1188,12 @@
 
 		devmem->resource = devm_request_mem_region(device, addr, size,
 							   dev_name(device));
-		if (!devmem->resource) {
-			ret = -ENOMEM;
-			goto error_no_resource;
-		}
+		if (!devmem->resource)
+			return ERR_PTR(-ENOMEM);
 		break;
 	}
-	if (!devmem->resource) {
-		ret = -ERANGE;
-		goto error_no_resource;
-	}
+	if (!devmem->resource)
+		return ERR_PTR(-ERANGE);
 
 	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
@@ -1227,30 +1202,15 @@
 
 	ret = hmm_devmem_pages_create(devmem);
 	if (ret)
-		goto error_pages;
-
-	devres_add(device, devmem);
-
-	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
-	if (ret) {
-		hmm_devmem_remove(devmem);
 		return ERR_PTR(ret);
-	}
+
+	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
+	if (ret)
+		return ERR_PTR(ret);
 
 	return devmem;
-
-error_pages:
-	devm_release_mem_region(device, devmem->resource->start,
-				resource_size(devmem->resource));
-error_no_resource:
-error_devm_add_action:
-	hmm_devmem_ref_kill(&devmem->ref);
-	hmm_devmem_ref_exit(&devmem->ref);
-error_percpu_ref:
-	devres_free(devmem);
-	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(hmm_devmem_add);
+EXPORT_SYMBOL_GPL(hmm_devmem_add);
 
 struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 					   struct device *device,
@@ -1264,8 +1224,7 @@
 
 	dev_pagemap_get_ops();
 
-	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
-				   GFP_KERNEL, dev_to_node(device));
+	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
 	if (!devmem)
 		return ERR_PTR(-ENOMEM);
 
@@ -1279,12 +1238,12 @@
 	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
 			      0, GFP_KERNEL);
 	if (ret)
-		goto error_percpu_ref;
+		return ERR_PTR(ret);
 
-	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
+	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
+			&devmem->ref);
 	if (ret)
-		goto error_devm_add_action;
-
+		return ERR_PTR(ret);
 
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
 	devmem->pfn_last = devmem->pfn_first +
@@ -1292,58 +1251,20 @@
 
 	ret = hmm_devmem_pages_create(devmem);
 	if (ret)
-		goto error_devm_add_action;
-
-	devres_add(device, devmem);
-
-	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
-	if (ret) {
-		hmm_devmem_remove(devmem);
 		return ERR_PTR(ret);
-	}
+
+	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = devm_add_action_or_reset(device, hmm_devmem_ref_kill,
+			&devmem->ref);
+	if (ret)
+		return ERR_PTR(ret);
 
 	return devmem;
-
-error_devm_add_action:
-	hmm_devmem_ref_kill(&devmem->ref);
-	hmm_devmem_ref_exit(&devmem->ref);
-error_percpu_ref:
-	devres_free(devmem);
-	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(hmm_devmem_add_resource);
-
-/*
- * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
- *
- * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory
- *
- * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
- * of the device driver. It will free struct page and remove the resource that
- * reserved the physical address range for this device memory.
- */
-void hmm_devmem_remove(struct hmm_devmem *devmem)
-{
-	resource_size_t start, size;
-	struct device *device;
-	bool cdm = false;
-
-	if (!devmem)
-		return;
-
-	device = devmem->device;
-	start = devmem->resource->start;
-	size = resource_size(devmem->resource);
-
-	cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
-	hmm_devmem_ref_kill(&devmem->ref);
-	hmm_devmem_ref_exit(&devmem->ref);
-	hmm_devmem_pages_remove(devmem);
-
-	if (!cdm)
-		devm_release_mem_region(device, start, size);
-}
-EXPORT_SYMBOL(hmm_devmem_remove);
+EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
 
 /*
  * A device driver that wants to handle multiple devices memory through a
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 15310f1..17422cc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2127,23 +2127,25 @@
 	 */
 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
 
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	pmd_migration = is_pmd_migration_entry(old_pmd);
-	if (pmd_migration) {
+	if (unlikely(pmd_migration)) {
 		swp_entry_t entry;
 
 		entry = pmd_to_swp_entry(old_pmd);
 		page = pfn_to_page(swp_offset(entry));
-	} else
-#endif
+		write = is_write_migration_entry(entry);
+		young = false;
+		soft_dirty = pmd_swp_soft_dirty(old_pmd);
+	} else {
 		page = pmd_page(old_pmd);
+		if (pmd_dirty(old_pmd))
+			SetPageDirty(page);
+		write = pmd_write(old_pmd);
+		young = pmd_young(old_pmd);
+		soft_dirty = pmd_soft_dirty(old_pmd);
+	}
 	VM_BUG_ON_PAGE(!page_count(page), page);
 	page_ref_add(page, HPAGE_PMD_NR - 1);
-	if (pmd_dirty(old_pmd))
-		SetPageDirty(page);
-	write = pmd_write(old_pmd);
-	young = pmd_young(old_pmd);
-	soft_dirty = pmd_soft_dirty(old_pmd);
 
 	/*
 	 * Withdraw the table only after we mark the pmd entry invalid.
@@ -2369,6 +2371,7 @@
 			 (1L << PG_mlocked) |
 			 (1L << PG_uptodate) |
 			 (1L << PG_active) |
+			 (1L << PG_workingset) |
 			 (1L << PG_locked) |
 			 (1L << PG_unevictable) |
 			 (1L << PG_dirty)));
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 309fb8c..9e5f66c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3624,7 +3624,6 @@
 	copy_user_huge_page(new_page, old_page, address, vma,
 			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
-	set_page_huge_active(new_page);
 
 	mmun_start = haddr;
 	mmun_end = mmun_start + huge_page_size(h);
@@ -3646,6 +3645,7 @@
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page, true);
 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
+		set_page_huge_active(new_page);
 		/* Make the old page be freed below */
 		new_page = old_page;
 	}
@@ -3730,6 +3730,7 @@
 	pte_t new_pte;
 	spinlock_t *ptl;
 	unsigned long haddr = address & huge_page_mask(h);
+	bool new_page = false;
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -3791,7 +3792,7 @@
 		}
 		clear_huge_page(page, address, pages_per_huge_page(h));
 		__SetPageUptodate(page);
-		set_page_huge_active(page);
+		new_page = true;
 
 		if (vma->vm_flags & VM_MAYSHARE) {
 			int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3862,6 +3863,15 @@
 	}
 
 	spin_unlock(ptl);
+
+	/*
+	 * Only make newly allocated pages active.  Existing pages found
+	 * in the pagecache could be !page_huge_active() if they have been
+	 * isolated for migration.
+	 */
+	if (new_page)
+		set_page_huge_active(page);
+
 	unlock_page(page);
 out:
 	return ret;
@@ -4096,7 +4106,6 @@
 	 * the set_pte_at() write.
 	 */
 	__SetPageUptodate(page);
-	set_page_huge_active(page);
 
 	mapping = dst_vma->vm_file->f_mapping;
 	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4164,6 +4173,7 @@
 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
 	spin_unlock(ptl);
+	set_page_huge_active(page);
 	if (vm_shared)
 		unlock_page(page);
 	ret = 0;
@@ -4269,7 +4279,8 @@
 				break;
 			}
 			if (ret & VM_FAULT_RETRY) {
-				if (nonblocking)
+				if (nonblocking &&
+				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 					*nonblocking = 0;
 				*nr_pages = 0;
 				/*
diff --git a/mm/memblock.c b/mm/memblock.c
index bec0b05..cb26bcf 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -799,7 +799,8 @@
 	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
 		     &base, &end, (void *)_RET_IP_);
 
-	kmemleak_free_part_phys(base, size);
+	if (base < memblock.current_limit)
+		kmemleak_free_part(__va(base), size);
 	return memblock_remove_range(&memblock.reserved, base, size);
 }
 
@@ -1248,7 +1249,9 @@
 		 * The min_count is set to 0 so that memblock allocations are
 		 * never reported as leaks.
 		 */
-		kmemleak_alloc_phys(found, size, 0, 0);
+		if (found < memblock.current_limit)
+			kmemleak_alloc(__va(found), size, 0, 0);
+
 		return found;
 	}
 	return 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e79cb59..9518aef 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1666,6 +1666,9 @@
 
 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
+	enum oom_status ret;
+	bool locked;
+
 	if (order > PAGE_ALLOC_COSTLY_ORDER)
 		return OOM_SKIPPED;
 
@@ -1698,10 +1701,23 @@
 		return OOM_ASYNC;
 	}
 
-	if (mem_cgroup_out_of_memory(memcg, mask, order))
-		return OOM_SUCCESS;
+	mem_cgroup_mark_under_oom(memcg);
 
-	return OOM_FAILED;
+	locked = mem_cgroup_oom_trylock(memcg);
+
+	if (locked)
+		mem_cgroup_oom_notify(memcg);
+
+	mem_cgroup_unmark_under_oom(memcg);
+	if (mem_cgroup_out_of_memory(memcg, mask, order))
+		ret = OOM_SUCCESS;
+	else
+		ret = OOM_FAILED;
+
+	if (locked)
+		mem_cgroup_oom_unlock(memcg);
+
+	return ret;
 }
 
 /**
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 0cd3de3..d9b8a24 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -372,7 +372,8 @@
 			if (fail || tk->addr_valid == 0) {
 				pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
 				       pfn, tk->tsk->comm, tk->tsk->pid);
-				force_sig(SIGKILL, tk->tsk);
+				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
+						 tk->tsk, PIDTYPE_PID);
 			}
 
 			/*
diff --git a/mm/memory.c b/mm/memory.c
index cf84116..58ff2c6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3237,6 +3237,29 @@
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret;
 
+	/*
+	 * Preallocate pte before we take page_lock because this might lead to
+	 * deadlocks for memcg reclaim which waits for pages under writeback:
+	 *				lock_page(A)
+	 *				SetPageWriteback(A)
+	 *				unlock_page(A)
+	 * lock_page(B)
+	 *				lock_page(B)
+	 * pte_alloc_one
+	 *   shrink_page_list
+	 *     wait_on_page_writeback(A)
+	 *				SetPageWriteback(B)
+	 *				unlock_page(B)
+	 *				# flush A, B to clear the writeback
+	 */
+	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
+		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
+						  vmf->address);
+		if (!vmf->prealloc_pte)
+			return VM_FAULT_OOM;
+		smp_wmb(); /* See comment in __pte_alloc() */
+	}
+
 	ret = vma->vm_ops->fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
 			    VM_FAULT_DONE_COW)))
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 4f1610c..2cf470a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -36,6 +36,7 @@
 #include <linux/bootmem.h>
 #include <linux/compaction.h>
 #include <linux/device.h>
+#include <linux/rmap.h>
 
 #include <asm/tlbflush.h>
 
@@ -1286,11 +1287,13 @@
 	return PageBuddy(page) && page_order(page) >= pageblock_order;
 }
 
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
+
 	/* Ensure the starting page is pageblock-aligned */
-	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+	BUG_ON(pfn & (pageblock_nr_pages - 1));
 
 	/* If the entire pageblock is free, move to the end of free page */
 	if (pageblock_free(page)) {
@@ -1298,16 +1301,16 @@
 		/* be careful. we don't have locks, page_order can be changed.*/
 		order = page_order(page);
 		if ((order < MAX_ORDER) && (order >= pageblock_order))
-			return page + (1 << order);
+			return pfn + (1 << order);
 	}
 
-	return page + pageblock_nr_pages;
+	return pfn + pageblock_nr_pages;
 }
 
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
 	struct zone *zone;
-	unsigned long pfn;
 
 	/*
 	 * We have to be careful here because we are iterating over memory
@@ -1330,12 +1333,14 @@
 /* Checks if this range of memory is likely to be hot-removable. */
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
-	struct page *page = pfn_to_page(start_pfn);
-	struct page *end_page = page + nr_pages;
+	unsigned long end_pfn, pfn;
+
+	end_pfn = min(start_pfn + nr_pages,
+			zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
 
 	/* Check the starting page of each pageblock within the range */
-	for (; page < end_page; page = next_active_pageblock(page)) {
-		if (!is_pageblock_removable_nolock(page))
+	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+		if (!is_pageblock_removable_nolock(pfn))
 			return false;
 		cond_resched();
 	}
@@ -1371,6 +1376,9 @@
 				i++;
 			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
 				continue;
+			/* Check if we got outside of the zone */
+			if (zone && !zone_spans_pfn(zone, pfn + i))
+				return 0;
 			page = pfn_to_page(pfn + i);
 			if (zone && page_zone(page) != zone)
 				return 0;
@@ -1399,23 +1407,27 @@
 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
-	struct page *page;
+
 	for (pfn = start; pfn < end; pfn++) {
-		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (PageLRU(page))
-				return pfn;
-			if (__PageMovable(page))
-				return pfn;
-			if (PageHuge(page)) {
-				if (hugepage_migration_supported(page_hstate(page)) &&
-				    page_huge_active(page))
-					return pfn;
-				else
-					pfn = round_up(pfn + 1,
-						1 << compound_order(page)) - 1;
-			}
-		}
+		struct page *page, *head;
+		unsigned long skip;
+
+		if (!pfn_valid(pfn))
+			continue;
+		page = pfn_to_page(pfn);
+		if (PageLRU(page))
+			return pfn;
+		if (__PageMovable(page))
+			return pfn;
+
+		if (!PageHuge(page))
+			continue;
+		head = compound_head(page);
+		if (hugepage_migration_supported(page_hstate(head)) &&
+		    page_huge_active(head))
+			return pfn;
+		skip = (1 << compound_order(head)) - (page - head);
+		pfn += skip - 1;
 	}
 	return 0;
 }
@@ -1467,6 +1479,21 @@
 			pfn = page_to_pfn(compound_head(page))
 				+ hpage_nr_pages(page) - 1;
 
+		/*
+		 * HWPoison pages have elevated reference counts so the migration would
+		 * fail on them. It also doesn't make any sense to migrate them in the
+		 * first place. Still try to unmap such a page in case it is still mapped
+		 * (e.g. the current hwpoison implementation doesn't unmap KSM
+		 * pages; keep the unmap here as a catch-all safety net).
+		 */
+		if (PageHWPoison(page)) {
+			if (WARN_ON(PageLRU(page)))
+				isolate_lru_page(page);
+			if (page_mapped(page))
+				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
+			continue;
+		}
+
 		if (!get_page_unless_zero(page))
 			continue;
 		/*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 106ae83..1cfb9f8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1301,7 +1301,7 @@
 			      nodemask_t *nodes)
 {
 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
-	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
+	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
 
 	if (copy > nbytes) {
 		if (copy > PAGE_SIZE)
@@ -1478,7 +1478,7 @@
 	int uninitialized_var(pval);
 	nodemask_t nodes;
 
-	if (nmask != NULL && maxnode < MAX_NUMNODES)
+	if (nmask != NULL && maxnode < nr_node_ids)
 		return -EINVAL;
 
 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1514,7 +1514,7 @@
 	unsigned long nr_bits, alloc_size;
 	DECLARE_BITMAP(bm, MAX_NUMNODES);
 
-	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
+	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask)
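Sizing by nr_node_ids (the number of node IDs actually possible on the booted system) instead of the compile-time MAX_NUMNODES shrinks what get_mempolicy() must copy or zero, and stops rejecting small-but-sufficient user buffers. The arithmetic, assuming a common distro configuration:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int max_numnodes = 1024;	/* kernel build-time limit */
	unsigned int nr_node_ids = 4;		/* nodes on this machine */

	printf("old nbytes: %zu\n",
	       BITS_TO_LONGS(max_numnodes) * sizeof(long));	/* 128 */
	printf("new nbytes: %zu\n",
	       BITS_TO_LONGS(nr_node_ids) * sizeof(long));	/* 8 */
	return 0;
}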
diff --git a/mm/migrate.c b/mm/migrate.c
index 84381b5..b80f4da 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -685,6 +685,8 @@
 		SetPageActive(newpage);
 	} else if (TestClearPageUnevictable(page))
 		SetPageUnevictable(newpage);
+	if (PageWorkingset(page))
+		SetPageWorkingset(newpage);
 	if (PageChecked(page))
 		SetPageChecked(newpage);
 	if (PageMappedToDisk(page))
@@ -1118,10 +1120,13 @@
 	 * If migration is successful, decrease refcount of the newpage
 	 * which will not free the page because new page owner increased
 	 * refcounter. As well, if it is LRU page, add the page to LRU
-	 * list in here.
+	 * list in here. Use the old state of the isolated source page to
+	 * determine if we migrated an LRU page. newpage was already unlocked
+	 * and possibly modified by its owner - don't rely on the page
+	 * state.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		if (unlikely(__PageMovable(newpage)))
+		if (unlikely(!is_lru))
 			put_page(newpage);
 		else
 			putback_lru_page(newpage);
@@ -1300,6 +1305,16 @@
 		lock_page(hpage);
 	}
 
+	/*
+	 * Check for pages which are in the process of being freed.  Without
+	 * page_mapping() set, hugetlbfs specific move page routine will not
+	 * be called and we could leak usage counts for subpools.
+	 */
+	if (page_private(hpage) && !page_mapping(hpage)) {
+		rc = -EBUSY;
+		goto out_unlock;
+	}
+
 	if (PageAnon(hpage))
 		anon_vma = page_get_anon_vma(hpage);
 
@@ -1330,6 +1345,7 @@
 		put_new_page = NULL;
 	}
 
+out_unlock:
 	unlock_page(hpage);
 out:
 	if (rc != -EAGAIN)
@@ -1382,6 +1398,8 @@
 	int swapwrite = current->flags & PF_SWAPWRITE;
 	int rc;
 
+	trace_mm_migrate_pages_start(mode, reason);
+
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 53bbe0d..2ffb564 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2400,12 +2400,11 @@
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *prev;
-	int error;
+	int error = 0;
 
 	address &= PAGE_MASK;
-	error = security_mmap_addr(address);
-	if (error)
-		return error;
+	if (address < mmap_min_addr)
+		return -EPERM;
 
 	/* Enforce stack_guard_gap */
 	prev = vma->vm_prev;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index dde2ee6..e56b072 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -647,8 +647,8 @@
 	 */
 	spin_lock(&oom_reaper_lock);
 
-	/* tsk is already queued? */
-	if (tsk == oom_reaper_list || tsk->oom_reaper_list) {
+	/* mm is already queued? */
+	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) {
 		spin_unlock(&oom_reaper_lock);
 		return;
 	}
@@ -981,6 +981,13 @@
 	 * still freeing memory.
 	 */
 	read_lock(&tasklist_lock);
+
+	/*
+	 * The task 'p' might have already exited before reaching here. Its
+	 * final put_task_struct() could free task_struct 'p' while this loop
+	 * still accesses its fields, so take an extra reference.
+	 */
+	get_task_struct(p);
 	for_each_thread(p, t) {
 		list_for_each_entry(child, &t->children, sibling) {
 			unsigned int child_points;
@@ -1000,6 +1007,7 @@
 			}
 		}
 	}
+	put_task_struct(p);
 	read_unlock(&tasklist_lock);
 
 	/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 84ae9bf..ea4fd3a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2156,6 +2156,7 @@
 {
 	int ret = 0;
 	int done = 0;
+	int error;
 	struct pagevec pvec;
 	int nr_pages;
 	pgoff_t uninitialized_var(writeback_index);
@@ -2236,25 +2237,31 @@
 				goto continue_unlock;
 
 			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
-			ret = (*writepage)(page, wbc, data);
-			if (unlikely(ret)) {
-				if (ret == AOP_WRITEPAGE_ACTIVATE) {
+			error = (*writepage)(page, wbc, data);
+			if (unlikely(error)) {
+				/*
+				 * Handle errors according to the type of
+				 * writeback. There's no need to continue for
+				 * background writeback. Just push done_index
+				 * past this page so media errors won't choke
+				 * writeout for the entire file. For integrity
+				 * writeback, we must process the entire dirty
+				 * set regardless of errors because the fs may
+				 * still have state to clear for each page. In
+				 * that case we continue processing and return
+				 * the first error.
+				 */
+				if (error == AOP_WRITEPAGE_ACTIVATE) {
 					unlock_page(page);
-					ret = 0;
-				} else {
-					/*
-					 * done_index is set past this page,
-					 * so media errors will not choke
-					 * background writeout for the entire
-					 * file. This has consequences for
-					 * range_cyclic semantics (ie. it may
-					 * not be suitable for data integrity
-					 * writeout).
-					 */
+					error = 0;
+				} else if (wbc->sync_mode != WB_SYNC_ALL) {
+					ret = error;
 					done_index = page->index + 1;
 					done = 1;
 					break;
 				}
+				if (!ret)
+					ret = error;
 			}
 
 			/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ed57047..c24fc87 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -67,6 +67,7 @@
 #include <linux/ftrace.h>
 #include <linux/lockdep.h>
 #include <linux/nmi.h>
+#include <linux/psi.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -307,6 +308,32 @@
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+/*
+ * During boot we initialize deferred pages on-demand, as needed, but once
+ * page_alloc_init_late() has finished, the deferred pages are all initialized,
+ * and we can permanently disable that path.
+ */
+static DEFINE_STATIC_KEY_TRUE(deferred_pages);
+
+/*
+ * Call kasan_free_pages() only after deferred memory initialization
+ * has completed. Poisoning pages during deferred memory init would greatly
+ * lengthen the process and cause problems on large memory systems, as
+ * deferred page initialization is done with interrupts disabled.
+ *
+ * Assuming that there will be no reference to those newly initialized
+ * pages before they are ever allocated, this should have no effect on
+ * KASAN memory tracking as the poison will be properly inserted at page
+ * allocation time. The only corner case is when pages are allocated on
+ * demand and then freed again before the deferred pages initialization
+ * is done, but this is unlikely to happen.
+ */
+static inline void kasan_free_nondeferred_pages(struct page *page, int order)
+{
+	if (!static_branch_unlikely(&deferred_pages))
+		kasan_free_pages(page, order);
+}
+
 /* Returns true if the struct page for the pfn is uninitialised */
 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 {
@@ -339,6 +366,8 @@
 	return true;
 }
 #else
+#define kasan_free_nondeferred_pages(p, o)	kasan_free_pages(p, o)
+
 static inline bool early_page_uninitialised(unsigned long pfn)
 {
 	return false;
@@ -1043,7 +1072,7 @@
 	arch_free_page(page, order);
 	kernel_poison_pages(page, 1 << order, 0);
 	kernel_map_pages(page, 1 << order, 0);
-	kasan_free_pages(page, order);
+	kasan_free_nondeferred_pages(page, order);
 
 	return true;
 }
@@ -1607,13 +1636,6 @@
 }
 
 /*
- * During boot we initialize deferred pages on-demand, as needed, but once
- * page_alloc_init_late() has finished, the deferred pages are all initialized,
- * and we can permanently disable that path.
- */
-static DEFINE_STATIC_KEY_TRUE(deferred_pages);
-
-/*
  * If this zone has deferred pages, try to grow it by initializing enough
  * deferred pages to satisfy the allocation specified by order, rounded up to
  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
@@ -1913,9 +1935,9 @@
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
+	kasan_alloc_pages(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 	kernel_poison_pages(page, 1 << order, 1);
-	kasan_alloc_pages(page, order);
 	set_page_owner(page, order, gfp_flags);
 }
 
@@ -3637,15 +3659,20 @@
 		enum compact_priority prio, enum compact_result *compact_result)
 {
 	struct page *page;
+	unsigned long pflags;
 	unsigned int noreclaim_flag;
 
 	if (!order)
 		return NULL;
 
+	psi_memstall_enter(&pflags);
 	noreclaim_flag = memalloc_noreclaim_save();
+
 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
 									prio);
+
 	memalloc_noreclaim_restore(noreclaim_flag);
+	psi_memstall_leave(&pflags);
 
 	if (*compact_result <= COMPACT_INACTIVE)
 		return NULL;
@@ -3887,11 +3914,13 @@
 	struct reclaim_state reclaim_state;
 	int progress;
 	unsigned int noreclaim_flag;
+	unsigned long pflags;
 
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
+	psi_memstall_enter(&pflags);
 	fs_reclaim_acquire(gfp_mask);
 	noreclaim_flag = memalloc_noreclaim_save();
 	reclaim_state.reclaimed_slab = 0;
@@ -3903,6 +3932,7 @@
 	current->reclaim_state = NULL;
 	memalloc_noreclaim_restore(noreclaim_flag);
 	fs_reclaim_release(gfp_mask);
+	psi_memstall_leave(&pflags);
 
 	cond_resched();
 
@@ -7845,11 +7875,14 @@
 		 * handle each tail page individually in migration.
 		 */
 		if (PageHuge(page)) {
+			struct page *head = compound_head(page);
+			unsigned int skip_pages;
 
-			if (!hugepage_migration_supported(page_hstate(page)))
+			if (!hugepage_migration_supported(page_hstate(head)))
 				goto unmovable;
 
-			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+			skip_pages = (1 << compound_order(head)) - (page - head);
+			iter += skip_pages - 1;
 			continue;
 		}
 
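Both this hunk and the scan_movable_pages() rewrite in memory_hotplug.c fix the same bug: compound_order() was read from a potentially tail page, where the order is not valid, so the skip could be garbage. The corrected arithmetic steps from the current pfn exactly to the first pfn past the compound page; a worked example:

#include <stdio.h>

int main(void)
{
	/* e.g. a 2MB hugepage: order 9 = 512 base pages, head at pfn 512,
	 * and the scan is currently at a tail page
	 */
	unsigned long head_pfn = 512, pfn = 700, order = 9;
	unsigned long skip = (1UL << order) - (pfn - head_pfn);

	printf("resume scan at pfn %lu\n", pfn + skip);	/* 1024 */
	return 0;
}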
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index 38de70a..0f643dc 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -50,6 +50,7 @@
 	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
 	struct pcpu_chunk *chunk;
 	struct page *pages;
+	unsigned long flags;
 	int i;
 
 	chunk = pcpu_alloc_chunk(gfp);
@@ -68,9 +69,9 @@
 	chunk->data = pages;
 	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
 
-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, flags);
 	pcpu_chunk_populated(chunk, 0, nr_pages, false);
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
 
 	pcpu_stats_chunk_alloc();
 	trace_percpu_create_chunk(chunk->base_addr);
diff --git a/mm/shmem.c b/mm/shmem.c
index e994935..645885c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2171,20 +2171,21 @@
 {
 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
 
-	/*
-	 * New PROT_READ and MAP_SHARED mmaps are not allowed when "future
-	 * write" seal active.
-	 */
-	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE) &&
-	    (info->seals & F_SEAL_FUTURE_WRITE))
-		return -EPERM;
+	if (info->seals & F_SEAL_FUTURE_WRITE) {
+		/*
+		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+		 * "future write" seal active.
+		 */
+		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+			return -EPERM;
 
-	/*
-	 * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED read-only
-	 * mapping, take care to not allow mprotect to revert protections.
-	 */
-	if (info->seals & F_SEAL_FUTURE_WRITE)
+		/*
+		 * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED
+		 * read-only mapping, take care to not allow mprotect to revert
+		 * protections.
+		 */
 		vma->vm_flags &= ~(VM_MAYWRITE);
+	}
 
 	file_accessed(file);
 	vma->vm_ops = &shmem_vm_ops;
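The restructured check enforces both halves of F_SEAL_FUTURE_WRITE: refuse new shared writable mappings, but let shared read-only mappings through with VM_MAYWRITE cleared so mprotect() cannot upgrade them later. A userspace sketch, assuming a kernel with this seal and glibc 2.27+ for memfd_create():

#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE 0x0010	/* may be absent from older headers */
#endif

int main(void)
{
	int fd = memfd_create("demo", MFD_CLOEXEC | MFD_ALLOW_SEALING);

	ftruncate(fd, 4096);
	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);

	/* new shared writable mappings are refused (EPERM) ... */
	if (mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, 0) == MAP_FAILED)
		perror("rw mmap");

	/* ... while shared read-only mappings still succeed */
	if (mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0) != MAP_FAILED)
		puts("ro mmap ok");

	close(fd);
	return 0;
}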
diff --git a/mm/slab.c b/mm/slab.c
index d73c7a4..fad6839 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -679,8 +679,10 @@
 	struct alien_cache *alc = NULL;
 
 	alc = kmalloc_node(memsize, gfp, node);
-	init_arraycache(&alc->ac, entries, batch);
-	spin_lock_init(&alc->lock);
+	if (alc) {
+		init_arraycache(&alc->ac, entries, batch);
+		spin_lock_init(&alc->lock);
+	}
 	return alc;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 0fc48b4..eacb2b24 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1693,6 +1693,7 @@
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	memcg_uncharge_slab(page, order, s);
+	kasan_alloc_pages(page, order);
 	__free_pages(page, order);
 }
 
@@ -3912,6 +3913,7 @@
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kfree_hook(object);
+		kasan_alloc_pages(page, compound_order(page));
 		__free_pages(page, compound_order(page));
 		return;
 	}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ecee9c6..0d6a7f2 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -448,6 +448,7 @@
 			/*
 			 * Initiate read into locked page and return.
 			 */
+			SetPageWorkingset(new_page);
 			lru_cache_add_anon(new_page);
 			*new_page_allocated = true;
 			return new_page;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 380d259..f09534f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2229,7 +2229,8 @@
 		 */
 		if (PageSwapCache(page) &&
 		    likely(page_private(page) == entry.val) &&
-		    !page_swapped(page))
+		    (!PageTransCompound(page) ||
+		     !swap_page_trans_huge_swapped(si, entry)))
 			delete_from_swap_cache(compound_head(page));
 
 		/*
@@ -2840,8 +2841,9 @@
 	struct swap_info_struct *p;
 	unsigned int type;
 	int i;
+	int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
 
-	p = kvzalloc(sizeof(*p), GFP_KERNEL);
+	p = kvzalloc(size, GFP_KERNEL);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 0293645..ac85aeb 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -167,6 +167,8 @@
 	const void *end = ptr + n - 1;
 	struct page *endpage;
 	bool is_reserved, is_cma;
+	const void * const stack = task_stack_page(current);
+	const void * const stackend = stack + THREAD_SIZE;
 
 	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
@@ -191,6 +193,10 @@
 	    end <= (const void *)__bss_stop)
 		return;
 
+	/* Allow stack region to span multiple pages */
+	if (ptr >= stack && end <= stackend)
+		return;
+
 	/* Is the object wholly within one base page? */
 	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
@@ -247,7 +253,8 @@
 /*
  * Validates that the given object is:
  * - not bogus address
- * - known-safe heap or stack object
+ * - fully contained by stack (or stack frame, when available)
+ * - fully within SLAB object (or object whitelist area, when available)
  * - not in kernel text
  */
 void __check_object_size(const void *ptr, unsigned long n, bool to_user)
@@ -262,9 +269,6 @@
 	/* Check for invalid addresses. */
 	check_bogus_address((const unsigned long)ptr, n, to_user);
 
-	/* Check for bad heap object. */
-	check_heap_object(ptr, n, to_user);
-
 	/* Check for bad stack object. */
 	switch (check_stack_object(ptr, n)) {
 	case NOT_STACK:
@@ -282,6 +286,9 @@
 		usercopy_abort("process stack", NULL, to_user, 0, n);
 	}
 
+	/* Check for bad heap object. */
+	check_heap_object(ptr, n, to_user);
+
 	/* Check for object in kernel to avoid text exposure. */
 	check_kernel_text_object((const unsigned long)ptr, n, to_user);
 }
diff --git a/mm/util.c b/mm/util.c
index 9e3ebd2..6a24a10 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -485,7 +485,7 @@
 		return true;
 	if (PageHuge(page))
 		return false;
-	for (i = 0; i < hpage_nr_pages(page); i++) {
+	for (i = 0; i < (1 << compound_order(page)); i++) {
 		if (atomic_read(&page[i]._mapcount) >= 0)
 			return true;
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index edbeb4d..261ff7b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -49,6 +49,7 @@
 #include <linux/prefetch.h>
 #include <linux/printk.h>
 #include <linux/dax.h>
+#include <linux/psi.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -458,6 +459,10 @@
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 	long scanned = 0, next_deferred;
+	long min_cache_size = batch_size;
+
+	if (current_is_kswapd())
+		min_cache_size = 0;
 
 	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
 		nid = 0;
@@ -478,16 +483,6 @@
 	delta *= 4;
 	do_div(delta, shrinker->seeks);
 
-	/*
-	 * Make sure we apply some minimal pressure on default priority
-	 * even on small cgroups. Stale objects are not only consuming memory
-	 * by themselves, but can also hold a reference to a dying cgroup,
-	 * preventing it from being reclaimed. A dying cgroup with all
-	 * corresponding structures like per-cpu stats and kmem caches
-	 * can be really big, so it may lead to a significant waste of memory.
-	 */
-	delta = max_t(unsigned long long, delta, min(freeable, batch_size));
-
 	total_scan += delta;
 	if (total_scan < 0) {
 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -538,7 +533,7 @@
 	 * scanning at high prio and therefore should try to reclaim as much as
 	 * possible.
 	 */
-	while (total_scan >= batch_size ||
+	while (total_scan > min_cache_size ||
 	       total_scan >= freeable) {
 		unsigned long ret;
 		unsigned long nr_to_scan = min(batch_size, total_scan);
@@ -2148,6 +2143,7 @@
 		}
 
 		ClearPageActive(page);	/* we are de-activating */
+		SetPageWorkingset(page);
 		list_add(&page->lru, &l_inactive);
 	}
 
@@ -2377,7 +2373,8 @@
 	 * lruvec even if it has plenty of old anonymous pages unless the
 	 * system is under heavy pressure.
 	 */
-	if (!inactive_list_is_low(lruvec, true, memcg, sc, false) &&
+	if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) &&
+	    !inactive_list_is_low(lruvec, true, memcg, sc, false) &&
 	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
 		scan_balance = SCAN_FILE;
 		goto out;
@@ -2459,9 +2456,11 @@
 			/*
 			 * Scan types proportional to swappiness and
 			 * their relative recent reclaim efficiency.
+			 * Make sure we don't miss the last page
+			 * because of a round-off error.
 			 */
-			scan = div64_u64(scan * fraction[file],
-					 denominator);
+			scan = DIV64_U64_ROUND_UP(scan * fraction[file],
+						  denominator);
 			break;
 		case SCAN_FILE:
 		case SCAN_ANON:
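With truncating division, a small list under heavy fractional pressure can round its scan target down to zero and never touch its final pages; rounding up guarantees forward progress. The difference in one line of arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(x, d) (((x) + (d) - 1) / (d))

int main(void)
{
	unsigned long long scan = 1, fraction = 1, denominator = 2;

	printf("old: %llu\n", scan * fraction / denominator);		   /* 0 */
	printf("new: %llu\n", DIV_ROUND_UP(scan * fraction, denominator)); /* 1 */
	return 0;
}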
@@ -3312,6 +3311,7 @@
 {
 	struct zonelist *zonelist;
 	unsigned long nr_reclaimed;
+	unsigned long pflags;
 	int nid;
 	unsigned int noreclaim_flag;
 	struct scan_control sc = {
@@ -3340,9 +3340,13 @@
 					    sc.gfp_mask,
 					    sc.reclaim_idx);
 
+	psi_memstall_enter(&pflags);
 	noreclaim_flag = memalloc_noreclaim_save();
+
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+
 	memalloc_noreclaim_restore(noreclaim_flag);
+	psi_memstall_leave(&pflags);
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
@@ -3507,6 +3511,7 @@
 	int i;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
+	unsigned long pflags;
 	struct zone *zone;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -3517,6 +3522,7 @@
 		.may_swap = 1,
 	};
 
+	psi_memstall_enter(&pflags);
 	__fs_reclaim_acquire();
 
 	count_vm_event(PAGEOUTRUN);
@@ -3618,6 +3624,7 @@
 out:
 	snapshot_refaults(NULL, pgdat);
 	__fs_reclaim_release();
+	psi_memstall_leave(&pflags);
 	/*
 	 * Return the order kswapd stopped reclaiming at as
 	 * prepare_kswapd_sleep() takes it into account. If another caller
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 1843d76..3ec78a0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1145,6 +1145,7 @@
 	"nr_isolated_file",
 	"workingset_refault",
 	"workingset_activate",
+	"workingset_restore",
 	"workingset_nodereclaim",
 	"nr_anon_pages",
 	"nr_mapped",
diff --git a/mm/workingset.c b/mm/workingset.c
index 4516dd7..99b7f7c 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -121,7 +121,7 @@
  * the only thing eating into inactive list space is active pages.
  *
  *
- *		Activating refaulting pages
+ *		Refaulting inactive pages
  *
  * All that is known about the active list is that the pages have been
  * accessed more than once in the past.  This means that at any given
@@ -134,6 +134,10 @@
  * used less frequently than the refaulting page - or even not used at
  * all anymore.
  *
+ * That means if inactive cache is refaulting with a suitable refault
+ * distance, we assume the cache workingset is transitioning and put
+ * pressure on the current active list.
+ *
  * If this is wrong and demotion kicks in, the pages which are truly
  * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
@@ -141,6 +145,14 @@
  * But if this is right, the stale pages will be pushed out of memory
  * and the used pages get to stay in cache.
  *
+ *		Refaulting active pages
+ *
+ * If on the other hand the refaulting pages have recently been
+ * deactivated, it means that the active list is no longer protecting
+ * actively used cache from reclaim. The cache is NOT transitioning to
+ * a different workingset; the existing workingset is thrashing in the
+ * space allocated to the page cache.
+ *
  *
  *		Implementation
  *
@@ -156,8 +168,7 @@
  */
 
 #define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY + \
-			 NODES_SHIFT +	\
-			 MEM_CGROUP_ID_SHIFT)
+			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
 #define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
 
 /*
@@ -170,23 +181,28 @@
  */
 static unsigned int bucket_order __read_mostly;
 
-static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
+static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
+			 bool workingset)
 {
 	eviction >>= bucket_order;
 	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
 	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
+	eviction = (eviction << 1) | workingset;
 	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
 
 	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
 }
 
 static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
-			  unsigned long *evictionp)
+			  unsigned long *evictionp, bool *workingsetp)
 {
 	unsigned long entry = (unsigned long)shadow;
 	int memcgid, nid;
+	bool workingset;
 
 	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
+	workingset = entry & 1;
+	entry >>= 1;
 	nid = entry & ((1UL << NODES_SHIFT) - 1);
 	entry >>= NODES_SHIFT;
 	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
@@ -195,6 +211,7 @@
 	*memcgidp = memcgid;
 	*pgdat = NODE_DATA(nid);
 	*evictionp = entry << bucket_order;
+	*workingsetp = workingset;
 }
 
 /**
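pack_shadow()/unpack_shadow() now carry one extra flag bit, which is why EVICTION_SHIFT grows by one above. A userspace round-trip of the layout (field widths illustrative; the kernel additionally reserves the radix-tree exceptional-entry bits and drops the bucket_order low bits of the eviction counter):

#include <assert.h>
#include <stdio.h>

#define MEMCG_BITS 16
#define NODE_BITS  6

static unsigned long pack(int memcgid, int nid, unsigned long eviction,
			  int workingset)
{
	unsigned long e = eviction;

	e = (e << MEMCG_BITS) | memcgid;
	e = (e << NODE_BITS) | nid;
	e = (e << 1) | workingset;
	return e;
}

static void unpack(unsigned long e, int *memcgid, int *nid,
		   unsigned long *eviction, int *workingset)
{
	*workingset = e & 1;
	e >>= 1;
	*nid = e & ((1UL << NODE_BITS) - 1);
	e >>= NODE_BITS;
	*memcgid = e & ((1UL << MEMCG_BITS) - 1);
	e >>= MEMCG_BITS;
	*eviction = e;
}

int main(void)
{
	int m, n, w;
	unsigned long ev;

	unpack(pack(42, 3, 12345, 1), &m, &n, &ev, &w);
	assert(m == 42 && n == 3 && ev == 12345 && w == 1);
	puts("round-trip ok");
	return 0;
}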
@@ -207,8 +224,8 @@
  */
 void *workingset_eviction(struct address_space *mapping, struct page *page)
 {
-	struct mem_cgroup *memcg = page_memcg(page);
 	struct pglist_data *pgdat = page_pgdat(page);
+	struct mem_cgroup *memcg = page_memcg(page);
 	int memcgid = mem_cgroup_id(memcg);
 	unsigned long eviction;
 	struct lruvec *lruvec;
@@ -220,30 +237,30 @@
 
 	lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	eviction = atomic_long_inc_return(&lruvec->inactive_age);
-	return pack_shadow(memcgid, pgdat, eviction);
+	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
 }
 
 /**
  * workingset_refault - evaluate the refault of a previously evicted page
+ * @page: the freshly allocated replacement page
  * @shadow: shadow entry of the evicted page
  *
  * Calculates and evaluates the refault distance of the previously
  * evicted page in the context of the node it was allocated in.
- *
- * Returns %true if the page should be activated, %false otherwise.
  */
-bool workingset_refault(void *shadow)
+void workingset_refault(struct page *page, void *shadow)
 {
 	unsigned long refault_distance;
+	struct pglist_data *pgdat;
 	unsigned long active_file;
 	struct mem_cgroup *memcg;
 	unsigned long eviction;
 	struct lruvec *lruvec;
 	unsigned long refault;
-	struct pglist_data *pgdat;
+	bool workingset;
 	int memcgid;
 
-	unpack_shadow(shadow, &memcgid, &pgdat, &eviction);
+	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
 
 	rcu_read_lock();
 	/*
@@ -263,41 +280,51 @@
 	 * configurations instead.
 	 */
 	memcg = mem_cgroup_from_id(memcgid);
-	if (!mem_cgroup_disabled() && !memcg) {
-		rcu_read_unlock();
-		return false;
-	}
+	if (!mem_cgroup_disabled() && !memcg)
+		goto out;
 	lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	refault = atomic_long_read(&lruvec->inactive_age);
 	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);
 
 	/*
-	 * The unsigned subtraction here gives an accurate distance
-	 * across inactive_age overflows in most cases.
+	 * Calculate the refault distance
 	 *
-	 * There is a special case: usually, shadow entries have a
-	 * short lifetime and are either refaulted or reclaimed along
-	 * with the inode before they get too old.  But it is not
-	 * impossible for the inactive_age to lap a shadow entry in
-	 * the field, which can then can result in a false small
-	 * refault distance, leading to a false activation should this
-	 * old entry actually refault again.  However, earlier kernels
-	 * used to deactivate unconditionally with *every* reclaim
-	 * invocation for the longest time, so the occasional
-	 * inappropriate activation leading to pressure on the active
-	 * list is not a problem.
+	 * The unsigned subtraction here gives an accurate distance
+	 * across inactive_age overflows in most cases. There is a
+	 * special case: usually, shadow entries have a short lifetime
+	 * and are either refaulted or reclaimed along with the inode
+	 * before they get too old.  But it is not impossible for the
+	 * inactive_age to lap a shadow entry in the field, which can
+	 * then result in a false small refault distance, leading to a
+	 * false activation should this old entry actually refault
+	 * again.  However, earlier kernels used to deactivate
+	 * unconditionally with *every* reclaim invocation for the
+	 * longest time, so the occasional inappropriate activation
+	 * leading to pressure on the active list is not a problem.
 	 */
 	refault_distance = (refault - eviction) & EVICTION_MASK;
 
 	inc_lruvec_state(lruvec, WORKINGSET_REFAULT);
 
-	if (refault_distance <= active_file) {
-		inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
-		rcu_read_unlock();
-		return true;
+	/*
+	 * Compare the distance to the existing workingset size. We
+	 * don't act on pages that couldn't stay resident even if all
+	 * the memory was available to the page cache.
+	 */
+	if (refault_distance > active_file)
+		goto out;
+
+	SetPageActive(page);
+	atomic_long_inc(&lruvec->inactive_age);
+	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
+
+	/* Page was active prior to eviction */
+	if (workingset) {
+		SetPageWorkingset(page);
+		inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
 	}
+out:
 	rcu_read_unlock();
-	return false;
 }
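
The masked unsigned subtraction in workingset_refault() above is what keeps the distance correct when inactive_age wraps between eviction and refault. A self-contained illustration, using an assumed 8-bit mask so the wraparound is easy to see:

#include <stdio.h>

#define MASK 0xffUL	/* assumed tiny EVICTION_MASK for demonstration */

int main(void)
{
	unsigned long eviction = 250;	    /* counter when the page was evicted */
	unsigned long refault = 260 & MASK; /* counter wrapped: 260 mod 256 == 4 */
	unsigned long distance = (refault - eviction) & MASK;

	/* Prints 10: the true distance, despite the wraparound. */
	printf("refault distance = %lu\n", distance);
	return 0;
}
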
 
 /**
@@ -364,7 +391,7 @@
 {
 	unsigned long max_nodes;
 	unsigned long nodes;
-	unsigned long cache;
+	unsigned long pages;
 
 	nodes = list_lru_shrink_count(&shadow_nodes, sc);
 
@@ -390,14 +417,20 @@
 	 *
 	 * PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
 	 */
+#ifdef CONFIG_MEMCG
 	if (sc->memcg) {
-		cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
-						     LRU_ALL_FILE);
-	} else {
-		cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
-			node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
-	}
-	max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);
+		struct lruvec *lruvec;
+
+		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
+						     LRU_ALL);
+		lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg);
+		pages += lruvec_page_state(lruvec, NR_SLAB_RECLAIMABLE);
+		pages += lruvec_page_state(lruvec, NR_SLAB_UNRECLAIMABLE);
+	} else
+#endif
+		pages = node_present_pages(sc->nid);
+
+	max_nodes = pages >> (RADIX_TREE_MAP_SHIFT - 3);
 
 	if (!nodes)
 		return SHRINK_EMPTY;
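
For the worked numbers behind that shift: with the common RADIX_TREE_MAP_SHIFT of 6 (64 slots per node), max_nodes becomes pages >> 3, i.e. one shadow node is allowed per eight pages counted. A trivial check of the arithmetic:

#include <stdio.h>

#define RADIX_TREE_MAP_SHIFT 6	/* assumed common value: 64 slots per node */

int main(void)
{
	unsigned long pages = 1UL << 20;	/* e.g. 4 GiB of 4 KiB pages */
	unsigned long max_nodes = pages >> (RADIX_TREE_MAP_SHIFT - 3);

	/* Prints: pages=1048576 max_nodes=131072 (one node per 8 pages). */
	printf("pages=%lu max_nodes=%lu\n", pages, max_nodes);
	return 0;
}
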
diff --git a/net/9p/client.c b/net/9p/client.c
index deae53a..75b7bf7 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -181,6 +181,12 @@
 				ret = r;
 				continue;
 			}
+			if (option < 4096) {
+				p9_debug(P9_DEBUG_ERROR,
+					 "msize should be at least 4k\n");
+				ret = -EINVAL;
+				continue;
+			}
 			clnt->msize = option;
 			break;
 		case Opt_trans:
@@ -993,10 +999,18 @@
 	else if (!strncmp(version, "9P2000", 6))
 		c->proto_version = p9_proto_legacy;
 	else {
+		p9_debug(P9_DEBUG_ERROR,
+			 "server returned an unknown version: %s\n", version);
 		err = -EREMOTEIO;
 		goto error;
 	}
 
+	if (msize < 4096) {
+		p9_debug(P9_DEBUG_ERROR,
+			 "server returned a msize < 4096: %d\n", msize);
+		err = -EREMOTEIO;
+		goto error;
+	}
 	if (msize < c->msize)
 		c->msize = msize;
 
@@ -1055,6 +1069,13 @@
 	if (clnt->msize > clnt->trans_mod->maxsize)
 		clnt->msize = clnt->trans_mod->maxsize;
 
+	if (clnt->msize < 4096) {
+		p9_debug(P9_DEBUG_ERROR,
+			 "Please specify a msize of at least 4k\n");
+		err = -EINVAL;
+		goto free_client;
+	}
+
 	err = p9_client_version(clnt);
 	if (err)
 		goto close_trans;
diff --git a/net/Kconfig b/net/Kconfig
index 56bf7db..f46a913 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -329,6 +329,15 @@
 	  with many clients some protection against DoS by a single (spoofed)
 	  flow that greatly exceeds average workload.
 
+config SOCKEV_NLMCAST
+	bool "Enable SOCKEV Netlink Multicast"
+	default n
+	help
+	  Default client for SOCKEV notifier events. Sends multicast netlink
+	  messages whenever the socket event notifier is invoked. Enable if
+	  user space entities need to be notified of socket events without
+	  having to poll /proc
+
 menu "Network testing"
 
 config NET_PKTGEN
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index c603d33..5d01edf 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -653,15 +653,22 @@
 			break;
 		}
 
-		dev = dev_get_by_name(&init_net, devname);
+		rtnl_lock();
+		dev = __dev_get_by_name(&init_net, devname);
 		if (!dev) {
+			rtnl_unlock();
 			res = -ENODEV;
 			break;
 		}
 
 		ax25->ax25_dev = ax25_dev_ax25dev(dev);
+		if (!ax25->ax25_dev) {
+			rtnl_unlock();
+			res = -ENODEV;
+			break;
+		}
 		ax25_fillin_cb(ax25, ax25->ax25_dev);
-		dev_put(dev);
+		rtnl_unlock();
 		break;
 
 	default:
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 9a3a301..d92195c 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -116,6 +116,7 @@
 	if ((s = ax25_dev_list) == ax25_dev) {
 		ax25_dev_list = s->next;
 		spin_unlock_bh(&ax25_dev_lock);
+		dev->ax25_ptr = NULL;
 		dev_put(dev);
 		kfree(ax25_dev);
 		return;
@@ -125,6 +126,7 @@
 		if (s->next == ax25_dev) {
 			s->next = ax25_dev->next;
 			spin_unlock_bh(&ax25_dev_lock);
+			dev->ax25_ptr = NULL;
 			dev_put(dev);
 			kfree(ax25_dev);
 			return;
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 70417e9..314bbc8 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -114,6 +114,7 @@
 	dst = (ax25_address *)(bp + 1);
 	src = (ax25_address *)(bp + 8);
 
+	ax25_route_lock_use();
 	route = ax25_get_route(dst, NULL);
 	if (route) {
 		digipeat = route->digipeat;
@@ -206,9 +207,8 @@
 	ax25_queue_xmit(skb, dev);
 
 put:
-	if (route)
-		ax25_put_route(route);
 
+	ax25_route_lock_unuse();
 	return NETDEV_TX_OK;
 }
 
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a0eff32..66f74c8 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -40,7 +40,7 @@
 #include <linux/export.h>
 
 static ax25_route *ax25_route_list;
-static DEFINE_RWLOCK(ax25_route_lock);
+DEFINE_RWLOCK(ax25_route_lock);
 
 void ax25_rt_device_down(struct net_device *dev)
 {
@@ -335,6 +335,7 @@
  *	Find AX.25 route
  *
  *	Only routes with a reference count of zero can be destroyed.
+ *	Must be called with ax25_route_lock read locked.
  */
 ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
 {
@@ -342,7 +343,6 @@
 	ax25_route *ax25_def_rt = NULL;
 	ax25_route *ax25_rt;
 
-	read_lock(&ax25_route_lock);
 	/*
 	 *	Bind to the physical interface we heard them on, or the default
 	 *	route if none is found;
@@ -365,11 +365,6 @@
 	if (ax25_spe_rt != NULL)
 		ax25_rt = ax25_spe_rt;
 
-	if (ax25_rt != NULL)
-		ax25_hold_route(ax25_rt);
-
-	read_unlock(&ax25_route_lock);
-
 	return ax25_rt;
 }
 
@@ -400,9 +395,12 @@
 	ax25_route *ax25_rt;
 	int err = 0;
 
-	if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
+	ax25_route_lock_use();
+	ax25_rt = ax25_get_route(addr, NULL);
+	if (!ax25_rt) {
+		ax25_route_lock_unuse();
 		return -EHOSTUNREACH;
-
+	}
 	if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
 		err = -EHOSTUNREACH;
 		goto put;
@@ -437,8 +435,7 @@
 	}
 
 put:
-	ax25_put_route(ax25_rt);
-
+	ax25_route_lock_unuse();
 	return err;
 }
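
With the per-route refcount gone from the lookup path, callers now bracket ax25_get_route() with the use/unuse helpers seen above. Their definitions are not part of this hunk; presumably they are thin read-side wrappers over the now-exported rwlock, along these lines:

/* Presumed shape of the helpers added alongside this patch:
 * plain read-side bracketing of the now-global ax25_route_lock.
 */
static inline void ax25_route_lock_use(void)
{
	read_lock(&ax25_route_lock);
}

static inline void ax25_route_lock_unuse(void)
{
	read_unlock(&ax25_route_lock);
}
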
 
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index e8090f0..ef0dec2 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -104,6 +104,9 @@
 
 		ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
 
+		/* free the TID stats immediately */
+		cfg80211_sinfo_release_content(&sinfo);
+
 		dev_put(real_netdev);
 		if (ret == -ENOENT) {
 			/* Node is not associated anymore! It would be
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 2f0d42f..08690d0 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -20,7 +20,6 @@
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/gfp.h>
@@ -179,8 +178,10 @@
 	parent_dev = __dev_get_by_index((struct net *)parent_net,
 					dev_get_iflink(net_dev));
 	/* if we got a NULL parent_dev there is something broken.. */
-	if (WARN(!parent_dev, "Cannot find parent device"))
+	if (!parent_dev) {
+		pr_err("Cannot find parent device\n");
 		return false;
+	}
 
 	if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
 		return false;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 626ddca..a2976ad 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -221,10 +221,14 @@
 
 	netif_trans_update(soft_iface);
 	vid = batadv_get_vid(skb, 0);
+
+	skb_reset_mac_header(skb);
 	ethhdr = eth_hdr(skb);
 
 	switch (ntohs(ethhdr->h_proto)) {
 	case ETH_P_8021Q:
+		if (!pskb_may_pull(skb, sizeof(*vhdr)))
+			goto dropped;
 		vhdr = vlan_eth_hdr(skb);
 
 		/* drop batman-in-batman packets to prevent loops */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 6fa61b8..a4d6d77 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -183,15 +183,25 @@
 }
 EXPORT_SYMBOL(bt_sock_unlink);
 
-void bt_accept_enqueue(struct sock *parent, struct sock *sk)
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
 {
 	BT_DBG("parent %p, sk %p", parent, sk);
 
 	sock_hold(sk);
-	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+	if (bh)
+		bh_lock_sock_nested(sk);
+	else
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
 	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
 	bt_sk(sk)->parent = parent;
-	release_sock(sk);
+
+	if (bh)
+		bh_unlock_sock(sk);
+	else
+		release_sock(sk);
+
 	parent->sk_ack_backlog++;
 }
 EXPORT_SYMBOL(bt_accept_enqueue);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index f12555f..7f800c3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -5668,6 +5668,12 @@
 		return true;
 	}
 
+	/* Check if request ended in Command Status - no way to retrieve
+	 * any extra parameters in this case.
+	 */
+	if (hdr->evt == HCI_EV_CMD_STATUS)
+		return false;
+
 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
 			   hdr->evt);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 686bdc6..a3a2cd5 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1252,7 +1252,7 @@
 
 	l2cap_sock_init(sk, parent);
 
-	bt_accept_enqueue(parent, sk);
+	bt_accept_enqueue(parent, sk, false);
 
 	release_sock(parent);
 
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index d606e92..c044ff2 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -988,7 +988,7 @@
 	rfcomm_pi(sk)->channel = channel;
 
 	sk->sk_state = BT_CONFIG;
-	bt_accept_enqueue(parent, sk);
+	bt_accept_enqueue(parent, sk, true);
 
 	/* Accept connection and return socket DLC */
 	*d = rfcomm_pi(sk)->dlc;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8f0f927..a4ca55d 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -193,7 +193,7 @@
 	conn->sk = sk;
 
 	if (parent)
-		bt_accept_enqueue(parent, sk);
+		bt_accept_enqueue(parent, sk, true);
 }
 
 static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 502f663..4d4b9b5 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -1088,6 +1088,8 @@
 			err = -ENOMEM;
 			goto err_unlock;
 		}
+		if (swdev_notify)
+			fdb->added_by_user = 1;
 		fdb->added_by_external_learn = 1;
 		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
 	} else {
@@ -1107,6 +1109,9 @@
 			modified = true;
 		}
 
+		if (swdev_notify)
+			fdb->added_by_user = 1;
+
 		if (modified)
 			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
 	}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 5372e20..48ddc60 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -36,10 +36,10 @@
 
 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	skb_push(skb, ETH_HLEN);
 	if (!is_skb_forwardable(skb->dev, skb))
 		goto drop;
 
-	skb_push(skb, ETH_HLEN);
 	br_drop_fake_rtable(skb);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
@@ -65,6 +65,7 @@
 
 int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	skb->tstamp = 0;
 	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
 		       net, sk, skb, NULL, skb->dev,
 		       br_dev_queue_push_xmit);
@@ -97,12 +98,11 @@
 		net = dev_net(indev);
 	} else {
 		if (unlikely(netpoll_tx_running(to->br->dev))) {
-			if (!is_skb_forwardable(skb->dev, skb)) {
+			skb_push(skb, ETH_HLEN);
+			if (!is_skb_forwardable(skb->dev, skb))
 				kfree_skb(skb);
-			} else {
-				skb_push(skb, ETH_HLEN);
+			else
 				br_netpoll_send_skb(to, skb);
-			}
 			return;
 		}
 		br_hook = NF_BR_LOCAL_OUT;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 6dec8e9..20ed7ad 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1420,14 +1420,7 @@
 		return;
 
 	br_multicast_update_query_timer(br, query, max_delay);
-
-	/* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
-	 * the arrival port for IGMP Queries where the source address
-	 * is 0.0.0.0 should not be added to router port list.
-	 */
-	if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
-	    saddr->proto == htons(ETH_P_IPV6))
-		br_multicast_mark_router(br, port);
+	br_multicast_mark_router(br, port);
 }
 
 static void br_ip4_multicast_query(struct net_bridge *br,
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 37278dc..e07a7e6 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -278,7 +278,7 @@
 		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 		int ret;
 
-		if (neigh->hh.hh_len) {
+		if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
 			neigh_hh_bridge(&neigh->hh, skb);
 			skb->dev = nf_bridge->physindev;
 			ret = br_handle_frame_finish(net, sk, skb);
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 96c072e..5811208 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -131,6 +131,7 @@
 					IPSTATS_MIB_INDISCARDS);
 			goto drop;
 		}
+		hdr = ipv6_hdr(skb);
 	}
 	if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
 		goto drop;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 4918287..6693e20 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1137,14 +1137,16 @@
 	tmp.name[sizeof(tmp.name) - 1] = 0;
 
 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
-	newinfo = vmalloc(sizeof(*newinfo) + countersize);
+	newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
+			    PAGE_KERNEL);
 	if (!newinfo)
 		return -ENOMEM;
 
 	if (countersize)
 		memset(newinfo->counters, 0, countersize);
 
-	newinfo->entries = vmalloc(tmp.entries_size);
+	newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
+				     PAGE_KERNEL);
 	if (!newinfo->entries) {
 		ret = -ENOMEM;
 		goto free_newinfo;
@@ -2291,9 +2293,12 @@
 
 	xt_compat_lock(NFPROTO_BRIDGE);
 
-	ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
-	if (ret < 0)
-		goto out_unlock;
+	if (tmp.nentries) {
+		ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
+		if (ret < 0)
+			goto out_unlock;
+	}
+
 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
 	if (ret < 0)
 		goto out_unlock;
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 08cbed7..419e8ed 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -229,6 +229,7 @@
 	    pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
 		return false;
 
+	ip6h = ipv6_hdr(skb);
 	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
 	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
 		return false;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 0af8f0d..79bb8af 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -67,6 +67,9 @@
  */
 #define MAX_NFRAMES 256
 
+/* limit timers to 400 days for sending/timeouts */
+#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
+
 /* use of last_frames[index].flags */
 #define RX_RECV    0x40 /* received data for this element */
 #define RX_THR     0x80 /* element not been sent due to throttle feature */
@@ -140,6 +143,22 @@
 	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
 }
 
+/* check limitations for timeval provided by user */
+static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
+{
+	if ((msg_head->ival1.tv_sec < 0) ||
+	    (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
+	    (msg_head->ival1.tv_usec < 0) ||
+	    (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
+	    (msg_head->ival2.tv_sec < 0) ||
+	    (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
+	    (msg_head->ival2.tv_usec < 0) ||
+	    (msg_head->ival2.tv_usec >= USEC_PER_SEC))
+		return true;
+
+	return false;
+}
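
For scale, BCM_TIMER_SEC_MAX works out to 400 * 24 * 60 * 60 = 34,560,000 seconds, so any sane interval passes while absurd or negative values from user space are rejected before they reach the timer code. The same validation over a plain timeval, as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>
#include <sys/time.h>

#define TIMER_SEC_MAX (400 * 24 * 60 * 60)	/* 34560000 seconds */
#define USEC_PER_SEC  1000000L

/* Reject negative or oversized intervals, mirroring bcm_is_invalid_tv(). */
static bool invalid_tv(const struct timeval *tv)
{
	return tv->tv_sec < 0 || tv->tv_sec > TIMER_SEC_MAX ||
	       tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC;
}

int main(void)
{
	struct timeval ok = { .tv_sec = 5, .tv_usec = 0 };
	struct timeval bad = { .tv_sec = 0, .tv_usec = USEC_PER_SEC };

	/* Prints: ok=0 bad=1 */
	printf("ok=%d bad=%d\n", invalid_tv(&ok), invalid_tv(&bad));
	return 0;
}
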
+
 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
 #define OPSIZ sizeof(struct bcm_op)
 #define MHSIZ sizeof(struct bcm_msg_head)
@@ -873,6 +892,10 @@
 	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
 		return -EINVAL;
 
+	/* check timeval limitations */
+	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
+		return -EINVAL;
+
 	/* check the given can_id */
 	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
 	if (op) {
@@ -1053,6 +1076,10 @@
 	     (!(msg_head->can_id & CAN_RTR_FLAG))))
 		return -EINVAL;
 
+	/* check timeval limitations */
+	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
+		return -EINVAL;
+
 	/* check the given can_id */
 	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
 	if (op) {
diff --git a/net/can/gw.c b/net/can/gw.c
index faa3da8..53859346 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -416,13 +416,29 @@
 	while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
 		(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
 
-	/* check for checksum updates when the CAN frame has been modified */
+	/* Has the CAN frame been modified? */
 	if (modidx) {
-		if (gwj->mod.csumfunc.crc8)
-			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+		/* get available space for the processed CAN frame type */
+		int max_len = nskb->len - offsetof(struct can_frame, data);
 
-		if (gwj->mod.csumfunc.xor)
+		/* dlc may have changed, make sure it fits the CAN frame */
+		if (cf->can_dlc > max_len)
+			goto out_delete;
+
+		/* check for checksum updates in classic CAN length only */
+		if (gwj->mod.csumfunc.crc8) {
+			if (cf->can_dlc > 8)
+				goto out_delete;
+
+			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+		}
+
+		if (gwj->mod.csumfunc.xor) {
+			if (cf->can_dlc > 8)
+				goto out_delete;
+
 			(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+		}
 	}
 
 	/* clear the skb timestamp if not configured the other way */
@@ -434,6 +450,14 @@
 		gwj->dropped_frames++;
 	else
 		gwj->handled_frames++;
+
+	return;
+
+ out_delete:
+	/* delete frame due to misconfiguration */
+	gwj->deleted_frames++;
+	kfree_skb(nskb);
+	return;
 }
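
The bound works because the skb holds the frame header followed by the data area, so the payload room is the skb length minus the offset of the data member; a dlc raised past that by a modifier function would index beyond the buffer. A userspace sketch of the same check, using the uapi struct can_frame:

#include <linux/can.h>
#include <stddef.h>
#include <stdio.h>

/* Bytes available for payload in a buffer holding a classic CAN frame. */
static int payload_room(size_t buf_len)
{
	return (int)(buf_len - offsetof(struct can_frame, data));
}

int main(void)
{
	struct can_frame cf = { .can_dlc = 8 };

	/* dlc must fit the room left after the header, as in the gw check.
	 * Prints: fits: 1
	 */
	printf("fits: %d\n", cf.can_dlc <= payload_room(sizeof(cf)));
	return 0;
}
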
 
 static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9a1c27c..f7d7f32 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2091,6 +2091,8 @@
 	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
 
 	if (con->auth) {
+		int len = le32_to_cpu(con->in_reply.authorizer_len);
+
 		/*
 		 * Any connection that defines ->get_authorizer()
 		 * should also define ->add_authorizer_challenge() and
@@ -2100,8 +2102,7 @@
 		 */
 		if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
 			ret = con->ops->add_authorizer_challenge(
-				    con, con->auth->authorizer_reply_buf,
-				    le32_to_cpu(con->in_reply.authorizer_len));
+				    con, con->auth->authorizer_reply_buf, len);
 			if (ret < 0)
 				return ret;
 
@@ -2111,10 +2112,12 @@
 			return 0;
 		}
 
-		ret = con->ops->verify_authorizer_reply(con);
-		if (ret < 0) {
-			con->error_msg = "bad authorize reply";
-			return ret;
+		if (len) {
+			ret = con->ops->verify_authorizer_reply(con);
+			if (ret < 0) {
+				con->error_msg = "bad authorize reply";
+				return ret;
+			}
 		}
 	}
 
@@ -3240,9 +3243,10 @@
 	dout("con_keepalive %p\n", con);
 	mutex_lock(&con->mutex);
 	clear_standby(con);
+	con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
 	mutex_unlock(&con->mutex);
-	if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
-	    con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
+
+	if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
 		queue_con(con);
 }
 EXPORT_SYMBOL(ceph_con_keepalive);
diff --git a/net/compat.c b/net/compat.c
index 3b2105f..3c4b028 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -467,12 +467,14 @@
 	ctv = (struct compat_timeval __user *) userstamp;
 	err = -ENOENT;
 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
-	tv = ktime_to_timeval(sk->sk_stamp);
+	tv = ktime_to_timeval(sock_read_timestamp(sk));
+
 	if (tv.tv_sec == -1)
 		return err;
 	if (tv.tv_sec == 0) {
-		sk->sk_stamp = ktime_get_real();
-		tv = ktime_to_timeval(sk->sk_stamp);
+		ktime_t kt = ktime_get_real();
+		sock_write_timestamp(sk, kt);
+		tv = ktime_to_timeval(kt);
 	}
 	err = 0;
 	if (put_user(tv.tv_sec, &ctv->tv_sec) ||
@@ -494,12 +496,13 @@
 	ctv = (struct compat_timespec __user *) userstamp;
 	err = -ENOENT;
 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
-	ts = ktime_to_timespec(sk->sk_stamp);
+	ts = ktime_to_timespec(sock_read_timestamp(sk));
 	if (ts.tv_sec == -1)
 		return err;
 	if (ts.tv_sec == 0) {
-		sk->sk_stamp = ktime_get_real();
-		ts = ktime_to_timespec(sk->sk_stamp);
+		ktime_t kt = ktime_get_real();
+		sock_write_timestamp(sk, kt);
+		ts = ktime_to_timespec(kt);
 	}
 	err = 0;
 	if (put_user(ts.tv_sec, &ctv->tv_sec) ||
diff --git a/net/core/Makefile b/net/core/Makefile
index 80175e6..a38e0d1 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
 obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
 obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o
+obj-$(CONFIG_SOCKEV_NLMCAST) += sockev_nlmcast.o
 obj-$(CONFIG_DST_CACHE) += dst_cache.o
 obj-$(CONFIG_HWBM) += hwbm.o
 obj-$(CONFIG_NET_DEVLINK) += devlink.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 44ccab0..35ea93b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8069,7 +8069,7 @@
 	netdev_features_t feature;
 	int feature_bit;
 
-	for_each_netdev_feature(&upper_disables, feature_bit) {
+	for_each_netdev_feature(upper_disables, feature_bit) {
 		feature = __NETIF_F_BIT(feature_bit);
 		if (!(upper->wanted_features & feature)
 		    && (features & feature)) {
@@ -8089,7 +8089,7 @@
 	netdev_features_t feature;
 	int feature_bit;
 
-	for_each_netdev_feature(&upper_disables, feature_bit) {
+	for_each_netdev_feature(upper_disables, feature_bit) {
 		feature = __NETIF_F_BIT(feature_bit);
 		if (!(features & feature) && (lower->features & feature)) {
 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
@@ -8629,6 +8629,9 @@
 	set_bit(__LINK_STATE_PRESENT, &dev->state);
 	set_bit(__LINK_STATE_START, &dev->state);
 
+	/* napi_busy_loop stats accounting wants this */
+	dev_net_set(dev, &init_net);
+
 	/* Note : We dont allocate pcpu_refcnt for dummy devices,
 	 * because users of this 'device' dont need to change
 	 * its refcount.
diff --git a/net/core/filter.c b/net/core/filter.c
index 5e00f2b..bed9061 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2018,18 +2018,19 @@
 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
 				 u32 flags)
 {
-	/* skb->mac_len is not set on normal egress */
-	unsigned int mlen = skb->network_header - skb->mac_header;
+	unsigned int mlen = skb_network_offset(skb);
 
-	__skb_pull(skb, mlen);
+	if (mlen) {
+		__skb_pull(skb, mlen);
 
-	/* At ingress, the mac header has already been pulled once.
-	 * At egress, skb_pospull_rcsum has to be done in case that
-	 * the skb is originated from ingress (i.e. a forwarded skb)
-	 * to ensure that rcsum starts at net header.
-	 */
-	if (!skb_at_tc_ingress(skb))
-		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+		/* At ingress, the mac header has already been pulled once.
+		 * At egress, skb_postpull_rcsum has to be done in case
+		 * the skb originated from ingress (i.e. a forwarded skb)
+		 * to ensure that rcsum starts at net header.
+		 */
+		if (!skb_at_tc_ingress(skb))
+			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+	}
 	skb_pop_mac_header(skb);
 	skb_reset_mac_len(skb);
 	return flags & BPF_F_INGRESS ?
@@ -3908,10 +3909,12 @@
 		/* Only some socketops are supported */
 		switch (optname) {
 		case SO_RCVBUF:
+			val = min_t(u32, val, sysctl_rmem_max);
 			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 			sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
 			break;
 		case SO_SNDBUF:
+			val = min_t(u32, val, sysctl_wmem_max);
 			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
 			break;
@@ -3929,7 +3932,10 @@
 			sk->sk_rcvlowat = val ? : 1;
 			break;
 		case SO_MARK:
-			sk->sk_mark = val;
+			if (sk->sk_mark != val) {
+				sk->sk_mark = val;
+				sk_dst_reset(sk);
+			}
 			break;
 		default:
 			ret = -EINVAL;
@@ -4000,7 +4006,7 @@
 			/* Only some options are supported */
 			switch (optname) {
 			case TCP_BPF_IW:
-				if (val <= 0 || tp->data_segs_out > 0)
+				if (val <= 0 || tp->data_segs_out > tp->syn_data)
 					ret = -EINVAL;
 				else
 					tp->snd_cwnd = val;
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 188d693..e2fd8ba 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -256,7 +256,6 @@
 	for_each_possible_cpu(i) {
 		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
 
-		qstats->qlen = 0;
 		qstats->backlog += qcpu->backlog;
 		qstats->drops += qcpu->drops;
 		qstats->requeues += qcpu->requeues;
@@ -272,7 +271,6 @@
 	if (cpu) {
 		__gnet_stats_copy_queue_cpu(qstats, cpu);
 	} else {
-		qstats->qlen = q->qlen;
 		qstats->backlog = q->backlog;
 		qstats->drops = q->drops;
 		qstats->requeues = q->requeues;
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index 4b54e5f..e095fb8 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -13,22 +13,36 @@
 {
 	struct net_device *dev = skb->dev;
 	struct gro_cell *cell;
+	int res;
 
-	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev))
-		return netif_rx(skb);
+	rcu_read_lock();
+	if (unlikely(!(dev->flags & IFF_UP)))
+		goto drop;
+
+	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
+		res = netif_rx(skb);
+		goto unlock;
+	}
 
 	cell = this_cpu_ptr(gcells->cells);
 
 	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+drop:
 		atomic_long_inc(&dev->rx_dropped);
 		kfree_skb(skb);
-		return NET_RX_DROP;
+		res = NET_RX_DROP;
+		goto unlock;
 	}
 
 	__skb_queue_tail(&cell->napi_skbs, skb);
 	if (skb_queue_len(&cell->napi_skbs) == 1)
 		napi_schedule(&cell->napi);
-	return NET_RX_SUCCESS;
+
+	res = NET_RX_SUCCESS;
+
+unlock:
+	rcu_read_unlock();
+	return res;
 }
 EXPORT_SYMBOL(gro_cells_receive);
 
@@ -84,6 +98,7 @@
 	for_each_possible_cpu(i) {
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
+		napi_disable(&cell->napi);
 		netif_napi_del(&cell->napi);
 		__skb_queue_purge(&cell->napi_skbs);
 	}
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 3e85437..a648568 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -63,6 +63,7 @@
 				     lwt->name ? : "<unknown>");
 			ret = BPF_OK;
 		} else {
+			skb_reset_mac_header(skb);
 			ret = skb_do_redirect(skb);
 			if (ret == 0)
 				ret = BPF_REDIRECT;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index bd67c4d..2aabb7e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1547,6 +1547,9 @@
 error:
 	netdev_queue_update_kobjects(dev, txq, 0);
 	net_rx_queue_update_kobjects(dev, rxq, 0);
+#ifdef CONFIG_SYSFS
+	kset_unregister(dev->queues_kset);
+#endif
 	return error;
 }
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index abbbd7f..8656b1e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -353,6 +353,8 @@
  */
 void *netdev_alloc_frag(unsigned int fragsz)
 {
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
 	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(netdev_alloc_frag);
@@ -366,6 +368,8 @@
 
 void *napi_alloc_frag(unsigned int fragsz)
 {
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
 	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(napi_alloc_frag);
@@ -5258,7 +5262,6 @@
 	unsigned long chunk;
 	struct sk_buff *skb;
 	struct page *page;
-	gfp_t gfp_head;
 	int i;
 
 	*errcode = -EMSGSIZE;
@@ -5268,12 +5271,8 @@
 	if (npages > MAX_SKB_FRAGS)
 		return NULL;
 
-	gfp_head = gfp_mask;
-	if (gfp_head & __GFP_DIRECT_RECLAIM)
-		gfp_head |= __GFP_RETRY_MAYFAIL;
-
 	*errcode = -ENOBUFS;
-	skb = alloc_skb(header_len, gfp_head);
+	skb = alloc_skb(header_len, gfp_mask);
 	if (!skb)
 		return NULL;
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 748765e..c9668dc 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -698,6 +698,7 @@
 		break;
 	case SO_DONTROUTE:
 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
+		sk_dst_reset(sk);
 		break;
 	case SO_BROADCAST:
 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
@@ -2803,6 +2804,9 @@
 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
 
 	sk->sk_stamp = SK_DEFAULT_STAMP;
+#if BITS_PER_LONG==32
+	seqlock_init(&sk->sk_stamp_seq);
+#endif
 	atomic_set(&sk->sk_zckey, 0);
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -2902,12 +2906,13 @@
 	struct timeval tv;
 
 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
-	tv = ktime_to_timeval(sk->sk_stamp);
+	tv = ktime_to_timeval(sock_read_timestamp(sk));
 	if (tv.tv_sec == -1)
 		return -ENOENT;
 	if (tv.tv_sec == 0) {
-		sk->sk_stamp = ktime_get_real();
-		tv = ktime_to_timeval(sk->sk_stamp);
+		ktime_t kt = ktime_get_real();
+		sock_write_timestamp(sk, kt);
+		tv = ktime_to_timeval(kt);
 	}
 	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
 }
@@ -2918,11 +2923,12 @@
 	struct timespec ts;
 
 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
-	ts = ktime_to_timespec(sk->sk_stamp);
+	ts = ktime_to_timespec(sock_read_timestamp(sk));
 	if (ts.tv_sec == -1)
 		return -ENOENT;
 	if (ts.tv_sec == 0) {
-		sk->sk_stamp = ktime_get_real();
+		ktime_t kt = ktime_get_real();
+		sock_write_timestamp(sk, kt);
-		ts = ktime_to_timespec(sk->sk_stamp);
+		ts = ktime_to_timespec(kt);
 	}
 	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
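
On 32-bit builds a 64-bit sk_stamp access is two machine words and can be torn by a concurrent writer, which is what the seqlock_init() above guards against. A sketch of what the paired accessors used throughout this series presumably amount to (their real definitions live in include/net/sock.h):

static inline ktime_t sock_read_timestamp(struct sock *sk)
{
#if BITS_PER_LONG==32
	unsigned int seq;
	ktime_t kt;

	/* Retry the two-word load until no writer raced with us. */
	do {
		seq = read_seqbegin(&sk->sk_stamp_seq);
		kt = sk->sk_stamp;
	} while (read_seqretry(&sk->sk_stamp_seq, seq));

	return kt;
#else
	return sk->sk_stamp;
#endif
}

static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
{
#if BITS_PER_LONG==32
	write_seqlock(&sk->sk_stamp_seq);
	sk->sk_stamp = kt;
	write_sequnlock(&sk->sk_stamp_seq);
#else
	sk->sk_stamp = kt;
#endif
}
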
diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c
new file mode 100644
index 0000000..230f8ab
--- /dev/null
+++ b/net/core/sockev_nlmcast.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2014-2015, 2017-2019, The Linux Foundation. All rights reserved. */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/netlink.h>
+#include <linux/sockev.h>
+#include <net/sock.h>
+
+static int registration_status;
+static struct sock *socknlmsgsk;
+
+static void sockev_skmsg_recv(struct sk_buff *skb)
+{
+	pr_debug("%s(): Got unsolicited request\n", __func__);
+}
+
+static struct netlink_kernel_cfg nlcfg = {
+	.input = sockev_skmsg_recv
+};
+
+static void _sockev_event(unsigned long event, __u8 *evstr, int buflen)
+{
+	switch (event) {
+	case SOCKEV_SOCKET:
+		strlcpy(evstr, "SOCKEV_SOCKET", buflen);
+		break;
+	case SOCKEV_BIND:
+		strlcpy(evstr, "SOCKEV_BIND", buflen);
+		break;
+	case SOCKEV_LISTEN:
+		strlcpy(evstr, "SOCKEV_LISTEN", buflen);
+		break;
+	case SOCKEV_ACCEPT:
+		strlcpy(evstr, "SOCKEV_ACCEPT", buflen);
+		break;
+	case SOCKEV_CONNECT:
+		strlcpy(evstr, "SOCKEV_CONNECT", buflen);
+		break;
+	case SOCKEV_SHUTDOWN:
+		strlcpy(evstr, "SOCKEV_SHUTDOWN", buflen);
+		break;
+	default:
+		strlcpy(evstr, "UNKNOWN", buflen);
+	}
+}
+
+static int sockev_client_cb(struct notifier_block *nb,
+			    unsigned long event, void *data)
+{
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+	struct sknlsockevmsg *smsg;
+	struct socket *sock;
+	struct sock *sk;
+
+	sock = (struct socket *)data;
+	if (!socknlmsgsk || !sock)
+		goto done;
+
+	sk = sock->sk;
+	if (!sk)
+		goto done;
+
+	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
+		goto done;
+
+	if (event != SOCKEV_BIND && event != SOCKEV_LISTEN)
+		goto done;
+
+	skb = nlmsg_new(sizeof(struct sknlsockevmsg), GFP_KERNEL);
+	if (!skb)
+		goto done;
+
+	nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct sknlsockevmsg), 0);
+	if (!nlh) {
+		kfree_skb(skb);
+		goto done;
+	}
+
+	NETLINK_CB(skb).dst_group = SKNLGRP_SOCKEV;
+
+	smsg = nlmsg_data(nlh);
+	memset(smsg, 0, sizeof(struct sknlsockevmsg));
+	smsg->pid = current->pid;
+	_sockev_event(event, smsg->event, sizeof(smsg->event));
+	smsg->skfamily = sk->sk_family;
+	smsg->skstate = sk->sk_state;
+	smsg->skprotocol = sk->sk_protocol;
+	smsg->sktype = sk->sk_type;
+	smsg->skflags = sk->sk_flags;
+	nlmsg_notify(socknlmsgsk, skb, 0, SKNLGRP_SOCKEV, 0, GFP_KERNEL);
+done:
+	return 0;
+}
+
+static struct notifier_block sockev_notifier_client = {
+	.notifier_call = sockev_client_cb,
+	.next = 0,
+	.priority = 0
+};
+
+/* ***************** Startup/Shutdown *************************************** */
+
+static int __init sockev_client_init(void)
+{
+	int rc;
+
+	registration_status = 1;
+	rc = sockev_register_notify(&sockev_notifier_client);
+	if (rc != 0) {
+		registration_status = 0;
+		pr_err("%s(): Failed to register cb (%d)\n", __func__, rc);
+	}
+	socknlmsgsk = netlink_kernel_create(&init_net, NETLINK_SOCKEV, &nlcfg);
+	if (!socknlmsgsk) {
+		pr_err("%s(): Failed to initialize netlink socket\n", __func__);
+		if (registration_status)
+			sockev_unregister_notify(&sockev_notifier_client);
+		registration_status = 0;
+	}
+
+	return rc;
+}
+
+static void __exit sockev_client_exit(void)
+{
+	if (registration_status)
+		sockev_unregister_notify(&sockev_notifier_client);
+}
+
+module_init(sockev_client_init)
+module_exit(sockev_client_exit)
+MODULE_LICENSE("GPL v2");
+
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 6eb837a..baaaeb2 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -202,7 +202,7 @@
 static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
 					   u8 pkt, u8 opt, u8 *val, u8 len)
 {
-	if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
+	if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
 		return 0;
 	return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
 }
@@ -214,7 +214,7 @@
 static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
 					   u8 pkt, u8 opt, u8 *val, u8 len)
 {
-	if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
+	if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
 		return 0;
 	return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
 }
diff --git a/net/dsa/master.c b/net/dsa/master.c
index c90ee32..aae478d 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -158,6 +158,8 @@
 	cpu_dp->orig_ethtool_ops = NULL;
 }
 
+static struct lock_class_key dsa_master_addr_list_lock_key;
+
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
 	/* If we use a tagging format that doesn't have an ethertype
@@ -167,6 +169,8 @@
 	wmb();
 
 	dev->dsa_ptr = cpu_dp;
+	lockdep_set_class(&dev->addr_list_lock,
+			  &dsa_master_addr_list_lock_key);
 
 	return dsa_master_ethtool_setup(dev);
 }
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 1c45c1d..b39720d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -140,11 +140,14 @@
 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
 {
 	struct net_device *master = dsa_slave_to_master(dev);
-
-	if (change & IFF_ALLMULTI)
-		dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
-	if (change & IFF_PROMISC)
-		dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
+	if (dev->flags & IFF_UP) {
+		if (change & IFF_ALLMULTI)
+			dev_set_allmulti(master,
+					 dev->flags & IFF_ALLMULTI ? 1 : -1);
+		if (change & IFF_PROMISC)
+			dev_set_promiscuity(master,
+					    dev->flags & IFF_PROMISC ? 1 : -1);
+	}
 }
 
 static void dsa_slave_set_rx_mode(struct net_device *dev)
@@ -639,7 +642,7 @@
 	int ret;
 
 	/* Port's PHY and MAC both need to be EEE capable */
-	if (!dev->phydev && !dp->pl)
+	if (!dev->phydev || !dp->pl)
 		return -ENODEV;
 
 	if (!ds->ops->set_mac_eee)
@@ -659,7 +662,7 @@
 	int ret;
 
 	/* Port's PHY and MAC both need to be EEE capable */
-	if (!dev->phydev && !dp->pl)
+	if (!dev->phydev || !dp->pl)
 		return -ENODEV;
 
 	if (!ds->ops->get_mac_eee)
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index b8cd43c..a97bf32 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -94,9 +94,8 @@
 			&& (old_operstate != IF_OPER_UP)) {
 		/* Went up */
 		hsr->announce_count = 0;
-		hsr->announce_timer.expires = jiffies +
-				msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
-		add_timer(&hsr->announce_timer);
+		mod_timer(&hsr->announce_timer,
+			  jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
 	}
 
 	if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
@@ -332,6 +331,7 @@
 {
 	struct hsr_priv *hsr;
 	struct hsr_port *master;
+	unsigned long interval;
 
 	hsr = from_timer(hsr, t, announce_timer);
 
@@ -343,18 +343,16 @@
 				hsr->protVersion);
 		hsr->announce_count++;
 
-		hsr->announce_timer.expires = jiffies +
-				msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+		interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
 	} else {
 		send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
 				hsr->protVersion);
 
-		hsr->announce_timer.expires = jiffies +
-				msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+		interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
 	}
 
 	if (is_admin_up(master->dev))
-		add_timer(&hsr->announce_timer);
+		mod_timer(&hsr->announce_timer, jiffies + interval);
 
 	rcu_read_unlock();
 }
@@ -486,7 +484,7 @@
 
 	res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
 	if (res)
-		return res;
+		goto err_add_port;
 
 	res = register_netdevice(hsr_dev);
 	if (res)
@@ -506,6 +504,8 @@
 fail:
 	hsr_for_each_port(hsr, port)
 		hsr_del_port(port);
+err_add_port:
+	hsr_del_node(&hsr->self_node_db);
 
 	return res;
 }
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 286ceb4..9af16cb 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -124,6 +124,18 @@
 	return 0;
 }
 
+void hsr_del_node(struct list_head *self_node_db)
+{
+	struct hsr_node *node;
+
+	rcu_read_lock();
+	node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+	rcu_read_unlock();
+	if (node) {
+		list_del_rcu(&node->mac_list);
+		kfree(node);
+	}
+}
 
 /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
  * seq_out is used to initialize filtering of outgoing duplicate frames
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 370b459..531fd3d 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -16,6 +16,7 @@
 
 struct hsr_node;
 
+void hsr_del_node(struct list_head *self_node_db);
 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 			      u16 seq_out);
 struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index ca53efa..8bec827 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -48,6 +48,9 @@
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct neighbour *n;
 
+	if (!daddr)
+		return -EINVAL;
+
 	/* TODO:
 	 * if this package isn't ipv6 one, where should it be routed?
 	 */
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 777fa3b..f0165c5 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -667,7 +667,8 @@
 	case CIPSO_V4_MAP_PASS:
 		return 0;
 	case CIPSO_V4_MAP_TRANS:
-		if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
+		if ((level < doi_def->map.std->lvl.cipso_size) &&
+		    (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
 			return 0;
 		break;
 	}
@@ -1735,13 +1736,26 @@
  */
 void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
 {
+	unsigned char optbuf[sizeof(struct ip_options) + 40];
+	struct ip_options *opt = (struct ip_options *)optbuf;
+
 	if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
 		return;
 
+	/*
+	 * We might be called above the IP layer,
+	 * so we can not use icmp_send and IPCB here.
+	 */
+
+	memset(opt, 0, sizeof(struct ip_options));
+	opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+	if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
+		return;
+
 	if (gateway)
-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
+		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
 	else
-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
+		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
 }
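
The on-stack buffer is sized for the worst case: an IPv4 header is at most 15 32-bit words (60 bytes), so after the 20-byte fixed header there can never be more than 40 option bytes. The optlen arithmetic in isolation:

#include <stdio.h>

#define IPHDR_FIXED_LEN 20	/* sizeof(struct iphdr) */

int main(void)
{
	unsigned int ihl = 15;	/* maximum header length in 32-bit words */
	unsigned int optlen = ihl * 4 - IPHDR_FIXED_LEN;

	/* Prints 40: the most option bytes an IPv4 header can carry. */
	printf("max optlen = %u\n", optlen);
	return 0;
}
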
 
 /**
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 0113993..dae743b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -203,7 +203,7 @@
 		struct fib_table *tb;
 
 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
-			flushed += fib_table_flush(net, tb);
+			flushed += fib_table_flush(net, tb, false);
 	}
 
 	if (flushed)
@@ -700,6 +700,10 @@
 		case RTA_GATEWAY:
 			cfg->fc_gw = nla_get_be32(attr);
 			break;
+		case RTA_VIA:
+			NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
+			err = -EINVAL;
+			goto errout;
 		case RTA_PRIORITY:
 			cfg->fc_priority = nla_get_u32(attr);
 			break;
@@ -1357,7 +1361,7 @@
 
 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
 			hlist_del(&tb->tb_hlist);
-			fib_table_flush(net, tb);
+			fib_table_flush(net, tb, true);
 			fib_free_table(tb);
 		}
 	}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5bc0c89..3955a6d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1856,7 +1856,7 @@
 }
 
 /* Caller must hold RTNL. */
-int fib_table_flush(struct net *net, struct fib_table *tb)
+int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
 {
 	struct trie *t = (struct trie *)tb->tb_data;
 	struct key_vector *pn = t->kv;
@@ -1904,8 +1904,17 @@
 		hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
 			struct fib_info *fi = fa->fa_info;
 
-			if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
-			    tb->tb_id != fa->tb_id) {
+			if (!fi || tb->tb_id != fa->tb_id ||
+			    (!(fi->fib_flags & RTNH_F_DEAD) &&
+			     !fib_props[fa->fa_type].error)) {
+				slen = fa->fa_slen;
+				continue;
+			}
+
+			/* Do not flush error routes if network namespace is
+			 * not being dismantled
+			 */
+			if (!flush_all && fib_props[fa->fa_type].error) {
 				slen = fa->fa_slen;
 				continue;
 			}
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index b798862..f21ea61 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 #include <net/gre.h>
+#include <net/erspan.h>
 
 #include <net/icmp.h>
 #include <net/route.h>
@@ -118,6 +119,22 @@
 			hdr_len += 4;
 	}
 	tpi->hdr_len = hdr_len;
+
+	/* ERSPAN ver 1 and 2 protocol sets GRE key field
+	 * to 0 and sets the configured key in the
+	 * inner erspan header field
+	 */
+	if (greh->protocol == htons(ETH_P_ERSPAN) ||
+	    greh->protocol == htons(ETH_P_ERSPAN2)) {
+		struct erspan_base_hdr *ershdr;
+
+		if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
+			return -EINVAL;
+
+		ershdr = (struct erspan_base_hdr *)options;
+		tpi->key = cpu_to_be32(get_session_id(ershdr));
+	}
+
 	return hdr_len;
 }
 EXPORT_SYMBOL(gre_parse_header);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 695979b..ad75c46 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -570,7 +570,8 @@
  *			MUST reply to only the first fragment.
  */
 
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+		 const struct ip_options *opt)
 {
 	struct iphdr *iph;
 	int room;
@@ -691,7 +692,7 @@
 					  iph->tos;
 	mark = IP4_REPLY_MARK(net, skb_in->mark);
 
-	if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
+	if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
 		goto out_unlock;
 
 
@@ -742,7 +743,7 @@
 	local_bh_enable();
 out:;
 }
-EXPORT_SYMBOL(icmp_send);
+EXPORT_SYMBOL(__icmp_send);
 
 
 static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 4e5bc4b..5731670 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,6 +108,7 @@
 		+ nla_total_size(1) /* INET_DIAG_TOS */
 		+ nla_total_size(1) /* INET_DIAG_TCLASS */
 		+ nla_total_size(4) /* INET_DIAG_MARK */
+		+ nla_total_size(4) /* INET_DIAG_CLASS_ID */
 		+ nla_total_size(sizeof(struct inet_diag_meminfo))
 		+ nla_total_size(sizeof(struct inet_diag_msg))
 		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
@@ -287,12 +288,19 @@
 			goto errout;
 	}
 
-	if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
+	if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
+	    ext & (1 << (INET_DIAG_TCLASS - 1))) {
 		u32 classid = 0;
 
 #ifdef CONFIG_SOCK_CGROUP_DATA
 		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
 #endif
+		/* Fall back to socket priority if class id isn't set.
+		 * Classful qdiscs use it as direct reference to class.
+		 * For cgroup2 classid is always zero.
+		 */
+		if (!classid)
+			classid = sk->sk_priority;
 
 		if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
 			goto errout;
@@ -998,7 +1006,9 @@
 			if (!inet_diag_bc_sk(bc, sk))
 				goto next_normal;
 
-			sock_hold(sk);
+			if (!refcount_inc_not_zero(&sk->sk_refcnt))
+				goto next_normal;
+
 			num_arr[accum] = num;
 			sk_arr[accum] = sk;
 			if (++accum == SKARR_SZ)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d757b96..be77859 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -216,6 +216,7 @@
 			atomic_set(&p->rid, 0);
 			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 			p->rate_tokens = 0;
+			p->n_redirects = 0;
 			/* 60*HZ is arbitrary, but chosen enough high so that the first
 			 * calculation of tokens is at its maximum.
 			 */
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 32662e9..d5984d3 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -72,6 +72,7 @@
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
+	skb->tstamp = 0;
 	return dst_output(net, sk, skb);
 }
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index f686d77..d95b32a 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -347,10 +347,10 @@
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
 	struct rb_node **rbn, *parent;
 	struct sk_buff *skb1, *prev_tail;
+	int ihl, end, skb1_run_end;
 	struct net_device *dev;
 	unsigned int fragsize;
 	int flags, offset;
-	int ihl, end;
 	int err = -ENOENT;
 	u8 ecn;
 
@@ -420,9 +420,12 @@
 	 *   overlapping fragment, the entire datagram (and any constituent
 	 *   fragments) MUST be silently discarded.
 	 *
-	 * We do the same here for IPv4 (and increment an snmp counter).
+	 * We do the same here for IPv4 (and increment an snmp counter) but
+	 * we do not want to drop the whole queue in response to a duplicate
+	 * fragment.
 	 */
 
+	err = -EINVAL;
 	/* Find out where to put this fragment.  */
 	prev_tail = qp->q.fragments_tail;
 	if (!prev_tail)
@@ -444,13 +447,17 @@
 		do {
 			parent = *rbn;
 			skb1 = rb_to_skb(parent);
+			skb1_run_end = skb1->ip_defrag_offset +
+				       FRAG_CB(skb1)->frag_run_len;
 			if (end <= skb1->ip_defrag_offset)
 				rbn = &parent->rb_left;
-			else if (offset >= skb1->ip_defrag_offset +
-						FRAG_CB(skb1)->frag_run_len)
+			else if (offset >= skb1_run_end)
 				rbn = &parent->rb_right;
-			else /* Found an overlap with skb1. */
-				goto discard_qp;
+			else if (offset >= skb1->ip_defrag_offset &&
+				 end <= skb1_run_end)
+				goto err; /* No new data, potential duplicate */
+			else
+				goto discard_qp; /* Found an overlap */
 		} while (*rbn);
 		/* Here we have parent properly set, and rbn pointing to
 		 * one of its NULL left/right children. Insert skb.
@@ -495,7 +502,6 @@
 
 discard_qp:
 	inet_frag_kill(&qp->q);
-	err = -EINVAL;
 	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
 err:
 	kfree_skb(skb);
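
The rbtree keeps runs of consecutive fragments; the four-way comparison above classifies a new fragment [offset, end) against a run as descend-left, descend-right, exact duplicate (drop just the skb), or partial overlap (kill the whole queue). The decision logic in isolation, with assumed run bounds:

#include <stdio.h>

enum frag_verdict { GO_LEFT, GO_RIGHT, DUPLICATE, OVERLAP };

/* Compare fragment [offset, end) against an existing run
 * [run_start, run_end), as in the ip_frag_queue() rbtree walk.
 */
static enum frag_verdict classify(int offset, int end,
				  int run_start, int run_end)
{
	if (end <= run_start)
		return GO_LEFT;
	if (offset >= run_end)
		return GO_RIGHT;
	if (offset >= run_start && end <= run_end)
		return DUPLICATE;	/* no new data: just drop the skb */
	return OVERLAP;			/* partial overlap: kill the queue */
}

int main(void)
{
	/* Existing run covers bytes [1000, 2000). Prints: 0 1 2 3 */
	printf("%d %d %d %d\n",
	       classify(0, 1000, 1000, 2000),    /* GO_LEFT */
	       classify(2000, 3000, 1000, 2000), /* GO_RIGHT */
	       classify(1200, 1800, 1000, 2000), /* DUPLICATE */
	       classify(900, 1500, 1000, 2000)); /* OVERLAP */
	return 0;
}
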
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8cce0e9..f199945 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -269,20 +269,11 @@
 	int len;
 
 	itn = net_generic(net, erspan_net_id);
-	len = gre_hdr_len + sizeof(*ershdr);
-
-	/* Check based hdr len */
-	if (unlikely(!pskb_may_pull(skb, len)))
-		return PACKET_REJECT;
 
 	iph = ip_hdr(skb);
 	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 	ver = ershdr->ver;
 
-	/* The original GRE header does not have key field,
-	 * Use ERSPAN 10-bit session ID as key.
-	 */
-	tpi->key = cpu_to_be32(get_session_id(ershdr));
 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 				  tpi->flags | TUNNEL_KEY,
 				  iph->saddr, iph->daddr, tpi->key);
@@ -570,8 +561,7 @@
 	dev->stats.tx_dropped++;
 }
 
-static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
-			   __be16 proto)
+static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct ip_tunnel_info *tun_info;
@@ -579,10 +569,10 @@
 	struct erspan_metadata *md;
 	struct rtable *rt = NULL;
 	bool truncate = false;
+	__be16 df, proto;
 	struct flowi4 fl;
 	int tunnel_hlen;
 	int version;
-	__be16 df;
 	int nhoff;
 	int thoff;
 
@@ -627,18 +617,20 @@
 	if (version == 1) {
 		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
 				    ntohl(md->u.index), truncate, true);
+		proto = htons(ETH_P_ERSPAN);
 	} else if (version == 2) {
 		erspan_build_header_v2(skb,
 				       ntohl(tunnel_id_to_key32(key->tun_id)),
 				       md->u.md2.dir,
 				       get_hwid(&md->u.md2),
 				       truncate, true);
+		proto = htons(ETH_P_ERSPAN2);
 	} else {
 		goto err_free_rt;
 	}
 
 	gre_build_header(skb, 8, TUNNEL_SEQ,
-			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
+			 proto, 0, htonl(tunnel->o_seqno++));
 
 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
 
@@ -677,6 +669,9 @@
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	const struct iphdr *tnl_params;
 
+	if (!pskb_inet_may_pull(skb))
+		goto free_skb;
+
 	if (tunnel->collect_md) {
 		gre_fb_xmit(skb, dev, skb->protocol);
 		return NETDEV_TX_OK;
@@ -719,9 +714,13 @@
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	bool truncate = false;
+	__be16 proto;
+
+	if (!pskb_inet_may_pull(skb))
+		goto free_skb;
 
 	if (tunnel->collect_md) {
-		erspan_fb_xmit(skb, dev, skb->protocol);
+		erspan_fb_xmit(skb, dev);
 		return NETDEV_TX_OK;
 	}
 
@@ -737,19 +736,22 @@
 	}
 
 	/* Push ERSPAN header */
-	if (tunnel->erspan_ver == 1)
+	if (tunnel->erspan_ver == 1) {
 		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 				    tunnel->index,
 				    truncate, true);
-	else if (tunnel->erspan_ver == 2)
+		proto = htons(ETH_P_ERSPAN);
+	} else if (tunnel->erspan_ver == 2) {
 		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 				       tunnel->dir, tunnel->hwid,
 				       truncate, true);
-	else
+		proto = htons(ETH_P_ERSPAN2);
+	} else {
 		goto free_skb;
+	}
 
 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
-	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
+	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
 	return NETDEV_TX_OK;
 
 free_skb:
@@ -763,6 +765,9 @@
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
+	if (!pskb_inet_may_pull(skb))
+		goto free_skb;
+
 	if (tunnel->collect_md) {
 		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 		return NETDEV_TX_OK;
@@ -1457,12 +1462,17 @@
 {
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct ip_tunnel_parm *p = &t->parms;
+	__be16 o_flags = p->o_flags;
+
+	if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
+	    !t->collect_md)
+		o_flags |= TUNNEL_KEY;
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
-			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+			 gre_tnl_flags_to_gre_flags(o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 5f6f7b3..0680f87 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -307,11 +307,10 @@
 }
 
 static int ip_rcv_finish_core(struct net *net, struct sock *sk,
-			      struct sk_buff *skb)
+			      struct sk_buff *skb, struct net_device *dev)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	int (*edemux)(struct sk_buff *skb);
-	struct net_device *dev = skb->dev;
 	struct rtable *rt;
 	int err;
 
@@ -400,6 +399,7 @@
 
 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	struct net_device *dev = skb->dev;
 	int ret;
 
 	/* if ingress device is enslaved to an L3 master device pass the
@@ -409,7 +409,7 @@
 	if (!skb)
 		return NET_RX_SUCCESS;
 
-	ret = ip_rcv_finish_core(net, sk, skb);
+	ret = ip_rcv_finish_core(net, sk, skb, dev);
 	if (ret != NET_RX_DROP)
 		ret = dst_input(skb);
 	return ret;
@@ -488,6 +488,7 @@
 		goto drop;
 	}
 
+	iph = ip_hdr(skb);
 	skb->transport_header = skb->network_header + iph->ihl*4;
 
 	/* Remove any debris in the socket control block */
@@ -548,6 +549,7 @@
 
 	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
+		struct net_device *dev = skb->dev;
 		struct dst_entry *dst;
 
 		skb_list_del_init(skb);
@@ -557,7 +559,7 @@
 		skb = l3mdev_ip_rcv(skb);
 		if (!skb)
 			continue;
-		if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
+		if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
 			continue;
 
 		dst = skb_dst(skb);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ed194d4..32a3504 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -251,8 +251,9 @@
  * If opt == NULL, then skb->data should point to IP header.
  */
 
-int ip_options_compile(struct net *net,
-		       struct ip_options *opt, struct sk_buff *skb)
+int __ip_options_compile(struct net *net,
+			 struct ip_options *opt, struct sk_buff *skb,
+			 __be32 *info)
 {
 	__be32 spec_dst = htonl(INADDR_ANY);
 	unsigned char *pp_ptr = NULL;
@@ -468,11 +469,22 @@
 		return 0;
 
 error:
-	if (skb) {
-		icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
-	}
+	if (info)
+		*info = htonl((pp_ptr-iph)<<24);
 	return -EINVAL;
 }
+
+int ip_options_compile(struct net *net,
+		       struct ip_options *opt, struct sk_buff *skb)
+{
+	int ret;
+	__be32 info;
+
+	ret = __ip_options_compile(net, opt, skb, &info);
+	if (ret != 0 && skb)
+		icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
+	return ret;
+}
 EXPORT_SYMBOL(ip_options_compile);
 
 /*
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 26c36cc..b7a2612 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -148,19 +148,17 @@
 
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
+	__be16 _ports[2], *ports;
 	struct sockaddr_in sin;
-	__be16 *ports;
-	int end;
-
-	end = skb_transport_offset(skb) + 4;
-	if (end > 0 && !pskb_may_pull(skb, end))
-		return;
 
 	/* All current transport protocols have the port numbers in the
 	 * first four bytes of the transport header and this function is
 	 * written with this assumption in mind.
 	 */
-	ports = (__be16 *)skb_transport_header(skb);
+	ports = skb_header_pointer(skb, skb_transport_offset(skb),
+				   sizeof(_ports), &_ports);
+	if (!ports)
+		return;
 
 	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 284a221..c4f5602 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,7 +627,6 @@
 		    const struct iphdr *tnl_params, u8 protocol)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	unsigned int inner_nhdr_len = 0;
 	const struct iphdr *inner_iph;
 	struct flowi4 fl4;
 	u8     tos, ttl;
@@ -637,14 +636,6 @@
 	__be32 dst;
 	bool connected;
 
-	/* ensure we can access the inner net header, for several users below */
-	if (skb->protocol == htons(ETH_P_IP))
-		inner_nhdr_len = sizeof(struct iphdr);
-	else if (skb->protocol == htons(ETH_P_IPV6))
-		inner_nhdr_len = sizeof(struct ipv6hdr);
-	if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
-		goto tx_error;
-
 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
 	connected = (tunnel->parms.iph.daddr != 0);
 
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index f38cb21..40a7cd5 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -74,6 +74,33 @@
 	return 0;
 }
 
+static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
+		     int encap_type)
+{
+	struct ip_tunnel *tunnel;
+	const struct iphdr *iph = ip_hdr(skb);
+	struct net *net = dev_net(skb->dev);
+	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+
+	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+				  iph->saddr, iph->daddr, 0);
+	if (tunnel) {
+		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+			goto drop;
+
+		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+
+		skb->dev = tunnel->dev;
+
+		return xfrm_input(skb, nexthdr, spi, encap_type);
+	}
+
+	return -EINVAL;
+drop:
+	kfree_skb(skb);
+	return 0;
+}
+
 static int vti_rcv(struct sk_buff *skb)
 {
 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
@@ -82,6 +109,14 @@
 	return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
 }
 
+static int vti_rcv_ipip(struct sk_buff *skb)
+{
+	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+	return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
+}
+
 static int vti_rcv_cb(struct sk_buff *skb, int err)
 {
 	unsigned short family;
@@ -241,6 +276,9 @@
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct flowi fl;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	memset(&fl, 0, sizeof(fl));
 
 	switch (skb->protocol) {
@@ -253,15 +291,18 @@
 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 		break;
 	default:
-		dev->stats.tx_errors++;
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
+		goto tx_err;
 	}
 
 	/* override mark with tunnel output key */
 	fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
 
 	return vti_xmit(skb, dev, &fl);
+
+tx_err:
+	dev->stats.tx_errors++;
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 static int vti4_err(struct sk_buff *skb, u32 info)
@@ -429,6 +470,12 @@
 	.priority	=	100,
 };
 
+static struct xfrm_tunnel ipip_handler __read_mostly = {
+	.handler	=	vti_rcv_ipip,
+	.err_handler	=	vti4_err,
+	.priority	=	0,
+};
+
 static int __net_init vti_init_net(struct net *net)
 {
 	int err;
@@ -597,6 +644,13 @@
 	if (err < 0)
 		goto xfrm_proto_comp_failed;
 
+	msg = "ipip tunnel";
+	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
+	if (err < 0) {
+		pr_info("%s: can't register tunnel\n", __func__);
+		goto xfrm_tunnel_failed;
+	}
+
 	msg = "netlink interface";
 	err = rtnl_link_register(&vti_link_ops);
 	if (err < 0)
@@ -606,6 +660,8 @@
 
 rtnl_link_failed:
 	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+xfrm_tunnel_failed:
+	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
 xfrm_proto_comp_failed:
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 xfrm_proto_ah_failed:
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 5660adc..f6275aa 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -69,6 +69,8 @@
 #include <net/nexthop.h>
 #include <net/switchdev.h>
 
+#include <linux/nospec.h>
+
 struct ipmr_rule {
 	struct fib_rule		common;
 };
@@ -1612,6 +1614,7 @@
 			return -EFAULT;
 		if (vr.vifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.vifi];
 		if (VIF_EXISTS(mrt, vr.vifi)) {
@@ -1686,6 +1689,7 @@
 			return -EFAULT;
 		if (vr.vifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.vifi];
 		if (VIF_EXISTS(mrt, vr.vifi)) {
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 2c8d313..3cd237b 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -56,18 +56,15 @@
 #endif
 	enum clusterip_hashmode hash_mode;	/* which hashing mode */
 	u_int32_t hash_initval;			/* hash initialization */
-	struct rcu_head rcu;
-
+	struct rcu_head rcu;			/* for call_rcu_bh */
+	struct net *net;			/* netns for pernet list */
 	char ifname[IFNAMSIZ];			/* device ifname */
-	struct notifier_block notifier;		/* refresh c->ifindex in it */
 };
 
 #ifdef CONFIG_PROC_FS
 static const struct file_operations clusterip_proc_fops;
 #endif
 
-static unsigned int clusterip_net_id __read_mostly;
-
 struct clusterip_net {
 	struct list_head configs;
 	/* lock protects the configs list */
@@ -75,19 +72,35 @@
 
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *procdir;
+	/* mutex protects the config->pde */
+	struct mutex mutex;
 #endif
 };
 
+static unsigned int clusterip_net_id __read_mostly;
+static inline struct clusterip_net *clusterip_pernet(struct net *net)
+{
+	return net_generic(net, clusterip_net_id);
+}
+
 static inline void
 clusterip_config_get(struct clusterip_config *c)
 {
 	refcount_inc(&c->refcount);
 }
 
-
 static void clusterip_config_rcu_free(struct rcu_head *head)
 {
-	kfree(container_of(head, struct clusterip_config, rcu));
+	struct clusterip_config *config;
+	struct net_device *dev;
+
+	config = container_of(head, struct clusterip_config, rcu);
+	dev = dev_get_by_name(config->net, config->ifname);
+	if (dev) {
+		dev_mc_del(dev, config->clustermac);
+		dev_put(dev);
+	}
+	kfree(config);
 }
 
 static inline void
@@ -101,25 +114,24 @@
  * entry(rule) is removed, remove the config from lists, but don't free it
  * yet, since proc-files could still be holding references */
 static inline void
-clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
+clusterip_config_entry_put(struct clusterip_config *c)
 {
-	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_net *cn = clusterip_pernet(c->net);
 
 	local_bh_disable();
 	if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
+		list_del_rcu(&c->list);
+		spin_unlock(&cn->lock);
+		local_bh_enable();
 		/* In case anyone still accesses the file, the open/close
 		 * functions are also incrementing the refcount on their own,
 		 * so it's safe to remove the entry even if it's in use. */
 #ifdef CONFIG_PROC_FS
+		mutex_lock(&cn->mutex);
 		if (cn->procdir)
 			proc_remove(c->pde);
+		mutex_unlock(&cn->mutex);
 #endif
-		list_del_rcu(&c->list);
-		spin_unlock(&cn->lock);
-		local_bh_enable();
-
-		unregister_netdevice_notifier(&c->notifier);
-
 		return;
 	}
 	local_bh_enable();
@@ -129,7 +141,7 @@
 __clusterip_config_find(struct net *net, __be32 clusterip)
 {
 	struct clusterip_config *c;
-	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_net *cn = clusterip_pernet(net);
 
 	list_for_each_entry_rcu(c, &cn->configs, list) {
 		if (c->clusterip == clusterip)
@@ -181,32 +193,37 @@
 		       void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct net *net = dev_net(dev);
+	struct clusterip_net *cn = clusterip_pernet(net);
 	struct clusterip_config *c;
 
-	c = container_of(this, struct clusterip_config, notifier);
-	switch (event) {
-	case NETDEV_REGISTER:
-		if (!strcmp(dev->name, c->ifname)) {
-			c->ifindex = dev->ifindex;
-			dev_mc_add(dev, c->clustermac);
+	spin_lock_bh(&cn->lock);
+	list_for_each_entry_rcu(c, &cn->configs, list) {
+		switch (event) {
+		case NETDEV_REGISTER:
+			if (!strcmp(dev->name, c->ifname)) {
+				c->ifindex = dev->ifindex;
+				dev_mc_add(dev, c->clustermac);
+			}
+			break;
+		case NETDEV_UNREGISTER:
+			if (dev->ifindex == c->ifindex) {
+				dev_mc_del(dev, c->clustermac);
+				c->ifindex = -1;
+			}
+			break;
+		case NETDEV_CHANGENAME:
+			if (!strcmp(dev->name, c->ifname)) {
+				c->ifindex = dev->ifindex;
+				dev_mc_add(dev, c->clustermac);
+			} else if (dev->ifindex == c->ifindex) {
+				dev_mc_del(dev, c->clustermac);
+				c->ifindex = -1;
+			}
+			break;
 		}
-		break;
-	case NETDEV_UNREGISTER:
-		if (dev->ifindex == c->ifindex) {
-			dev_mc_del(dev, c->clustermac);
-			c->ifindex = -1;
-		}
-		break;
-	case NETDEV_CHANGENAME:
-		if (!strcmp(dev->name, c->ifname)) {
-			c->ifindex = dev->ifindex;
-			dev_mc_add(dev, c->clustermac);
-		} else if (dev->ifindex == c->ifindex) {
-			dev_mc_del(dev, c->clustermac);
-			c->ifindex = -1;
-		}
-		break;
 	}
+	spin_unlock_bh(&cn->lock);
 
 	return NOTIFY_DONE;
 }
@@ -215,30 +232,44 @@
 clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
 		      __be32 ip, const char *iniface)
 {
-	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_net *cn = clusterip_pernet(net);
 	struct clusterip_config *c;
+	struct net_device *dev;
 	int err;
 
+	if (iniface[0] == '\0') {
+		pr_info("Please specify an interface name\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
 	if (!c)
 		return ERR_PTR(-ENOMEM);
 
-	strcpy(c->ifname, iniface);
-	c->ifindex = -1;
-	c->clusterip = ip;
+	dev = dev_get_by_name(net, iniface);
+	if (!dev) {
+		pr_info("no such interface %s\n", iniface);
+		kfree(c);
+		return ERR_PTR(-ENOENT);
+	}
+	c->ifindex = dev->ifindex;
+	strcpy(c->ifname, dev->name);
 	memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
+	dev_mc_add(dev, c->clustermac);
+	dev_put(dev);
+
+	c->clusterip = ip;
 	c->num_total_nodes = i->num_total_nodes;
 	clusterip_config_init_nodelist(c, i);
 	c->hash_mode = i->hash_mode;
 	c->hash_initval = i->hash_initval;
+	c->net = net;
 	refcount_set(&c->refcount, 1);
 
 	spin_lock_bh(&cn->lock);
 	if (__clusterip_config_find(net, ip)) {
-		spin_unlock_bh(&cn->lock);
-		kfree(c);
-
-		return ERR_PTR(-EBUSY);
+		err = -EBUSY;
+		goto out_config_put;
 	}
 
 	list_add_rcu(&c->list, &cn->configs);
@@ -250,9 +281,11 @@
 
 		/* create proc dir entry */
 		sprintf(buffer, "%pI4", &ip);
+		mutex_lock(&cn->mutex);
 		c->pde = proc_create_data(buffer, 0600,
 					  cn->procdir,
 					  &clusterip_proc_fops, c);
+		mutex_unlock(&cn->mutex);
 		if (!c->pde) {
 			err = -ENOMEM;
 			goto err;
@@ -260,22 +293,17 @@
 	}
 #endif
 
-	c->notifier.notifier_call = clusterip_netdev_event;
-	err = register_netdevice_notifier(&c->notifier);
-	if (!err) {
-		refcount_set(&c->entries, 1);
-		return c;
-	}
+	refcount_set(&c->entries, 1);
+	return c;
 
 #ifdef CONFIG_PROC_FS
-	proc_remove(c->pde);
 err:
 #endif
 	spin_lock_bh(&cn->lock);
 	list_del_rcu(&c->list);
+out_config_put:
 	spin_unlock_bh(&cn->lock);
 	clusterip_config_put(c);
-
 	return ERR_PTR(err);
 }
 
@@ -475,34 +503,20 @@
 				&e->ip.dst.s_addr);
 			return -EINVAL;
 		} else {
-			struct net_device *dev;
-
-			if (e->ip.iniface[0] == '\0') {
-				pr_info("Please specify an interface name\n");
-				return -EINVAL;
-			}
-
-			dev = dev_get_by_name(par->net, e->ip.iniface);
-			if (!dev) {
-				pr_info("no such interface %s\n",
-					e->ip.iniface);
-				return -ENOENT;
-			}
-			dev_put(dev);
-
 			config = clusterip_config_init(par->net, cipinfo,
 						       e->ip.dst.s_addr,
 						       e->ip.iniface);
 			if (IS_ERR(config))
 				return PTR_ERR(config);
 		}
-	}
+	} else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN))
+		return -EINVAL;
 
 	ret = nf_ct_netns_get(par->net, par->family);
 	if (ret < 0) {
 		pr_info("cannot load conntrack support for proto=%u\n",
 			par->family);
-		clusterip_config_entry_put(par->net, config);
+		clusterip_config_entry_put(config);
 		clusterip_config_put(config);
 		return ret;
 	}
@@ -524,7 +538,7 @@
 
 	/* if no more entries are referencing the config, remove it
 	 * from the list and destroy the proc entry */
-	clusterip_config_entry_put(par->net, cipinfo->config);
+	clusterip_config_entry_put(cipinfo->config);
 
 	clusterip_config_put(cipinfo->config);
 
@@ -806,7 +820,7 @@
 
 static int clusterip_net_init(struct net *net)
 {
-	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_net *cn = clusterip_pernet(net);
 	int ret;
 
 	INIT_LIST_HEAD(&cn->configs);
@@ -824,6 +838,7 @@
 		pr_err("Unable to proc dir entry\n");
 		return -ENOMEM;
 	}
+	mutex_init(&cn->mutex);
 #endif /* CONFIG_PROC_FS */
 
 	return 0;
@@ -831,13 +846,15 @@
 
 static void clusterip_net_exit(struct net *net)
 {
-	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_net *cn = clusterip_pernet(net);
+
 #ifdef CONFIG_PROC_FS
+	mutex_lock(&cn->mutex);
 	proc_remove(cn->procdir);
 	cn->procdir = NULL;
+	mutex_unlock(&cn->mutex);
 #endif
 	nf_unregister_net_hook(net, &cip_arp_ops);
-	WARN_ON_ONCE(!list_empty(&cn->configs));
 }
 
 static struct pernet_operations clusterip_net_ops = {
@@ -847,6 +864,10 @@
 	.size = sizeof(struct clusterip_net),
 };
 
+struct notifier_block cip_netdev_notifier = {
+	.notifier_call = clusterip_netdev_event
+};
+
 static int __init clusterip_tg_init(void)
 {
 	int ret;
@@ -859,11 +880,17 @@
 	if (ret < 0)
 		goto cleanup_subsys;
 
+	ret = register_netdevice_notifier(&cip_netdev_notifier);
+	if (ret < 0)
+		goto unregister_target;
+
 	pr_info("ClusterIP Version %s loaded successfully\n",
 		CLUSTERIP_VERSION);
 
 	return 0;
 
+unregister_target:
+	xt_unregister_target(&clusterip_tg_reg);
 cleanup_subsys:
 	unregister_pernet_subsys(&clusterip_net_ops);
 	return ret;
@@ -873,6 +900,7 @@
 {
 	pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
 
+	unregister_netdevice_notifier(&cip_netdev_notifier);
 	xt_unregister_target(&clusterip_tg_reg);
 	unregister_pernet_subsys(&clusterip_net_ops);
 
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
index ac110c1..481437f 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
@@ -104,6 +104,8 @@
 int snmp_version(void *context, size_t hdrlen, unsigned char tag,
 		 const void *data, size_t datalen)
 {
+	if (datalen != 1)
+		return -EINVAL;
 	if (*(unsigned char *)data > 1)
 		return -ENOTSUPP;
 	return 1;
@@ -113,8 +115,11 @@
 		const void *data, size_t datalen)
 {
 	struct snmp_ctx *ctx = (struct snmp_ctx *)context;
-	__be32 *pdata = (__be32 *)data;
+	__be32 *pdata;
 
+	if (datalen != 4)
+		return -EINVAL;
+	pdata = (__be32 *)data;
 	if (*pdata == ctx->from) {
 		pr_debug("%s: %pI4 to %pI4\n", __func__,
 			 (void *)&ctx->from, (void *)&ctx->to);
diff --git a/net/ipv4/netlink.c b/net/ipv4/netlink.c
index f86bb4f..d8e3a1f 100644
--- a/net/ipv4/netlink.c
+++ b/net/ipv4/netlink.c
@@ -3,9 +3,10 @@
 #include <linux/types.h>
 #include <net/net_namespace.h>
 #include <net/netlink.h>
+#include <linux/in6.h>
 #include <net/ip.h>
 
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
 				struct netlink_ext_ack *extack)
 {
 	*ip_proto = nla_get_u8(attr);
@@ -13,11 +14,19 @@
 	switch (*ip_proto) {
 	case IPPROTO_TCP:
 	case IPPROTO_UDP:
-	case IPPROTO_ICMP:
 		return 0;
-	default:
-		NL_SET_ERR_MSG(extack, "Unsupported ip proto");
-		return -EOPNOTSUPP;
+	case IPPROTO_ICMP:
+		if (family != AF_INET)
+			break;
+		return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+	case IPPROTO_ICMPV6:
+		if (family != AF_INET6)
+			break;
+		return 0;
+#endif
 	}
+	NL_SET_ERR_MSG(extack, "Unsupported ip proto");
+	return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 8501554..7a556e4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -887,13 +887,15 @@
 	/* No redirected packets during ip_rt_redirect_silence;
 	 * reset the algorithm.
 	 */
-	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
 		peer->rate_tokens = 0;
+		peer->n_redirects = 0;
+	}
 
 	/* Too many ignored redirects; do not send anything
 	 * set dst.rate_last to the last seen redirected packet.
 	 */
-	if (peer->rate_tokens >= ip_rt_redirect_number) {
+	if (peer->n_redirects >= ip_rt_redirect_number) {
 		peer->rate_last = jiffies;
 		goto out_put_peer;
 	}
@@ -910,6 +912,7 @@
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
 		peer->rate_last = jiffies;
 		++peer->rate_tokens;
+		++peer->n_redirects;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
 		    peer->rate_tokens == ip_rt_redirect_number)
@@ -1305,6 +1308,10 @@
 		if (fnhe->fnhe_daddr == daddr) {
 			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
 				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+			/* set fnhe_daddr to 0 to ensure it won't bind with
+			 * new dsts in rt_bind_exception().
+			 */
+			fnhe->fnhe_daddr = 0;
 			fnhe_flush_routes(fnhe);
 			kfree_rcu(fnhe, rcu);
 			break;
@@ -2152,12 +2159,13 @@
 		int our = 0;
 		int err = -EINVAL;
 
-		if (in_dev)
-			our = ip_check_mc_rcu(in_dev, daddr, saddr,
-					      ip_hdr(skb)->protocol);
+		if (!in_dev)
+			return err;
+		our = ip_check_mc_rcu(in_dev, daddr, saddr,
+				      ip_hdr(skb)->protocol);
 
 		/* check l3 master if no match yet */
-		if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
+		if (!our && netif_is_l3_slave(dev)) {
 			struct in_device *l3_in_dev;
 
 			l3_in_dev = __in_dev_get_rcu(skb->dev);
@@ -2811,7 +2819,7 @@
 
 	if (tb[RTA_IP_PROTO]) {
 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
-						  &ip_proto, extack);
+						  &ip_proto, AF_INET, extack);
 		if (err)
 			return err;
 	}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index c3387df..f66b2e6 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -216,7 +216,12 @@
 		refcount_set(&req->rsk_refcnt, 1);
 		tcp_sk(child)->tsoffset = tsoff;
 		sock_rps_save_rxhash(child, skb);
-		inet_csk_reqsk_queue_add(sk, req, child);
+		if (!inet_csk_reqsk_queue_add(sk, req, child)) {
+			bh_unlock_sock(child);
+			sock_put(child);
+			child = NULL;
+			reqsk_put(req);
+		}
 	} else {
 		reqsk_free(req);
 	}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5d0d6cc..ca38aca 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1186,7 +1186,7 @@
 	flags = msg->msg_flags;
 
 	if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
-		if (sk->sk_state != TCP_ESTABLISHED) {
+		if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
 			err = -EINVAL;
 			goto out_err;
 		}
@@ -1901,6 +1901,11 @@
 		inq = tp->rcv_nxt - tp->copied_seq;
 		release_sock(sk);
 	}
+	/* After receiving a FIN, tell the user-space to continue reading
+	 * by returning a non-zero inq.
+	 */
+	if (inq == 0 && sock_flag(sk, SOCK_DONE))
+		inq = 1;
 	return inq;
 }
 
@@ -2519,6 +2524,7 @@
 	sk_mem_reclaim(sk);
 	tcp_clear_all_retrans_hints(tcp_sk(sk));
 	tcp_sk(sk)->packets_out = 0;
+	inet_csk(sk)->icsk_backoff = 0;
 }
 
 int tcp_disconnect(struct sock *sk, int flags)
@@ -2567,7 +2573,6 @@
 	tp->write_seq += tp->max_window + 2;
 	if (tp->write_seq == 0)
 		tp->write_seq = 1;
-	icsk->icsk_backoff = 0;
 	tp->snd_cwnd = 2;
 	icsk->icsk_probes_out = 0;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0e9fbdf..16f2c84 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6493,7 +6493,13 @@
 		af_ops->send_synack(fastopen_sk, dst, &fl, req,
 				    &foc, TCP_SYNACK_FASTOPEN);
 		/* Add the child socket directly into the accept queue */
-		inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
+		if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
+			reqsk_fastopen_remove(fastopen_sk, req, false);
+			bh_unlock_sock(fastopen_sk);
+			sock_put(fastopen_sk);
+			reqsk_put(req);
+			goto drop;
+		}
 		sk->sk_data_ready(sk);
 		bh_unlock_sock(fastopen_sk);
 		sock_put(fastopen_sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3147931..ce66c23 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -535,14 +535,15 @@
 		if (sock_owned_by_user(sk))
 			break;
 
+		skb = tcp_rtx_queue_head(sk);
+		if (WARN_ON_ONCE(!skb))
+			break;
+
 		icsk->icsk_backoff--;
 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
 					       TCP_TIMEOUT_INIT;
 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
-		skb = tcp_rtx_queue_head(sk);
-		BUG_ON(!skb);
-
 		tcp_mstamp_refresh(tp);
 		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
 		remaining = icsk->icsk_rto -
@@ -1645,15 +1646,8 @@
 int tcp_filter(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = (struct tcphdr *)skb->data;
-	unsigned int eaten = skb->len;
-	int err;
 
-	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
-	if (!err) {
-		eaten -= skb->len;
-		TCP_SKB_CB(skb)->end_seq -= eaten;
-	}
-	return err;
+	return sk_filter_trim_cap(sk, skb, th->doff * 4);
 }
 EXPORT_SYMBOL(tcp_filter);
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 57eae8d..b1b5a64 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -224,7 +224,7 @@
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits) {
 			dst_negative_advice(sk);
-		} else if (!tp->syn_data && !tp->syn_fastopen) {
+		} else {
 			sk_rethink_txhash(sk);
 		}
 		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 31a42ae..c5f31162 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -791,15 +791,23 @@
 		const int hlen = skb_network_header_len(skb) +
 				 sizeof(struct udphdr);
 
-		if (hlen + cork->gso_size > cork->fragsize)
+		if (hlen + cork->gso_size > cork->fragsize) {
+			kfree_skb(skb);
 			return -EINVAL;
-		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+		}
+		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+			kfree_skb(skb);
 			return -EINVAL;
-		if (sk->sk_no_check_tx)
+		}
+		if (sk->sk_no_check_tx) {
+			kfree_skb(skb);
 			return -EINVAL;
+		}
 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
-		    dst_xfrm(skb_dst(skb)))
+		    dst_xfrm(skb_dst(skb))) {
+			kfree_skb(skb);
 			return -EIO;
+		}
 
 		skb_shinfo(skb)->gso_size = cork->gso_size;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7e74c7f..6bd1ba10 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1150,7 +1150,8 @@
 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
 		if (ifa == ifp)
 			continue;
-		if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
+		if (ifa->prefix_len != ifp->prefix_len ||
+		    !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
 				       ifp->prefix_len))
 			continue;
 		if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
@@ -4733,8 +4734,8 @@
 			 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
 
 	idev = ipv6_find_idev(dev);
-	if (IS_ERR(idev))
-		return PTR_ERR(idev);
+	if (!idev)
+		return -ENOBUFS;
 
 	if (!ipv6_allow_optimistic_dad(net, idev))
 		cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 6c330ed..cc855d9 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -325,6 +325,7 @@
 
 	/* Check if the address belongs to the host. */
 	if (addr_type == IPV6_ADDR_MAPPED) {
+		struct net_device *dev = NULL;
 		int chk_addr_ret;
 
 		/* Binding to v4-mapped address on a v6-only socket
@@ -335,9 +336,20 @@
 			goto out;
 		}
 
+		rcu_read_lock();
+		if (sk->sk_bound_dev_if) {
+			dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+			if (!dev) {
+				err = -ENODEV;
+				goto out_unlock;
+			}
+		}
+
 		/* Reproduce AF_INET checks to make the bindings consistent */
 		v4addr = addr->sin6_addr.s6_addr32[3];
-		chk_addr_ret = inet_addr_type(net, v4addr);
+		chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
+		rcu_read_unlock();
+
 		if (!inet_can_nonlocal_bind(net, inet) &&
 		    v4addr != htonl(INADDR_ANY) &&
 		    chk_addr_ret != RTN_LOCAL &&
@@ -365,6 +377,9 @@
 					err = -EINVAL;
 					goto out_unlock;
 				}
+			}
+
+			if (sk->sk_bound_dev_if) {
 				dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
 				if (!dev) {
 					err = -ENODEV;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index f31fe86..aea3cb4 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -341,6 +341,7 @@
 	skb_reset_network_header(skb);
 	iph = ipv6_hdr(skb);
 	iph->daddr = fl6->daddr;
+	ip6_flow_hdr(iph, 0, 0);
 
 	serr = SKB_EXT_ERR(skb);
 	serr->ee.ee_errno = err;
@@ -700,17 +701,15 @@
 	}
 	if (np->rxopt.bits.rxorigdstaddr) {
 		struct sockaddr_in6 sin6;
-		__be16 *ports;
-		int end;
+		__be16 _ports[2], *ports;
 
-		end = skb_transport_offset(skb) + 4;
-		if (end <= 0 || pskb_may_pull(skb, end)) {
+		ports = skb_header_pointer(skb, skb_transport_offset(skb),
+					   sizeof(_ports), &_ports);
+		if (ports) {
 			/* All current transport protocols have the port numbers in the
 			 * first four bytes of the transport header and this function is
 			 * written with this assumption in mind.
 			 */
-			ports = (__be16 *)skb_transport_header(skb);
-
 			sin6.sin6_family = AF_INET6;
 			sin6.sin6_addr = ipv6_hdr(skb)->daddr;
 			sin6.sin6_port = ports[1];
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index c9c53ad..6d14cbe 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -421,10 +421,10 @@
 static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 		       const struct in6_addr *force_saddr)
 {
-	struct net *net = dev_net(skb->dev);
 	struct inet6_dev *idev = NULL;
 	struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct sock *sk;
+	struct net *net;
 	struct ipv6_pinfo *np;
 	const struct in6_addr *saddr = NULL;
 	struct dst_entry *dst;
@@ -435,12 +435,16 @@
 	int iif = 0;
 	int addr_type = 0;
 	int len;
-	u32 mark = IP6_REPLY_MARK(net, skb->mark);
+	u32 mark;
 
 	if ((u8 *)hdr < skb->head ||
 	    (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
 		return;
 
+	if (!skb->dev)
+		return;
+	net = dev_net(skb->dev);
+	mark = IP6_REPLY_MARK(net, skb->mark);
 	/*
 	 *	Make sure we respect the rules
 	 *	i.e. RFC 1885 2.4(e)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index e493b04..faed98d 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -550,13 +550,9 @@
 	struct ip6_tnl *tunnel;
 	u8 ver;
 
-	if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
-		return PACKET_REJECT;
-
 	ipv6h = ipv6_hdr(skb);
 	ershdr = (struct erspan_base_hdr *)skb->data;
 	ver = ershdr->ver;
-	tpi->key = cpu_to_be32(get_session_id(ershdr));
 
 	tunnel = ip6gre_tunnel_lookup(skb->dev,
 				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
@@ -897,6 +893,9 @@
 	struct net_device_stats *stats = &t->dev->stats;
 	int ret;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
 		goto tx_err;
 
@@ -935,10 +934,14 @@
 	__u8 dsfield = false;
 	struct flowi6 fl6;
 	int err = -EINVAL;
+	__be16 proto;
 	__u32 mtu;
 	int nhoff;
 	int thoff;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
 		goto tx_err;
 
@@ -1011,8 +1014,6 @@
 			goto tx_err;
 		}
 	} else {
-		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-
 		switch (skb->protocol) {
 		case htons(ETH_P_IP):
 			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1020,7 +1021,7 @@
 						 &dsfield, &encap_limit);
 			break;
 		case htons(ETH_P_IPV6):
-			if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+			if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
 				goto tx_err;
 			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
 						     &dsfield, &encap_limit))
@@ -1047,8 +1048,9 @@
 	}
 
 	/* Push GRE header. */
-	gre_build_header(skb, 8, TUNNEL_SEQ,
-			 htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
+	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
+					   : htons(ETH_P_ERSPAN2);
+	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
 
 	/* TooBig packet may have updated dst->dev's mtu */
 	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
@@ -1181,6 +1183,10 @@
 	t->parms.i_flags = p->i_flags;
 	t->parms.o_flags = p->o_flags;
 	t->parms.fwmark = p->fwmark;
+	t->parms.erspan_ver = p->erspan_ver;
+	t->parms.index = p->index;
+	t->parms.dir = p->dir;
+	t->parms.hwid = p->hwid;
 	dst_cache_reset(&t->dst_cache);
 }
 
@@ -1729,6 +1735,24 @@
 	return 0;
 }
 
+static void ip6erspan_set_version(struct nlattr *data[],
+				  struct __ip6_tnl_parm *parms)
+{
+	parms->erspan_ver = 1;
+	if (data[IFLA_GRE_ERSPAN_VER])
+		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+	if (parms->erspan_ver == 1) {
+		if (data[IFLA_GRE_ERSPAN_INDEX])
+			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+	} else if (parms->erspan_ver == 2) {
+		if (data[IFLA_GRE_ERSPAN_DIR])
+			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+		if (data[IFLA_GRE_ERSPAN_HWID])
+			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+	}
+}
+
 static void ip6gre_netlink_parms(struct nlattr *data[],
 				struct __ip6_tnl_parm *parms)
 {
@@ -1777,20 +1801,6 @@
 
 	if (data[IFLA_GRE_COLLECT_METADATA])
 		parms->collect_md = true;
-
-	parms->erspan_ver = 1;
-	if (data[IFLA_GRE_ERSPAN_VER])
-		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
-
-	if (parms->erspan_ver == 1) {
-		if (data[IFLA_GRE_ERSPAN_INDEX])
-			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
-	} else if (parms->erspan_ver == 2) {
-		if (data[IFLA_GRE_ERSPAN_DIR])
-			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
-		if (data[IFLA_GRE_ERSPAN_HWID])
-			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
-	}
 }
 
 static int ip6gre_tap_init(struct net_device *dev)
@@ -2043,9 +2053,9 @@
 			     struct nlattr *data[],
 			     struct netlink_ext_ack *extack)
 {
-	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
 	struct __ip6_tnl_parm p;
-	struct ip6_tnl *t;
 
 	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
 	if (IS_ERR(t))
@@ -2114,12 +2124,17 @@
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct __ip6_tnl_parm *p = &t->parms;
+	__be16 o_flags = p->o_flags;
+
+	if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
+	    !p->collect_md)
+		o_flags |= TUNNEL_KEY;
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
-			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+			 gre_tnl_flags_to_gre_flags(o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
@@ -2214,6 +2229,7 @@
 	int err;
 
 	ip6gre_netlink_parms(data, &nt->parms);
+	ip6erspan_set_version(data, &nt->parms);
 	ign = net_generic(net, ip6gre_net_id);
 
 	if (nt->parms.collect_md) {
@@ -2259,6 +2275,7 @@
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 
+	ip6erspan_set_version(data, &p);
 	ip6gre_tunnel_unlink_md(ign, t);
 	ip6gre_tunnel_unlink(ign, t);
 	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2694def..0bb87f3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -378,6 +378,7 @@
 	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 
+	skb->tstamp = 0;
 	return dst_output(net, sk, skb);
 }
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a9d06d4..0c6403c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -901,6 +901,7 @@
 			goto drop;
 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto drop;
+		ipv6h = ipv6_hdr(skb);
 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
 			goto drop;
 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
@@ -1242,10 +1243,6 @@
 	u8 tproto;
 	int err;
 
-	/* ensure we can access the full inner ip header */
-	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
-		return -1;
-
 	iph = ip_hdr(skb);
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
@@ -1320,9 +1317,6 @@
 	u8 tproto;
 	int err;
 
-	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
-		return -1;
-
 	ipv6h = ipv6_hdr(skb);
 	tproto = READ_ONCE(t->parms.proto);
 	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
@@ -1404,6 +1398,9 @@
 	struct net_device_stats *stats = &t->dev->stats;
 	int ret;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		ret = ip4ip6_tnl_xmit(skb, dev);
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index b283f29..caad40d 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -15,7 +15,7 @@
 int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
 		     struct socket **sockp)
 {
-	struct sockaddr_in6 udp6_addr;
+	struct sockaddr_in6 udp6_addr = {};
 	int err;
 	struct socket *sock = NULL;
 
@@ -42,6 +42,7 @@
 		goto error;
 
 	if (cfg->peer_udp_port) {
+		memset(&udp6_addr, 0, sizeof(udp6_addr));
 		udp6_addr.sin6_family = AF_INET6;
 		memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
 		       sizeof(udp6_addr.sin6_addr));
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index eeaf745..8b6eeff 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -318,6 +318,7 @@
 			return 0;
 		}
 
+		ipv6h = ipv6_hdr(skb);
 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
 			t->dev->stats.rx_dropped++;
 			rcu_read_unlock();
@@ -521,18 +522,18 @@
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net_device_stats *stats = &t->dev->stats;
-	struct ipv6hdr *ipv6h;
 	struct flowi fl;
 	int ret;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	memset(&fl, 0, sizeof(fl));
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IPV6):
-		ipv6h = ipv6_hdr(skb);
-
 		if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
-		    vti6_addr_conflict(t, ipv6h))
+		    vti6_addr_conflict(t, ipv6_hdr(skb)))
 			goto tx_err;
 
 		xfrm_decode_session(skb, &fl, AF_INET6);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index d0b7e02..35e7092 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -51,6 +51,9 @@
 #include <linux/export.h>
 #include <net/ip6_checksum.h>
 #include <linux/netconf.h>
+#include <net/ip_tunnels.h>
+
+#include <linux/nospec.h>
 
 struct ip6mr_rule {
 	struct fib_rule		common;
@@ -591,13 +594,12 @@
 		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
 		.flowi6_mark	= skb->mark,
 	};
-	int err;
 
-	err = ip6mr_fib_lookup(net, &fl6, &mrt);
-	if (err < 0) {
-		kfree_skb(skb);
-		return err;
-	}
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
+	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
+		goto tx_err;
 
 	read_lock(&mrt_lock);
 	dev->stats.tx_bytes += skb->len;
@@ -606,6 +608,11 @@
 	read_unlock(&mrt_lock);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
+
+tx_err:
+	dev->stats.tx_errors++;
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 static int reg_vif_get_iflink(const struct net_device *dev)
@@ -1499,6 +1506,9 @@
 			continue;
 		rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
 		list_del_rcu(&c->list);
+		call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
+					       FIB_EVENT_ENTRY_DEL,
+					       (struct mfc6_cache *)c, mrt->id);
 		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
 		mr_cache_put(c);
 	}
@@ -1507,10 +1517,6 @@
 		spin_lock_bh(&mfc_unres_lock);
 		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
 			list_del(&c->list);
-			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
-						       FIB_EVENT_ENTRY_DEL,
-						       (struct mfc6_cache *)c,
-						       mrt->id);
 			mr6_netlink_event(mrt, (struct mfc6_cache *)c,
 					  RTM_DELROUTE);
 			ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
@@ -1831,6 +1837,7 @@
 			return -EFAULT;
 		if (vr.mifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.mifi];
 		if (VIF_EXISTS(mrt, vr.mifi)) {
@@ -1905,6 +1912,7 @@
 			return -EFAULT;
 		if (vr.mifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.mifi];
 		if (VIF_EXISTS(mrt, vr.mifi)) {
@@ -1946,10 +1954,10 @@
 
 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-			IPSTATS_MIB_OUTFORWDATAGRAMS);
-	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
-			IPSTATS_MIB_OUTOCTETS, skb->len);
+	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+		      IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+		      IPSTATS_MIB_OUTOCTETS, skb->len);
 	return dst_output(net, sk, skb);
 }
 
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 8b075f0..6d0b1f3 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -23,9 +23,11 @@
 	struct sock *sk = sk_to_full_sk(skb->sk);
 	unsigned int hh_len;
 	struct dst_entry *dst;
+	int strict = (ipv6_addr_type(&iph->daddr) &
+		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
 	struct flowi6 fl6 = {
 		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
-			rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
+			strict ? skb_dst(skb)->dev->ifindex : 0,
 		.flowi6_mark = skb->mark,
 		.flowi6_uid = sock_net_uid(net, sk),
 		.daddr = iph->daddr,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index d3fd2d7..7c94339 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -384,6 +384,7 @@
 		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
 			kfree_skb_partial(fp, headstolen);
 		} else {
+			fp->sk = NULL;
 			if (!skb_shinfo(head)->frag_list)
 				skb_shinfo(head)->frag_list = fp;
 			head->data_len += fp->len;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9959c9c..509a49f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -210,7 +210,9 @@
 	n = __ipv6_neigh_lookup(dev, daddr);
 	if (n)
 		return n;
-	return neigh_create(&nd_tbl, daddr, dev);
+
+	n = neigh_create(&nd_tbl, daddr, dev);
+	return IS_ERR(n) ? NULL : n;
 }
 
 static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
@@ -1280,18 +1282,29 @@
 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
 				 struct rt6_exception *rt6_ex)
 {
+	struct fib6_info *from;
 	struct net *net;
 
 	if (!bucket || !rt6_ex)
 		return;
 
 	net = dev_net(rt6_ex->rt6i->dst.dev);
+	net->ipv6.rt6_stats->fib_rt_cache--;
+
+	/* purge completely the exception to allow releasing the held resources:
+	 * some [sk] cache may keep the dst around for unlimited time
+	 */
+	from = rcu_dereference_protected(rt6_ex->rt6i->from,
+					 lockdep_is_held(&rt6_exception_lock));
+	rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+	fib6_info_release(from);
+	dst_dev_put(&rt6_ex->rt6i->dst);
+
 	hlist_del_rcu(&rt6_ex->hlist);
 	dst_release(&rt6_ex->rt6i->dst);
 	kfree_rcu(rt6_ex, rcu);
 	WARN_ON_ONCE(!bucket->depth);
 	bucket->depth--;
-	net->ipv6.rt6_stats->fib_rt_cache--;
 }
 
 /* Remove oldest rt6_ex in bucket and free the memory
@@ -1610,15 +1623,15 @@
 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
 {
 	struct rt6_exception_bucket *bucket;
-	struct fib6_info *from = rt->from;
 	struct in6_addr *src_key = NULL;
 	struct rt6_exception *rt6_ex;
-
-	if (!from ||
-	    !(rt->rt6i_flags & RTF_CACHE))
-		return;
+	struct fib6_info *from;
 
 	rcu_read_lock();
+	from = rcu_dereference(rt->from);
+	if (!from || !(rt->rt6i_flags & RTF_CACHE))
+		goto unlock;
+
 	bucket = rcu_dereference(from->rt6i_exception_bucket);
 
 #ifdef CONFIG_IPV6_SUBTREES
@@ -1637,6 +1650,7 @@
 	if (rt6_ex)
 		rt6_ex->stamp = jiffies;
 
+unlock:
 	rcu_read_unlock();
 }
 
@@ -2794,20 +2808,24 @@
 	u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
 	u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
+	struct fib6_info *from;
 	struct rt6_info *grt;
 	int err;
 
 	err = 0;
 	grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
 	if (grt) {
+		rcu_read_lock();
+		from = rcu_dereference(grt->from);
 		if (!grt->dst.error &&
 		    /* ignore match if it is the default route */
-		    grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
+		    from && !ipv6_addr_any(&from->fib6_dst.addr) &&
 		    (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
 			NL_SET_ERR_MSG(extack,
 				       "Nexthop has invalid gateway or device mismatch");
 			err = -EINVAL;
 		}
+		rcu_read_unlock();
 
 		ip6_rt_put(grt);
 	}
@@ -4187,6 +4205,10 @@
 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
 		cfg->fc_flags |= RTF_GATEWAY;
 	}
+	if (tb[RTA_VIA]) {
+		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
+		goto errout;
+	}
 
 	if (tb[RTA_DST]) {
 		int plen = (rtm->rtm_dst_len + 7) >> 3;
@@ -4680,7 +4702,7 @@
 		table = rt->fib6_table->tb6_id;
 	else
 		table = RT6_TABLE_UNSPEC;
-	rtm->rtm_table = table;
+	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
 	if (nla_put_u32(skb, RTA_TABLE, table))
 		goto nla_put_failure;
 
@@ -4881,7 +4903,8 @@
 
 	if (tb[RTA_IP_PROTO]) {
 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
-						  &fl6.flowi6_proto, extack);
+						  &fl6.flowi6_proto, AF_INET6,
+						  extack);
 		if (err)
 			goto errout;
 	}
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 8d0ba75..9b2f272 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -221,9 +221,7 @@
 	rcu_read_unlock();
 
 	genlmsg_end(msg, hdr);
-	genlmsg_reply(msg, info);
-
-	return 0;
+	return genlmsg_reply(msg, info);
 
 nla_put_failure:
 	rcu_read_unlock();
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 8181ee7..ee5403c 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -146,6 +146,8 @@
 	} else {
 		ip6_flow_hdr(hdr, 0, flowlabel);
 		hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+
+		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 	}
 
 	hdr->nexthdr = NEXTHDR_ROUTING;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e9400ff..de9aa5c 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -546,7 +546,8 @@
 	}
 
 	err = 0;
-	if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
+	if (__in6_dev_get(skb->dev) &&
+	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
 		goto out;
 
 	if (t->parms.iph.daddr == 0)
@@ -777,8 +778,9 @@
 		pbw0 = tunnel->ip6rd.prefixlen >> 5;
 		pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
 
-		d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
-		    tunnel->ip6rd.relay_prefixlen;
+		d = tunnel->ip6rd.relay_prefixlen < 32 ?
+			(ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
+		    tunnel->ip6rd.relay_prefixlen : 0;
 
 		pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
 		if (pbi1 > 0)
@@ -1021,6 +1023,9 @@
 static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
 				   struct net_device *dev)
 {
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
@@ -1869,6 +1874,7 @@
 
 err_reg_dev:
 	ipip6_dev_free(sitn->fb_tunnel_dev);
+	free_netdev(sitn->fb_tunnel_dev);
 err_alloc_dev:
 	return err;
 }
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e2e3aa8..1efce16 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1081,15 +1081,23 @@
 		const int hlen = skb_network_header_len(skb) +
 				 sizeof(struct udphdr);
 
-		if (hlen + cork->gso_size > cork->fragsize)
+		if (hlen + cork->gso_size > cork->fragsize) {
+			kfree_skb(skb);
 			return -EINVAL;
-		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+		}
+		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+			kfree_skb(skb);
 			return -EINVAL;
-		if (udp_sk(sk)->no_check6_tx)
+		}
+		if (udp_sk(sk)->no_check6_tx) {
+			kfree_skb(skb);
 			return -EINVAL;
+		}
 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
-		    dst_xfrm(skb_dst(skb)))
+		    dst_xfrm(skb_dst(skb))) {
+			kfree_skb(skb);
 			return -EIO;
+		}
 
 		skb_shinfo(skb)->gso_size = cork->gso_size;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
@@ -1339,10 +1347,7 @@
 	ipc6.opt = opt;
 
 	fl6.flowi6_proto = sk->sk_protocol;
-	if (!ipv6_addr_any(daddr))
-		fl6.daddr = *daddr;
-	else
-		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+	fl6.daddr = *daddr;
 	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
 		fl6.saddr = np->saddr;
 	fl6.fl6_sport = inet->inet_sport;
@@ -1370,6 +1375,9 @@
 		}
 	}
 
+	if (ipv6_addr_any(&fl6.daddr))
+		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+
 	final_p = fl6_update_dst(&fl6, opt, &final);
 	if (final_p)
 		connected = false;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 4a46df8..f5b4feb 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -144,6 +144,9 @@
 		index = __xfrm6_tunnel_spi_check(net, spi);
 		if (index >= 0)
 			goto alloc_spi;
+
+		if (spi == XFRM6_TUNNEL_SPI_MAX)
+			break;
 	}
 	for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
 		index = __xfrm6_tunnel_spi_check(net, spi);
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 26f1d43..fed6bec 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -83,8 +83,7 @@
 #define L2TP_SLFLAG_S	   0x40000000
 #define L2TP_SL_SEQ_MASK   0x00ffffff
 
-#define L2TP_HDR_SIZE_SEQ		10
-#define L2TP_HDR_SIZE_NOSEQ		6
+#define L2TP_HDR_SIZE_MAX		14
 
 /* Default trace flags */
 #define L2TP_DEFAULT_DEBUG_FLAGS	0
@@ -808,7 +807,7 @@
 	__skb_pull(skb, sizeof(struct udphdr));
 
 	/* Short packet? */
-	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
 		l2tp_info(tunnel, L2TP_MSG_DATA,
 			  "%s: recv short packet (len=%d)\n",
 			  tunnel->name, skb->len);
@@ -884,6 +883,10 @@
 		goto error;
 	}
 
+	if (tunnel->version == L2TP_HDR_VER_3 &&
+	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto error;
+
 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
 	l2tp_session_dec_refcount(session);
 
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9c9afe9..b2ce902 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -301,6 +301,26 @@
 }
 #endif
 
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+					       unsigned char **ptr, unsigned char **optr)
+{
+	int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+	if (opt_len > 0) {
+		int off = *ptr - *optr;
+
+		if (!pskb_may_pull(skb, off + opt_len))
+			return -1;
+
+		if (skb->data != *optr) {
+			*optr = skb->data;
+			*ptr = skb->data + off;
+		}
+	}
+
+	return 0;
+}
+
 #define l2tp_printk(ptr, type, func, fmt, ...)				\
 do {									\
 	if (((ptr)->debug) & (type))					\
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 35f6f86..d4c6052 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -165,6 +165,9 @@
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
 
+	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto discard_sess;
+
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
 	l2tp_session_dec_refcount(session);
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 237f1a4..37a69df 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -178,6 +178,9 @@
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
 
+	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto discard_sess;
+
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
 	l2tp_session_dec_refcount(session);
 
@@ -671,9 +674,6 @@
 	if (flags & MSG_OOB)
 		goto out;
 
-	if (addr_len)
-		*addr_len = sizeof(*lsa);
-
 	if (flags & MSG_ERRQUEUE)
 		return ipv6_recv_error(sk, msg, len, addr_len);
 
@@ -703,6 +703,7 @@
 		lsa->l2tp_conn_id = 0;
 		if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
 			lsa->l2tp_scope_id = inet6_iif(skb);
+		*addr_len = sizeof(*lsa);
 	}
 
 	if (np->rxopt.all)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 5d22eda..40c5102 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -887,6 +887,7 @@
 		      BSS_CHANGED_P2P_PS |
 		      BSS_CHANGED_TXPOWER;
 	int err;
+	int prev_beacon_int;
 
 	old = sdata_dereference(sdata->u.ap.beacon, sdata);
 	if (old)
@@ -909,6 +910,7 @@
 
 	sdata->needed_rx_chains = sdata->local->rx_chains;
 
+	prev_beacon_int = sdata->vif.bss_conf.beacon_int;
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
 	mutex_lock(&local->mtx);
@@ -917,8 +919,10 @@
 	if (!err)
 		ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
 	mutex_unlock(&local->mtx);
-	if (err)
+	if (err) {
+		sdata->vif.bss_conf.beacon_int = prev_beacon_int;
 		return err;
+	}
 
 	/*
 	 * Apply control port protocol, this allows us to
@@ -1474,6 +1478,10 @@
 	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
 		sta->sta.tdls = true;
 
+	if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
+	    !sdata->u.mgd.associated)
+		return -EINVAL;
+
 	err = sta_apply_parameters(local, sta, params);
 	if (err) {
 		sta_info_free(local, sta);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5f3c81e..3a0171a 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -7,6 +7,7 @@
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (c) 2016        Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1951,6 +1952,8 @@
 	WARN(local->open_count, "%s: open count remains %d\n",
 	     wiphy_name(local->hw.wiphy), local->open_count);
 
+	ieee80211_txq_teardown_flows(local);
+
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
 		list_del(&sdata->list);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 5136278..68db2a3 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1198,7 +1198,6 @@
 	rtnl_unlock();
 	ieee80211_led_exit(local);
 	ieee80211_wep_free(local);
-	ieee80211_txq_teardown_flows(local);
  fail_flows:
 	destroy_workqueue(local->workqueue);
  fail_workqueue:
@@ -1224,7 +1223,6 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
-	ieee80211_txq_teardown_flows(local);
 
 	rtnl_lock();
 
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 2152663..e84103b 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -70,6 +70,7 @@
  * @dst: mesh path destination mac address
  * @mpp: mesh proxy mac address
  * @rhash: rhashtable list pointer
+ * @walk_list: linked list containing all mesh_path objects.
  * @gate_list: list pointer for known gates list
  * @sdata: mesh subif
  * @next_hop: mesh neighbor to which frames for this destination will be
@@ -105,6 +106,7 @@
 	u8 dst[ETH_ALEN];
 	u8 mpp[ETH_ALEN];	/* used for MPP or MAP */
 	struct rhash_head rhash;
+	struct hlist_node walk_list;
 	struct hlist_node gate_list;
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info __rcu *next_hop;
@@ -133,12 +135,16 @@
  * gate's mpath may or may not be resolved and active.
  * @gates_lock: protects updates to known_gates
  * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
  * @entries: number of entries in the table
  */
 struct mesh_table {
 	struct hlist_head known_gates;
 	spinlock_t gates_lock;
 	struct rhashtable rhead;
+	struct hlist_head walk_head;
+	spinlock_t walk_lock;
 	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
 };
 
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a512562..c3a7396 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -59,8 +59,10 @@
 		return NULL;
 
 	INIT_HLIST_HEAD(&newtbl->known_gates);
+	INIT_HLIST_HEAD(&newtbl->walk_head);
 	atomic_set(&newtbl->entries,  0);
 	spin_lock_init(&newtbl->gates_lock);
+	spin_lock_init(&newtbl->walk_lock);
 
 	return newtbl;
 }
@@ -249,28 +251,15 @@
 static struct mesh_path *
 __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 {
-	int i = 0, ret;
-	struct mesh_path *mpath = NULL;
-	struct rhashtable_iter iter;
+	int i = 0;
+	struct mesh_path *mpath;
 
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return NULL;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
 		if (i++ == idx)
 			break;
 	}
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
 
-	if (IS_ERR(mpath) || !mpath)
+	if (!mpath)
 		return NULL;
 
 	if (mpath_expired(mpath)) {
@@ -432,6 +421,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	tbl = sdata->u.mesh.mesh_paths;
+	spin_lock_bh(&tbl->walk_lock);
 	do {
 		ret = rhashtable_lookup_insert_fast(&tbl->rhead,
 						    &new_mpath->rhash,
@@ -441,20 +431,20 @@
 			mpath = rhashtable_lookup_fast(&tbl->rhead,
 						       dst,
 						       mesh_rht_params);
-
+		else if (!ret)
+			hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
 	} while (unlikely(ret == -EEXIST && !mpath));
+	spin_unlock_bh(&tbl->walk_lock);
 
-	if (ret && ret != -EEXIST)
-		return ERR_PTR(ret);
-
-	/* At this point either new_mpath was added, or we found a
-	 * matching entry already in the table; in the latter case
-	 * free the unnecessary new entry.
-	 */
-	if (ret == -EEXIST) {
+	if (ret) {
 		kfree(new_mpath);
+
+		if (ret != -EEXIST)
+			return ERR_PTR(ret);
+
 		new_mpath = mpath;
 	}
+
 	sdata->u.mesh.mesh_paths_generation++;
 	return new_mpath;
 }
@@ -480,9 +470,17 @@
 
 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
 	tbl = sdata->u.mesh.mpp_paths;
+
+	spin_lock_bh(&tbl->walk_lock);
 	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
 					    &new_mpath->rhash,
 					    mesh_rht_params);
+	if (!ret)
+		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
+	spin_unlock_bh(&tbl->walk_lock);
+
+	if (ret)
+		kfree(new_mpath);
 
 	sdata->u.mesh.mpp_paths_generation++;
 	return ret;
@@ -503,20 +501,9 @@
 	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
 
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
 		if (rcu_access_pointer(mpath->next_hop) == sta &&
 		    mpath->flags & MESH_PATH_ACTIVE &&
 		    !(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@
 				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
 		}
 	}
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	rcu_read_unlock();
 }
 
 static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +537,7 @@
 
 static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
+	hlist_del_rcu(&mpath->walk_list);
 	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
 	mesh_path_free_rcu(tbl, mpath);
 }
@@ -571,27 +558,14 @@
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
+	struct hlist_node *n;
 
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
-
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if (rcu_access_pointer(mpath->next_hop) == sta)
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
@@ -599,51 +573,26 @@
 {
 	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
+	struct hlist_node *n;
 
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
-
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if (ether_addr_equal(mpath->mpp, proxy))
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void table_flush_by_iface(struct mesh_table *tbl)
 {
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
+	struct hlist_node *n;
 
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 /**
@@ -675,7 +624,7 @@
 {
 	struct mesh_path *mpath;
 
-	rcu_read_lock();
+	spin_lock_bh(&tbl->walk_lock);
 	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
 	if (!mpath) {
-		rcu_read_unlock();
+		spin_unlock_bh(&tbl->walk_lock);
@@ -683,7 +632,7 @@
 	}
 
 	__mesh_path_del(tbl, mpath);
-	rcu_read_unlock();
+	spin_unlock_bh(&tbl->walk_lock);
 	return 0;
 }
 
@@ -854,28 +803,16 @@
 			  struct mesh_table *tbl)
 {
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
+	struct hlist_node *n;
 
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
 		    (!(mpath->flags & MESH_PATH_FIXED)) &&
 		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
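
The mesh_pathtbl.c changes above replace every open-coded rhashtable walk with a second index: each path entry lives both in the rhashtable (keyed lookup) and on a plain hlist (traversal), with tbl->walk_lock serializing list writers and RCU protecting list readers. A minimal sketch of this dual-index pattern, using hypothetical demo_* names rather than the mac80211 structures, might look like:

/* Sketch only: hypothetical names; assumes the stock rhashtable API. */
#include <linux/rhashtable.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_entry {
	u8 key[6];
	struct rhash_head rhash;	/* O(1) keyed lookup */
	struct hlist_node walk_list;	/* cheap, restart-free iteration */
};

struct demo_table {
	struct rhashtable rhead;
	struct hlist_head walk_head;
	spinlock_t walk_lock;		/* serializes hlist writers */
};

static const struct rhashtable_params demo_params = {
	.key_offset	= offsetof(struct demo_entry, key),
	.key_len	= 6,
	.head_offset	= offsetof(struct demo_entry, rhash),
};

static int demo_insert(struct demo_table *tbl, struct demo_entry *e)
{
	int ret;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead, &e->rhash,
					    demo_params);
	if (!ret)
		hlist_add_head_rcu(&e->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);
	return ret;
}

Readers then iterate with hlist_for_each_entry_rcu() under rcu_read_lock(), which, unlike the rhashtable walker, never returns -EAGAIN and needs no init/start/stop/exit boilerplate.
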
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5e2b4a4..e946ee4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -142,6 +142,9 @@
 	/* allocate extra bitmaps */
 	if (status->chains)
 		len += 4 * hweight8(status->chains);
+	/* vendor presence bitmap */
+	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
+		len += 4;
 
 	if (ieee80211_have_rx_timestamp(status)) {
 		len = ALIGN(len, 8);
@@ -197,8 +200,6 @@
 	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
 		struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
 
-		/* vendor presence bitmap */
-		len += 4;
 		/* alignment for fixed 6-byte vendor data header */
 		len = ALIGN(len, 2);
 		/* vendor data header */
@@ -220,7 +221,7 @@
 		struct ieee80211_hdr_3addr hdr;
 		u8 category;
 		u8 action_code;
-	} __packed action;
+	} __packed __aligned(2) action;
 
 	if (!sdata)
 		return;
@@ -2597,6 +2598,7 @@
 	struct ieee80211_sub_if_data *sdata = rx->sdata;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	u16 ac, q, hdrlen;
+	int tailroom = 0;
 
 	hdr = (struct ieee80211_hdr *) skb->data;
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2676,15 +2678,21 @@
 	skb_set_queue_mapping(skb, q);
 
 	if (!--mesh_hdr->ttl) {
-		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+		if (!is_multicast_ether_addr(hdr->addr1))
+			IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
+						     dropped_frames_ttl);
 		goto out;
 	}
 
 	if (!ifmsh->mshcfg.dot11MeshForwarding)
 		goto out;
 
+	if (sdata->crypto_tx_tailroom_needed_cnt)
+		tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
 	fwd_skb = skb_copy_expand(skb, local->tx_headroom +
-				       sdata->encrypt_headroom, 0, GFP_ATOMIC);
+				       sdata->encrypt_headroom,
+				  tailroom, GFP_ATOMIC);
 	if (!fwd_skb)
 		goto out;
 
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 7fa10d0..534a604 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -556,6 +556,11 @@
 	}
 
 	ieee80211_led_tx(local);
+
+	if (skb_has_frag_list(skb)) {
+		kfree_skb_list(skb_shinfo(skb)->frag_list);
+		skb_shinfo(skb)->frag_list = NULL;
+	}
 }
 
 /*
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 995a491..743cde6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1913,9 +1913,16 @@
 				int head_need, bool may_encrypt)
 {
 	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_hdr *hdr;
+	bool enc_tailroom;
 	int tail_need = 0;
 
-	if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
+	hdr = (struct ieee80211_hdr *) skb->data;
+	enc_tailroom = may_encrypt &&
+		       (sdata->crypto_tx_tailroom_needed_cnt ||
+			ieee80211_is_mgmt(hdr->frame_control));
+
+	if (enc_tailroom) {
 		tail_need = IEEE80211_ENCRYPT_TAILROOM;
 		tail_need -= skb_tailroom(skb);
 		tail_need = max_t(int, tail_need, 0);
@@ -1923,8 +1930,7 @@
 
 	if (skb_cloned(skb) &&
 	    (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
-	     !skb_clone_writable(skb, ETH_HLEN) ||
-	     (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
+	     !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
 		I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
 	else if (head_need || tail_need)
 		I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -3608,10 +3614,10 @@
 		/* We need a bit of data queued to build aggregates properly, so
 		 * instruct the TCP stack to allow more than a single ms of data
 		 * to be queued in the stack. The value is a bit-shift of 1
-		 * second, so 8 is ~4ms of queued data. Only affects local TCP
+		 * second, so 7 is ~8ms of queued data. Only affects local TCP
 		 * sockets.
 		 */
-		sk_pacing_shift_update(skb->sk, 8);
+		sk_pacing_shift_update(skb->sk, 7);
 
 		fast_tx = rcu_dereference(sta->fast_tx);
 
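
The sk_pacing_shift_update() tweak is easiest to see numerically: the shift expresses the allowed queue as 1/(2^shift) of one second of data at the socket's pacing rate, so moving from 8 to 7 doubles the budget from roughly 4 ms to 8 ms. A throwaway userspace check of that arithmetic:

/* Sketch: queued-data budget as a function of sk_pacing_shift. */
#include <stdio.h>

int main(void)
{
	/* budget = (1 second of data) >> shift  =>  1000 ms / 2^shift */
	for (int shift = 10; shift >= 7; shift--)
		printf("shift %2d -> ~%.1f ms of queued data\n",
		       shift, 1000.0 / (1 << shift));
	return 0;
}

This prints ~3.9 ms for shift 8 and ~7.8 ms for shift 7, matching the rounded figures in the updated comment.
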
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 8fbe6cd..d5a4db5 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1822,6 +1822,9 @@
 				goto errout;
 			break;
 		}
+		case RTA_GATEWAY:
+			NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
+			goto errout;
 		case RTA_VIA:
 		{
 			if (nla_get_via(nla, &cfg->rc_via_alen,
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 1b371f6..4dba997 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -884,6 +884,20 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_TARGET_HARDIDLETIMER
+	tristate "HARDIDLETIMER target support"
+	depends on NETFILTER_ADVANCED
+	help
+
+	  This option adds the `HARDIDLETIMER' target.  Each matching packet
+	  resets the timer associated with the label specified when the rule
+	  is added.  When the timer expires, it triggers a sysfs notification.
+	  The remaining time until expiry can be read via sysfs.
+	  Compared to IDLETIMER, HARDIDLETIMER will send the notification
+	  even when the CPU is in suspend.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_LED
 	tristate '"LED" target support'
 	depends on LEDS_CLASS && LEDS_TRIGGERS
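
The "hard" part of HARDIDLETIMER shows up in the new xt_HARDIDLETIMER.c later in this patch: instead of a timer_list timer, which stops ticking while the system is suspended, it arms an alarmtimer on the ALARM_BOOTTIME clock, so expiry (and the resulting uevent) fires even across suspend. The core of that pattern, sketched with a hypothetical callback:

/* Sketch: alarmtimer-based timeout that survives system suspend. */
#include <linux/alarmtimer.h>
#include <linux/ktime.h>

static enum alarmtimer_restart demo_alarm_cb(struct alarm *alarm, ktime_t now)
{
	/* runs at expiry, even if the CPU slept the whole interval */
	return ALARMTIMER_NORESTART;
}

static void demo_arm(struct alarm *alarm, unsigned int timeout_secs)
{
	/* ALARM_BOOTTIME counts suspended time; mod_timer() would not */
	alarm_init(alarm, ALARM_BOOTTIME, demo_alarm_cb);
	alarm_start_relative(alarm, ktime_set(timeout_secs, 0));
}
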
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index f2c701e..ad603cd 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -156,6 +156,7 @@
 obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER) += xt_HARDIDLETIMER.o
 
 # matches
 obj-$(CONFIG_NETFILTER_XT_MATCH_ADDRTYPE) += xt_addrtype.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index c00b6a2..13ade57 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -219,10 +219,6 @@
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	u32 ip;
 
-	/* MAC can be src only */
-	if (!(opt->flags & IPSET_DIM_TWO_SRC))
-		return 0;
-
 	ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
 	if (ip < map->first_ip || ip > map->last_ip)
 		return -IPSET_ERR_BITMAP_RANGE;
@@ -233,7 +229,11 @@
 		return -EINVAL;
 
 	e.id = ip_to_id(map, ip);
-	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
+
+	if (opt->flags & IPSET_DIM_ONE_SRC)
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+	else
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
 
 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
 }
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index 1ab5ed2..fd87de3 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -103,7 +103,11 @@
 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
 		return -EINVAL;
 
-	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
+	if (opt->flags & IPSET_DIM_ONE_SRC)
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+	else
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+
 	if (ether_addr_equal(e.ether, invalid_ether))
 		return -EINVAL;
 
@@ -211,15 +215,15 @@
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-	 /* MAC can be src only */
-	if (!(opt->flags & IPSET_DIM_TWO_SRC))
-		return 0;
-
 	if (skb_mac_header(skb) < skb->head ||
 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
 		return -EINVAL;
 
-	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
+	if (opt->flags & IPSET_DIM_ONE_SRC)
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+	else
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+
 	if (ether_addr_equal(e.ether, invalid_ether))
 		return -EINVAL;
 
diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
index f9d5a2a..4fe5f24 100644
--- a/net/netfilter/ipset/ip_set_hash_mac.c
+++ b/net/netfilter/ipset/ip_set_hash_mac.c
@@ -81,15 +81,15 @@
 	struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-	 /* MAC can be src only */
-	if (!(opt->flags & IPSET_DIM_ONE_SRC))
-		return 0;
-
 	if (skb_mac_header(skb) < skb->head ||
 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
 		return -EINVAL;
 
-	ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+	if (opt->flags & IPSET_DIM_ONE_SRC)
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+	else
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+
 	if (is_zero_ether_addr(e.ether))
 		return -EINVAL;
 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 4eef55d..8da228d 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -531,8 +531,8 @@
 		ret = -EMSGSIZE;
 	} else {
 		cb->args[IPSET_CB_ARG0] = i;
+		ipset_nest_end(skb, atd);
 	}
-	ipset_nest_end(skb, atd);
 out:
 	rcu_read_unlock();
 	return ret;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 518364f..55a7731 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2221,6 +2221,18 @@
 		  u->udp_timeout);
 
 #ifdef CONFIG_IP_VS_PROTO_TCP
+	if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
+	    u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
+		return -EINVAL;
+	}
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+	if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
+		return -EINVAL;
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_TCP
 	if (u->tcp_timeout) {
 		pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 		pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index b6d0f6d..7554c56 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -33,12 +33,6 @@
 
 #define CONNCOUNT_SLOTS		256U
 
-#ifdef CONFIG_LOCKDEP
-#define CONNCOUNT_LOCK_SLOTS	8U
-#else
-#define CONNCOUNT_LOCK_SLOTS	256U
-#endif
-
 #define CONNCOUNT_GC_MAX_NODES	8
 #define MAX_KEYLEN		5
 
@@ -49,8 +43,6 @@
 	struct nf_conntrack_zone	zone;
 	int				cpu;
 	u32				jiffies32;
-	bool				dead;
-	struct rcu_head			rcu_head;
 };
 
 struct nf_conncount_rb {
@@ -60,7 +52,7 @@
 	struct rcu_head rcu_head;
 };
 
-static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
+static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;
 
 struct nf_conncount_data {
 	unsigned int keylen;
@@ -89,79 +81,25 @@
 	return memcmp(a, b, klen * sizeof(u32));
 }
 
-enum nf_conncount_list_add
-nf_conncount_add(struct nf_conncount_list *list,
-		 const struct nf_conntrack_tuple *tuple,
-		 const struct nf_conntrack_zone *zone)
-{
-	struct nf_conncount_tuple *conn;
-
-	if (WARN_ON_ONCE(list->count > INT_MAX))
-		return NF_CONNCOUNT_ERR;
-
-	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
-	if (conn == NULL)
-		return NF_CONNCOUNT_ERR;
-
-	conn->tuple = *tuple;
-	conn->zone = *zone;
-	conn->cpu = raw_smp_processor_id();
-	conn->jiffies32 = (u32)jiffies;
-	conn->dead = false;
-	spin_lock_bh(&list->list_lock);
-	if (list->dead == true) {
-		kmem_cache_free(conncount_conn_cachep, conn);
-		spin_unlock_bh(&list->list_lock);
-		return NF_CONNCOUNT_SKIP;
-	}
-	list_add_tail(&conn->node, &list->head);
-	list->count++;
-	spin_unlock_bh(&list->list_lock);
-	return NF_CONNCOUNT_ADDED;
-}
-EXPORT_SYMBOL_GPL(nf_conncount_add);
-
-static void __conn_free(struct rcu_head *h)
-{
-	struct nf_conncount_tuple *conn;
-
-	conn = container_of(h, struct nf_conncount_tuple, rcu_head);
-	kmem_cache_free(conncount_conn_cachep, conn);
-}
-
-static bool conn_free(struct nf_conncount_list *list,
+static void conn_free(struct nf_conncount_list *list,
 		      struct nf_conncount_tuple *conn)
 {
-	bool free_entry = false;
-
-	spin_lock_bh(&list->list_lock);
-
-	if (conn->dead) {
-		spin_unlock_bh(&list->list_lock);
-		return free_entry;
-	}
+	lockdep_assert_held(&list->list_lock);
 
 	list->count--;
-	conn->dead = true;
-	list_del_rcu(&conn->node);
-	if (list->count == 0) {
-		list->dead = true;
-		free_entry = true;
-	}
+	list_del(&conn->node);
 
-	spin_unlock_bh(&list->list_lock);
-	call_rcu(&conn->rcu_head, __conn_free);
-	return free_entry;
+	kmem_cache_free(conncount_conn_cachep, conn);
 }
 
 static const struct nf_conntrack_tuple_hash *
 find_or_evict(struct net *net, struct nf_conncount_list *list,
-	      struct nf_conncount_tuple *conn, bool *free_entry)
+	      struct nf_conncount_tuple *conn)
 {
 	const struct nf_conntrack_tuple_hash *found;
 	unsigned long a, b;
 	int cpu = raw_smp_processor_id();
-	__s32 age;
+	u32 age;
 
 	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
 	if (found)
@@ -176,52 +114,45 @@
 	 */
 	age = a - b;
 	if (conn->cpu == cpu || age >= 2) {
-		*free_entry = conn_free(list, conn);
+		conn_free(list, conn);
 		return ERR_PTR(-ENOENT);
 	}
 
 	return ERR_PTR(-EAGAIN);
 }
 
-void nf_conncount_lookup(struct net *net,
-			 struct nf_conncount_list *list,
-			 const struct nf_conntrack_tuple *tuple,
-			 const struct nf_conntrack_zone *zone,
-			 bool *addit)
+static int __nf_conncount_add(struct net *net,
+			      struct nf_conncount_list *list,
+			      const struct nf_conntrack_tuple *tuple,
+			      const struct nf_conntrack_zone *zone)
 {
 	const struct nf_conntrack_tuple_hash *found;
 	struct nf_conncount_tuple *conn, *conn_n;
 	struct nf_conn *found_ct;
 	unsigned int collect = 0;
-	bool free_entry = false;
-
-	/* best effort only */
-	*addit = tuple ? true : false;
 
 	/* check the saved connections */
 	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
 		if (collect > CONNCOUNT_GC_MAX_NODES)
 			break;
 
-		found = find_or_evict(net, list, conn, &free_entry);
+		found = find_or_evict(net, list, conn);
 		if (IS_ERR(found)) {
 			/* Not found, but might be about to be confirmed */
 			if (PTR_ERR(found) == -EAGAIN) {
-				if (!tuple)
-					continue;
-
 				if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
 				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
 				    nf_ct_zone_id(zone, zone->dir))
-					*addit = false;
-			} else if (PTR_ERR(found) == -ENOENT)
+					return 0; /* already exists */
+			} else {
 				collect++;
+			}
 			continue;
 		}
 
 		found_ct = nf_ct_tuplehash_to_ctrack(found);
 
-		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
+		if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
 		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
 			/*
 			 * We should not see tuples twice unless someone hooks
@@ -229,7 +160,8 @@
 			 *
 			 * Attempt to avoid a re-add in this case.
 			 */
-			*addit = false;
+			nf_ct_put(found_ct);
+			return 0;
 		} else if (already_closed(found_ct)) {
 			/*
 			 * we do not care about connections which are
@@ -243,19 +175,48 @@
 
 		nf_ct_put(found_ct);
 	}
+
+	if (WARN_ON_ONCE(list->count > INT_MAX))
+		return -EOVERFLOW;
+
+	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
+	if (conn == NULL)
+		return -ENOMEM;
+
+	conn->tuple = *tuple;
+	conn->zone = *zone;
+	conn->cpu = raw_smp_processor_id();
+	conn->jiffies32 = (u32)jiffies;
+	list_add_tail(&conn->node, &list->head);
+	list->count++;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(nf_conncount_lookup);
+
+int nf_conncount_add(struct net *net,
+		     struct nf_conncount_list *list,
+		     const struct nf_conntrack_tuple *tuple,
+		     const struct nf_conntrack_zone *zone)
+{
+	int ret;
+
+	/* check the saved connections */
+	spin_lock_bh(&list->list_lock);
+	ret = __nf_conncount_add(net, list, tuple, zone);
+	spin_unlock_bh(&list->list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_add);
 
 void nf_conncount_list_init(struct nf_conncount_list *list)
 {
 	spin_lock_init(&list->list_lock);
 	INIT_LIST_HEAD(&list->head);
 	list->count = 0;
-	list->dead = false;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_list_init);
 
-/* Return true if the list is empty */
+/* Return true if the list is empty. Must be called with BH disabled. */
 bool nf_conncount_gc_list(struct net *net,
 			  struct nf_conncount_list *list)
 {
@@ -263,17 +224,17 @@
 	struct nf_conncount_tuple *conn, *conn_n;
 	struct nf_conn *found_ct;
 	unsigned int collected = 0;
-	bool free_entry = false;
 	bool ret = false;
 
+	/* don't bother if other cpu is already doing GC */
+	if (!spin_trylock(&list->list_lock))
+		return false;
+
 	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
-		found = find_or_evict(net, list, conn, &free_entry);
+		found = find_or_evict(net, list, conn);
 		if (IS_ERR(found)) {
-			if (PTR_ERR(found) == -ENOENT)  {
-				if (free_entry)
-					return true;
+			if (PTR_ERR(found) == -ENOENT)
 				collected++;
-			}
 			continue;
 		}
 
@@ -284,23 +245,19 @@
 			 * closed already -> ditch it
 			 */
 			nf_ct_put(found_ct);
-			if (conn_free(list, conn))
-				return true;
+			conn_free(list, conn);
 			collected++;
 			continue;
 		}
 
 		nf_ct_put(found_ct);
 		if (collected > CONNCOUNT_GC_MAX_NODES)
-			return false;
+			break;
 	}
 
-	spin_lock_bh(&list->list_lock);
-	if (!list->count) {
-		list->dead = true;
+	if (!list->count)
 		ret = true;
-	}
-	spin_unlock_bh(&list->list_lock);
+	spin_unlock(&list->list_lock);
 
 	return ret;
 }
@@ -314,6 +271,7 @@
 	kmem_cache_free(conncount_rb_cachep, rbconn);
 }
 
+/* caller must hold tree nf_conncount_locks[] lock */
 static void tree_nodes_free(struct rb_root *root,
 			    struct nf_conncount_rb *gc_nodes[],
 			    unsigned int gc_count)
@@ -323,8 +281,10 @@
 	while (gc_count) {
 		rbconn = gc_nodes[--gc_count];
 		spin_lock(&rbconn->list.list_lock);
-		rb_erase(&rbconn->node, root);
-		call_rcu(&rbconn->rcu_head, __tree_nodes_free);
+		if (!rbconn->list.count) {
+			rb_erase(&rbconn->node, root);
+			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
+		}
 		spin_unlock(&rbconn->list.list_lock);
 	}
 }
@@ -341,20 +301,19 @@
 	    struct rb_root *root,
 	    unsigned int hash,
 	    const u32 *key,
-	    u8 keylen,
 	    const struct nf_conntrack_tuple *tuple,
 	    const struct nf_conntrack_zone *zone)
 {
-	enum nf_conncount_list_add ret;
 	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
 	struct rb_node **rbnode, *parent;
 	struct nf_conncount_rb *rbconn;
 	struct nf_conncount_tuple *conn;
 	unsigned int count = 0, gc_count = 0;
-	bool node_found = false;
+	u8 keylen = data->keylen;
+	bool do_gc = true;
 
-	spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
-
+	spin_lock_bh(&nf_conncount_locks[hash]);
+restart:
 	parent = NULL;
 	rbnode = &(root->rb_node);
 	while (*rbnode) {
@@ -368,45 +327,32 @@
 		} else if (diff > 0) {
 			rbnode = &((*rbnode)->rb_right);
 		} else {
-			/* unlikely: other cpu added node already */
-			node_found = true;
-			ret = nf_conncount_add(&rbconn->list, tuple, zone);
-			if (ret == NF_CONNCOUNT_ERR) {
+			int ret;
+
+			ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
+			if (ret)
 				count = 0; /* hotdrop */
-			} else if (ret == NF_CONNCOUNT_ADDED) {
+			else
 				count = rbconn->list.count;
-			} else {
-				/* NF_CONNCOUNT_SKIP, rbconn is already
-				 * reclaimed by gc, insert a new tree node
-				 */
-				node_found = false;
-			}
-			break;
+			tree_nodes_free(root, gc_nodes, gc_count);
+			goto out_unlock;
 		}
 
 		if (gc_count >= ARRAY_SIZE(gc_nodes))
 			continue;
 
-		if (nf_conncount_gc_list(net, &rbconn->list))
+		if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
 			gc_nodes[gc_count++] = rbconn;
 	}
 
 	if (gc_count) {
 		tree_nodes_free(root, gc_nodes, gc_count);
-		/* tree_node_free before new allocation permits
-		 * allocator to re-use newly free'd object.
-		 *
-		 * This is a rare event; in most cases we will find
-		 * existing node to re-use. (or gc_count is 0).
-		 */
-
-		if (gc_count >= ARRAY_SIZE(gc_nodes))
-			schedule_gc_worker(data, hash);
+		schedule_gc_worker(data, hash);
+		gc_count = 0;
+		do_gc = false;
+		goto restart;
 	}
 
-	if (node_found)
-		goto out_unlock;
-
 	/* expected case: match, insert new node */
 	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
 	if (rbconn == NULL)
@@ -427,10 +373,10 @@
 	count = 1;
 	rbconn->list.count = count;
 
-	rb_link_node(&rbconn->node, parent, rbnode);
+	rb_link_node_rcu(&rbconn->node, parent, rbnode);
 	rb_insert_color(&rbconn->node, root);
 out_unlock:
-	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+	spin_unlock_bh(&nf_conncount_locks[hash]);
 	return count;
 }
 
@@ -441,7 +387,6 @@
 	   const struct nf_conntrack_tuple *tuple,
 	   const struct nf_conntrack_zone *zone)
 {
-	enum nf_conncount_list_add ret;
 	struct rb_root *root;
 	struct rb_node *parent;
 	struct nf_conncount_rb *rbconn;
@@ -454,7 +399,6 @@
 	parent = rcu_dereference_raw(root->rb_node);
 	while (parent) {
 		int diff;
-		bool addit;
 
 		rbconn = rb_entry(parent, struct nf_conncount_rb, node);
 
@@ -464,31 +408,36 @@
 		} else if (diff > 0) {
 			parent = rcu_dereference_raw(parent->rb_right);
 		} else {
-			/* same source network -> be counted! */
-			nf_conncount_lookup(net, &rbconn->list, tuple, zone,
-					    &addit);
+			int ret;
 
-			if (!addit)
+			if (!tuple) {
+				nf_conncount_gc_list(net, &rbconn->list);
 				return rbconn->list.count;
+			}
 
-			ret = nf_conncount_add(&rbconn->list, tuple, zone);
-			if (ret == NF_CONNCOUNT_ERR) {
-				return 0; /* hotdrop */
-			} else if (ret == NF_CONNCOUNT_ADDED) {
-				return rbconn->list.count;
-			} else {
-				/* NF_CONNCOUNT_SKIP, rbconn is already
-				 * reclaimed by gc, insert a new tree node
-				 */
+			spin_lock_bh(&rbconn->list.list_lock);
+			/* Node might be about to be free'd.
+			 * We need to defer to insert_tree() in this case.
+			 */
+			if (rbconn->list.count == 0) {
+				spin_unlock_bh(&rbconn->list.list_lock);
 				break;
 			}
+
+			/* same source network -> be counted! */
+			ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
+			spin_unlock_bh(&rbconn->list.list_lock);
+			if (ret)
+				return 0; /* hotdrop */
+			else
+				return rbconn->list.count;
 		}
 	}
 
 	if (!tuple)
 		return 0;
 
-	return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
+	return insert_tree(net, data, root, hash, key, tuple, zone);
 }
 
 static void tree_gc_worker(struct work_struct *work)
@@ -499,27 +448,47 @@
 	struct rb_node *node;
 	unsigned int tree, next_tree, gc_count = 0;
 
-	tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
+	tree = data->gc_tree % CONNCOUNT_SLOTS;
 	root = &data->root[tree];
 
+	local_bh_disable();
 	rcu_read_lock();
 	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
 		rbconn = rb_entry(node, struct nf_conncount_rb, node);
 		if (nf_conncount_gc_list(data->net, &rbconn->list))
-			gc_nodes[gc_count++] = rbconn;
+			gc_count++;
 	}
 	rcu_read_unlock();
+	local_bh_enable();
+
+	cond_resched();
 
 	spin_lock_bh(&nf_conncount_locks[tree]);
+	if (gc_count < ARRAY_SIZE(gc_nodes))
+		goto next; /* do not bother */
 
-	if (gc_count) {
-		tree_nodes_free(root, gc_nodes, gc_count);
+	gc_count = 0;
+	node = rb_first(root);
+	while (node != NULL) {
+		rbconn = rb_entry(node, struct nf_conncount_rb, node);
+		node = rb_next(node);
+
+		if (rbconn->list.count > 0)
+			continue;
+
+		gc_nodes[gc_count++] = rbconn;
+		if (gc_count >= ARRAY_SIZE(gc_nodes)) {
+			tree_nodes_free(root, gc_nodes, gc_count);
+			gc_count = 0;
+		}
 	}
 
+	tree_nodes_free(root, gc_nodes, gc_count);
+next:
 	clear_bit(tree, data->pending_trees);
 
 	next_tree = (tree + 1) % CONNCOUNT_SLOTS;
-	next_tree = find_next_bit(data->pending_trees, next_tree, CONNCOUNT_SLOTS);
+	next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);
 
 	if (next_tree < CONNCOUNT_SLOTS) {
 		data->gc_tree = next_tree;
@@ -621,10 +590,7 @@
 {
 	int i;
 
-	BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
-	BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
-
-	for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
+	for (i = 0; i < CONNCOUNT_SLOTS; ++i)
 		spin_lock_init(&nf_conncount_locks[i]);
 
 	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
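
Taken together, the nf_conncount hunks above replace the old call_rcu()/"dead"-flag scheme with one simple contract: list_lock protects both the list and list->count, entries are freed synchronously under it, and garbage collection uses spin_trylock() so two CPUs never contend on the same list. Sketched with a hypothetical list type:

/* Sketch only: the post-patch nf_conncount locking rule. */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_list {
	spinlock_t lock;	/* covers head *and* count */
	struct list_head head;
	unsigned int count;
};

static bool demo_gc(struct demo_list *list)
{
	bool empty;

	/* don't bother if another CPU is already collecting this list */
	if (!spin_trylock(&list->lock))
		return false;

	/* ... evict closed/expired entries, decrementing list->count ... */

	empty = (list->count == 0);
	spin_unlock(&list->lock);
	return empty;
}

An empty return lets the caller consider erasing the tree node, which tree_nodes_free() above re-checks under the node's own list_lock before actually removing it.
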
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 277d02a..895171a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1007,6 +1007,22 @@
 		}
 
 		if (nf_ct_key_equal(h, tuple, zone, net)) {
+			/* Tuple is taken already, so caller will need to find
+			 * a new source port to use.
+			 *
+			 * Only exception:
+			 * If the *original tuples* are identical, then both
+			 * conntracks refer to the same flow.
+			 * This is a rare situation, it can occur e.g. when
+			 * more than one UDP packet is sent from same socket
+			 * in different threads.
+			 *
+			 * Let nf_ct_resolve_clash() deal with this later.
+			 */
+			if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+					      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+				continue;
+
 			NF_CT_STAT_INC_ATOMIC(net, found);
 			rcu_read_unlock();
 			return 1;
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index a975efd..9da3034 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -115,12 +115,12 @@
 /* TCP SACK sequence number adjustment */
 static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
 				      unsigned int protoff,
-				      struct tcphdr *tcph,
 				      struct nf_conn *ct,
 				      enum ip_conntrack_info ctinfo)
 {
-	unsigned int dir, optoff, optend;
+	struct tcphdr *tcph = (void *)skb->data + protoff;
 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+	unsigned int dir, optoff, optend;
 
 	optoff = protoff + sizeof(struct tcphdr);
 	optend = protoff + tcph->doff * 4;
@@ -128,6 +128,7 @@
 	if (!skb_make_writable(skb, optend))
 		return 0;
 
+	tcph = (void *)skb->data + protoff;
 	dir = CTINFO2DIR(ctinfo);
 
 	while (optoff < optend) {
@@ -207,7 +208,7 @@
 		 ntohl(newack));
 	tcph->ack_seq = newack;
 
-	res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+	res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
 out:
 	spin_unlock_bh(&ct->lock);
 
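
The nf_ct_sack_adjust() change fixes a pointer-invalidation bug rather than a logic bug: skb_make_writable() may reallocate the packet data, so a tcph pointer computed by the caller beforehand can dangle. The fix is to derive the pointer inside the function and recompute it after the call; the pattern is worth spelling out (sketch, hypothetical function name):

/* Sketch: recompute header pointers after any call that can move skb data. */
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>

static unsigned int demo_sack_adjust(struct sk_buff *skb, unsigned int protoff)
{
	struct tcphdr *tcph = (void *)skb->data + protoff;
	unsigned int optend = protoff + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	tcph = (void *)skb->data + protoff;	/* skb->data may have moved */
	/* ... walk TCP options between protoff + sizeof(*tcph) and optend ... */
	return 1;
}
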
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index d812561..e1537ac 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -28,6 +28,7 @@
 {
 	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
 	struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
+	struct dst_entry *other_dst = route->tuple[!dir].dst;
 	struct dst_entry *dst = route->tuple[dir].dst;
 
 	ft->dir = dir;
@@ -50,8 +51,8 @@
 	ft->src_port = ctt->src.u.tcp.port;
 	ft->dst_port = ctt->dst.u.tcp.port;
 
-	ft->iifidx = route->tuple[dir].ifindex;
-	ft->oifidx = route->tuple[!dir].ifindex;
+	ft->iifidx = other_dst->dev->ifindex;
+	ft->oifidx = dst->dev->ifindex;
 	ft->dst_cache = dst;
 }
 
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index e2b1960..2268b10 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -117,7 +117,8 @@
 	dst = skb_dst(skb);
 	if (dst->xfrm)
 		dst = ((struct xfrm_dst *)dst)->route;
-	dst_hold(dst);
+	if (!dst_hold_safe(dst))
+		return -EHOSTUNREACH;
 
 	if (sk && !net_eq(net, sock_net(sk)))
 		sk = NULL;
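
dst_hold() blindly increments the reference count, which can "resurrect" a dst whose count already reached zero and is queued for release; dst_hold_safe() increments only while the count is still nonzero. The caller-side pattern adopted above, as a sketch:

/* Sketch: take a route reference only if the dst is still live. */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>

static int demo_take_route_ref(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (!dst_hold_safe(dst))
		return -EHOSTUNREACH;	/* dst already dying: fail, don't UAF */
	/* ... use dst, then dst_release(dst) when done ... */
	return 0;
}
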
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fe0558b..7d424fd 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -291,6 +291,9 @@
 	int err;
 
 	list_for_each_entry(rule, &ctx->chain->rules, list) {
+		if (!nft_is_active_next(ctx->net, rule))
+			continue;
+
 		err = nft_delrule(ctx, rule);
 		if (err < 0)
 			return err;
@@ -1199,7 +1202,8 @@
 		if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
 			goto nla_put_failure;
 
-		if (basechain->stats && nft_dump_stats(skb, basechain->stats))
+		if (rcu_access_pointer(basechain->stats) &&
+		    nft_dump_stats(skb, rcu_dereference(basechain->stats)))
 			goto nla_put_failure;
 	}
 
@@ -1375,7 +1379,8 @@
 	return newstats;
 }
 
-static void nft_chain_stats_replace(struct nft_base_chain *chain,
+static void nft_chain_stats_replace(struct net *net,
+				    struct nft_base_chain *chain,
 				    struct nft_stats __percpu *newstats)
 {
 	struct nft_stats __percpu *oldstats;
@@ -1383,8 +1388,9 @@
 	if (newstats == NULL)
 		return;
 
-	if (chain->stats) {
-		oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
+	if (rcu_access_pointer(chain->stats)) {
+		oldstats = rcu_dereference_protected(chain->stats,
+					lockdep_commit_lock_is_held(net));
 		rcu_assign_pointer(chain->stats, newstats);
 		synchronize_rcu();
 		free_percpu(oldstats);
@@ -1421,9 +1427,10 @@
 		struct nft_base_chain *basechain = nft_base_chain(chain);
 
 		module_put(basechain->type->owner);
-		free_percpu(basechain->stats);
-		if (basechain->stats)
+		if (rcu_access_pointer(basechain->stats)) {
 			static_branch_dec(&nft_counters_enabled);
+			free_percpu(rcu_dereference_raw(basechain->stats));
+		}
 		kfree(chain->name);
 		kfree(basechain);
 	} else {
@@ -1572,7 +1579,7 @@
 				kfree(basechain);
 				return PTR_ERR(stats);
 			}
-			basechain->stats = stats;
+			rcu_assign_pointer(basechain->stats, stats);
 			static_branch_inc(&nft_counters_enabled);
 		}
 
@@ -4435,6 +4442,8 @@
 err5:
 	kfree(trans);
 err4:
+	if (obj)
+		obj->use--;
 	kfree(elem.priv);
 err3:
 	if (nla[NFTA_SET_ELEM_DATA] != NULL)
@@ -6145,7 +6154,8 @@
 		return;
 
 	basechain = nft_base_chain(trans->ctx.chain);
-	nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
+	nft_chain_stats_replace(trans->ctx.net, basechain,
+				nft_trans_chain_stats(trans));
 
 	switch (nft_trans_chain_policy(trans)) {
 	case NF_DROP:
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index ffd5c0f..60f258f 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -101,7 +101,7 @@
 	struct nft_stats *stats;
 
 	base_chain = nft_base_chain(chain);
-	if (!base_chain->stats)
+	if (!rcu_access_pointer(base_chain->stats))
 		return;
 
 	local_bh_disable();
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 00db27d..b0bc130 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -71,6 +71,7 @@
 			     int ttl_check,
 			     struct nf_osf_hdr_ctx *ctx)
 {
+	const __u8 *optpinit = ctx->optp;
 	unsigned int check_WSS = 0;
 	int fmatch = FMATCH_WRONG;
 	int foptsize, optnum;
@@ -160,6 +161,9 @@
 		}
 	}
 
+	if (fmatch != FMATCH_OK)
+		ctx->optp = optpinit;
+
 	return fmatch == FMATCH_OK;
 }
 
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 29d6fc7..38da1f5 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -282,6 +282,7 @@
 {
 	struct xt_target *target = expr->ops->data;
 	void *info = nft_expr_priv(expr);
+	struct module *me = target->me;
 	struct xt_tgdtor_param par;
 
 	par.net = ctx->net;
@@ -292,7 +293,7 @@
 		par.target->destroy(&par);
 
 	if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
-		module_put(target->me);
+		module_put(me);
 }
 
 static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index b90d96b..af1497a 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -30,7 +30,6 @@
 	enum ip_conntrack_info ctinfo;
 	const struct nf_conn *ct;
 	unsigned int count;
-	bool addit;
 
 	tuple_ptr = &tuple;
 
@@ -44,19 +43,12 @@
 		return;
 	}
 
-	nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone,
-			    &addit);
-	count = priv->list.count;
-
-	if (!addit)
-		goto out;
-
-	if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) {
+	if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
 		regs->verdict.code = NF_DROP;
 		return;
 	}
-	count++;
-out:
+
+	count = priv->list.count;
 
 	if ((count > priv->limit) ^ priv->invert) {
 		regs->verdict.code = NFT_BREAK;
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 5fd4c57..436cc14 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -12,6 +12,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <linux/netfilter/nf_conntrack_common.h>
 #include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack_helper.h>
 
 struct nft_flow_offload {
 	struct nft_flowtable	*flowtable;
@@ -29,10 +30,12 @@
 	memset(&fl, 0, sizeof(fl));
 	switch (nft_pf(pkt)) {
 	case NFPROTO_IPV4:
-		fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+		fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+		fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
 		break;
 	case NFPROTO_IPV6:
-		fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+		fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+		fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
 		break;
 	}
 
@@ -41,9 +44,7 @@
 		return -ENOENT;
 
 	route->tuple[dir].dst		= this_dst;
-	route->tuple[dir].ifindex	= nft_in(pkt)->ifindex;
 	route->tuple[!dir].dst		= other_dst;
-	route->tuple[!dir].ifindex	= nft_out(pkt)->ifindex;
 
 	return 0;
 }
@@ -66,6 +67,7 @@
 {
 	struct nft_flow_offload *priv = nft_expr_priv(expr);
 	struct nf_flowtable *flowtable = &priv->flowtable->data;
+	const struct nf_conn_help *help;
 	enum ip_conntrack_info ctinfo;
 	struct nf_flow_route route;
 	struct flow_offload *flow;
@@ -88,7 +90,8 @@
 		goto out;
 	}
 
-	if (test_bit(IPS_HELPER_BIT, &ct->status))
+	help = nfct_help(ct);
+	if (help)
 		goto out;
 
 	if (ctinfo == IP_CT_NEW ||
diff --git a/net/netfilter/xt_HARDIDLETIMER.c b/net/netfilter/xt_HARDIDLETIMER.c
new file mode 100644
index 0000000..055763b
--- /dev/null
+++ b/net/netfilter/xt_HARDIDLETIMER.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* linux/net/netfilter/xt_HARDIDLETIMER.c
+ *
+ * Netfilter module to trigger a timer when packet matches.
+ * After timer expires a kevent will be sent.
+ *
+ * Copyright (c) 2014-2015, 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ *
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and reworked for upstream inclusion
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/alarmtimer.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_HARDIDLETIMER.h>
+#include <linux/kdev_t.h>
+#include <linux/kobject.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <net/net_namespace.h>
+
+struct hardidletimer_tg_attr {
+	struct attribute attr;
+	ssize_t	(*show)(struct kobject *kobj,
+			struct attribute *attr, char *buf);
+};
+
+struct hardidletimer_tg {
+	struct list_head entry;
+	struct alarm alarm;
+	struct work_struct work;
+
+	struct kobject *kobj;
+	struct hardidletimer_tg_attr attr;
+
+	unsigned int refcnt;
+	bool send_nl_msg;
+	bool active;
+};
+
+static LIST_HEAD(hardidletimer_tg_list);
+static DEFINE_MUTEX(list_mutex);
+
+static struct kobject *hardidletimer_tg_kobj;
+
+static void notify_netlink_uevent(const char *iface,
+				  struct hardidletimer_tg *timer)
+{
+	char iface_msg[NLMSG_MAX_SIZE];
+	char state_msg[NLMSG_MAX_SIZE];
+	char *envp[] = { iface_msg, state_msg, NULL };
+	int res;
+
+	res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+		       iface);
+	if (res >= NLMSG_MAX_SIZE) {
+		pr_err("message too long (%d)", res);
+		return;
+	}
+	res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+		       timer->active ? "active" : "inactive");
+	if (res >= NLMSG_MAX_SIZE) {
+		pr_err("message too long (%d)", res);
+		return;
+	}
+	pr_debug("putting nlmsg: <%s> <%s>\n", iface_msg, state_msg);
+	kobject_uevent_env(hardidletimer_tg_kobj, KOBJ_CHANGE, envp);
+}
+
+static
+struct hardidletimer_tg *__hardidletimer_tg_find_by_label(const char *label)
+{
+	struct hardidletimer_tg *entry;
+
+	WARN_ON(!label);
+
+	list_for_each_entry(entry, &hardidletimer_tg_list, entry) {
+		if (!strcmp(label, entry->attr.attr.name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+static ssize_t hardidletimer_tg_show(struct kobject *kobj,
+				     struct attribute *attr, char *buf)
+{
+	struct hardidletimer_tg *timer;
+	ktime_t expires;
+	struct timespec ktimespec;
+
+	memset(&ktimespec, 0, sizeof(struct timespec));
+	mutex_lock(&list_mutex);
+
+	timer = __hardidletimer_tg_find_by_label(attr->name);
+	if (timer) {
+		expires = alarm_expires_remaining(&timer->alarm);
+		ktimespec = ktime_to_timespec(expires);
+	}
+
+	mutex_unlock(&list_mutex);
+
+	if (ktimespec.tv_sec >= 0)
+		return snprintf(buf, PAGE_SIZE, "%ld\n", ktimespec.tv_sec);
+
+	if (timer && timer->send_nl_msg)
+		return snprintf(buf, PAGE_SIZE, "0 %ld\n", ktimespec.tv_sec);
+	else
+		return snprintf(buf, PAGE_SIZE, "0\n");
+}
+
+static void hardidletimer_tg_work(struct work_struct *work)
+{
+	struct hardidletimer_tg *timer = container_of(work,
+				struct hardidletimer_tg, work);
+
+	sysfs_notify(hardidletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+	if (timer->send_nl_msg)
+		notify_netlink_uevent(timer->attr.attr.name, timer);
+}
+
+static enum alarmtimer_restart hardidletimer_tg_alarmproc(struct alarm *alarm,
+							  ktime_t now)
+{
+	struct hardidletimer_tg *timer = alarm->data;
+
+	pr_debug("alarm %s expired\n", timer->attr.attr.name);
+
+	timer->active = false;
+	schedule_work(&timer->work);
+	return ALARMTIMER_NORESTART;
+}
+
+static int hardidletimer_tg_create(struct hardidletimer_tg_info *info)
+{
+	int ret;
+	ktime_t tout;
+
+	info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+	if (!info->timer) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
+	if (!info->timer->attr.attr.name) {
+		ret = -ENOMEM;
+		goto out_free_timer;
+	}
+	info->timer->attr.attr.mode = 0444;
+	info->timer->attr.show = hardidletimer_tg_show;
+
+	ret = sysfs_create_file(hardidletimer_tg_kobj, &info->timer->attr.attr);
+	if (ret < 0) {
+		pr_debug("couldn't add file to sysfs");
+		goto out_free_attr;
+	}
+	/*  notify userspace  */
+	kobject_uevent(hardidletimer_tg_kobj, KOBJ_ADD);
+
+	list_add(&info->timer->entry, &hardidletimer_tg_list);
+
+	alarm_init(&info->timer->alarm, ALARM_BOOTTIME,
+		   hardidletimer_tg_alarmproc);
+	info->timer->alarm.data = info->timer;
+	info->timer->refcnt = 1;
+	info->timer->send_nl_msg = (info->send_nl_msg != 0);
+	info->timer->active = true;
+	tout = ktime_set(info->timeout, 0);
+	alarm_start_relative(&info->timer->alarm, tout);
+
+	INIT_WORK(&info->timer->work, hardidletimer_tg_work);
+
+	return 0;
+
+out_free_attr:
+	kfree(info->timer->attr.attr.name);
+out_free_timer:
+	kfree(info->timer);
+out:
+	return ret;
+}
+
+/* The actual xt_tables plugin. */
+static unsigned int hardidletimer_tg_target(struct sk_buff *skb,
+					    const struct xt_action_param *par)
+{
+	const struct hardidletimer_tg_info *info = par->targinfo;
+	ktime_t tout;
+
+	pr_debug("resetting timer %s, timeout period %u\n",
+		 info->label, info->timeout);
+
+	WARN_ON(!info->timer);
+
+	if (!info->timer->active) {
+		schedule_work(&info->timer->work);
+		pr_debug("Starting timer %s\n", info->label);
+	}
+
+	info->timer->active = true;
+	/* TODO: Avoid modifying timers on each packet */
+	tout = ktime_set(info->timeout, 0);
+	alarm_start_relative(&info->timer->alarm, tout);
+
+	return XT_CONTINUE;
+}
+
+static int hardidletimer_tg_checkentry(const struct xt_tgchk_param *par)
+{
+	struct hardidletimer_tg_info *info = par->targinfo;
+	int ret;
+	ktime_t tout;
+	struct timespec ktimespec;
+
+	memset(&ktimespec, 0, sizeof(struct timespec));
+
+	pr_debug("checkentry targinfo %s\n", info->label);
+
+	if (info->timeout == 0) {
+		pr_debug("timeout value is zero\n");
+		return -EINVAL;
+	}
+
+	if (info->label[0] == '\0' ||
+	    strnlen(info->label, MAX_HARDIDLETIMER_LABEL_SIZE)
+				== MAX_HARDIDLETIMER_LABEL_SIZE) {
+		pr_debug("label is empty or not nul-terminated\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&list_mutex);
+
+	info->timer = __hardidletimer_tg_find_by_label(info->label);
+	if (info->timer) {
+		info->timer->refcnt++;
+		/* calculate remaining expiry time */
+		tout = alarm_expires_remaining(&info->timer->alarm);
+		ktimespec = ktime_to_timespec(tout);
+
+		if (ktimespec.tv_sec > 0) {
+			pr_debug("time_expiry_remaining %ld\n",
+				 ktimespec.tv_sec);
+			alarm_start_relative(&info->timer->alarm, tout);
+		}
+
+		pr_debug("increased refcnt of timer %s to %u\n",
+			 info->label, info->timer->refcnt);
+	} else {
+		ret = hardidletimer_tg_create(info);
+		if (ret < 0) {
+			pr_debug("failed to create timer\n");
+			mutex_unlock(&list_mutex);
+			return ret;
+		}
+	}
+
+	mutex_unlock(&list_mutex);
+
+	return 0;
+}
+
+static void hardidletimer_tg_destroy(const struct xt_tgdtor_param *par)
+{
+	const struct hardidletimer_tg_info *info = par->targinfo;
+
+	pr_debug("destroy targinfo %s\n", info->label);
+
+	mutex_lock(&list_mutex);
+
+	if (--info->timer->refcnt == 0) {
+		pr_debug("deleting timer %s\n", info->label);
+
+		list_del(&info->timer->entry);
+		alarm_cancel(&info->timer->alarm);
+		cancel_work_sync(&info->timer->work);
+		sysfs_remove_file(hardidletimer_tg_kobj,
+				  &info->timer->attr.attr);
+		kfree(info->timer->attr.attr.name);
+		kfree(info->timer);
+	} else {
+		pr_debug("decreased refcnt of timer %s to %u\n",
+			 info->label, info->timer->refcnt);
+	}
+
+	mutex_unlock(&list_mutex);
+}
+
+static struct xt_target hardidletimer_tg __read_mostly = {
+	.name		= "HARDIDLETIMER",
+	.revision	= 1,
+	.family		= NFPROTO_UNSPEC,
+	.target		= hardidletimer_tg_target,
+	.targetsize     = sizeof(struct hardidletimer_tg_info),
+	.checkentry	= hardidletimer_tg_checkentry,
+	.destroy        = hardidletimer_tg_destroy,
+	.me		= THIS_MODULE,
+};
+
+static struct class *hardidletimer_tg_class;
+
+static struct device *hardidletimer_tg_device;
+
+static int __init hardidletimer_tg_init(void)
+{
+	int err;
+
+	hardidletimer_tg_class = class_create(THIS_MODULE, "xt_hardidletimer");
+	err = PTR_ERR(hardidletimer_tg_class);
+	if (IS_ERR(hardidletimer_tg_class)) {
+		pr_debug("couldn't register device class\n");
+		goto out;
+	}
+
+	hardidletimer_tg_device = device_create(hardidletimer_tg_class, NULL,
+						MKDEV(0, 0), NULL, "timers");
+	err = PTR_ERR(hardidletimer_tg_device);
+	if (IS_ERR(hardidletimer_tg_device)) {
+		pr_debug("couldn't register system device\n");
+		goto out_class;
+	}
+
+	hardidletimer_tg_kobj = &hardidletimer_tg_device->kobj;
+
+	err = xt_register_target(&hardidletimer_tg);
+	if (err < 0) {
+		pr_debug("couldn't register xt target\n");
+		goto out_dev;
+	}
+
+	return 0;
+out_dev:
+	device_destroy(hardidletimer_tg_class, MKDEV(0, 0));
+out_class:
+	class_destroy(hardidletimer_tg_class);
+out:
+	return err;
+}
+
+static void __exit hardidletimer_tg_exit(void)
+{
+	xt_unregister_target(&hardidletimer_tg);
+
+	device_destroy(hardidletimer_tg_class, MKDEV(0, 0));
+	class_destroy(hardidletimer_tg_class);
+}
+
+module_init(hardidletimer_tg_init);
+module_exit(hardidletimer_tg_exit);
+
+MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_DESCRIPTION("Xtables: hard idle time monitor");
+MODULE_LICENSE("GPL v2");
+
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 9271d88..3a1deec 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -327,6 +327,8 @@
 		pr_debug("couldn't add file to sysfs");
 		goto out_free_attr;
 	}
+	/* notify userspace */
+	kobject_uevent(idletimer_tg_kobj, KOBJ_ADD);
 
 	list_add(&info->timer->entry, &idletimer_tg_list);
 
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 0d0d68c..1dae02a 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -14,6 +14,8 @@
 #include <linux/skbuff.h>
 #include <linux/route.h>
 #include <linux/netfilter/x_tables.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/route.h>
 #include <net/netfilter/ipv4/nf_dup_ipv4.h>
 #include <net/netfilter/ipv6/nf_dup_ipv6.h>
@@ -25,8 +27,15 @@
 	int			oif;
 };
 
+static unsigned int tee_net_id __read_mostly;
 static const union nf_inet_addr tee_zero_address;
 
+struct tee_net {
+	struct list_head priv_list;
+	/* lock protects the priv_list */
+	struct mutex lock;
+};
+
 static unsigned int
 tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
@@ -51,17 +60,16 @@
 }
 #endif
 
-static DEFINE_MUTEX(priv_list_mutex);
-static LIST_HEAD(priv_list);
-
 static int tee_netdev_event(struct notifier_block *this, unsigned long event,
 			    void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct net *net = dev_net(dev);
+	struct tee_net *tn = net_generic(net, tee_net_id);
 	struct xt_tee_priv *priv;
 
-	mutex_lock(&priv_list_mutex);
-	list_for_each_entry(priv, &priv_list, list) {
+	mutex_lock(&tn->lock);
+	list_for_each_entry(priv, &tn->priv_list, list) {
 		switch (event) {
 		case NETDEV_REGISTER:
 			if (!strcmp(dev->name, priv->tginfo->oif))
@@ -79,13 +87,14 @@
 			break;
 		}
 	}
-	mutex_unlock(&priv_list_mutex);
+	mutex_unlock(&tn->lock);
 
 	return NOTIFY_DONE;
 }
 
 static int tee_tg_check(const struct xt_tgchk_param *par)
 {
+	struct tee_net *tn = net_generic(par->net, tee_net_id);
 	struct xt_tee_tginfo *info = par->targinfo;
 	struct xt_tee_priv *priv;
 
@@ -95,6 +104,8 @@
 		return -EINVAL;
 
 	if (info->oif[0]) {
+		struct net_device *dev;
+
 		if (info->oif[sizeof(info->oif)-1] != '\0')
 			return -EINVAL;
 
@@ -106,9 +117,14 @@
 		priv->oif     = -1;
 		info->priv    = priv;
 
-		mutex_lock(&priv_list_mutex);
-		list_add(&priv->list, &priv_list);
-		mutex_unlock(&priv_list_mutex);
+		dev = dev_get_by_name(par->net, info->oif);
+		if (dev) {
+			priv->oif = dev->ifindex;
+			dev_put(dev);
+		}
+		mutex_lock(&tn->lock);
+		list_add(&priv->list, &tn->priv_list);
+		mutex_unlock(&tn->lock);
 	} else
 		info->priv = NULL;
 
@@ -118,12 +134,13 @@
 
 static void tee_tg_destroy(const struct xt_tgdtor_param *par)
 {
+	struct tee_net *tn = net_generic(par->net, tee_net_id);
 	struct xt_tee_tginfo *info = par->targinfo;
 
 	if (info->priv) {
-		mutex_lock(&priv_list_mutex);
+		mutex_lock(&tn->lock);
 		list_del(&info->priv->list);
-		mutex_unlock(&priv_list_mutex);
+		mutex_unlock(&tn->lock);
 		kfree(info->priv);
 	}
 	static_key_slow_dec(&xt_tee_enabled);
@@ -156,6 +173,21 @@
 #endif
 };
 
+static int __net_init tee_net_init(struct net *net)
+{
+	struct tee_net *tn = net_generic(net, tee_net_id);
+
+	INIT_LIST_HEAD(&tn->priv_list);
+	mutex_init(&tn->lock);
+	return 0;
+}
+
+static struct pernet_operations tee_net_ops = {
+	.init = tee_net_init,
+	.id   = &tee_net_id,
+	.size = sizeof(struct tee_net),
+};
+
 static struct notifier_block tee_netdev_notifier = {
 	.notifier_call = tee_netdev_event,
 };
@@ -164,22 +196,32 @@
 {
 	int ret;
 
+	ret = register_pernet_subsys(&tee_net_ops);
+	if (ret < 0)
+		return ret;
+
 	ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
-	if (ret)
-		return ret;
+	if (ret < 0)
+		goto cleanup_subsys;
+
 	ret = register_netdevice_notifier(&tee_netdev_notifier);
-	if (ret) {
-		xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
-		return ret;
-	}
+	if (ret < 0)
+		goto unregister_targets;
 
 	return 0;
+
+unregister_targets:
+	xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+cleanup_subsys:
+	unregister_pernet_subsys(&tee_net_ops);
+	return ret;
 }
 
 static void __exit tee_tg_exit(void)
 {
 	unregister_netdevice_notifier(&tee_netdev_notifier);
 	xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+	unregister_pernet_subsys(&tee_net_ops);
 }
 
 module_init(tee_tg_init);
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index ea7c670..ee3e5b6 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -903,7 +903,8 @@
 		    (state == 0 && (byte & bitmask) == 0))
 			return bit_spot;
 
-		bit_spot++;
+		if (++bit_spot >= bitmap_len)
+			return -1;
 		bitmask >>= 1;
 		if (bitmask == 0) {
 			byte = bitmap[++byte_offset];
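
The netlabel fix adds the one missing bound in a bitmap scan: previously ++bit_spot could run past bitmap_len whenever the trailing bits never matched, reading beyond bitmap[]. A standalone sketch of the corrected walk (loosely following netlbl_bitmap_walk(), MSB-first within each byte):

/* Sketch: find the next bit in 'state' (1 or 0), bounded by bitmap_len. */
static int demo_bitmap_walk(const unsigned char *bitmap,
			    unsigned int bitmap_len,
			    unsigned int offset, int state)
{
	unsigned int byte_offset = offset / 8;
	unsigned char byte = bitmap[byte_offset];
	unsigned char bitmask = 0x80 >> (offset % 8);
	int bit_spot = offset;

	while (bit_spot < (int)bitmap_len) {
		if ((state && (byte & bitmask)) ||
		    (!state && !(byte & bitmask)))
			return bit_spot;

		if (++bit_spot >= (int)bitmap_len)
			return -1;	/* the bound this patch adds */
		bitmask >>= 1;
		if (bitmask == 0) {
			byte = bitmap[++byte_offset];
			bitmask = 0x80;
		}
	}
	return -1;
}
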
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 03f37c4..1d3144d 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -153,7 +153,7 @@
 	sk_for_each(s, &nr_list)
 		if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
 		    s->sk_state == TCP_LISTEN) {
-			bh_lock_sock(s);
+			sock_hold(s);
 			goto found;
 		}
 	s = NULL;
@@ -174,7 +174,7 @@
 		struct nr_sock *nr = nr_sk(s);
 
 		if (nr->my_index == index && nr->my_id == id) {
-			bh_lock_sock(s);
+			sock_hold(s);
 			goto found;
 		}
 	}
@@ -198,7 +198,7 @@
 
 		if (nr->your_index == index && nr->your_id == id &&
 		    !ax25cmp(&nr->dest_addr, dest)) {
-			bh_lock_sock(s);
+			sock_hold(s);
 			goto found;
 		}
 	}
@@ -224,7 +224,7 @@
 		if (i != 0 && j != 0) {
 			if ((sk=nr_find_socket(i, j)) == NULL)
 				break;
-			bh_unlock_sock(sk);
+			sock_put(sk);
 		}
 
 		id++;
@@ -920,6 +920,7 @@
 	}
 
 	if (sk != NULL) {
+		bh_lock_sock(sk);
 		skb_reset_transport_header(skb);
 
 		if (frametype == NR_CONNACK && skb->len == 22)
@@ -929,6 +930,7 @@
 
 		ret = nr_process_rx_frame(sk, skb);
 		bh_unlock_sock(sk);
+		sock_put(sk);
 		return ret;
 	}
 
@@ -960,10 +962,12 @@
 	    (make = nr_make_new(sk)) == NULL) {
 		nr_transmit_refusal(skb, 0);
 		if (sk)
-			bh_unlock_sock(sk);
+			sock_put(sk);
 		return 0;
 	}
 
+	bh_lock_sock(sk);
+
 	window = skb->data[20];
 
 	skb->sk             = make;
@@ -1016,6 +1020,7 @@
 		sk->sk_data_ready(sk);
 
 	bh_unlock_sock(sk);
+	sock_put(sk);
 
 	nr_insert_socket(make);
 
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index cbd51ed..908e53a 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -52,21 +52,21 @@
 {
 	struct nr_sock *nr = nr_sk(sk);
 
-	mod_timer(&nr->t1timer, jiffies + nr->t1);
+	sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
 }
 
 void nr_start_t2timer(struct sock *sk)
 {
 	struct nr_sock *nr = nr_sk(sk);
 
-	mod_timer(&nr->t2timer, jiffies + nr->t2);
+	sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
 }
 
 void nr_start_t4timer(struct sock *sk)
 {
 	struct nr_sock *nr = nr_sk(sk);
 
-	mod_timer(&nr->t4timer, jiffies + nr->t4);
+	sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
 }
 
 void nr_start_idletimer(struct sock *sk)
@@ -74,37 +74,37 @@
 	struct nr_sock *nr = nr_sk(sk);
 
 	if (nr->idle > 0)
-		mod_timer(&nr->idletimer, jiffies + nr->idle);
+		sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
 }
 
 void nr_start_heartbeat(struct sock *sk)
 {
-	mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
+	sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
 }
 
 void nr_stop_t1timer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->t1timer);
+	sk_stop_timer(sk, &nr_sk(sk)->t1timer);
 }
 
 void nr_stop_t2timer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->t2timer);
+	sk_stop_timer(sk, &nr_sk(sk)->t2timer);
 }
 
 void nr_stop_t4timer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->t4timer);
+	sk_stop_timer(sk, &nr_sk(sk)->t4timer);
 }
 
 void nr_stop_idletimer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->idletimer);
+	sk_stop_timer(sk, &nr_sk(sk)->idletimer);
 }
 
 void nr_stop_heartbeat(struct sock *sk)
 {
-	del_timer(&sk->sk_timer);
+	sk_stop_timer(sk, &sk->sk_timer);
 }
 
 int nr_t1timer_running(struct sock *sk)
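
The netrom timer conversion is about socket lifetime, not timer behaviour: mod_timer()/del_timer() take no reference on the sock, so a pending timer could fire after the last sock_put() and touch freed memory. sk_reset_timer() and sk_stop_timer() tie a sock reference to the timer's pending state. What the helpers guarantee, roughly (see net/core/sock.c for the real bodies):

/* Sketch: sock-refcounted timers, as used by the nr_timer.c hunks. */
#include <linux/timer.h>
#include <net/sock.h>

static void demo_arm(struct sock *sk, struct timer_list *t,
		     unsigned long expires)
{
	/* holds a sock reference when the timer goes from idle to pending */
	sk_reset_timer(sk, t, expires);
}

static void demo_disarm(struct sock *sk, struct timer_list *t)
{
	/* drops the reference only if it actually deleted a pending timer */
	sk_stop_timer(sk, t);
}
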
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 6a196e4..d1fc019e 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -419,6 +419,10 @@
 						      sock->service_name,
 						      sock->service_name_len,
 						      &service_name_tlv_length);
+		if (!service_name_tlv) {
+			err = -ENOMEM;
+			goto error_tlv;
+		}
 		size += service_name_tlv_length;
 	}
 
@@ -429,9 +433,17 @@
 
 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
 				      &miux_tlv_length);
+	if (!miux_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += miux_tlv_length;
 
 	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+	if (!rw_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += rw_tlv_length;
 
 	pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -484,9 +496,17 @@
 
 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
 				      &miux_tlv_length);
+	if (!miux_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += miux_tlv_length;
 
 	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+	if (!rw_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += rw_tlv_length;
 
 	skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index ef4026a..4fa0152 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -532,10 +532,10 @@
 
 static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
 {
-	u8 *gb_cur, *version_tlv, version, version_length;
-	u8 *lto_tlv, lto_length;
-	u8 *wks_tlv, wks_length;
-	u8 *miux_tlv, miux_length;
+	u8 *gb_cur, version, version_length;
+	u8 lto_length, wks_length, miux_length;
+	u8 *version_tlv = NULL, *lto_tlv = NULL,
+	   *wks_tlv = NULL, *miux_tlv = NULL;
 	__be16 wks = cpu_to_be16(local->local_wks);
 	u8 gb_len = 0;
 	int ret = 0;
@@ -543,17 +543,33 @@
 	version = LLCP_VERSION_11;
 	version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
 					 1, &version_length);
+	if (!version_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += version_length;
 
 	lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
+	if (!lto_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += lto_length;
 
 	pr_debug("Local wks 0x%lx\n", local->local_wks);
 	wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
+	if (!wks_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += wks_length;
 
 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
 				      &miux_length);
+	if (!miux_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += miux_length;
 
 	gb_len += ARRAY_SIZE(llcp_magic);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 865ecef..c7b6010 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -500,7 +500,7 @@
 			return -EINVAL;
 		}
 
-		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
+		if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
 			attrs |= 1 << type;
 			a[type] = nla;
 		}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 100ce98..e451eb4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2625,8 +2625,10 @@
 						sll_addr)))
 			goto out;
 		proto	= saddr->sll_protocol;
-		addr	= saddr->sll_addr;
+		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+		if (addr && dev && saddr->sll_halen < dev->addr_len)
+			goto out_put;
 	}
 
 	err = -ENXIO;
@@ -2823,8 +2825,10 @@
 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
 			goto out;
 		proto	= saddr->sll_protocol;
-		addr	= saddr->sll_addr;
+		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+		if (addr && dev && saddr->sll_halen < dev->addr_len)
+			goto out_unlock;
 	}
 
 	err = -ENXIO;
@@ -2883,7 +2887,8 @@
 			goto out_free;
 	} else if (reserve) {
 		skb_reserve(skb, -reserve);
-		if (len < reserve)
+		if (len < reserve + sizeof(struct ipv6hdr) &&
+		    dev->min_header_len != dev->hard_header_len)
 			skb_reset_network_header(skb);
 	}
 
@@ -4270,7 +4275,7 @@
 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
 		if (unlikely(rb->frames_per_block == 0))
 			goto out;
-		if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
 			goto out;
 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
 					req->tp_frame_nr))
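The reworked check guards the multiplication actually performed just below (frames_per_block * tp_block_nr) instead of tp_block_size * tp_block_nr, which also lifts a spurious 4 GiB cap on the ring size. A worked example (values illustrative):

	/* tp_block_size = 64 KiB, tp_frame_size = 2 KiB, tp_block_nr = 65536:
	 *   frames_per_block = 65536 / 2048 = 32
	 *   old check: 65536 > UINT_MAX / 65536 (= 65535) -> rejected
	 *   new check: 32    > UINT_MAX / 65536           -> accepted
	 * The ring is 64 KiB * 65536 = 4 GiB, yet the frame-count
	 * product 32 * 65536 = 2^21 still fits comfortably in a u32,
	 * so the tp_frame_nr consistency test below stays exact.
	 */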
diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig
index ee794d5..8b9d3c2 100644
--- a/net/qrtr/Kconfig
+++ b/net/qrtr/Kconfig
@@ -47,4 +47,12 @@
 	  transport provides bulk endpoints to facilitate sending and receiving
 	  IPC Router data packets.
 
+config QRTR_FIFO
+	tristate "FIFO IPC Router channels"
+	help
+	  Say Y here to support FIFO based IPC Router channels. The FIFO
+	  transport enables IPC Router communication between two virtual
+	  machines; the shared memory is allocated by the hypervisor, and
+	  the VMs signal each other through virtualized interrupts.
+
 endif # QRTR
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
index d3c3a19..cae5493 100644
--- a/net/qrtr/Makefile
+++ b/net/qrtr/Makefile
@@ -9,3 +9,6 @@
 
 obj-$(CONFIG_QRTR_USB) += qrtr-usb.o
 qrtr-usb-y      := usb.o
+
+obj-$(CONFIG_QRTR_FIFO) += qrtr-fifo.o
+qrtr-fifo-y	:= fifo.o
diff --git a/net/qrtr/fifo.c b/net/qrtr/fifo.c
new file mode 100644
index 0000000..0a494a6
--- /dev/null
+++ b/net/qrtr/fifo.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <microvisor/microvisor.h>
+
+#include "qrtr.h"
+
+#define FIFO_MAGIC_KEY	0x24495043 /* "$IPC" */
+#define FIFO_SIZE	0x4000
+#define FIFO_0_START	0x1000
+#define FIFO_1_START	(FIFO_0_START + FIFO_SIZE)
+#define FIFO_MAGIC_IDX	0x0
+#define TAIL_0_IDX	0x1
+#define HEAD_0_IDX	0x2
+#define TAIL_1_IDX	0x3
+#define HEAD_1_IDX	0x4
+
+struct fifo_pipe {
+	__le32 *tail;
+	__le32 *head;
+
+	void *fifo;
+	size_t length;
+};
+
+/**
+ * struct qrtr_fifo_xprt - qrtr FIFO transport structure
+ * @ep: qrtr endpoint specific info.
+ * @tx_pipe: TX FIFO specific info.
+ * @rx_pipe: RX FIFO specific info.
+ * @fifo_base: Base of the shared FIFO.
+ * @fifo_size: FIFO Size.
+ * @tx_fifo_idx: TX FIFO index.
+ * @kcap: Register info used to raise an IRQ to the other VM.
+ */
+struct qrtr_fifo_xprt {
+	struct qrtr_endpoint ep;
+	struct fifo_pipe tx_pipe;
+	struct fifo_pipe rx_pipe;
+	void *fifo_base;
+	size_t fifo_size;
+	int tx_fifo_idx;
+	okl4_kcap_t kcap;
+};
+
+static void qrtr_fifo_raise_virq(struct qrtr_fifo_xprt *xprtp);
+
+static size_t fifo_rx_avail(struct fifo_pipe *pipe)
+{
+	u32 head;
+	u32 tail;
+
+	head = le32_to_cpu(*pipe->head);
+	tail = le32_to_cpu(*pipe->tail);
+
+	if (head < tail)
+		return pipe->length - tail + head;
+
+	return head - tail;
+}
+
+static void fifo_rx_peek(struct fifo_pipe *pipe,
+			 void *data, unsigned int offset, size_t count)
+{
+	size_t len;
+	u32 tail;
+
+	tail = le32_to_cpu(*pipe->tail);
+	tail += offset;
+	if (tail >= pipe->length)
+		tail -= pipe->length;
+
+	len = min_t(size_t, count, pipe->length - tail);
+	if (len)
+		memcpy_fromio(data, pipe->fifo + tail, len);
+
+	if (len != count)
+		memcpy_fromio(data + len, pipe->fifo, (count - len));
+}
+
+static void fifo_rx_advance(struct fifo_pipe *pipe, size_t count)
+{
+	u32 tail;
+
+	tail = le32_to_cpu(*pipe->tail);
+
+	tail += count;
+	if (tail >= pipe->length)
+		tail -= pipe->length;
+
+	*pipe->tail = cpu_to_le32(tail);
+}
+
+static size_t fifo_tx_avail(struct fifo_pipe *pipe)
+{
+	u32 head;
+	u32 tail;
+	u32 avail;
+
+	head = le32_to_cpu(*pipe->head);
+	tail = le32_to_cpu(*pipe->tail);
+
+	if (tail <= head)
+		avail = pipe->length - head + tail;
+	else
+		avail = tail - head;
+
+	return avail;
+}
+
+static void fifo_tx_write(struct fifo_pipe *pipe,
+			  const void *data, size_t count)
+{
+	size_t len;
+	u32 head;
+
+	head = le32_to_cpu(*pipe->head);
+
+	len = min_t(size_t, count, pipe->length - head);
+	if (len)
+		memcpy_toio(pipe->fifo + head, data, len);
+
+	if (len != count)
+		memcpy_toio(pipe->fifo, data + len, count - len);
+
+	head += count;
+	if (head >= pipe->length)
+		head -= pipe->length;
+
+	/* Ensure ordering of fifo and head update */
+	wmb();
+
+	*pipe->head = cpu_to_le32(head);
+}
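A minimal user-space rendering of the head/tail arithmetic used by fifo_tx_avail() and fifo_rx_avail() above (names and values are illustrative only):

	#include <stdio.h>
	#include <stddef.h>

	/* Free space seen by the producer: from head forward, wrapping
	 * around to tail, mirroring fifo_tx_avail().
	 */
	static size_t ring_tx_avail(size_t length, size_t head, size_t tail)
	{
		return (tail <= head) ? length - head + tail : tail - head;
	}

	int main(void)
	{
		/* 0x4000-byte FIFO with head near the end: the next
		 * write of up to 12 bytes wraps past offset 0.
		 */
		printf("%zu bytes writable\n",
		       ring_tx_avail(0x4000, 0x3ffc, 0x0008));
		return 0;
	}

Note the convention this encodes: tail == head means empty, so a write that exactly filled the FIFO would make full and empty indistinguishable; in practice packets are kept well below the FIFO size.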
+
+/* from qrtr to FIFO */
+static int xprt_write(struct qrtr_endpoint *ep, struct sk_buff *skb)
+{
+	struct qrtr_fifo_xprt *xprtp;
+	int rc;
+
+	xprtp = container_of(ep, struct qrtr_fifo_xprt, ep);
+
+	rc = skb_linearize(skb);
+	if (rc) {
+		kfree_skb(skb);
+		return rc;
+	}
+
+	if (fifo_tx_avail(&xprtp->tx_pipe) < skb->len) {
+		pr_err("No Space in FIFO\n");
+		return -EAGAIN;
+	}
+
+	fifo_tx_write(&xprtp->tx_pipe, skb->data, skb->len);
+	kfree_skb(skb);
+
+	qrtr_fifo_raise_virq(xprtp);
+
+	return 0;
+}
+
+static void xprt_read_data(struct qrtr_fifo_xprt *xprtp)
+{
+	int rc;
+	u32 hdr[8];
+	void *data;
+	size_t pkt_len;
+	size_t rx_avail;
+	size_t hdr_len = sizeof(hdr);
+
+	while (fifo_rx_avail(&xprtp->rx_pipe)) {
+		fifo_rx_peek(&xprtp->rx_pipe, &hdr, 0, hdr_len);
+		pkt_len = qrtr_peek_pkt_size((void *)&hdr);
+		if ((int)pkt_len < 0) {
+			pr_err("invalid pkt_len %d\n", (int)pkt_len);
+			break;
+		}
+
+		data = kzalloc(pkt_len, GFP_ATOMIC);
+		if (!data)
+			break;
+
+		rx_avail = fifo_rx_avail(&xprtp->rx_pipe);
+		if (rx_avail < pkt_len) {
+			pr_err_ratelimited("incomplete pkt in FIFO %zu %zu\n",
+					   rx_avail, pkt_len);
+			kfree(data);
+			break;
+		}
+
+		fifo_rx_peek(&xprtp->rx_pipe, data, 0, pkt_len);
+		fifo_rx_advance(&xprtp->rx_pipe, pkt_len);
+
+		rc = qrtr_endpoint_post(&xprtp->ep, data, pkt_len);
+		if (rc == -EINVAL)
+			pr_err("invalid ipcrouter packet\n");
+		kfree(data);
+		data = NULL;
+	}
+}
+
+static void qrtr_fifo_raise_virq(struct qrtr_fifo_xprt *xprtp)
+{
+	unsigned long payload = 0xffff;
+
+	/* Best effort: nothing useful can be done on failure here. */
+	_okl4_sys_vinterrupt_raise(xprtp->kcap, payload);
+}
+
+static irqreturn_t qrtr_fifo_virq_handler(int irq, void *dev_id)
+{
+	xprt_read_data((struct qrtr_fifo_xprt *)dev_id);
+	return IRQ_HANDLED;
+}
+
+/**
+ * qrtr_fifo_config_init() - init FIFO xprt configs
+ *
+ * @xprtp: FIFO transport structure to initialize.
+ *
+ * This function initializes the descriptor pointers and the TX/RX pipe
+ * layout of the shared FIFO region, based on which FIFO is the TX side.
+ */
+static void qrtr_fifo_config_init(struct qrtr_fifo_xprt *xprtp)
+{
+	__le32 *descs;
+
+	descs = xprtp->fifo_base;
+	descs[FIFO_MAGIC_IDX] = FIFO_MAGIC_KEY;
+
+	if (xprtp->tx_fifo_idx) {
+		xprtp->tx_pipe.tail = &descs[TAIL_0_IDX];
+		xprtp->tx_pipe.head = &descs[HEAD_0_IDX];
+		xprtp->tx_pipe.fifo = xprtp->fifo_base + FIFO_0_START;
+		xprtp->tx_pipe.length = FIFO_SIZE;
+
+		xprtp->rx_pipe.tail = &descs[TAIL_1_IDX];
+		xprtp->rx_pipe.head = &descs[HEAD_1_IDX];
+		xprtp->rx_pipe.fifo = xprtp->fifo_base + FIFO_1_START;
+		xprtp->rx_pipe.length = FIFO_SIZE;
+	} else {
+		xprtp->tx_pipe.tail = &descs[TAIL_1_IDX];
+		xprtp->tx_pipe.head = &descs[HEAD_1_IDX];
+		xprtp->tx_pipe.fifo = xprtp->fifo_base + FIFO_1_START;
+		xprtp->tx_pipe.length = FIFO_SIZE;
+
+		xprtp->rx_pipe.tail = &descs[TAIL_0_IDX];
+		xprtp->rx_pipe.head = &descs[HEAD_0_IDX];
+		xprtp->rx_pipe.fifo = xprtp->fifo_base + FIFO_0_START;
+		xprtp->rx_pipe.length = FIFO_SIZE;
+	}
+
+	/* Reset respective index */
+	*xprtp->tx_pipe.head = 0;
+	*xprtp->rx_pipe.tail = 0;
+}
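Putting the index macros and qrtr_fifo_config_init() together, the shared region is laid out roughly as follows (derived from the constants above, not from a separate spec):

	offset 0x0000  u32  magic (FIFO_MAGIC_KEY, 0x24495043, "$IPC")
	offset 0x0004  u32  FIFO 0 tail
	offset 0x0008  u32  FIFO 0 head
	offset 0x000c  u32  FIFO 1 tail
	offset 0x0010  u32  FIFO 1 head
	offset 0x1000       FIFO 0 data (0x4000 bytes)
	offset 0x5000       FIFO 1 data (0x4000 bytes)

Which FIFO serves as the TX side is chosen by the qcom,tx-is-first property, and each VM resets only the indices it owns: its TX head (producer) and its RX tail (consumer).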
+
+/**
+ * qrtr_fifo_xprt_probe() - Probe a FIFO xprt
+ *
+ * @pdev: Platform device corresponding to FIFO xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to a FIFO transport.
+ */
+static int qrtr_fifo_xprt_probe(struct platform_device *pdev)
+{
+	int irq;
+	int ret;
+	struct resource *r;
+	struct device *parent;
+	struct qrtr_fifo_xprt *xprtp;
+	struct device_node *ipc_irq_np;
+	struct device_node *ipc_shm_np;
+	struct platform_device *ipc_shm_dev;
+
+	xprtp = devm_kzalloc(&pdev->dev, sizeof(*xprtp), GFP_KERNEL);
+	if (!xprtp)
+		return -ENOMEM;
+
+	parent = &pdev->dev;
+	ipc_irq_np = parent->of_node;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return -ENODEV;
+
+	ret = devm_request_irq(parent, irq, qrtr_fifo_virq_handler,
+			       IRQF_TRIGGER_RISING, dev_name(parent),
+			       xprtp);
+	if (ret < 0)
+		return -ENODEV;
+
+	/* this kcap is required to raise VIRQ */
+	ret = of_property_read_u32(ipc_irq_np, "reg", &xprtp->kcap);
+	if (ret < 0)
+		return -ENODEV;
+
+	ipc_shm_np = of_parse_phandle(ipc_irq_np, "qcom,ipc-shm", 0);
+	if (!ipc_shm_np)
+		return -ENODEV;
+
+	ipc_shm_dev = of_find_device_by_node(ipc_shm_np);
+	if (!ipc_shm_dev) {
+		of_node_put(ipc_shm_np);
+		return -ENODEV;
+	}
+
+	r = platform_get_resource(ipc_shm_dev, IORESOURCE_MEM, 0);
+	if (!r) {
+		pr_err("failed to get shared FIFO\n");
+		of_node_put(ipc_shm_np);
+		return -ENODEV;
+	}
+
+	xprtp->tx_fifo_idx = of_property_read_bool(ipc_shm_np,
+						   "qcom,tx-is-first");
+	of_node_put(ipc_shm_np);
+
+	xprtp->fifo_size = resource_size(r);
+	xprtp->fifo_base = devm_ioremap_nocache(&pdev->dev, r->start,
+						resource_size(r));
+	if (!xprtp->fifo_base) {
+		pr_err("ioreamp_nocache() failed\n");
+		return -ENOMEM;
+	}
+	qrtr_fifo_config_init(xprtp);
+
+	xprtp->ep.xmit = xprt_write;
+	ret = qrtr_endpoint_register(&xprtp->ep, QRTR_EP_NID_AUTO);
+	if (ret)
+		return ret;
+
+	if (fifo_rx_avail(&xprtp->rx_pipe))
+		xprt_read_data(xprtp);
+
+	return 0;
+}
+
+static const struct of_device_id qrtr_fifo_xprt_match_table[] = {
+	{ .compatible = "qcom,ipcr-fifo-xprt" },
+	{},
+};
+
+static struct platform_driver qrtr_fifo_xprt_driver = {
+	.probe = qrtr_fifo_xprt_probe,
+	.driver = {
+		.name = "qcom_fifo_qrtr",
+		.of_match_table = qrtr_fifo_xprt_match_table,
+	 },
+};
+
+static int __init qrtr_fifo_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&qrtr_fifo_xprt_driver);
+	if (rc) {
+		pr_err("driver register failed %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+module_init(qrtr_fifo_xprt_init);
+MODULE_DESCRIPTION("QTI IPC-router FIFO XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index f18469c..5c3d455 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -36,6 +36,8 @@
 #define QRTR_MIN_EPH_SOCKET 0x4000
 #define QRTR_MAX_EPH_SOCKET 0x7fff
 
+#define QRTR_PORT_CTRL_LEGACY 0xffff
+
 /* qrtr socket states */
 #define QRTR_STATE_MULTI	-2
 #define QRTR_STATE_INIT	-1
@@ -528,14 +530,12 @@
 	hdr->type = cpu_to_le32(type);
 	hdr->src_node_id = cpu_to_le32(from->sq_node);
 	hdr->src_port_id = cpu_to_le32(from->sq_port);
-	if (to->sq_port == QRTR_PORT_CTRL) {
+	if (to->sq_node == QRTR_NODE_BCAST)
 		hdr->dst_node_id = cpu_to_le32(node->nid);
-		hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
-	} else {
+	else
 		hdr->dst_node_id = cpu_to_le32(to->sq_node);
-		hdr->dst_port_id = cpu_to_le32(to->sq_port);
-	}
 
+	hdr->dst_port_id = cpu_to_le32(to->sq_port);
 	hdr->size = cpu_to_le32(len);
 	hdr->confirm_rx = !!confirm_rx;
 
@@ -617,6 +617,48 @@
 }
 
 /**
+ * qrtr_peek_pkt_size() - Peek into the packet header to get potential pkt size
+ *
+ * @data: Starting address of the packet which points to router header.
+ *
+ * @returns: potential packet size on success, < 0 on error.
+ *
+ * This function is used by the underlying transport abstraction layer to
+ * peek into the potential packet size of an incoming packet. This information
+ * is used to perform link-layer fragmentation and reassembly.
+ */
+int qrtr_peek_pkt_size(const void *data)
+{
+	const struct qrtr_hdr_v1 *v1;
+	const struct qrtr_hdr_v2 *v2;
+	unsigned int hdrlen;
+	unsigned int size;
+	unsigned int ver;
+
+	/* Version field in v1 is little endian, so this works for both cases */
+	ver = *(u8 *)data;
+
+	switch (ver) {
+	case QRTR_PROTO_VER_1:
+		v1 = data;
+		hdrlen = sizeof(*v1);
+		size = le32_to_cpu(v1->size);
+		break;
+	case QRTR_PROTO_VER_2:
+		v2 = data;
+		hdrlen = sizeof(*v2) + v2->optlen;
+		size = le32_to_cpu(v2->size);
+		break;
+	default:
+		pr_err("qrtr: Invalid version %d\n", ver);
+		return -EINVAL;
+	}
+
+	return ALIGN(size, 4) + hdrlen;
+}
+EXPORT_SYMBOL(qrtr_peek_pkt_size);
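The returned value is the on-wire footprint: the payload rounded up to a 4-byte boundary plus the version-dependent header length. For instance (the header sizes are whatever sizeof yields for the two structs; the alignment math is the point here):

	/* v1 packet, 5-byte payload:
	 *   ALIGN(5, 4) = 8, frame = 8 + sizeof(*v1) bytes
	 * v2 packet, optlen = 4, 16-byte payload:
	 *   ALIGN(16, 4) = 16, frame = 16 + sizeof(*v2) + 4 bytes
	 */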
+
+/**
  * qrtr_endpoint_post() - post incoming data
  * @ep: endpoint handle
  * @data: data pointer
@@ -684,6 +726,9 @@
 		goto err;
 	}
 
+	if (cb->dst_port == QRTR_PORT_CTRL_LEGACY)
+		cb->dst_port = QRTR_PORT_CTRL;
+
 	if (len != ALIGN(size, 4) + hdrlen)
 		goto err;
 
@@ -733,49 +778,44 @@
 static struct qrtr_sock *qrtr_port_lookup(int port);
 static void qrtr_port_put(struct qrtr_sock *ipc);
 
-static bool qrtr_must_forward(u32 src_nid, u32 dst_nid, u32 type)
+static bool qrtr_must_forward(struct qrtr_node *src,
+			      struct qrtr_node *dst, u32 type)
 {
-	struct qrtr_node *dst;
-	struct qrtr_node *src;
-	bool ret = false;
-
-	if (src_nid == qrtr_local_nid)
+	/* A node structure is not maintained for the local processor,
+	 * so src is NULL in that case.
+	 */
+	if (!src)
 		return true;
 
-	if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
-		return ret;
+	if (!dst)
+		return false;
 
-	dst = qrtr_node_lookup(dst_nid);
-	src = qrtr_node_lookup(src_nid);
-	if (!dst || !src)
-		goto out;
-	if (dst == src)
-		goto out;
-	if (dst->nid == QRTR_EP_NID_AUTO)
-		goto out;
+	if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
+		return false;
+
+	if (dst == src || dst->nid == QRTR_EP_NID_AUTO)
+		return false;
 
 	if (abs(dst->net_id - src->net_id) > 1)
-		ret = true;
+		return true;
 
-out:
-	qrtr_node_release(dst);
-	qrtr_node_release(src);
-
-	return ret;
+	return false;
 }
 
 static void qrtr_fwd_ctrl_pkt(struct sk_buff *skb)
 {
 	struct qrtr_node *node;
+	struct qrtr_node *src;
 	struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
 
+	src = qrtr_node_lookup(cb->src_node);
 	down_read(&qrtr_node_lock);
 	list_for_each_entry(node, &qrtr_all_epts, item) {
 		struct sockaddr_qrtr from;
 		struct sockaddr_qrtr to;
 		struct sk_buff *skbn;
 
-		if (!qrtr_must_forward(cb->src_node, node->nid, cb->type))
+		if (!qrtr_must_forward(src, node, cb->type))
 			continue;
 
 		skbn = skb_clone(skb, GFP_KERNEL);
@@ -793,6 +833,7 @@
 		qrtr_node_enqueue(node, skbn, cb->type, &from, &to, 0);
 	}
 	up_read(&qrtr_node_lock);
+	qrtr_node_release(src);
 }
 
 static void qrtr_fwd_pkt(struct sk_buff *skb, struct qrtr_cb *cb)
@@ -945,7 +986,7 @@
 	struct sk_buff *skb;
 
 	list_for_each_entry(dst, &qrtr_all_epts, item) {
-		if (!qrtr_must_forward(nid, dst->nid, QRTR_TYPE_DEL_PROC))
+		if (!qrtr_must_forward(src, dst, QRTR_TYPE_DEL_PROC))
 			continue;
 
 		skb = qrtr_alloc_ctrl_packet(&pkt);
@@ -1152,7 +1193,8 @@
 
 		sock_hold(&ipc->sk);
 		ipc->sk.sk_err = ENETRESET;
-		ipc->sk.sk_error_report(&ipc->sk);
+		if (ipc->sk.sk_error_report)
+			ipc->sk.sk_error_report(&ipc->sk);
 		sock_put(&ipc->sk);
 	}
 }
@@ -1253,7 +1295,8 @@
 	if (sk && sk->sk_err == ENETRESET) {
 		sock_hold(sk);
 		sk->sk_err = ENETRESET;
-		sk->sk_error_report(sk);
+		if (sk->sk_error_report)
+			sk->sk_error_report(sk);
 		sock_put(sk);
 		kfree_skb(skb);
 		return 0;
@@ -1308,6 +1351,7 @@
 	struct sock *sk = sock->sk;
 	struct qrtr_ctrl_pkt *pkt;
 	struct qrtr_node *node;
+	struct qrtr_node *srv_node;
 	struct sk_buff *skb;
 	size_t plen;
 	u32 type = QRTR_TYPE_DATA;
@@ -1345,6 +1389,7 @@
 	}
 
 	node = NULL;
+	srv_node = NULL;
 	if (addr->sq_node == QRTR_NODE_BCAST) {
 		enqueue_fn = qrtr_bcast_enqueue;
 		if (addr->sq_port != QRTR_PORT_CTRL) {
@@ -1398,11 +1443,14 @@
 
 		/* drop new server cmds that are not forwardable to dst node*/
 		pkt = (struct qrtr_ctrl_pkt *)skb->data;
-		if (!qrtr_must_forward(pkt->server.node, addr->sq_node, type)) {
+		srv_node = qrtr_node_lookup(pkt->server.node);
+		if (!qrtr_must_forward(srv_node, node, type)) {
 			rc = 0;
 			kfree_skb(skb);
+			qrtr_node_release(srv_node);
 			goto out_node;
 		}
+		qrtr_node_release(srv_node);
 	}
 
 	rc = enqueue_fn(node, skb, type, &ipc->us, addr, msg->msg_flags);
diff --git a/net/qrtr/qrtr.h b/net/qrtr/qrtr.h
index b80a149..f9aede4 100644
--- a/net/qrtr/qrtr.h
+++ b/net/qrtr/qrtr.h
@@ -32,4 +32,5 @@
 
 int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len);
 
+int qrtr_peek_pkt_size(const void *data);
 #endif
diff --git a/net/qrtr/usb.c b/net/qrtr/usb.c
index af21585..fd71df9 100644
--- a/net/qrtr/usb.c
+++ b/net/qrtr/usb.c
@@ -290,6 +290,7 @@
 static const struct usb_device_id qcom_usb_qrtr_ids[] = {
 	{ USB_DEVICE_INTERFACE_NUMBER(QRTR_VENDOR_ID, 0x90ef, 3) },
 	{ USB_DEVICE_INTERFACE_NUMBER(QRTR_VENDOR_ID, 0x90f0, 3) },
+	{ USB_DEVICE_INTERFACE_NUMBER(QRTR_VENDOR_ID, 0x90f3, 2) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, qcom_usb_qrtr_ids);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 762d2c6..17c9d9f 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -78,10 +78,10 @@
 	__rds_create_bind_key(key, addr, port, scope_id);
 	rcu_read_lock();
 	rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
-	if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
-		rds_sock_addref(rs);
-	else
+	if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
+		   !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
 		rs = NULL;
+
 	rcu_read_unlock();
 
 	rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 77e9f85..f2ff21d 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -850,6 +850,7 @@
 
 /*
  *	Route a frame to an appropriate AX.25 connection.
+ *	A NULL ax25_cb indicates an internally generated frame.
  */
 int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 {
@@ -867,6 +868,10 @@
 
 	if (skb->len < ROSE_MIN_LEN)
 		return res;
+
+	if (!ax25)
+		return rose_loopback_queue(skb, NULL);
+
 	frametype = skb->data[2];
 	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
 	if (frametype == ROSE_CALL_REQUEST &&
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 521189f..6e419b1 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -353,7 +353,7 @@
 	 * normally have to take channel_lock but we do this before anyone else
 	 * can see the connection.
 	 */
-	list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
+	list_add(&call->chan_wait_link, &candidate->waiting_calls);
 
 	if (cp->exclusive) {
 		call->conn = candidate;
@@ -432,7 +432,7 @@
 	call->conn = conn;
 	call->security_ix = conn->security_ix;
 	call->service_id = conn->service_id;
-	list_add(&call->chan_wait_link, &conn->waiting_calls);
+	list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
 	spin_unlock(&conn->channel_lock);
 	_leave(" = 0 [extant %d]", conn->debug_id);
 	return 0;
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 816b19a..0374b06 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -596,6 +596,7 @@
 	}
 error_no_call:
 	release_sock(&rx->sk);
+error_trace:
 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
 	return ret;
 
@@ -604,7 +605,7 @@
 wait_error:
 	finish_wait(sk_sleep(&rx->sk), &wait);
 	call = NULL;
-	goto error_no_call;
+	goto error_trace;
 }
 
 /**
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8525de81..334f3a05 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -199,8 +199,7 @@
 err2:
 	kfree(tname);
 err1:
-	if (ret == ACT_P_CREATED)
-		tcf_idr_release(*a, bind);
+	tcf_idr_release(*a, bind);
 	return err;
 }
 
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 73e44ce..86d90fc 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -191,8 +191,7 @@
 
 	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
 	if (unlikely(!params_new)) {
-		if (ret == ACT_P_CREATED)
-			tcf_idr_release(*a, bind);
+		tcf_idr_release(*a, bind);
 		return -ENOMEM;
 	}
 
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 681f6f0..72d9c43 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -197,6 +197,15 @@
 	[TCA_TUNNEL_KEY_ENC_TTL]      = { .type = NLA_U8 },
 };
 
+static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
+{
+	if (!p)
+		return;
+	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+		dst_release(&p->tcft_enc_metadata->dst);
+	kfree_rcu(p, rcu);
+}
+
 static int tunnel_key_init(struct net *net, struct nlattr *nla,
 			   struct nlattr *est, struct tc_action **a,
 			   int ovr, int bind, bool rtnl_held,
@@ -360,8 +369,7 @@
 	rcu_swap_protected(t->params, params_new,
 			   lockdep_is_held(&t->tcf_lock));
 	spin_unlock_bh(&t->tcf_lock);
-	if (params_new)
-		kfree_rcu(params_new, rcu);
+	tunnel_key_release_params(params_new);
 
 	if (ret == ACT_P_CREATED)
 		tcf_idr_insert(tn, *a);
@@ -369,7 +377,8 @@
 	return ret;
 
 release_tun_meta:
-	dst_release(&metadata->dst);
+	if (metadata)
+		dst_release(&metadata->dst);
 
 err_out:
 	if (exists)
@@ -385,12 +394,7 @@
 	struct tcf_tunnel_key_params *params;
 
 	params = rcu_dereference_protected(t->params, 1);
-	if (params) {
-		if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
-			dst_release(&params->tcft_enc_metadata->dst);
-
-		kfree_rcu(params, rcu);
-	}
+	tunnel_key_release_params(params);
 }
 
 static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 70f144a..2167c6c 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -960,7 +960,6 @@
 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		 struct tcf_result *res, bool compat_mode)
 {
-	__be16 protocol = tc_skb_protocol(skb);
 #ifdef CONFIG_NET_CLS_ACT
 	const int max_reclassify_loop = 4;
 	const struct tcf_proto *orig_tp = tp;
@@ -970,6 +969,7 @@
 reclassify:
 #endif
 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
+		__be16 protocol = tc_skb_protocol(skb);
 		int err;
 
 		if (tp->protocol != protocol &&
@@ -1002,7 +1002,6 @@
 	}
 
 	tp = first_tp;
-	protocol = tc_skb_protocol(skb);
 	goto reclassify;
 #endif
 }
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 7fade71..09b3597 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1176,17 +1176,23 @@
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
 	struct cls_fl_filter *fold = *arg;
 	struct cls_fl_filter *fnew;
+	struct fl_flow_mask *mask;
 	struct nlattr **tb;
-	struct fl_flow_mask mask = {};
 	int err;
 
 	if (!tca[TCA_OPTIONS])
 		return -EINVAL;
 
-	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
-	if (!tb)
+	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
+	if (!mask)
 		return -ENOBUFS;
 
+	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
+	if (!tb) {
+		err = -ENOBUFS;
+		goto errout_mask_alloc;
+	}
+
 	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
 			       fl_policy, NULL);
 	if (err < 0)
@@ -1207,6 +1213,24 @@
 	if (err < 0)
 		goto errout;
 
+	if (tb[TCA_FLOWER_FLAGS]) {
+		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+
+		if (!tc_flags_valid(fnew->flags)) {
+			err = -EINVAL;
+			goto errout;
+		}
+	}
+
+	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
+			   tp->chain->tmplt_priv, extack);
+	if (err)
+		goto errout;
+
+	err = fl_check_assign_mask(head, fnew, fold, mask);
+	if (err)
+		goto errout;
+
 	if (!handle) {
 		handle = 1;
 		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
@@ -1217,37 +1241,19 @@
 				    handle, GFP_KERNEL);
 	}
 	if (err)
-		goto errout;
+		goto errout_mask;
 	fnew->handle = handle;
 
-	if (tb[TCA_FLOWER_FLAGS]) {
-		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
-
-		if (!tc_flags_valid(fnew->flags)) {
-			err = -EINVAL;
-			goto errout_idr;
-		}
-	}
-
-	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
-			   tp->chain->tmplt_priv, extack);
-	if (err)
-		goto errout_idr;
-
-	err = fl_check_assign_mask(head, fnew, fold, &mask);
-	if (err)
-		goto errout_idr;
-
 	if (!tc_skip_sw(fnew->flags)) {
 		if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
 			err = -EEXIST;
-			goto errout_mask;
+			goto errout_idr;
 		}
 
 		err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
 					     fnew->mask->filter_ht_params);
 		if (err)
-			goto errout_mask;
+			goto errout_idr;
 	}
 
 	if (!tc_skip_hw(fnew->flags)) {
@@ -1281,19 +1287,23 @@
 	}
 
 	kfree(tb);
+	kfree(mask);
 	return 0;
 
-errout_mask:
-	fl_mask_put(head, fnew->mask, false);
-
 errout_idr:
 	if (!fold)
 		idr_remove(&head->handle_idr, fnew->handle);
+
+errout_mask:
+	fl_mask_put(head, fnew->mask, false);
+
 errout:
 	tcf_exts_destroy(&fnew->exts);
 	kfree(fnew);
 errout_tb:
 	kfree(tb);
+errout_mask_alloc:
+	kfree(mask);
 	return err;
 }
 
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 9ccc93f..38bb882 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -48,7 +48,7 @@
 	u32 hash;		/* hash table size; 0 if undefined */
 	u32 alloc_hash;		/* allocated size */
 	u32 fall_through;	/* 0: only classify if explicit match */
-	struct rcu_head rcu;
+	struct rcu_work rwork;
 };
 
 static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
@@ -221,17 +221,11 @@
 	return 0;
 }
 
-static int tcindex_destroy_element(struct tcf_proto *tp,
-				   void *arg, struct tcf_walker *walker)
+static void tcindex_destroy_work(struct work_struct *work)
 {
-	bool last;
-
-	return tcindex_delete(tp, arg, &last, NULL);
-}
-
-static void __tcindex_destroy(struct rcu_head *head)
-{
-	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+	struct tcindex_data *p = container_of(to_rcu_work(work),
+					      struct tcindex_data,
+					      rwork);
 
 	kfree(p->perfect);
 	kfree(p->h);
@@ -258,9 +252,11 @@
 	return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
 }
 
-static void __tcindex_partial_destroy(struct rcu_head *head)
+static void tcindex_partial_destroy_work(struct work_struct *work)
 {
-	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+	struct tcindex_data *p = container_of(to_rcu_work(work),
+					      struct tcindex_data,
+					      rwork);
 
 	kfree(p->perfect);
 	kfree(p);
@@ -275,7 +271,7 @@
 	kfree(cp->perfect);
 }
 
-static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
+static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
 {
 	int i, err = 0;
 
@@ -289,6 +285,9 @@
 				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
 		if (err < 0)
 			goto errout;
+#ifdef CONFIG_NET_CLS_ACT
+		cp->perfect[i].exts.net = net;
+#endif
 	}
 
 	return 0;
@@ -305,9 +304,9 @@
 		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
 {
 	struct tcindex_filter_result new_filter_result, *old_r = r;
-	struct tcindex_filter_result cr;
 	struct tcindex_data *cp = NULL, *oldp;
 	struct tcindex_filter *f = NULL; /* make gcc behave */
+	struct tcf_result cr = {};
 	int err, balloc = 0;
 	struct tcf_exts e;
 
@@ -337,7 +336,7 @@
 	if (p->perfect) {
 		int i;
 
-		if (tcindex_alloc_perfect_hash(cp) < 0)
+		if (tcindex_alloc_perfect_hash(net, cp) < 0)
 			goto errout;
 		for (i = 0; i < cp->hash; i++)
 			cp->perfect[i].res = p->perfect[i].res;
@@ -348,11 +347,8 @@
 	err = tcindex_filter_result_init(&new_filter_result);
 	if (err < 0)
 		goto errout1;
-	err = tcindex_filter_result_init(&cr);
-	if (err < 0)
-		goto errout1;
 	if (old_r)
-		cr.res = r->res;
+		cr = r->res;
 
 	if (tb[TCA_TCINDEX_HASH])
 		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -406,7 +402,7 @@
 	err = -ENOMEM;
 	if (!cp->perfect && !cp->h) {
 		if (valid_perfect_hash(cp)) {
-			if (tcindex_alloc_perfect_hash(cp) < 0)
+			if (tcindex_alloc_perfect_hash(net, cp) < 0)
 				goto errout_alloc;
 			balloc = 1;
 		} else {
@@ -443,8 +439,8 @@
 	}
 
 	if (tb[TCA_TCINDEX_CLASSID]) {
-		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
-		tcf_bind_filter(tp, &cr.res, base);
+		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
+		tcf_bind_filter(tp, &cr, base);
 	}
 
 	if (old_r && old_r != r) {
@@ -456,7 +452,7 @@
 	}
 
 	oldp = p;
-	r->res = cr.res;
+	r->res = cr;
 	tcf_exts_change(&r->exts, &e);
 
 	rcu_assign_pointer(tp->root, cp);
@@ -475,10 +471,12 @@
 				; /* nothing */
 
 		rcu_assign_pointer(*fp, f);
+	} else {
+		tcf_exts_destroy(&new_filter_result.exts);
 	}
 
 	if (oldp)
-		call_rcu(&oldp->rcu, __tcindex_partial_destroy);
+		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
 	return 0;
 
 errout_alloc:
@@ -487,7 +485,6 @@
 	else if (balloc == 2)
 		kfree(cp->h);
 errout1:
-	tcf_exts_destroy(&cr.exts);
 	tcf_exts_destroy(&new_filter_result.exts);
 errout:
 	kfree(cp);
@@ -562,15 +559,34 @@
 			    struct netlink_ext_ack *extack)
 {
 	struct tcindex_data *p = rtnl_dereference(tp->root);
-	struct tcf_walker walker;
+	int i;
 
 	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
-	walker.count = 0;
-	walker.skip = 0;
-	walker.fn = tcindex_destroy_element;
-	tcindex_walk(tp, &walker);
 
-	call_rcu(&p->rcu, __tcindex_destroy);
+	if (p->perfect) {
+		for (i = 0; i < p->hash; i++) {
+			struct tcindex_filter_result *r = p->perfect + i;
+
+			tcf_unbind_filter(tp, &r->res);
+			if (tcf_exts_get_net(&r->exts))
+				tcf_queue_work(&r->rwork,
+					       tcindex_destroy_rexts_work);
+			else
+				__tcindex_destroy_rexts(r);
+		}
+	}
+
+	for (i = 0; p->h && i < p->hash; i++) {
+		struct tcindex_filter *f, *next;
+		bool last;
+
+		for (f = rtnl_dereference(p->h[i]); f; f = next) {
+			next = rtnl_dereference(f->next);
+			tcindex_delete(tp, &f->result, &last, NULL);
+		}
+	}
+
+	tcf_queue_work(&p->rwork, tcindex_destroy_work);
 }
 
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 69078c8..77b289d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -68,7 +68,7 @@
 			skb = __skb_dequeue(&q->skb_bad_txq);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_cpu_qlen_dec(q);
+				qdisc_qstats_atomic_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
@@ -108,7 +108,7 @@
 
 	if (qdisc_is_percpu_stats(q)) {
 		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_cpu_qlen_inc(q);
+		qdisc_qstats_atomic_qlen_inc(q);
 	} else {
 		qdisc_qstats_backlog_inc(q, skb);
 		q->q.qlen++;
@@ -147,7 +147,7 @@
 
 		qdisc_qstats_cpu_requeues_inc(q);
 		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_cpu_qlen_inc(q);
+		qdisc_qstats_atomic_qlen_inc(q);
 
 		skb = next;
 	}
@@ -252,7 +252,7 @@
 			skb = __skb_dequeue(&q->gso_skb);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_cpu_qlen_dec(q);
+				qdisc_qstats_atomic_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
@@ -633,7 +633,7 @@
 	if (unlikely(err))
 		return qdisc_drop_cpu(skb, qdisc, to_free);
 
-	qdisc_qstats_cpu_qlen_inc(qdisc);
+	qdisc_qstats_atomic_qlen_inc(qdisc);
 	/* Note: skb can not be used after skb_array_produce(),
 	 * so we better not use qdisc_qstats_cpu_backlog_inc()
 	 */
@@ -658,7 +658,7 @@
 	if (likely(skb)) {
 		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
 		qdisc_bstats_cpu_update(qdisc, skb);
-		qdisc_qstats_cpu_qlen_dec(qdisc);
+		qdisc_qstats_atomic_qlen_dec(qdisc);
 	}
 
 	return skb;
@@ -702,7 +702,6 @@
 		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
 
 		q->backlog = 0;
-		q->qlen = 0;
 	}
 }
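The qlen counter moves from per-CPU to a single shared atomic because readers such as qdisc_qlen() need one coherent value; summing per-CPU counters at read time races with concurrent enqueue/dequeue (our reading of the change). That is also why the per-CPU reset loop above no longer clears qlen: there is no per-CPU copy left. A sketch of what the new helpers plausibly do (the matching header hunk is not shown here, so the field name is an assumption):

	static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
	{
		atomic_inc(&sch->q.atomic_qlen);
	}

	static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
	{
		atomic_dec(&sch->q.atomic_qlen);
	}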
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 74c0f65..4dfe10b9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -440,6 +440,7 @@
 	int nb = 0;
 	int count = 1;
 	int rc = NET_XMIT_SUCCESS;
+	int rc_drop = NET_XMIT_DROP;
 
 	/* Do not fool qdisc_drop_all() */
 	skb->prev = NULL;
@@ -479,6 +480,7 @@
 		q->duplicate = 0;
 		rootq->enqueue(skb2, rootq, to_free);
 		q->duplicate = dupsave;
+		rc_drop = NET_XMIT_SUCCESS;
 	}
 
 	/*
@@ -491,7 +493,7 @@
 		if (skb_is_gso(skb)) {
 			segs = netem_segment(skb, sch, to_free);
 			if (!segs)
-				return NET_XMIT_DROP;
+				return rc_drop;
 		} else {
 			segs = skb;
 		}
@@ -514,8 +516,10 @@
 			1<<(prandom_u32() % 8);
 	}
 
-	if (unlikely(sch->q.qlen >= sch->limit))
-		return qdisc_drop_all(skb, sch, to_free);
+	if (unlikely(sch->q.qlen >= sch->limit)) {
+		qdisc_drop_all(skb, sch, to_free);
+		return rc_drop;
+	}
 
 	qdisc_qstats_backlog_inc(sch, skb);
 
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 078f01a..435847d 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -256,6 +256,7 @@
 		+ nla_total_size(1) /* INET_DIAG_TOS */
 		+ nla_total_size(1) /* INET_DIAG_TCLASS */
 		+ nla_total_size(4) /* INET_DIAG_MARK */
+		+ nla_total_size(4) /* INET_DIAG_CLASS_ID */
 		+ nla_total_size(addrlen * asoc->peer.transport_count)
 		+ nla_total_size(addrlen * addrcnt)
 		+ nla_total_size(sizeof(struct inet_diag_meminfo))
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index fc6c5e4..4fede55 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -97,10 +97,9 @@
 
 	switch (ev) {
 	case NETDEV_UP:
-		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v6.sin6_family = AF_INET6;
-			addr->a.v6.sin6_port = 0;
 			addr->a.v6.sin6_addr = ifa->addr;
 			addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
 			addr->valid = 1;
@@ -278,7 +277,8 @@
 
 	if (saddr) {
 		fl6->saddr = saddr->v6.sin6_addr;
-		fl6->fl6_sport = saddr->v6.sin6_port;
+		if (!fl6->fl6_sport)
+			fl6->fl6_sport = saddr->v6.sin6_port;
 
 		pr_debug("src=%pI6 - ", &fl6->saddr);
 	}
@@ -430,7 +430,6 @@
 		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v6.sin6_family = AF_INET6;
-			addr->a.v6.sin6_port = 0;
 			addr->a.v6.sin6_addr = ifp->addr;
 			addr->a.v6.sin6_scope_id = dev->ifindex;
 			addr->valid = 1;
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 123e9f2..edfcf16 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -36,6 +36,7 @@
 {
 	skb->ip_summed = CHECKSUM_NONE;
 	skb->csum_not_inet = 0;
+	gso_reset_checksum(skb, ~0);
 	return sctp_compute_cksum(skb, skb_transport_offset(skb));
 }
 
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e948db2..1c9f079 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -101,7 +101,6 @@
 		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v4.sin_family = AF_INET;
-			addr->a.v4.sin_port = 0;
 			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
 			addr->valid = 1;
 			INIT_LIST_HEAD(&addr->list);
@@ -441,7 +440,8 @@
 	}
 	if (saddr) {
 		fl4->saddr = saddr->v4.sin_addr.s_addr;
-		fl4->fl4_sport = saddr->v4.sin_port;
+		if (!fl4->fl4_sport)
+			fl4->fl4_sport = saddr->v4.sin_port;
 	}
 
 	pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
@@ -776,10 +776,9 @@
 
 	switch (ev) {
 	case NETDEV_UP:
-		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v4.sin_family = AF_INET;
-			addr->a.v4.sin_port = 0;
 			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
 			addr->valid = 1;
 			spin_lock_bh(&net->sctp.local_addr_lock);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f4ac6c5..d05c576 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -495,7 +495,10 @@
 	 *
 	 * [INIT ACK back to where the INIT came from.]
 	 */
-	retval->transport = chunk->transport;
+	if (chunk->transport)
+		retval->transport =
+			sctp_assoc_lookup_paddr(asoc,
+						&chunk->transport->ipaddr);
 
 	retval->subh.init_hdr =
 		sctp_addto_chunk(retval, sizeof(initack), &initack);
@@ -642,8 +645,10 @@
 	 *
 	 * [COOKIE ACK back to where the COOKIE ECHO came from.]
 	 */
-	if (retval && chunk)
-		retval->transport = chunk->transport;
+	if (retval && chunk && chunk->transport)
+		retval->transport =
+			sctp_assoc_lookup_paddr(asoc,
+						&chunk->transport->ipaddr);
 
 	return retval;
 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 876393c..1b16250 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1884,6 +1884,7 @@
 
 		pr_debug("%s: aborting association:%p\n", __func__, asoc);
 		sctp_primitive_ABORT(net, asoc, chunk);
+		iov_iter_revert(&msg->msg_iter, msg_len);
 
 		return 0;
 	}
@@ -2045,7 +2046,7 @@
 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
 	struct sctp_transport *transport = NULL;
 	struct sctp_sndrcvinfo _sinfo, *sinfo;
-	struct sctp_association *asoc;
+	struct sctp_association *asoc, *tmp;
 	struct sctp_cmsgs cmsgs;
 	union sctp_addr *daddr;
 	bool new = false;
@@ -2071,7 +2072,7 @@
 
 	/* SCTP_SENDALL process */
 	if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
-		list_for_each_entry(asoc, &ep->asocs, asocs) {
+		list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
 			err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
 							msg_len);
 			if (err == 0)
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 3892e76..3b47457 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -84,6 +84,19 @@
 	}
 }
 
+static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
+{
+	size_t index = 0;
+
+	while (count--) {
+		if (elem == flex_array_get(fa, index))
+			break;
+		index++;
+	}
+
+	return index;
+}
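fa_index() walks the flex_array because its elements live in separately allocated parts, so pointer arithmetic across the array is meaningless; the remap of out_curr after fa_copy() below is the flex_array spelling of what, for flat arrays, would simply be (plain-C analogue, illustrative only):

	/* out_curr points into the old array; recompute the same
	 * logical slot in the new copy.
	 */
	size_t index = out_curr - old_out;	/* valid only for flat arrays */
	out_curr = &new_out[index];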
+
 /* Migrates chunks from stream queues to new stream queues if needed,
  * but not across associations. Also, removes those chunks to streams
  * higher than the new max.
@@ -131,8 +144,10 @@
 		}
 	}
 
-	for (i = outcnt; i < stream->outcnt; i++)
+	for (i = outcnt; i < stream->outcnt; i++) {
 		kfree(SCTP_SO(stream, i)->ext);
+		SCTP_SO(stream, i)->ext = NULL;
+	}
 }
 
 static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
@@ -147,6 +162,13 @@
 
 	if (stream->out) {
 		fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
+		if (stream->out_curr) {
+			size_t index = fa_index(stream->out, stream->out_curr,
+						stream->outcnt);
+
+			BUG_ON(index == stream->outcnt);
+			stream->out_curr = flex_array_get(out, index);
+		}
 		fa_free(stream->out);
 	}
 
@@ -208,8 +230,6 @@
 	for (i = 0; i < stream->outcnt; i++)
 		SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
 
-	sched->init(stream);
-
 in:
 	sctp_stream_interleave_init(stream);
 	if (!incnt)
@@ -585,9 +605,9 @@
 	struct sctp_strreset_outreq *outreq = param.v;
 	struct sctp_stream *stream = &asoc->stream;
 	__u32 result = SCTP_STRRESET_DENIED;
-	__u16 i, nums, flags = 0;
 	__be16 *str_p = NULL;
 	__u32 request_seq;
+	__u16 i, nums;
 
 	request_seq = ntohl(outreq->request_seq);
 
@@ -615,6 +635,15 @@
 	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
 		goto out;
 
+	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
+	str_p = outreq->list_of_streams;
+	for (i = 0; i < nums; i++) {
+		if (ntohs(str_p[i]) >= stream->incnt) {
+			result = SCTP_STRRESET_ERR_WRONG_SSN;
+			goto out;
+		}
+	}
+
 	if (asoc->strreset_chunk) {
 		if (!sctp_chunk_lookup_strreset_param(
 				asoc, outreq->response_seq,
@@ -637,32 +666,19 @@
 			sctp_chunk_put(asoc->strreset_chunk);
 			asoc->strreset_chunk = NULL;
 		}
-
-		flags = SCTP_STREAM_RESET_INCOMING_SSN;
 	}
 
-	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
-	if (nums) {
-		str_p = outreq->list_of_streams;
-		for (i = 0; i < nums; i++) {
-			if (ntohs(str_p[i]) >= stream->incnt) {
-				result = SCTP_STRRESET_ERR_WRONG_SSN;
-				goto out;
-			}
-		}
-
+	if (nums)
 		for (i = 0; i < nums; i++)
 			SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
-	} else {
+	else
 		for (i = 0; i < stream->incnt; i++)
 			SCTP_SI(stream, i)->mid = 0;
-	}
 
 	result = SCTP_STRRESET_PERFORMED;
 
 	*evp = sctp_ulpevent_make_stream_reset_event(asoc,
-		flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p,
-		GFP_ATOMIC);
+		SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
 
 out:
 	sctp_update_strreset_result(asoc, result);
@@ -738,9 +754,6 @@
 
 	result = SCTP_STRRESET_PERFORMED;
 
-	*evp = sctp_ulpevent_make_stream_reset_event(asoc,
-		SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
-
 out:
 	sctp_update_strreset_result(asoc, result);
 err:
@@ -873,6 +886,14 @@
 	if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
 		goto out;
 
+	in = ntohs(addstrm->number_of_streams);
+	incnt = stream->incnt + in;
+	if (!in || incnt > SCTP_MAX_STREAM)
+		goto out;
+
+	if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
+		goto out;
+
 	if (asoc->strreset_chunk) {
 		if (!sctp_chunk_lookup_strreset_param(
 			asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
@@ -896,14 +917,6 @@
 		}
 	}
 
-	in = ntohs(addstrm->number_of_streams);
-	incnt = stream->incnt + in;
-	if (!in || incnt > SCTP_MAX_STREAM)
-		goto out;
-
-	if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
-		goto out;
-
 	stream->incnt = incnt;
 
 	result = SCTP_STRRESET_PERFORMED;
@@ -973,9 +986,6 @@
 
 	result = SCTP_STRRESET_PERFORMED;
 
-	*evp = sctp_ulpevent_make_stream_change_event(asoc,
-		0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
-
 out:
 	sctp_update_strreset_result(asoc, result);
 err:
@@ -1036,10 +1046,10 @@
 					sout->mid_uo = 0;
 				}
 			}
-
-			flags = SCTP_STREAM_RESET_OUTGOING_SSN;
 		}
 
+		flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
+
 		for (i = 0; i < stream->outcnt; i++)
 			SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
 
@@ -1058,6 +1068,8 @@
 		nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
 		       sizeof(__u16);
 
+		flags |= SCTP_STREAM_RESET_INCOMING_SSN;
+
 		*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
 			nums, str_p, GFP_ATOMIC);
 	} else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 80e2119..e6e506b 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -144,9 +144,18 @@
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
 	}
+
+	sk->sk_prot->unhash(sk);
+
 	if (smc->clcsock) {
+		if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
+			/* wake up clcsock accept */
+			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+		}
+		mutex_lock(&smc->clcsock_release_lock);
 		sock_release(smc->clcsock);
 		smc->clcsock = NULL;
+		mutex_unlock(&smc->clcsock_release_lock);
 	}
 	if (smc->use_fallback) {
 		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
@@ -162,7 +171,6 @@
 		smc_conn_free(&smc->conn);
 	release_sock(sk);
 
-	sk->sk_prot->unhash(sk);
 	sock_put(sk); /* final sock_put */
 out:
 	return rc;
@@ -203,6 +211,7 @@
 	spin_lock_init(&smc->conn.send_lock);
 	sk->sk_prot->hash(sk);
 	sk_refcnt_debug_inc(sk);
+	mutex_init(&smc->clcsock_release_lock);
 
 	return sk;
 }
@@ -818,7 +827,7 @@
 	struct socket *new_clcsock = NULL;
 	struct sock *lsk = &lsmc->sk;
 	struct sock *new_sk;
-	int rc;
+	int rc = -EINVAL;
 
 	release_sock(lsk);
 	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
@@ -831,7 +840,10 @@
 	}
 	*new_smc = smc_sk(new_sk);
 
-	rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+	mutex_lock(&lsmc->clcsock_release_lock);
+	if (lsmc->clcsock)
+		rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+	mutex_unlock(&lsmc->clcsock_release_lock);
 	lock_sock(lsk);
 	if  (rc < 0)
 		lsk->sk_err = -rc;
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 08786ac..adbdf19 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -113,9 +113,9 @@
 } __aligned(8);
 
 enum smc_urg_state {
-	SMC_URG_VALID,			/* data present */
-	SMC_URG_NOTYET,			/* data pending */
-	SMC_URG_READ			/* data was already read */
+	SMC_URG_VALID	= 1,			/* data present */
+	SMC_URG_NOTYET	= 2,			/* data pending */
+	SMC_URG_READ	= 3,			/* data was already read */
 };
 
 struct smc_connection {
@@ -219,6 +219,10 @@
 						 * started, waiting for unsent
 						 * data to be sent
 						 */
+	struct mutex            clcsock_release_lock;
+						/* protects clcsock of a listen
+						 * socket
+						 */
 };
 
 static inline struct smc_sock *smc_sk(const struct sock *sk)
diff --git a/net/socket.c b/net/socket.c
index 390a8ec..7a0ddf8 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -115,6 +115,8 @@
 
 static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
 static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
+static BLOCKING_NOTIFIER_HEAD(sockev_notifier_list);
+
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
@@ -163,6 +165,14 @@
 static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
+ * Socket Event framework helpers
+ */
+static void sockev_notify(unsigned long event, struct socket *sk)
+{
+	blocking_notifier_call_chain(&sockev_notifier_list, event, sk);
+}
+
+/**
  * Support routines.
  * Move socket addresses back and forth across the kernel/user
  * divide and look after the messy bits.
@@ -577,6 +587,7 @@
 		if (inode)
 			inode_lock(inode);
 		sock->ops->release(sock);
+		sock->sk = NULL;
 		if (inode)
 			inode_unlock(inode);
 		sock->ops = NULL;
@@ -941,8 +952,7 @@
 EXPORT_SYMBOL(dlci_ioctl_set);
 
 static long sock_do_ioctl(struct net *net, struct socket *sock,
-			  unsigned int cmd, unsigned long arg,
-			  unsigned int ifreq_size)
+			  unsigned int cmd, unsigned long arg)
 {
 	int err;
 	void __user *argp = (void __user *)arg;
@@ -968,11 +978,11 @@
 	} else {
 		struct ifreq ifr;
 		bool need_copyout;
-		if (copy_from_user(&ifr, argp, ifreq_size))
+		if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
 			return -EFAULT;
 		err = dev_ioctl(net, cmd, &ifr, &need_copyout);
 		if (!err && need_copyout)
-			if (copy_to_user(argp, &ifr, ifreq_size))
+			if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
 				return -EFAULT;
 	}
 	return err;
@@ -1071,8 +1081,7 @@
 			err = open_related_ns(&net->ns, get_net_ns);
 			break;
 		default:
-			err = sock_do_ioctl(net, sock, cmd, arg,
-					    sizeof(struct ifreq));
+			err = sock_do_ioctl(net, sock, cmd, arg);
 			break;
 		}
 	return err;
@@ -1348,6 +1357,9 @@
 	if (retval < 0)
 		return retval;
 
+	if (retval == 0)
+		sockev_notify(SOCKEV_SOCKET, sock);
+
 	return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
 }
 
@@ -1485,6 +1497,8 @@
 						      &address, addrlen);
 		}
 		fput_light(sock->file, fput_needed);
+		if (!err)
+			sockev_notify(SOCKEV_BIND, sock);
 	}
 	return err;
 }
@@ -1517,6 +1531,8 @@
 			err = sock->ops->listen(sock, backlog);
 
 		fput_light(sock->file, fput_needed);
+		if (!err)
+			sockev_notify(SOCKEV_LISTEN, sock);
 	}
 	return err;
 }
@@ -1608,7 +1624,8 @@
 
 	fd_install(newfd, newfile);
 	err = newfd;
-
+	if (!err)
+		sockev_notify(SOCKEV_ACCEPT, sock);
 out_put:
 	fput_light(sock->file, fput_needed);
 out:
@@ -1663,6 +1680,8 @@
 
 	err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
 				 sock->file->f_flags);
+	if (!err)
+		sockev_notify(SOCKEV_CONNECT, sock);
 out_put:
 	fput_light(sock->file, fput_needed);
 out:
@@ -1961,6 +1980,7 @@
 
 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
 	if (sock != NULL) {
+		sockev_notify(SOCKEV_SHUTDOWN, sock);
 		err = security_socket_shutdown(sock, how);
 		if (!err)
 			err = sock->ops->shutdown(sock, how);
@@ -2752,8 +2772,7 @@
 	int err;
 
 	set_fs(KERNEL_DS);
-	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
-			    sizeof(struct compat_ifreq));
+	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
 	set_fs(old_fs);
 	if (!err)
 		err = compat_put_timeval(&ktv, up);
@@ -2769,8 +2788,7 @@
 	int err;
 
 	set_fs(KERNEL_DS);
-	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
-			    sizeof(struct compat_ifreq));
+	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
 	set_fs(old_fs);
 	if (!err)
 		err = compat_put_timespec(&kts, up);
@@ -2966,6 +2984,54 @@
 	return dev_ioctl(net, cmd, &ifreq, NULL);
 }
 
+static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
+			      unsigned int cmd,
+			      struct compat_ifreq __user *uifr32)
+{
+	struct ifreq __user *uifr;
+	int err;
+
+	/* Handle the fact that while struct ifreq has the same *layout* on
+	 * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
+	 * which are handled elsewhere, it still has different *size* due to
+	 * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
+	 * resulting in struct ifreq being 32 and 40 bytes respectively).
+	 * As a result, if the struct happens to be at the end of a page and
+	 * the next page isn't readable/writable, we get a fault. To prevent
+	 * that, copy back and forth to the full size.
+	 */
+
+	uifr = compat_alloc_user_space(sizeof(*uifr));
+	if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
+		return -EFAULT;
+
+	err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
+
+	if (!err) {
+		switch (cmd) {
+		case SIOCGIFFLAGS:
+		case SIOCGIFMETRIC:
+		case SIOCGIFMTU:
+		case SIOCGIFMEM:
+		case SIOCGIFHWADDR:
+		case SIOCGIFINDEX:
+		case SIOCGIFADDR:
+		case SIOCGIFBRDADDR:
+		case SIOCGIFDSTADDR:
+		case SIOCGIFNETMASK:
+		case SIOCGIFPFLAGS:
+		case SIOCGIFTXQLEN:
+		case SIOCGMIIPHY:
+		case SIOCGMIIREG:
+		case SIOCGIFNAME:
+			if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
+				err = -EFAULT;
+			break;
+		}
+	}
+	return err;
+}
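To make the size mismatch described in the comment concrete: with IFNAMSIZ = 16, the union is dominated by ifru_ifmap (16 bytes on 32-bit, 24 on 64-bit), giving the 32- and 40-byte totals the comment cites. An assertion one could drop into the helper to document the assumption it relies on (illustrative):

	/* The compat layout must never be larger than the native one;
	 * otherwise the kernel-side struct ifreq could not hold
	 * everything the 32-bit caller supplied.
	 */
	BUILD_BUG_ON(sizeof(struct compat_ifreq) > sizeof(struct ifreq));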
+
 static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
 			struct compat_ifreq __user *uifr32)
 {
@@ -3081,8 +3147,7 @@
 	}
 
 	set_fs(KERNEL_DS);
-	ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
-			    sizeof(struct compat_ifreq));
+	ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
 	set_fs(old_fs);
 
 out:
@@ -3182,21 +3247,22 @@
 	case SIOCSIFTXQLEN:
 	case SIOCBRADDIF:
 	case SIOCBRDELIF:
+	case SIOCGIFNAME:
 	case SIOCSIFNAME:
 	case SIOCGMIIPHY:
 	case SIOCGMIIREG:
 	case SIOCSMIIREG:
-	case SIOCSARP:
-	case SIOCGARP:
-	case SIOCDARP:
-	case SIOCATMARK:
 	case SIOCBONDENSLAVE:
 	case SIOCBONDRELEASE:
 	case SIOCBONDSETHWADDR:
 	case SIOCBONDCHANGEACTIVE:
-	case SIOCGIFNAME:
-		return sock_do_ioctl(net, sock, cmd, arg,
-				     sizeof(struct compat_ifreq));
+		return compat_ifreq_ioctl(net, sock, cmd, argp);
+
+	case SIOCSARP:
+	case SIOCGARP:
+	case SIOCDARP:
+	case SIOCATMARK:
+		return sock_do_ioctl(net, sock, cmd, arg);
 	}
 
 	return -ENOIOCTLCMD;
@@ -3397,3 +3463,14 @@
 	}
 }
 EXPORT_SYMBOL(kernel_sock_ip_overhead);
+int sockev_register_notify(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&sockev_notifier_list, nb);
+}
+EXPORT_SYMBOL(sockev_register_notify);
+
+int sockev_unregister_notify(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&sockev_notifier_list, nb);
+}
+EXPORT_SYMBOL(sockev_unregister_notify);
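A minimal consumer of the new notifier chain might look as follows (illustrative module; the SOCKEV_* event values used by sockev_notify() are defined elsewhere in this series and not shown here):

	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <linux/net.h>

	static int sockev_event(struct notifier_block *nb,
				unsigned long event, void *data)
	{
		struct socket *sock = data;

		/* event is one of the SOCKEV_* values fed to
		 * sockev_notify(); sock is the affected socket.
		 */
		pr_info("sockev: event %lu on socket %p\n", event, sock);
		return NOTIFY_DONE;
	}

	static struct notifier_block sockev_nb = {
		.notifier_call = sockev_event,
	};

	static int __init sockev_demo_init(void)
	{
		return sockev_register_notify(&sockev_nb);
	}

	static void __exit sockev_demo_exit(void)
	{
		sockev_unregister_notify(&sockev_nb);
	}

	module_init(sockev_demo_init);
	module_exit(sockev_demo_exit);
	MODULE_LICENSE("GPL v2");

Because the chain is blocking, callbacks run in process context and may sleep, which is why sockev_notify() is invoked from syscall paths rather than from softirq context.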
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index fb66562..70922d9 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -44,7 +44,7 @@
 		      unsigned char *cksum, unsigned char *buf)
 {
 	struct crypto_sync_skcipher *cipher;
-	unsigned char plain[8];
+	unsigned char *plain;
 	s32 code;
 
 	dprintk("RPC:       %s:\n", __func__);
@@ -52,6 +52,12 @@
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
+	plain = kmalloc(8, GFP_NOFS);
+	if (!plain) {
+		code = -ENOMEM;
+		goto out;
+	}
+
 	plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
 	plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
 	plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
@@ -68,6 +72,7 @@
 	code = krb5_encrypt(cipher, cksum, plain, buf, 8);
 out:
 	crypto_free_sync_skcipher(cipher);
+	kfree(plain);
 	return code;
 }
 s32
@@ -77,12 +82,17 @@
 		u32 seqnum,
 		unsigned char *cksum, unsigned char *buf)
 {
-	unsigned char plain[8];
+	unsigned char *plain;
+	s32 code;
 
 	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
 		return krb5_make_rc4_seq_num(kctx, direction, seqnum,
 					     cksum, buf);
 
+	plain = kmalloc(8, GFP_NOFS);
+	if (!plain)
+		return -ENOMEM;
+
 	plain[0] = (unsigned char) (seqnum & 0xff);
 	plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
 	plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -93,7 +103,9 @@
 	plain[6] = direction;
 	plain[7] = direction;
 
-	return krb5_encrypt(key, cksum, plain, buf, 8);
+	code = krb5_encrypt(key, cksum, plain, buf, 8);
+	kfree(plain);
+	return code;
 }
 
 static s32
@@ -101,7 +113,7 @@
 		     unsigned char *buf, int *direction, s32 *seqnum)
 {
 	struct crypto_sync_skcipher *cipher;
-	unsigned char plain[8];
+	unsigned char *plain;
 	s32 code;
 
 	dprintk("RPC:       %s:\n", __func__);
@@ -113,20 +125,28 @@
 	if (code)
 		goto out;
 
+	plain = kmalloc(8, GFP_NOFS);
+	if (!plain) {
+		code = -ENOMEM;
+		goto out;
+	}
+
 	code = krb5_decrypt(cipher, cksum, buf, plain, 8);
 	if (code)
-		goto out;
+		goto out_plain;
 
 	if ((plain[4] != plain[5]) || (plain[4] != plain[6])
 				   || (plain[4] != plain[7])) {
 		code = (s32)KG_BAD_SEQ;
-		goto out;
+		goto out_plain;
 	}
 
 	*direction = plain[4];
 
 	*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
 					(plain[2] << 8) | (plain[3]));
+out_plain:
+	kfree(plain);
 out:
 	crypto_free_sync_skcipher(cipher);
 	return code;
@@ -139,26 +159,33 @@
 	       int *direction, u32 *seqnum)
 {
 	s32 code;
-	unsigned char plain[8];
 	struct crypto_sync_skcipher *key = kctx->seq;
+	unsigned char *plain;
 
 	dprintk("RPC:       krb5_get_seq_num:\n");
 
 	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
 		return krb5_get_rc4_seq_num(kctx, cksum, buf,
 					    direction, seqnum);
+	plain = kmalloc(8, GFP_NOFS);
+	if (!plain)
+		return -ENOMEM;
 
 	if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
-		return code;
+		goto out;
 
 	if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
-	    (plain[4] != plain[7]))
-		return (s32)KG_BAD_SEQ;
+	    (plain[4] != plain[7])) {
+		code = (s32)KG_BAD_SEQ;
+		goto out;
+	}
 
 	*direction = plain[4];
 
 	*seqnum = ((plain[0]) |
 		   (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
 
-	return 0;
+out:
+	kfree(plain);
+	return code;
 }
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 860f2a1b..1a65f88 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1122,7 +1122,7 @@
 	struct kvec *resv = &rqstp->rq_res.head[0];
 	struct rsi *rsip, rsikey;
 	int ret;
-	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
 
 	memset(&rsikey, 0, sizeof(rsikey));
 	ret = gss_read_verf(gc, argv, authp,
@@ -1233,7 +1233,7 @@
 	uint64_t handle;
 	int status;
 	int ret;
-	struct net *net = rqstp->rq_xprt->xpt_net;
+	struct net *net = SVC_NET(rqstp);
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
 	memset(&ud, 0, sizeof(ud));
@@ -1424,7 +1424,7 @@
 	__be32		*rpcstart;
 	__be32		*reject_stat = resv->iov_base + resv->iov_len;
 	int		ret;
-	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
 
 	dprintk("RPC:       svcauth_gss: argv->iov_len = %zd\n",
 			argv->iov_len);
@@ -1714,7 +1714,7 @@
 	struct rpc_gss_wire_cred *gc = &gsd->clcred;
 	struct xdr_buf *resbuf = &rqstp->rq_res;
 	int stat = -EINVAL;
-	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
 
 	if (gc->gc_proc != RPC_GSS_PROC_DATA)
 		goto out;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 109fbe5..b6e8ecc 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,6 +54,11 @@
 	h->last_refresh = now;
 }
 
+static void cache_fresh_locked(struct cache_head *head, time_t expiry,
+				struct cache_detail *detail);
+static void cache_fresh_unlocked(struct cache_head *head,
+				struct cache_detail *detail);
+
 struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
 				       struct cache_head *key, int hash)
 {
@@ -95,6 +100,7 @@
 			if (cache_is_expired(detail, tmp)) {
 				hlist_del_init(&tmp->cache_list);
 				detail->entries --;
+				cache_fresh_locked(tmp, 0, detail);
 				freeme = tmp;
 				break;
 			}
@@ -110,8 +116,10 @@
 	cache_get(new);
 	write_unlock(&detail->hash_lock);
 
-	if (freeme)
+	if (freeme) {
+		cache_fresh_unlocked(freeme, detail);
 		cache_put(freeme, detail);
+	}
 	return new;
 }
 EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 8ea2f5f..1fc812b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1992,13 +1992,15 @@
 static void
 call_transmit_status(struct rpc_task *task)
 {
+	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
 	task->tk_action = call_status;
 
 	/*
 	 * Common case: success.  Force the compiler to put this
-	 * test first.
+	 * test first.  Or, if any error and xprt_close_wait,
+	 * release the xprt lock so the socket can close.
 	 */
-	if (task->tk_status == 0) {
+	if (task->tk_status == 0 || xprt_close_wait(xprt)) {
 		xprt_end_transmit(task);
 		rpc_task_force_reencode(task);
 		return;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index c7872bc..08b5fa4 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -771,6 +771,12 @@
 	case RPCBVERS_3:
 		map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
 		map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
+		if (!map->r_addr) {
+			status = -ENOMEM;
+			dprintk("RPC: %5u %s: no memory available\n",
+				task->tk_pid, __func__);
+			goto bailout_free_args;
+		}
 		map->r_owner = "";
 		break;
 	case RPCBVERS_2:
@@ -793,6 +799,8 @@
 	rpc_put_task(child);
 	return;
 
+bailout_free_args:
+	kfree(map);
 bailout_release_client:
 	rpc_release_client(rpcb_clnt);
 bailout_nofree:
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index d13e05f..d65f8d3 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1144,6 +1144,8 @@
 static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
 #endif
 
+extern void svc_tcp_prep_reply_hdr(struct svc_rqst *);
+
 /*
  * Common routine for processing the RPC request.
  */
@@ -1172,7 +1174,8 @@
 	clear_bit(RQ_DROPME, &rqstp->rq_flags);
 
 	/* Setup reply header */
-	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
+	if (rqstp->rq_prot == IPPROTO_TCP)
+		svc_tcp_prep_reply_hdr(rqstp);
 
 	svc_putu32(resv, rqstp->rq_xid);
 
@@ -1244,7 +1247,7 @@
 	 * for lower versions. RPC_PROG_MISMATCH seems to be the closest
 	 * fit.
 	 */
-	if (versp->vs_need_cong_ctrl &&
+	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
 	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
 		goto err_bad_vers;
 
@@ -1336,7 +1339,7 @@
 	return 0;
 
  close:
-	if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
+	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
 		svc_close_xprt(rqstp->rq_xprt);
 	dprintk("svc: svc_process close\n");
 	return 0;
@@ -1459,10 +1462,10 @@
 	dprintk("svc: %s(%p)\n", __func__, req);
 
 	/* Build the svc_rqst used by the common processing routine */
-	rqstp->rq_xprt = serv->sv_bc_xprt;
 	rqstp->rq_xid = req->rq_xid;
 	rqstp->rq_prot = req->rq_xprt->prot;
 	rqstp->rq_server = serv;
+	rqstp->rq_bc_net = req->rq_xprt->xprt_net;
 
 	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
 	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 83ccd02..6cf0fd3 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -469,10 +469,11 @@
  */
 void svc_reserve(struct svc_rqst *rqstp, int space)
 {
+	struct svc_xprt *xprt = rqstp->rq_xprt;
+
 	space += rqstp->rq_res.head[0].iov_len;
 
-	if (space < rqstp->rq_reserved) {
-		struct svc_xprt *xprt = rqstp->rq_xprt;
+	if (xprt && space < rqstp->rq_reserved) {
 		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
 		rqstp->rq_reserved = space;
 
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 5445145..97a8282 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -574,7 +574,7 @@
 		/* Don't enable netstamp, sunrpc doesn't
 		   need that much accuracy */
 	}
-	svsk->sk_sk->sk_stamp = skb->tstamp;
+	sock_write_timestamp(svsk->sk_sk, skb->tstamp);
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
 
 	len  = skb->len;
@@ -1198,7 +1198,7 @@
 /*
  * Setup response header. TCP has a 4B record length field.
  */
-static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
+void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
 {
 	struct kvec *resv = &rqstp->rq_res.head[0];
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 8602a5f..e8ad7dd 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -563,6 +563,99 @@
 				      DMA_TO_DEVICE);
 }
 
+/* If the xdr_buf has more elements than the device can
+ * transmit in a single RDMA Send, then the reply will
+ * have to be copied into a bounce buffer.
+ */
+static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
+				    struct xdr_buf *xdr,
+				    __be32 *wr_lst)
+{
+	int elements;
+
+	/* xdr->head */
+	elements = 1;
+
+	/* xdr->pages */
+	if (!wr_lst) {
+		unsigned int remaining;
+		unsigned long pageoff;
+
+		pageoff = xdr->page_base & ~PAGE_MASK;
+		remaining = xdr->page_len;
+		while (remaining) {
+			++elements;
+			remaining -= min_t(u32, PAGE_SIZE - pageoff,
+					   remaining);
+			pageoff = 0;
+		}
+	}
+
+	/* xdr->tail */
+	if (xdr->tail[0].iov_len)
+		++elements;
+
+	/* assume 1 SGE is needed for the transport header */
+	return elements >= rdma->sc_max_send_sges;
+}
+
+/* The device is not capable of sending the reply directly.
+ * Assemble the elements of @xdr into the transport header
+ * buffer.
+ */
+static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+				      struct svc_rdma_send_ctxt *ctxt,
+				      struct xdr_buf *xdr, __be32 *wr_lst)
+{
+	unsigned char *dst, *tailbase;
+	unsigned int taillen;
+
+	dst = ctxt->sc_xprt_buf;
+	dst += ctxt->sc_sges[0].length;
+
+	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
+	dst += xdr->head[0].iov_len;
+
+	tailbase = xdr->tail[0].iov_base;
+	taillen = xdr->tail[0].iov_len;
+	if (wr_lst) {
+		u32 xdrpad;
+
+		xdrpad = xdr_padsize(xdr->page_len);
+		if (taillen && xdrpad) {
+			tailbase += xdrpad;
+			taillen -= xdrpad;
+		}
+	} else {
+		unsigned int len, remaining;
+		unsigned long pageoff;
+		struct page **ppages;
+
+		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+		pageoff = xdr->page_base & ~PAGE_MASK;
+		remaining = xdr->page_len;
+		while (remaining) {
+			len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+
+			memcpy(dst, page_address(*ppages), len);
+			remaining -= len;
+			dst += len;
+			pageoff = 0;
+		}
+	}
+
+	if (taillen)
+		memcpy(dst, tailbase, taillen);
+
+	ctxt->sc_sges[0].length += xdr->len;
+	ib_dma_sync_single_for_device(rdma->sc_pd->device,
+				      ctxt->sc_sges[0].addr,
+				      ctxt->sc_sges[0].length,
+				      DMA_TO_DEVICE);
+
+	return 0;
+}
+
 /* svc_rdma_map_reply_msg - Map the buffer holding RPC message
  * @rdma: controlling transport
  * @ctxt: send_ctxt for the Send WR
@@ -585,8 +678,10 @@
 	u32 xdr_pad;
 	int ret;
 
-	if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-		return -EIO;
+	if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
+		return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
+
+	++ctxt->sc_cur_sge_no;
 	ret = svc_rdma_dma_map_buf(rdma, ctxt,
 				   xdr->head[0].iov_base,
 				   xdr->head[0].iov_len);
@@ -617,8 +712,7 @@
 	while (remaining) {
 		len = min_t(u32, PAGE_SIZE - page_off, remaining);
 
-		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-			return -EIO;
+		++ctxt->sc_cur_sge_no;
 		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
 					    page_off, len);
 		if (ret < 0)
@@ -632,8 +726,7 @@
 	len = xdr->tail[0].iov_len;
 tail:
 	if (len) {
-		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-			return -EIO;
+		++ctxt->sc_cur_sge_no;
 		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
 		if (ret < 0)
 			return ret;
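The new helpers above decide between scatter/gather and a pull-up copy by estimating how many Send SGEs the reply needs; the companion svc_rdma_transport.c hunk that follows provisions sc_max_send_sges as 3 (transport header, head iovec, tail iovec) plus one per page of the maximum request size, capped at the device's max_send_sge. A userspace sketch of the estimate in svc_rdma_pull_up_needed(), with simplified names and only the no-Write-list case shown:

```c
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

/* One SGE per (possibly partial) page of page-list data. */
static unsigned int sges_for_pages(unsigned int page_base,
				   unsigned int page_len)
{
	unsigned int off = page_base % DEMO_PAGE_SIZE;
	unsigned int n = 0;

	while (page_len) {
		unsigned int chunk = DEMO_PAGE_SIZE - off;

		if (chunk > page_len)
			chunk = page_len;
		page_len -= chunk;
		off = 0;
		n++;
	}
	return n;
}

static bool pull_up_needed(unsigned int page_base, unsigned int page_len,
			   unsigned int tail_len, unsigned int max_send_sges)
{
	unsigned int elements = 1;		/* xdr head */

	elements += sges_for_pages(page_base, page_len);
	if (tail_len)
		elements++;			/* xdr tail */
	/* one more SGE is assumed reserved for the transport header */
	return elements >= max_send_sges;
}

int main(void)
{
	/* 3 pages of data starting 100 bytes into a page -> 4 page SGEs,
	 * 6 total; a 6-SGE device must pull up, a 16-SGE one need not. */
	printf("%d\n", pull_up_needed(100, 3 * DEMO_PAGE_SIZE, 32, 6));
	printf("%d\n", pull_up_needed(100, 3 * DEMO_PAGE_SIZE, 32, 16));
	return 0;
}
```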
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 2848caf..ce5c610 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -475,13 +475,12 @@
 
 	/* Qualify the transport resource defaults with the
 	 * capabilities of this particular device */
-	newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
-	/* transport hdr, head iovec, one page list entry, tail iovec */
-	if (newxprt->sc_max_send_sges < 4) {
-		pr_err("svcrdma: too few Send SGEs available (%d)\n",
-		       newxprt->sc_max_send_sges);
-		goto errout;
-	}
+	/* Transport header, head iovec, tail iovec */
+	newxprt->sc_max_send_sges = 3;
+	/* Add one SGE per page list entry */
+	newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
+	if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
+		newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
 	newxprt->sc_max_req_size = svcrdma_max_req_size;
 	newxprt->sc_max_requests = svcrdma_max_requests;
 	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 956a5ea..3d6bf79 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -872,7 +872,7 @@
 	for (i = 0; i <= buf->rb_sc_last; i++) {
 		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
 		if (!sc)
-			goto out_destroy;
+			return -ENOMEM;
 
 		sc->sc_xprt = r_xprt;
 		buf->rb_sc_ctxs[i] = sc;
@@ -880,10 +880,6 @@
 	buf->rb_flags = 0;
 
 	return 0;
-
-out_destroy:
-	rpcrdma_sendctxs_destroy(buf);
-	return -ENOMEM;
 }
 
 /* The sendctx queue is not guaranteed to have a size that is a
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6b7539c..7d8cce1 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2244,8 +2244,8 @@
 	trace_rpc_socket_connect(xprt, sock, 0);
 	status = 0;
 out:
-	xprt_unlock_connect(xprt, transport);
 	xprt_clear_connecting(xprt);
+	xprt_unlock_connect(xprt, transport);
 	xprt_wake_pending_tasks(xprt, status);
 }
 
@@ -2480,8 +2480,8 @@
 	}
 	status = -EAGAIN;
 out:
-	xprt_unlock_connect(xprt, transport);
 	xprt_clear_connecting(xprt);
+	xprt_unlock_connect(xprt, transport);
 	xprt_wake_pending_tasks(xprt, status);
 }
 
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 645c160..2649a0a 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -317,7 +317,6 @@
 	res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
 	if (res) {
 		bearer_disable(net, b);
-		kfree(b);
 		errstr = "failed to create discoverer";
 		goto rejected;
 	}
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 6376467..0b21187 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -87,6 +87,11 @@
 	return limit;
 }
 
+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
+{
+	return TLV_GET_LEN(tlv) - TLV_SPACE(0);
+}
+
 static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
 {
 	struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
@@ -166,6 +171,11 @@
 	return buf;
 }
 
+static inline bool string_is_valid(char *s, int len)
+{
+	return memchr(s, '\0', len) ? true : false;
+}
+
 static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
 				   struct tipc_nl_compat_msg *msg,
 				   struct sk_buff *arg)
@@ -379,6 +389,7 @@
 	struct nlattr *prop;
 	struct nlattr *bearer;
 	struct tipc_bearer_config *b;
+	int len;
 
 	b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
 
@@ -386,6 +397,10 @@
 	if (!bearer)
 		return -EMSGSIZE;
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+	if (!string_is_valid(b->name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
 		return -EMSGSIZE;
 
@@ -411,6 +426,7 @@
 {
 	char *name;
 	struct nlattr *bearer;
+	int len;
 
 	name = (char *)TLV_DATA(msg->req);
 
@@ -418,6 +434,10 @@
 	if (!bearer)
 		return -EMSGSIZE;
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+	if (!string_is_valid(name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
 		return -EMSGSIZE;
 
@@ -478,6 +498,7 @@
 	struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
 	struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
 	int err;
+	int len;
 
 	if (!attrs[TIPC_NLA_LINK])
 		return -EINVAL;
@@ -504,6 +525,11 @@
 		return err;
 
 	name = (char *)TLV_DATA(msg->req);
+
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	if (!string_is_valid(name, len))
+		return -EINVAL;
+
 	if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
 		return 0;
 
@@ -644,6 +670,7 @@
 	struct nlattr *prop;
 	struct nlattr *media;
 	struct tipc_link_config *lc;
+	int len;
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
@@ -651,6 +678,10 @@
 	if (!media)
 		return -EMSGSIZE;
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+	if (!string_is_valid(lc->name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
 		return -EMSGSIZE;
 
@@ -671,6 +702,7 @@
 	struct nlattr *prop;
 	struct nlattr *bearer;
 	struct tipc_link_config *lc;
+	int len;
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
@@ -678,6 +710,10 @@
 	if (!bearer)
 		return -EMSGSIZE;
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+	if (!string_is_valid(lc->name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
 		return -EMSGSIZE;
 
@@ -726,9 +762,14 @@
 	struct tipc_link_config *lc;
 	struct tipc_bearer *bearer;
 	struct tipc_media *media;
+	int len;
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	if (!string_is_valid(lc->name, len))
+		return -EINVAL;
+
 	media = tipc_media_find(lc->name);
 	if (media) {
 		cmd->doit = &__tipc_nl_media_set;
@@ -750,6 +791,7 @@
 {
 	char *name;
 	struct nlattr *link;
+	int len;
 
 	name = (char *)TLV_DATA(msg->req);
 
@@ -757,6 +799,10 @@
 	if (!link)
 		return -EMSGSIZE;
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	if (!string_is_valid(name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
 		return -EMSGSIZE;
 
@@ -778,6 +824,8 @@
 	};
 
 	ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+	if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
+		return -EINVAL;
 
 	depth = ntohl(ntq->depth);
 
@@ -1201,7 +1249,7 @@
 	}
 
 	len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
-	if (len && !TLV_OK(msg.req, len)) {
+	if (!len || !TLV_OK(msg.req, len)) {
 		msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
 		err = -EOPNOTSUPP;
 		goto send;
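Each compat handler above now derives a bound from TLV_GET_DATA_LEN() and rejects any name that is not NUL-terminated within the TLV payload, closing out-of-bounds reads through attacker-controlled bearer/link/media names. A userspace sketch of the two helpers and the check; the TLV layout and byte-order handling are simplified relative to TIPC's real struct tlv_desc:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_desc {
	uint16_t tlv_len;	/* header + payload length */
	uint16_t tlv_type;
	char data[8];		/* fixed-size payload for the demo */
};

#define TLV_HDRLEN offsetof(struct tlv_desc, data)

/* Payload length implied by the TLV header, as in TLV_GET_DATA_LEN(). */
static int tlv_get_data_len(const struct tlv_desc *tlv)
{
	return tlv->tlv_len - (int)TLV_HDRLEN;
}

/* A string is only safe to pass on if a NUL lies within the bound. */
static bool string_is_valid(const char *s, int len)
{
	return memchr(s, '\0', len) != NULL;
}

int main(void)
{
	struct tlv_desc tlv = { .tlv_len = TLV_HDRLEN + 8, .tlv_type = 1 };

	memcpy(tlv.data, "bearer1", 8);		/* includes NUL: valid   */
	printf("%d\n", string_is_valid(tlv.data, tlv_get_data_len(&tlv)));

	memset(tlv.data, 'A', sizeof(tlv.data));	/* no NUL: rejected */
	printf("%d\n", string_is_valid(tlv.data, tlv_get_data_len(&tlv)));
	return 0;
}
```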
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 4880197..32556f4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -624,6 +624,12 @@
 
 	__skb_queue_head_init(&xmitq);
 
+	/* Initialize the node keepalive interval to a larger value
+	 * (10 seconds); it is recalculated below from the lowest
+	 * link tolerance.
+	 */
+	tipc_node_read_lock(n);
+	n->keepalive_intv = 10000;
+	tipc_node_read_unlock(n);
 	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
 		tipc_node_read_lock(n);
 		le = &n->links[bearer_id];
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 366ce0b..88c307e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -377,11 +377,13 @@
 
 #define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
 ({                                                                             \
+	DEFINE_WAIT_FUNC(wait_, woken_wake_function);                          \
 	struct sock *sk_;						       \
 	int rc_;							       \
 									       \
 	while ((rc_ = !(condition_))) {					       \
-		DEFINE_WAIT_FUNC(wait_, woken_wake_function);	               \
+		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */            \
+		smp_rmb();                                                     \
 		sk_ = (sock_)->sk;					       \
 		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
 		if (rc_)						       \
@@ -878,7 +880,6 @@
 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
 	struct net *net = sock_net(sk);
 	struct tipc_member *mb = NULL;
 	u32 node, port;
@@ -892,7 +893,9 @@
 	/* Block or return if destination link or member is congested */
 	rc = tipc_wait_for_cond(sock, &timeout,
 				!tipc_dest_find(&tsk->cong_links, node, 0) &&
-				!tipc_group_cong(grp, node, port, blks, &mb));
+				tsk->group &&
+				!tipc_group_cong(tsk->group, node, port, blks,
+						 &mb));
 	if (unlikely(rc))
 		return rc;
 
@@ -922,7 +925,6 @@
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct list_head *cong_links = &tsk->cong_links;
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
-	struct tipc_group *grp = tsk->group;
 	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_member *first = NULL;
 	struct tipc_member *mbr = NULL;
@@ -939,9 +941,10 @@
 	type = msg_nametype(hdr);
 	inst = dest->addr.name.name.instance;
 	scope = msg_lookup_scope(hdr);
-	exclude = tipc_group_exclude(grp);
 
 	while (++lookups < 4) {
+		exclude = tipc_group_exclude(tsk->group);
+
 		first = NULL;
 
 		/* Look for a non-congested destination member, if any */
@@ -950,7 +953,8 @@
 						 &dstcnt, exclude, false))
 				return -EHOSTUNREACH;
 			tipc_dest_pop(&dsts, &node, &port);
-			cong = tipc_group_cong(grp, node, port, blks, &mbr);
+			cong = tipc_group_cong(tsk->group, node, port, blks,
+					       &mbr);
 			if (!cong)
 				break;
 			if (mbr == first)
@@ -969,7 +973,8 @@
 		/* Block or return if destination link or member is congested */
 		rc = tipc_wait_for_cond(sock, &timeout,
 					!tipc_dest_find(cong_links, node, 0) &&
-					!tipc_group_cong(grp, node, port,
+					tsk->group &&
+					!tipc_group_cong(tsk->group, node, port,
 							 blks, &mbr));
 		if (unlikely(rc))
 			return rc;
@@ -1004,8 +1009,7 @@
 	struct sock *sk = sock->sk;
 	struct net *net = sock_net(sk);
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
-	struct tipc_nlist *dsts = tipc_group_dests(grp);
+	struct tipc_nlist *dsts;
 	struct tipc_mc_method *method = &tsk->mc_method;
 	bool ack = method->mandatory && method->rcast;
 	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
@@ -1014,15 +1018,17 @@
 	struct sk_buff_head pkts;
 	int rc = -EHOSTUNREACH;
 
-	if (!dsts->local && !dsts->remote)
-		return -EHOSTUNREACH;
-
 	/* Block or return if any destination link or member is congested */
-	rc = tipc_wait_for_cond(sock, &timeout,	!tsk->cong_link_cnt &&
-				!tipc_group_bc_cong(grp, blks));
+	rc = tipc_wait_for_cond(sock, &timeout,
+				!tsk->cong_link_cnt && tsk->group &&
+				!tipc_group_bc_cong(tsk->group, blks));
 	if (unlikely(rc))
 		return rc;
 
+	dsts = tipc_group_dests(tsk->group);
+	if (!dsts->local && !dsts->remote)
+		return -EHOSTUNREACH;
+
 	/* Complete message header */
 	if (dest) {
 		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
@@ -1034,7 +1040,7 @@
 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
 	msg_set_destport(hdr, 0);
 	msg_set_destnode(hdr, 0);
-	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
+	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
 
 	/* Avoid getting stuck with repeated forced replicasts */
 	msg_set_grp_bc_ack_req(hdr, ack);
@@ -1314,7 +1320,7 @@
 
 	if (unlikely(!dest)) {
 		dest = &tsk->peer;
-		if (!syn || dest->family != AF_TIPC)
+		if (!syn && dest->family != AF_TIPC)
 			return -EDESTADDRREQ;
 	}
 
@@ -1957,6 +1963,8 @@
 		return;
 	case SOCK_WAKEUP:
 		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
+		/* coupled with smp_rmb() in tipc_wait_for_cond() */
+		smp_wmb();
 		tsk->cong_link_cnt--;
 		wakeup = true;
 		break;
@@ -2683,11 +2691,15 @@
 		rhashtable_walk_start(&iter);
 
 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
-			spin_lock_bh(&tsk->sk.sk_lock.slock);
+			sock_hold(&tsk->sk);
+			rhashtable_walk_stop(&iter);
+			lock_sock(&tsk->sk);
 			msg = &tsk->phdr;
 			msg_set_prevnode(msg, tipc_own_addr(net));
 			msg_set_orignode(msg, tipc_own_addr(net));
-			spin_unlock_bh(&tsk->sk.sk_lock.slock);
+			release_sock(&tsk->sk);
+			rhashtable_walk_start(&iter);
+			sock_put(&tsk->sk);
 		}
 
 		rhashtable_walk_stop(&iter);
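The socket.c hunks pair an smp_wmb() in the SOCK_WAKEUP path with an smp_rmb() in tipc_wait_for_cond(), so a woken waiter re-evaluating the condition cannot see a stale cong_link_cnt relative to the destination-list update. A single-threaded C11 analogue of that pairing, using release/acquire fences as stand-ins for the kernel's smp_wmb()/smp_rmb() (illustrative only, not the kernel primitives):

```c
#include <stdatomic.h>
#include <stdio.h>

static int cong_link_cnt = 1;		/* plain data, ordered by fences */
static atomic_int woken;

static void producer(void)
{
	cong_link_cnt = 0;			/* publish progress...       */
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb()       */
	atomic_store_explicit(&woken, 1, memory_order_relaxed);
}

static void consumer(void)
{
	while (!atomic_load_explicit(&woken, memory_order_relaxed))
		;				/* wait for the wakeup       */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb()       */
	printf("cong_link_cnt=%d\n", cong_link_cnt);	/* guaranteed to be 0 */
}

int main(void)
{
	producer();		/* sequential demo of the intended ordering */
	consumer();
	return 0;
}
```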
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index b84c005..d65eed8 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -404,7 +404,7 @@
 	ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
 	if (ret == -EWOULDBLOCK)
 		return -EWOULDBLOCK;
-	if (ret > 0) {
+	if (ret == sizeof(s)) {
 		read_lock_bh(&sk->sk_callback_lock);
 		ret = tipc_conn_rcv_sub(srv, con, &s);
 		read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 9783101..da2d311 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -245,10 +245,8 @@
 		}
 
 		err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
-		if (err) {
-			kfree_skb(_skb);
+		if (err)
 			goto out;
-		}
 	}
 	err = 0;
 out:
@@ -680,6 +678,11 @@
 	if (err)
 		goto err;
 
+	if (remote.proto != local.proto) {
+		err = -EINVAL;
+		goto err;
+	}
+
 	/* Autoconfigure own node identity if needed */
 	if (!tipc_own_id(net)) {
 		memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16);
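The udp_media.c hunk deletes a kfree_skb() because tipc_udp_xmit() already consumes the skb on failure, so freeing it again in the caller was a double free. A userspace sketch of that ownership convention, with hypothetical names:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical xmit routine that takes ownership of buf on every
 * path, success or failure -- the convention tipc_udp_xmit() follows. */
static int xmit_consumes(char *buf, int fail)
{
	int err = fail ? -1 : 0;

	free(buf);		/* consumed unconditionally */
	return err;
}

int main(void)
{
	char *buf = malloc(64);

	if (!buf)
		return 1;
	if (xmit_consumes(buf, 1)) {
		/* The removed line was the equivalent of free(buf) here,
		 * which would free the buffer a second time. */
		fprintf(stderr, "xmit failed\n");
	}
	return 0;
}
```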
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 523622d..a091c03 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -550,11 +550,14 @@
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tls_context *ctx;
 
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
 	if (!ctx)
 		return NULL;
 
 	icsk->icsk_ulp_data = ctx;
+	ctx->setsockopt = sk->sk_prot->setsockopt;
+	ctx->getsockopt = sk->sk_prot->getsockopt;
+	ctx->sk_proto_close = sk->sk_prot->close;
 	return ctx;
 }
 
@@ -685,9 +688,6 @@
 		rc = -ENOMEM;
 		goto out;
 	}
-	ctx->setsockopt = sk->sk_prot->setsockopt;
-	ctx->getsockopt = sk->sk_prot->getsockopt;
-	ctx->sk_proto_close = sk->sk_prot->close;
 
 	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
 	if (ip_ver == TLSV6 &&
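The tls_main.c hunks capture the original setsockopt/getsockopt/close callbacks at create_ctx() time, so the saved pointers exist as soon as the context does and teardown can always fall back to them; the allocation also becomes GFP_ATOMIC, presumably because the caller may not be allowed to sleep. A sketch of the save-before-override pattern with illustrative types:

```c
#include <stdio.h>
#include <stdlib.h>

struct proto_demo {
	void (*close)(void);
};

struct ctx_demo {
	void (*saved_close)(void);	/* original callback, kept safe */
};

static void tcp_close(void) { puts("tcp close"); }

static struct proto_demo tcp_prot = { .close = tcp_close };

/* Capture the original callbacks the moment the context exists,
 * before anything gets overridden. */
static struct ctx_demo *ctx_create(struct proto_demo *prot)
{
	struct ctx_demo *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;
	ctx->saved_close = prot->close;
	return ctx;
}

int main(void)
{
	struct ctx_demo *ctx = ctx_create(&tcp_prot);

	if (ctx) {
		ctx->saved_close();	/* always a safe fallback */
		free(ctx);
	}
	return 0;
}
```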
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c754f3a..f601933 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -888,7 +888,7 @@
 	addr->hash ^= sk->sk_type;
 
 	__unix_remove_socket(sk);
-	u->addr = addr;
+	smp_store_release(&u->addr, addr);
 	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
 	spin_unlock(&unix_table_lock);
 	err = 0;
@@ -1058,7 +1058,7 @@
 
 	err = 0;
 	__unix_remove_socket(sk);
-	u->addr = addr;
+	smp_store_release(&u->addr, addr);
 	__unix_insert_socket(list, sk);
 
 out_unlock:
@@ -1329,15 +1329,29 @@
 	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
 	otheru = unix_sk(other);
 
-	/* copy address information from listening to new sock*/
-	if (otheru->addr) {
-		refcount_inc(&otheru->addr->refcnt);
-		newu->addr = otheru->addr;
-	}
+	/* copy address information from listening to new sock
+	 *
+	 * The contents of *(otheru->addr) and otheru->path
+	 * are seen fully set up here, since we have found
+	 * otheru in hash under unix_table_lock.  Insertion
+	 * into the hash chain we'd found it in had been done
+	 * in an earlier critical area protected by unix_table_lock,
+	 * the same one where we'd set *(otheru->addr) contents,
+	 * as well as otheru->path and otheru->addr itself.
+	 *
+	 * Using smp_store_release() here to set newu->addr
+	 * is enough to make those stores, as well as stores
+	 * to newu->path visible to anyone who gets newu->addr
+	 * by smp_load_acquire().  IOW, the same guarantees
+	 * as for unix_sock instances bound in unix_bind() or
+	 * in unix_autobind().
+	 */
 	if (otheru->path.dentry) {
 		path_get(&otheru->path);
 		newu->path = otheru->path;
 	}
+	refcount_inc(&otheru->addr->refcnt);
+	smp_store_release(&newu->addr, otheru->addr);
 
 	/* Set credentials */
 	copy_peercred(sk, other);
@@ -1451,7 +1465,7 @@
 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
 {
 	struct sock *sk = sock->sk;
-	struct unix_sock *u;
+	struct unix_address *addr;
 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
 	int err = 0;
 
@@ -1466,19 +1480,15 @@
 		sock_hold(sk);
 	}
 
-	u = unix_sk(sk);
-	unix_state_lock(sk);
-	if (!u->addr) {
+	addr = smp_load_acquire(&unix_sk(sk)->addr);
+	if (!addr) {
 		sunaddr->sun_family = AF_UNIX;
 		sunaddr->sun_path[0] = 0;
 		err = sizeof(short);
 	} else {
-		struct unix_address *addr = u->addr;
-
 		err = addr->len;
 		memcpy(sunaddr, addr->name, addr->len);
 	}
-	unix_state_unlock(sk);
 	sock_put(sk);
 out:
 	return err;
@@ -2071,11 +2081,11 @@
 
 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
 {
-	struct unix_sock *u = unix_sk(sk);
+	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
 
-	if (u->addr) {
-		msg->msg_namelen = u->addr->len;
-		memcpy(msg->msg_name, u->addr->name, u->addr->len);
+	if (addr) {
+		msg->msg_namelen = addr->len;
+		memcpy(msg->msg_name, addr->name, addr->len);
 	}
 }
 
@@ -2579,15 +2589,14 @@
 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
-	unix_state_lock(sk);
-	path = unix_sk(sk)->path;
-	if (!path.dentry) {
-		unix_state_unlock(sk);
+	if (!smp_load_acquire(&unix_sk(sk)->addr))
 		return -ENOENT;
-	}
+
+	path = unix_sk(sk)->path;
+	if (!path.dentry)
+		return -ENOENT;
 
 	path_get(&path);
-	unix_state_unlock(sk);
 
 	fd = get_unused_fd_flags(O_CLOEXEC);
 	if (fd < 0)
@@ -2828,7 +2837,7 @@
 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
 			sock_i_ino(s));
 
-		if (u->addr) {
+		if (u->addr) {	// under unix_table_lock here
 			int i, len;
 			seq_putc(seq, ' ');
 
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e..3183d9b 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
 
 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
 {
-	struct unix_address *addr = unix_sk(sk)->addr;
+	/* might or might not have unix_table_lock */
+	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
 
 	if (!addr)
 		return 0;
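Both af_unix.c and diag.c rely on the same publication pattern: u->addr is stored exactly once, under unix_table_lock and only after the address is fully initialized, so lockless readers can fetch it with smp_load_acquire() and safely dereference the result. A small C11 analogue, with demo types; atomic release/acquire operations stand in for the kernel's smp_store_release()/smp_load_acquire():

```c
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct unix_address_demo {
	int len;
	char name[16];
};

static _Atomic(struct unix_address_demo *) addr_ptr;

/* Writer: initialize everything first, then publish the pointer. */
static void bind_addr(struct unix_address_demo *a)
{
	a->len = 4;
	strcpy(a->name, "demo");
	atomic_store_explicit(&addr_ptr, a, memory_order_release);
}

/* Lockless reader: acquire-load, check for NULL (unbound socket),
 * then dereference -- all initializing stores are visible. */
static void read_addr(void)
{
	struct unix_address_demo *a =
		atomic_load_explicit(&addr_ptr, memory_order_acquire);

	if (a)
		printf("%.*s\n", a->len, a->name);
}

int main(void)
{
	static struct unix_address_demo a;

	bind_addr(&a);
	read_addr();
	return 0;
}
```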
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 5d3cce9..15eb5d3 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -75,6 +75,9 @@
 {
 	struct virtio_vsock *vsock = virtio_vsock_get();
 
+	if (!vsock)
+		return VMADDR_CID_ANY;
+
 	return vsock->guest_cid;
 }
 
@@ -584,10 +587,6 @@
 
 	virtio_vsock_update_guest_cid(vsock);
 
-	ret = vsock_core_init(&virtio_transport.transport);
-	if (ret < 0)
-		goto out_vqs;
-
 	vsock->rx_buf_nr = 0;
 	vsock->rx_buf_max_nr = 0;
 	atomic_set(&vsock->queued_replies, 0);
@@ -618,8 +617,6 @@
 	mutex_unlock(&the_virtio_vsock_mutex);
 	return 0;
 
-out_vqs:
-	vsock->vdev->config->del_vqs(vsock->vdev);
 out:
 	kfree(vsock);
 	mutex_unlock(&the_virtio_vsock_mutex);
@@ -637,6 +634,9 @@
 	flush_work(&vsock->event_work);
 	flush_work(&vsock->send_pkt_work);
 
+	/* Reset all connected sockets when the device disappears */
+	vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+
 	vdev->config->reset(vdev);
 
 	mutex_lock(&vsock->rx_lock);
@@ -669,7 +669,6 @@
 
 	mutex_lock(&the_virtio_vsock_mutex);
 	the_virtio_vsock = NULL;
-	vsock_core_exit();
 	mutex_unlock(&the_virtio_vsock_mutex);
 
 	vdev->config->del_vqs(vdev);
@@ -702,14 +701,28 @@
 	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
 	if (!virtio_vsock_workqueue)
 		return -ENOMEM;
+
 	ret = register_virtio_driver(&virtio_vsock_driver);
 	if (ret)
-		destroy_workqueue(virtio_vsock_workqueue);
+		goto out_wq;
+
+	ret = vsock_core_init(&virtio_transport.transport);
+	if (ret)
+		goto out_vdr;
+
+	return 0;
+
+out_vdr:
+	unregister_virtio_driver(&virtio_vsock_driver);
+out_wq:
+	destroy_workqueue(virtio_vsock_workqueue);
 	return ret;
+
 }
 
 static void __exit virtio_vsock_exit(void)
 {
+	vsock_core_exit();
 	unregister_virtio_driver(&virtio_vsock_driver);
 	destroy_workqueue(virtio_vsock_workqueue);
 }
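The virtio hunks move vsock_core_init() out of the probe path into module init, unwind failures in reverse order of setup, and make module exit the exact mirror of a successful init. A generic sketch of that idiom, with hypothetical names:

```c
#include <stdio.h>

static int register_driver(void)    { puts("driver registered");   return 0; }
static void unregister_driver(void) { puts("driver unregistered"); }
static int core_init(void)          { puts("core up");             return 0; }
static void core_exit(void)         { puts("core down"); }

static int demo_init(void)
{
	int ret = register_driver();

	if (ret)
		return ret;
	ret = core_init();
	if (ret)
		goto out_driver;	/* undo only what succeeded */
	return 0;

out_driver:
	unregister_driver();
	return ret;
}

/* Exit tears down in the reverse order of demo_init(). */
static void demo_exit(void)
{
	core_exit();
	unregister_driver();
}

int main(void)
{
	if (!demo_init())
		demo_exit();
	return 0;
}
```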
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index cb332ad..c3d5ab0 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -264,6 +264,31 @@
 }
 
 static int
+vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
+				      struct sockaddr_vm *dst,
+				      enum vmci_transport_packet_type type,
+				      u64 size,
+				      u64 mode,
+				      struct vmci_transport_waiting_info *wait,
+				      u16 proto,
+				      struct vmci_handle handle)
+{
+	struct vmci_transport_packet *pkt;
+	int err;
+
+	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+
+	err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
+						mode, wait, proto, handle,
+						true);
+	kfree(pkt);
+
+	return err;
+}
+
+static int
 vmci_transport_send_control_pkt(struct sock *sk,
 				enum vmci_transport_packet_type type,
 				u64 size,
@@ -272,9 +297,7 @@
 				u16 proto,
 				struct vmci_handle handle)
 {
-	struct vmci_transport_packet *pkt;
 	struct vsock_sock *vsk;
-	int err;
 
 	vsk = vsock_sk(sk);
 
@@ -284,17 +307,10 @@
 	if (!vsock_addr_bound(&vsk->remote_addr))
 		return -EINVAL;
 
-	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
-	if (!pkt)
-		return -ENOMEM;
-
-	err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
-						&vsk->remote_addr, type, size,
-						mode, wait, proto, handle,
-						true);
-	kfree(pkt);
-
-	return err;
+	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
+						     &vsk->remote_addr,
+						     type, size, mode,
+						     wait, proto, handle);
 }
 
 static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
@@ -312,12 +328,29 @@
 static int vmci_transport_send_reset(struct sock *sk,
 				     struct vmci_transport_packet *pkt)
 {
+	struct sockaddr_vm *dst_ptr;
+	struct sockaddr_vm dst;
+	struct vsock_sock *vsk;
+
 	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
 		return 0;
-	return vmci_transport_send_control_pkt(sk,
-					VMCI_TRANSPORT_PACKET_TYPE_RST,
-					0, 0, NULL, VSOCK_PROTO_INVALID,
-					VMCI_INVALID_HANDLE);
+
+	vsk = vsock_sk(sk);
+
+	if (!vsock_addr_bound(&vsk->local_addr))
+		return -EINVAL;
+
+	if (vsock_addr_bound(&vsk->remote_addr)) {
+		dst_ptr = &vsk->remote_addr;
+	} else {
+		vsock_addr_init(&dst, pkt->dg.src.context,
+				pkt->src_port);
+		dst_ptr = &dst;
+	}
+	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
+					     VMCI_TRANSPORT_PACKET_TYPE_RST,
+					     0, 0, NULL, VSOCK_PROTO_INVALID,
+					     VMCI_INVALID_HANDLE);
 }
 
 static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
@@ -1618,6 +1651,10 @@
 
 static void vmci_transport_destruct(struct vsock_sock *vsk)
 {
+	/* transport can be NULL if we hit a failure at init() time */
+	if (!vmci_trans(vsk))
+		return;
+
 	/* Ensure that the detach callback doesn't use the sk/vsk
 	 * we are about to destruct.
 	 */
diff --git a/net/wireless/.gitignore b/net/wireless/.gitignore
index 61cbc30..4a84ec1 100644
--- a/net/wireless/.gitignore
+++ b/net/wireless/.gitignore
@@ -1,2 +1,3 @@
+regdb.c
 shipped-certs.c
 extra-certs.c
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 4172204..f5062ab 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -175,8 +175,30 @@
 
 	  If unsure, say N.
 
+config CFG80211_INTERNAL_REGDB
+	bool "use statically compiled regulatory rules database" if EXPERT
+	default n
+	depends on CFG80211
+	---help---
+	  This option generates an internal data structure representing
+	  the wireless regulatory rules described in net/wireless/db.txt
+	  and includes code to query that database. This is an alternative
+	  to using CRDA for defining regulatory rules for the kernel.
+
+	  Using this option requires some parsing of the db.txt at build time,
+	  the parser will be upkept with the latest wireless-regdb updates but
+	  older wireless-regdb formats will be ignored. The parser may later
+	  be replaced to avoid issues with conflicts on versions of
+	  wireless-regdb.
+
+	  For details see:
+
+	  http://wireless.kernel.org/en/developers/Regulatory
+
+	  Most distributions have a CRDA package. So if unsure, say N.
+
 config CFG80211_CRDA_SUPPORT
-	bool "support CRDA" if EXPERT
+	bool "support CRDA" if CFG80211_INTERNAL_REGDB
 	default y
 	help
 	  You should enable this option unless you know for sure you have no
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 1d84f91..fab1c17 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -15,9 +15,16 @@
 cfg80211-$(CONFIG_OF) += of.o
 cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
 cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
+cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
 
 CFLAGS_trace.o := -I$(src)
 
+clean-files += shipped-certs.c extra-certs.c
+$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
+	@$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@
+
+clean-files += regdb.c
+
 cfg80211-$(CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS) += shipped-certs.o
 ifneq ($(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR),)
 cfg80211-y += extra-certs.o
@@ -55,4 +62,3 @@
 	      echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \
 	  ) > $@)
 
-clean-files += shipped-certs.c extra-certs.c
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 7f52ef5..4dda927 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,6 +3,7 @@
  * Wireless configuration interface internals.
  *
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2018-2019 Intel Corporation
  */
 #ifndef __NET_WIRELESS_CORE_H
 #define __NET_WIRELESS_CORE_H
@@ -170,12 +171,23 @@
 static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss)
 {
 	atomic_inc(&bss->hold);
+	if (bss->pub.transmitted_bss) {
+		bss = container_of(bss->pub.transmitted_bss,
+				   struct cfg80211_internal_bss, pub);
+		atomic_inc(&bss->hold);
+	}
 }
 
 static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss)
 {
 	int r = atomic_dec_return(&bss->hold);
 	WARN_ON(r < 0);
+	if (bss->pub.transmitted_bss) {
+		bss = container_of(bss->pub.transmitted_bss,
+				   struct cfg80211_internal_bss, pub);
+		r = atomic_dec_return(&bss->hold);
+		WARN_ON(r < 0);
+	}
 }
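cfg80211_hold_bss() and cfg80211_unhold_bss() above walk from the embedded public struct cfg80211_bss back to its wrapping internal struct via container_of(), so a hold also pins the transmitting BSS of a multi-BSSID set. A userspace sketch of that pointer arithmetic, with demo types (the kernel macro additionally type-checks the member):

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pub_bss {
	struct pub_bss *transmitted_bss;
};

struct internal_bss {
	int hold;
	struct pub_bss pub;	/* public struct embedded in the container */
};

static void hold_bss(struct internal_bss *bss)
{
	bss->hold++;
	if (bss->pub.transmitted_bss) {
		/* hop from the embedded pub back to its container */
		bss = container_of(bss->pub.transmitted_bss,
				   struct internal_bss, pub);
		bss->hold++;	/* pin the transmitting BSS too */
	}
}

int main(void)
{
	struct internal_bss tx = { 0 }, nontx = { 0 };

	nontx.pub.transmitted_bss = &tx.pub;
	hold_bss(&nontx);
	printf("nontx=%d tx=%d\n", nontx.hold, tx.hold);	/* 1 1 */
	return 0;
}
```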
 
 
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
new file mode 100644
index 0000000..baf2426
--- /dev/null
+++ b/net/wireless/genregdb.awk
@@ -0,0 +1,158 @@
+#!/usr/bin/awk -f
+#
+# genregdb.awk -- generate regdb.c from db.txt
+#
+# Actually, it reads from stdin (presumed to be db.txt) and writes
+# to stdout (presumed to be regdb.c), but close enough...
+#
+# Copyright 2009 John W. Linville <linville@tuxdriver.com>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+BEGIN {
+	active = 0
+	rules = 0;
+	print "/*"
+	print " * DO NOT EDIT -- file generated from data in db.txt"
+	print " */"
+	print ""
+	print "#include <linux/nl80211.h>"
+	print "#include <net/cfg80211.h>"
+	print "#include \"regdb.h\""
+	print ""
+	regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n"
+}
+
+function parse_country_head() {
+	country=$2
+	sub(/:/, "", country)
+	printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
+	printf "\t.alpha2 = \"%s\",\n", country
+	if ($NF ~ /DFS-ETSI/)
+		printf "\t.dfs_region = NL80211_DFS_ETSI,\n"
+	else if ($NF ~ /DFS-FCC/)
+		printf "\t.dfs_region = NL80211_DFS_FCC,\n"
+	else if ($NF ~ /DFS-JP/)
+		printf "\t.dfs_region = NL80211_DFS_JP,\n"
+	printf "\t.reg_rules = {\n"
+	active = 1
+	regdb = regdb "\t&regdom_" country ",\n"
+}
+
+function parse_reg_rule()
+{
+	flag_starts_at = 7
+
+	start = $1
+	sub(/\(/, "", start)
+	end = $3
+	bw = $5
+	sub(/\),/, "", bw)
+	gain = 0
+	power = $6
+	# power might be in mW...
+	units = $7
+	dfs_cac = 0
+
+	sub(/\(/, "", power)
+	sub(/\),/, "", power)
+	sub(/\),/, "", units)
+	sub(/\)/, "", units)
+
+	if (units == "mW") {
+		flag_starts_at = 8
+		power = 10 * log(power)/log(10)
+		if ($8 ~ /[[:digit:]]/) {
+			flag_starts_at = 9
+			dfs_cac = $8
+		}
+	} else {
+		if ($7 ~ /[[:digit:]]/) {
+			flag_starts_at = 8
+			dfs_cac = $7
+		}
+	}
+	sub(/\(/, "", dfs_cac)
+	sub(/\),/, "", dfs_cac)
+	flagstr = ""
+	for (i=flag_starts_at; i<=NF; i++)
+		flagstr = flagstr $i
+	split(flagstr, flagarray, ",")
+	flags = ""
+	for (arg in flagarray) {
+		if (flagarray[arg] == "NO-OFDM") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_OFDM | "
+		} else if (flagarray[arg] == "NO-CCK") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_CCK | "
+		} else if (flagarray[arg] == "NO-INDOOR") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_INDOOR | "
+		} else if (flagarray[arg] == "NO-OUTDOOR") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_OUTDOOR | "
+		} else if (flagarray[arg] == "DFS") {
+			flags = flags "\n\t\t\tNL80211_RRF_DFS | "
+		} else if (flagarray[arg] == "PTP-ONLY") {
+			flags = flags "\n\t\t\tNL80211_RRF_PTP_ONLY | "
+		} else if (flagarray[arg] == "PTMP-ONLY") {
+			flags = flags "\n\t\t\tNL80211_RRF_PTMP_ONLY | "
+		} else if (flagarray[arg] == "PASSIVE-SCAN") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_IR | "
+		} else if (flagarray[arg] == "NO-IBSS") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_IR | "
+		} else if (flagarray[arg] == "NO-IR") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_IR | "
+		} else if (flagarray[arg] == "AUTO-BW") {
+			flags = flags "\n\t\t\tNL80211_RRF_AUTO_BW | "
+		}
+
+	}
+	flags = flags "0"
+	printf "\t\tREG_RULE_EXT(%d, %d, %d, %d, %.0f, %d, %s),\n", start, end, bw, gain, power, dfs_cac, flags
+	rules++
+}
+
+function print_tail_country()
+{
+	active = 0
+	printf "\t},\n"
+	printf "\t.n_reg_rules = %d\n", rules
+	printf "};\n\n"
+	rules = 0;
+}
+
+/^[ \t]*#/ {
+	# Ignore
+}
+
+!active && /^[ \t]*$/ {
+	# Ignore
+}
+
+!active && /country/ {
+	parse_country_head()
+}
+
+active && /^[ \t]*\(/ {
+	parse_reg_rule()
+}
+
+active && /^[ \t]*$/ {
+	print_tail_country()
+}
+
+END {
+	if (active)
+		print_tail_country()
+	print regdb "};"
+	print ""
+	print "int reg_regdb_size = ARRAY_SIZE(reg_regdb);"
+}
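To make the generator concrete: assuming a hypothetical, minimal db.txt containing only

	country US: DFS-FCC
		(2402 - 2472 @ 40), (30)

the script above emits approximately the following regdb.c (traced from the awk rules; the real db.txt ships with wireless-regdb and is far larger, and the output compiles only inside the kernel tree since it pulls in the nl80211/cfg80211 headers):

```c
/*
 * DO NOT EDIT -- file generated from data in db.txt
 */

#include <linux/nl80211.h>
#include <net/cfg80211.h>
#include "regdb.h"

static const struct ieee80211_regdomain regdom_US = {
	.alpha2 = "US",
	.dfs_region = NL80211_DFS_FCC,
	.reg_rules = {
		REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
	},
	.n_reg_rules = 1
};

const struct ieee80211_regdomain *reg_regdb[] = {
	&regdom_US,
};

int reg_regdb_size = ARRAY_SIZE(reg_regdb);
```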
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index cf91be3..ff400d0 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -8997,8 +8997,10 @@
 	if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
 		int r = validate_pae_over_nl80211(rdev, info);
 
-		if (r < 0)
+		if (r < 0) {
+			kzfree(connkeys);
 			return r;
+		}
 
 		ibss.control_port_over_nl80211 = true;
 	}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 97c0e6b..1c2d500 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -61,6 +61,7 @@
 #include "core.h"
 #include "reg.h"
 #include "rdev-ops.h"
+#include "regdb.h"
 #include "nl80211.h"
 
 /*
@@ -501,6 +502,38 @@
 	return 0;
 }
 
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+static int reg_query_builtin(const char *alpha2)
+{
+	const struct ieee80211_regdomain *regdom = NULL;
+	unsigned int i;
+
+	for (i = 0; i < reg_regdb_size; i++) {
+		if (alpha2_equal(alpha2, reg_regdb[i]->alpha2)) {
+			regdom = reg_copy_regd(reg_regdb[i]);
+			break;
+		}
+	}
+	if (!regdom)
+		return -ENODATA;
+
+	return reg_schedule_apply(regdom);
+}
+
+/* Feel free to add any other sanity checks here */
+static void reg_regdb_size_check(void)
+{
+	/* We should ideally BUILD_BUG_ON() but then random builds would fail */
+	WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it...");
+}
+#else
+static inline void reg_regdb_size_check(void) {}
+static inline int reg_query_builtin(const char *alpha2)
+{
+	return -ENODATA;
+}
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
 #ifdef CONFIG_CFG80211_CRDA_SUPPORT
 /* Max number of consecutive attempts to communicate with CRDA  */
 #define REG_MAX_CRDA_TIMEOUTS 10
@@ -1097,6 +1130,10 @@
 
 static bool reg_query_database(struct regulatory_request *request)
 {
+	/* query internal regulatory database (if it exists) */
+	if (reg_query_builtin(request->alpha2) == 0)
+		return true;
+
 	if (query_regdb_file(request->alpha2) == 0)
 		return true;
 
@@ -1254,7 +1291,7 @@
  * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
  * however it is safe for now to assume that a frequency rule should not be
  * part of a frequency's band if the start freq or end freq are off by more
- * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the
+ * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the
  * 60 GHz band.
  * This resolution can be lowered and should be considered as we add
  * regulatory rule support for other "bands".
@@ -1269,7 +1306,7 @@
 	 * with the Channel starting frequency above 45 GHz.
 	 */
 	u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
-			10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+			20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
 	if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
 		return true;
 	if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
@@ -3880,6 +3917,8 @@
 	spin_lock_init(&reg_pending_beacons_lock);
 	spin_lock_init(&reg_indoor_lock);
 
+	reg_regdb_size_check();
+
 	rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
 
 	user_alpha2[0] = '9';
diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h
new file mode 100644
index 0000000..3279cfc
--- /dev/null
+++ b/net/wireless/regdb.h
@@ -0,0 +1,23 @@
+#ifndef __REGDB_H__
+#define __REGDB_H__
+
+/*
+ * Copyright 2009 John W. Linville <linville@tuxdriver.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+extern const struct ieee80211_regdomain *reg_regdb[];
+extern int reg_regdb_size;
+
+#endif /* __REGDB_H__ */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 3391c14..4529d74 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,6 +5,7 @@
  * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2016	Intel Deutschland GmbH
+ * Copyright (C) 2018-2019 Intel Corporation
  */
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -109,6 +110,12 @@
 				   pub);
 		bss->refcount++;
 	}
+	if (bss->pub.transmitted_bss) {
+		bss = container_of(bss->pub.transmitted_bss,
+				   struct cfg80211_internal_bss,
+				   pub);
+		bss->refcount++;
+	}
 }
 
 static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
@@ -125,6 +132,18 @@
 		if (hbss->refcount == 0)
 			bss_free(hbss);
 	}
+
+	if (bss->pub.transmitted_bss) {
+		struct cfg80211_internal_bss *tbss;
+
+		tbss = container_of(bss->pub.transmitted_bss,
+				    struct cfg80211_internal_bss,
+				    pub);
+		tbss->refcount--;
+		if (tbss->refcount == 0)
+			bss_free(tbss);
+	}
+
 	bss->refcount--;
 	if (bss->refcount == 0)
 		bss_free(bss);
@@ -150,6 +169,7 @@
 	}
 
 	list_del_init(&bss->list);
+	list_del_init(&bss->pub.nontrans_list);
 	rb_erase(&bss->rbn, &rdev->bss_tree);
 	rdev->bss_entries--;
 	WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
@@ -159,6 +179,156 @@
 	return true;
 }
 
+static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
+				  const u8 *subelement, size_t subie_len,
+				  u8 *new_ie, gfp_t gfp)
+{
+	u8 *pos, *tmp;
+	const u8 *tmp_old, *tmp_new;
+	u8 *sub_copy;
+
+	/* copy subelement as we need to change its content to
+	 * mark an ie after it is processed.
+	 */
+	sub_copy = kmalloc(subie_len, gfp);
+	if (!sub_copy)
+		return 0;
+	memcpy(sub_copy, subelement, subie_len);
+
+	pos = &new_ie[0];
+
+	/* set new ssid */
+	tmp_new = cfg80211_find_ie(WLAN_EID_SSID, sub_copy, subie_len);
+	if (tmp_new) {
+		memcpy(pos, tmp_new, tmp_new[1] + 2);
+		pos += (tmp_new[1] + 2);
+	}
+
+	/* go through IEs in ie (skip SSID) and subelement,
+	 * merge them into new_ie
+	 */
+	tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
+	tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie;
+
+	while (tmp_old + tmp_old[1] + 2 - ie <= ielen) {
+		if (tmp_old[0] == 0) {
+			tmp_old++;
+			continue;
+		}
+
+		tmp = (u8 *)cfg80211_find_ie(tmp_old[0], sub_copy, subie_len);
+		if (!tmp) {
+			/* ie in old ie but not in subelement */
+			if (tmp_old[0] != WLAN_EID_MULTIPLE_BSSID) {
+				memcpy(pos, tmp_old, tmp_old[1] + 2);
+				pos += tmp_old[1] + 2;
+			}
+		} else {
+			/* ie in transmitting ie also in subelement,
+			 * copy from subelement and flag the ie in subelement
+			 * as copied (by setting eid field to 0xff). For
+			 * vendor ie, compare OUI + type + subType to
+			 * determine if they are the same ie.
+			 */
+			if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) {
+				if (!memcmp(tmp_old + 2, tmp + 2, 5)) {
+					/* same vendor ie, copy from
+					 * subelement
+					 */
+					memcpy(pos, tmp, tmp[1] + 2);
+					pos += tmp[1] + 2;
+					tmp[0] = 0xff;
+				} else {
+					memcpy(pos, tmp_old, tmp_old[1] + 2);
+					pos += tmp_old[1] + 2;
+				}
+			} else {
+				/* copy ie from subelement into new ie */
+				memcpy(pos, tmp, tmp[1] + 2);
+				pos += tmp[1] + 2;
+				tmp[0] = 0xff;
+			}
+		}
+
+		if (tmp_old + tmp_old[1] + 2 - ie == ielen)
+			break;
+
+		tmp_old += tmp_old[1] + 2;
+	}
+
+	/* go through subelement again to check if there is any ie not
+	 * copied to new ie, skip ssid, capability, bssid-index ie
+	 */
+	tmp_new = sub_copy;
+	while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) {
+		if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP ||
+		      tmp_new[0] == WLAN_EID_SSID ||
+		      tmp_new[0] == WLAN_EID_MULTI_BSSID_IDX ||
+		      tmp_new[0] == 0xff)) {
+			memcpy(pos, tmp_new, tmp_new[1] + 2);
+			pos += tmp_new[1] + 2;
+		}
+		if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len)
+			break;
+		tmp_new += tmp_new[1] + 2;
+	}
+
+	kfree(sub_copy);
+	return pos - new_ie;
+}
+
+static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
+		   const u8 *ssid, size_t ssid_len)
+{
+	const struct cfg80211_bss_ies *ies;
+	const u8 *ssidie;
+
+	if (bssid && !ether_addr_equal(a->bssid, bssid))
+		return false;
+
+	if (!ssid)
+		return true;
+
+	ies = rcu_access_pointer(a->ies);
+	if (!ies)
+		return false;
+	ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+	if (!ssidie)
+		return false;
+	if (ssidie[1] != ssid_len)
+		return false;
+	return memcmp(ssidie + 2, ssid, ssid_len) == 0;
+}
+
+static int
+cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss,
+			   struct cfg80211_bss *nontrans_bss)
+{
+	const u8 *ssid;
+	size_t ssid_len;
+	struct cfg80211_bss *bss = NULL;
+
+	rcu_read_lock();
+	ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
+	if (!ssid) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	ssid_len = ssid[1];
+	ssid = ssid + 2;
+	rcu_read_unlock();
+
+	/* check if nontrans_bss is in the list */
+	list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
+		if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
+			return 0;
+	}
+
+	/* add to the list */
+	list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
+	return 0;
+}
+
 static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
 				  unsigned long expire_time)
 {
@@ -480,73 +650,43 @@
 	__cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
 }
 
-const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
-				 const u8 *match, int match_len,
-				 int match_offset)
+const struct element *
+cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len,
+			 const u8 *match, unsigned int match_len,
+			 unsigned int match_offset)
 {
-	/* match_offset can't be smaller than 2, unless match_len is
-	 * zero, in which case match_offset must be zero as well.
-	 */
-	if (WARN_ON((match_len && match_offset < 2) ||
-		    (!match_len && match_offset)))
-		return NULL;
+	const struct element *elem;
 
-	while (len >= 2 && len >= ies[1] + 2) {
-		if ((ies[0] == eid) &&
-		    (ies[1] + 2 >= match_offset + match_len) &&
-		    !memcmp(ies + match_offset, match, match_len))
-			return ies;
-
-		len -= ies[1] + 2;
-		ies += ies[1] + 2;
+	for_each_element_id(elem, eid, ies, len) {
+		if (elem->datalen >= match_offset + match_len &&
+		    !memcmp(elem->data + match_offset, match, match_len))
+			return elem;
 	}
 
 	return NULL;
 }
-EXPORT_SYMBOL(cfg80211_find_ie_match);
+EXPORT_SYMBOL(cfg80211_find_elem_match);
 
-const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
-				  const u8 *ies, int len)
+const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type,
+						const u8 *ies,
+						unsigned int len)
 {
-	const u8 *ie;
+	const struct element *elem;
 	u8 match[] = { oui >> 16, oui >> 8, oui, oui_type };
 	int match_len = (oui_type < 0) ? 3 : sizeof(match);
 
 	if (WARN_ON(oui_type > 0xff))
 		return NULL;
 
-	ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, ies, len,
-				    match, match_len, 2);
+	elem = cfg80211_find_elem_match(WLAN_EID_VENDOR_SPECIFIC, ies, len,
+					match, match_len, 0);
 
-	if (ie && (ie[1] < 4))
+	if (!elem || elem->datalen < 4)
 		return NULL;
 
-	return ie;
+	return elem;
 }
-EXPORT_SYMBOL(cfg80211_find_vendor_ie);
-
-static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
-		   const u8 *ssid, size_t ssid_len)
-{
-	const struct cfg80211_bss_ies *ies;
-	const u8 *ssidie;
-
-	if (bssid && !ether_addr_equal(a->bssid, bssid))
-		return false;
-
-	if (!ssid)
-		return true;
-
-	ies = rcu_access_pointer(a->ies);
-	if (!ies)
-		return false;
-	ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
-	if (!ssidie)
-		return false;
-	if (ssidie[1] != ssid_len)
-		return false;
-	return memcmp(ssidie + 2, ssid, ssid_len) == 0;
-}
+EXPORT_SYMBOL(cfg80211_find_vendor_elem);
 
 /**
  * enum bss_compare_mode - BSS compare mode
@@ -882,6 +1022,12 @@
 	return true;
 }
 
+struct cfg80211_non_tx_bss {
+	struct cfg80211_bss *tx_bss;
+	u8 max_bssid_indicator;
+	u8 bssid_index;
+};
+
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 static struct cfg80211_internal_bss *
 cfg80211_bss_update(struct cfg80211_registered_device *rdev,
@@ -985,6 +1131,8 @@
 		memcpy(found->pub.chain_signal, tmp->pub.chain_signal,
 		       IEEE80211_MAX_CHAINS);
 		ether_addr_copy(found->parent_bssid, tmp->parent_bssid);
+		found->pub.max_bssid_indicator = tmp->pub.max_bssid_indicator;
+		found->pub.bssid_index = tmp->pub.bssid_index;
 	} else {
 		struct cfg80211_internal_bss *new;
 		struct cfg80211_internal_bss *hidden;
@@ -1009,6 +1157,7 @@
 		memcpy(new, tmp, sizeof(*new));
 		new->refcount = 1;
 		INIT_LIST_HEAD(&new->hidden_list);
+		INIT_LIST_HEAD(&new->pub.nontrans_list);
 
 		if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
 			hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN);
@@ -1042,6 +1191,17 @@
 			goto drop;
 		}
 
+		/* This must be before the call to bss_ref_get */
+		if (tmp->pub.transmitted_bss) {
+			struct cfg80211_internal_bss *pbss =
+				container_of(tmp->pub.transmitted_bss,
+					     struct cfg80211_internal_bss,
+					     pub);
+
+			new->pub.transmitted_bss = tmp->pub.transmitted_bss;
+			bss_ref_get(rdev, pbss);
+		}
+
 		list_add_tail(&new->list, &rdev->bss_list);
 		rdev->bss_entries++;
 		rb_insert_bss(rdev, new);
@@ -1130,14 +1290,16 @@
 }
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
-struct cfg80211_bss *
-cfg80211_inform_bss_data(struct wiphy *wiphy,
-			 struct cfg80211_inform_bss *data,
-			 enum cfg80211_bss_frame_type ftype,
-			 const u8 *bssid, u64 tsf, u16 capability,
-			 u16 beacon_interval, const u8 *ie, size_t ielen,
-			 gfp_t gfp)
+static struct cfg80211_bss *
+cfg80211_inform_single_bss_data(struct wiphy *wiphy,
+				struct cfg80211_inform_bss *data,
+				enum cfg80211_bss_frame_type ftype,
+				const u8 *bssid, u64 tsf, u16 capability,
+				u16 beacon_interval, const u8 *ie, size_t ielen,
+				struct cfg80211_non_tx_bss *non_tx_data,
+				gfp_t gfp)
 {
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	struct cfg80211_bss_ies *ies;
 	struct ieee80211_channel *channel;
 	struct cfg80211_internal_bss tmp = {}, *res;
@@ -1163,6 +1325,11 @@
 	tmp.pub.beacon_interval = beacon_interval;
 	tmp.pub.capability = capability;
 	tmp.ts_boottime = data->boottime_ns;
+	if (non_tx_data) {
+		tmp.pub.transmitted_bss = non_tx_data->tx_bss;
+		tmp.pub.bssid_index = non_tx_data->bssid_index;
+		tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator;
+	}
 
 	/*
 	 * If we do not know here whether the IEs are from a Beacon or Probe
@@ -1209,19 +1376,246 @@
 			regulatory_hint_found_beacon(wiphy, channel, gfp);
 	}
 
+	if (non_tx_data && non_tx_data->tx_bss) {
+		/* this is a nontransmitting bss, we need to add it to
+		 * transmitting bss' list if it is not there
+		 */
+		if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
+					       &res->pub)) {
+			if (__cfg80211_unlink_bss(rdev, res))
+				rdev->bss_generation++;
+		}
+	}
+
 	trace_cfg80211_return_bss(&res->pub);
 	/* cfg80211_bss_update gives us a referenced result */
 	return &res->pub;
 }
+
+static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
+				       struct cfg80211_inform_bss *data,
+				       enum cfg80211_bss_frame_type ftype,
+				       const u8 *bssid, u64 tsf,
+				       u16 beacon_interval, const u8 *ie,
+				       size_t ielen,
+				       struct cfg80211_non_tx_bss *non_tx_data,
+				       gfp_t gfp)
+{
+	const u8 *mbssid_index_ie;
+	const struct element *elem, *sub;
+	size_t new_ie_len;
+	u8 new_bssid[ETH_ALEN];
+	u8 *new_ie;
+	u16 capability;
+	struct cfg80211_bss *bss;
+
+	if (!non_tx_data)
+		return;
+	if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+		return;
+	if (!wiphy->support_mbssid)
+		return;
+	if (wiphy->support_only_he_mbssid &&
+	    !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+		return;
+
+	new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
+	if (!new_ie)
+		return;
+
+	for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) {
+		if (elem->datalen < 4)
+			continue;
+		for_each_element(sub, elem->data + 1, elem->datalen - 1) {
+			if (sub->id != 0 || sub->datalen < 4) {
+				/* not a valid BSS profile */
+				continue;
+			}
+
+			if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+			    sub->data[1] != 2) {
+				/* The first element within the Nontransmitted
+				 * BSSID Profile is not the Nontransmitted
+				 * BSSID Capability element.
+				 */
+				continue;
+			}
+
+			/* found a Nontransmitted BSSID Profile */
+			mbssid_index_ie = cfg80211_find_ie
+				(WLAN_EID_MULTI_BSSID_IDX,
+				 sub->data, sub->datalen);
+			if (!mbssid_index_ie || mbssid_index_ie[1] < 1 ||
+			    mbssid_index_ie[2] == 0) {
+				/* No valid Multiple BSSID-Index element */
+				continue;
+			}
+
+			non_tx_data->bssid_index = mbssid_index_ie[2];
+			non_tx_data->max_bssid_indicator = elem->data[0];
+
+			cfg80211_gen_new_bssid(bssid,
+					       non_tx_data->max_bssid_indicator,
+					       non_tx_data->bssid_index,
+					       new_bssid);
+			memset(new_ie, 0, IEEE80211_MAX_DATA_LEN);
+			new_ie_len = cfg80211_gen_new_ie(ie, ielen, sub->data,
+							 sub->datalen, new_ie,
+							 gfp);
+			if (!new_ie_len)
+				continue;
+
+			capability = get_unaligned_le16(sub->data + 2);
+			bss = cfg80211_inform_single_bss_data(wiphy, data,
+							      ftype,
+							      new_bssid, tsf,
+							      capability,
+							      beacon_interval,
+							      new_ie,
+							      new_ie_len,
+							      non_tx_data,
+							      gfp);
+			if (!bss)
+				break;
+			cfg80211_put_bss(wiphy, bss);
+		}
+	}
+
+	kfree(new_ie);
+}
+
+struct cfg80211_bss *
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+			 struct cfg80211_inform_bss *data,
+			 enum cfg80211_bss_frame_type ftype,
+			 const u8 *bssid, u64 tsf, u16 capability,
+			 u16 beacon_interval, const u8 *ie, size_t ielen,
+			 gfp_t gfp)
+{
+	struct cfg80211_bss *res;
+	struct cfg80211_non_tx_bss non_tx_data;
+
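+	/* first report the transmitting BSS itself, then derive any
+	 * nontransmitted BSS entries advertised in its MBSSID elements
+	 */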
+	res = cfg80211_inform_single_bss_data(wiphy, data, ftype, bssid, tsf,
+					      capability, beacon_interval, ie,
+					      ielen, NULL, gfp);
+	non_tx_data.tx_bss = res;
+	cfg80211_parse_mbssid_data(wiphy, data, ftype, bssid, tsf,
+				   beacon_interval, ie, ielen, &non_tx_data,
+				   gfp);
+	return res;
+}
 EXPORT_SYMBOL(cfg80211_inform_bss_data);
 
-/* cfg80211_inform_bss_width_frame helper */
-struct cfg80211_bss *
-cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
-			       struct cfg80211_inform_bss *data,
-			       struct ieee80211_mgmt *mgmt, size_t len,
-			       gfp_t gfp)
+static void
+cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy,
+				 struct cfg80211_inform_bss *data,
+				 struct ieee80211_mgmt *mgmt, size_t len,
+				 struct cfg80211_non_tx_bss *non_tx_data,
+				 gfp_t gfp)
+{
+	enum cfg80211_bss_frame_type ftype;
+	const u8 *ie = mgmt->u.probe_resp.variable;
+	size_t ielen = len - offsetof(struct ieee80211_mgmt,
+				      u.probe_resp.variable);
 
+	ftype = ieee80211_is_beacon(mgmt->frame_control) ?
+		CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
+
+	cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid,
+				   le64_to_cpu(mgmt->u.probe_resp.timestamp),
+				   le16_to_cpu(mgmt->u.probe_resp.beacon_int),
+				   ie, ielen, non_tx_data, gfp);
+}
+
+static void
+cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
+				   struct cfg80211_bss *nontrans_bss,
+				   struct ieee80211_mgmt *mgmt, size_t len,
+				   gfp_t gfp)
+{
+	u8 *ie, *new_ie, *pos;
+	const u8 *nontrans_ssid, *trans_ssid, *mbssid;
+	size_t ielen = len - offsetof(struct ieee80211_mgmt,
+				      u.probe_resp.variable);
+	size_t new_ie_len;
+	struct cfg80211_bss_ies *new_ies;
+	const struct cfg80211_bss_ies *old;
+	u8 cpy_len;
+
+	ie = mgmt->u.probe_resp.variable;
+
+	new_ie_len = ielen;
+	trans_ssid = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
+	if (!trans_ssid)
+		return;
+	new_ie_len -= trans_ssid[1];
+	mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
+	if (!mbssid)
+		return;
+	new_ie_len -= mbssid[1];
+	rcu_read_lock();
+	nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
+	if (!nontrans_ssid) {
+		rcu_read_unlock();
+		return;
+	}
+	new_ie_len += nontrans_ssid[1];
+	rcu_read_unlock();
+
+	/* generate new ie for nontrans BSS
+	 * 1. replace SSID with nontrans BSS' SSID
+	 * 2. skip MBSSID IE
+	 */
+	new_ie = kzalloc(new_ie_len, gfp);
+	if (!new_ie)
+		return;
+	new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, gfp);
+	if (!new_ies) {
+		kfree(new_ie);
+		return;
+	}
+
+	pos = new_ie;
+
+	/* copy the nontransmitted SSID */
+	cpy_len = nontrans_ssid[1] + 2;
+	memcpy(pos, nontrans_ssid, cpy_len);
+	pos += cpy_len;
+	/* copy the IEs between SSID and MBSSID */
+	cpy_len = trans_ssid[1] + 2;
+	memcpy(pos, (trans_ssid + cpy_len), (mbssid - (trans_ssid + cpy_len)));
+	pos += (mbssid - (trans_ssid + cpy_len));
+	/* copy the IEs after MBSSID */
+	cpy_len = mbssid[1] + 2;
+	memcpy(pos, mbssid + cpy_len, ((ie + ielen) - (mbssid + cpy_len)));
+
+	/* update ie */
+	new_ies->len = new_ie_len;
+	new_ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
+	new_ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control);
+	memcpy(new_ies->data, new_ie, new_ie_len);
+	if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+		old = rcu_access_pointer(nontrans_bss->proberesp_ies);
+		rcu_assign_pointer(nontrans_bss->proberesp_ies, new_ies);
+		rcu_assign_pointer(nontrans_bss->ies, new_ies);
+		if (old)
+			kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+	} else {
+		old = rcu_access_pointer(nontrans_bss->beacon_ies);
+		rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies);
+		rcu_assign_pointer(nontrans_bss->ies, new_ies);
+		if (old)
+			kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+	}
+}
+
+/* cfg80211_inform_bss_width_frame helper */
+static struct cfg80211_bss *
+cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
+				      struct cfg80211_inform_bss *data,
+				      struct ieee80211_mgmt *mgmt, size_t len,
+				      struct cfg80211_non_tx_bss *non_tx_data,
+				      gfp_t gfp)
 {
 	struct cfg80211_internal_bss tmp = {}, *res;
 	struct cfg80211_bss_ies *ies;
@@ -1279,6 +1673,11 @@
 	tmp.pub.chains = data->chains;
 	memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
 	ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
+	if (non_tx_data) {
+		tmp.pub.transmitted_bss = non_tx_data->tx_bss;
+		tmp.pub.bssid_index = non_tx_data->bssid_index;
+		tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator;
+	}
 
 	signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
 		wiphy->max_adj_channel_rssi_comp;
@@ -1300,6 +1699,53 @@
 	/* cfg80211_bss_update gives us a referenced result */
 	return &res->pub;
 }
+
+struct cfg80211_bss *
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+			       struct cfg80211_inform_bss *data,
+			       struct ieee80211_mgmt *mgmt, size_t len,
+			       gfp_t gfp)
+{
+	struct cfg80211_bss *res, *tmp_bss;
+	const u8 *ie = mgmt->u.probe_resp.variable;
+	const struct cfg80211_bss_ies *ies1, *ies2;
+	size_t ielen = len - offsetof(struct ieee80211_mgmt,
+				      u.probe_resp.variable);
+	struct cfg80211_non_tx_bss non_tx_data;
+
+	res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
+						    len, NULL, gfp);
+	if (!res || !wiphy->support_mbssid ||
+	    !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+		return res;
+	if (wiphy->support_only_he_mbssid &&
+	    !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+		return res;
+
+	non_tx_data.tx_bss = res;
+	/* process each non-transmitting bss */
+	cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len,
+					 &non_tx_data, gfp);
+
+	/* check whether res has other nontransmitting BSSes that are not
+	 * listed in the MBSSID IE
+	 */
+	ies1 = rcu_access_pointer(res->ies);
+
+	/* go through nontrans_list, if the timestamp of the BSS is
+	 * earlier than the timestamp of the transmitting BSS then
+	 * update it
+	 */
+	list_for_each_entry(tmp_bss, &res->nontrans_list,
+			    nontrans_list) {
+		ies2 = rcu_access_pointer(tmp_bss->ies);
+		if (ies2->tsf < ies1->tsf)
+			cfg80211_update_notlisted_nontrans(wiphy, tmp_bss,
+							   mgmt, len, gfp);
+	}
+
+	return res;
+}
 EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
 
 void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
@@ -1337,7 +1783,8 @@
 void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
-	struct cfg80211_internal_bss *bss;
+	struct cfg80211_internal_bss *bss, *tmp1;
+	struct cfg80211_bss *nontrans_bss, *tmp;
 
 	if (WARN_ON(!pub))
 		return;
@@ -1345,10 +1792,21 @@
 	bss = container_of(pub, struct cfg80211_internal_bss, pub);
 
 	spin_lock_bh(&rdev->bss_lock);
-	if (!list_empty(&bss->list)) {
-		if (__cfg80211_unlink_bss(rdev, bss))
+	if (list_empty(&bss->list))
+		goto out;
+
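+	/* unlink all nontransmitted BSS entries hanging off this BSS
+	 * before unlinking the BSS itself
+	 */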
+	list_for_each_entry_safe(nontrans_bss, tmp,
+				 &pub->nontrans_list,
+				 nontrans_list) {
+		tmp1 = container_of(nontrans_bss,
+				    struct cfg80211_internal_bss, pub);
+		if (__cfg80211_unlink_bss(rdev, tmp1))
 			rdev->bss_generation++;
 	}
+
+	if (__cfg80211_unlink_bss(rdev, bss))
+		rdev->bss_generation++;
+out:
 	spin_unlock_bh(&rdev->bss_lock);
 }
 EXPORT_SYMBOL(cfg80211_unlink_bss);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index aad1c8e..2821b3e 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -773,7 +773,7 @@
 }
 EXPORT_SYMBOL(cfg80211_classify8021d);
 
-const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
+const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id)
 {
 	const struct cfg80211_bss_ies *ies;
 
@@ -781,9 +781,9 @@
 	if (!ies)
 		return NULL;
 
-	return cfg80211_find_ie(ie, ies->data, ies->len);
+	return cfg80211_find_elem(id, ies->data, ies->len);
 }
-EXPORT_SYMBOL(ieee80211_bss_get_ie);
+EXPORT_SYMBOL(ieee80211_bss_get_elem);
 
 void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
 {
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index d49aa79..f7f53f9 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -352,17 +352,15 @@
 	unsigned int lci = 1;
 	struct sock *sk;
 
-	read_lock_bh(&x25_list_lock);
-
-	while ((sk = __x25_find_socket(lci, nb)) != NULL) {
+	while ((sk = x25_find_socket(lci, nb)) != NULL) {
 		sock_put(sk);
 		if (++lci == 4096) {
 			lci = 0;
 			break;
 		}
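+		/* x25_list_lock is no longer held across the scan, so we may
+		 * reschedule between probes of up to 4095 LCIs
+		 */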
+		cond_resched();
 	}
 
-	read_unlock_bh(&x25_list_lock);
 	return lci;
 }
 
@@ -681,8 +679,7 @@
 	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
 	int len, i, rc = 0;
 
-	if (!sock_flag(sk, SOCK_ZAPPED) ||
-	    addr_len != sizeof(struct sockaddr_x25) ||
+	if (addr_len != sizeof(struct sockaddr_x25) ||
 	    addr->sx25_family != AF_X25) {
 		rc = -EINVAL;
 		goto out;
@@ -697,9 +694,13 @@
 	}
 
 	lock_sock(sk);
-	x25_sk(sk)->source_addr = addr->sx25_addr;
-	x25_insert_socket(sk);
-	sock_reset_flag(sk, SOCK_ZAPPED);
+	if (sock_flag(sk, SOCK_ZAPPED)) {
+		x25_sk(sk)->source_addr = addr->sx25_addr;
+		x25_insert_socket(sk);
+		sock_reset_flag(sk, SOCK_ZAPPED);
+	} else {
+		rc = -EINVAL;
+	}
 	release_sock(sk);
 	SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
 out:
@@ -815,8 +816,13 @@
 	sock->state = SS_CONNECTED;
 	rc = 0;
 out_put_neigh:
-	if (rc)
+	if (rc) {
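+		/* drop the neighbour reference and clear the pointer so a
+		 * failed connect cannot leave a dangling x25->neighbour
+		 */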
+		read_lock_bh(&x25_list_lock);
 		x25_neigh_put(x25->neighbour);
+		x25->neighbour = NULL;
+		read_unlock_bh(&x25_list_lock);
+		x25->state = X25_STATE_0;
+	}
 out_put_route:
 	x25_route_put(rt);
 out:
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index be3520e..790b514 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -346,6 +346,12 @@
 
 		skb->sp->xvec[skb->sp->len++] = x;
 
+		skb_dst_force(skb);
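+		/* skb_dst_force() can clear the dst entry; drop the packet
+		 * here rather than dereference a NULL dst further down
+		 */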
+		if (!skb_dst(skb)) {
+			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+			goto drop;
+		}
+
 lock:
 		spin_lock(&x->lock);
 
@@ -385,7 +391,6 @@
 		XFRM_SKB_CB(skb)->seq.input.low = seq;
 		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
 
-		skb_dst_force(skb);
 		dev_hold(skb->dev);
 
 		if (crypto_done)
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 261995d3..6d20fbc 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -102,6 +102,7 @@
 		skb_dst_force(skb);
 		if (!skb_dst(skb)) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+			err = -EHOSTUNREACH;
 			goto error_nolock;
 		}
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 119a427..6ea8036 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1628,7 +1628,10 @@
 		dst_copy_metrics(dst1, dst);
 
 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
-			__u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
+			__u32 mark = 0;
+
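+			/* only honour the state's output mark when an smark
+			 * value or mask was actually configured
+			 */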
+			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
+				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
 
 			family = xfrm[i]->props.family;
 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b669262..cc0203e 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -426,6 +426,12 @@
 	module_put(mode->owner);
 }
 
+void xfrm_state_free(struct xfrm_state *x)
+{
+	kmem_cache_free(xfrm_state_cache, x);
+}
+EXPORT_SYMBOL(xfrm_state_free);
+
 static void xfrm_state_gc_destroy(struct xfrm_state *x)
 {
 	tasklet_hrtimer_cancel(&x->mtimer);
@@ -452,7 +458,7 @@
 	}
 	xfrm_dev_state_free(x);
 	security_xfrm_state_free(x);
-	kmem_cache_free(xfrm_state_cache, x);
+	xfrm_state_free(x);
 }
 
 static void xfrm_state_gc_task(struct work_struct *work)
@@ -788,7 +794,7 @@
 {
 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
 	si->sadcnt = net->xfrm.state_num;
-	si->sadhcnt = net->xfrm.state_hmask;
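+	/* state_hmask is the hash-table mask, so the bucket count is mask + 1 */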
+	si->sadhcnt = net->xfrm.state_hmask + 1;
 	si->sadhmcnt = xfrm_state_hashmax;
 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 }
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index df7ca2d..ab55782 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1488,10 +1488,15 @@
 		if (!ut[i].family)
 			ut[i].family = family;
 
-		if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
-		    (ut[i].family != prev_family))
-			return -EINVAL;
-
+		switch (ut[i].mode) {
+		case XFRM_MODE_TUNNEL:
+		case XFRM_MODE_BEET:
+			break;
+		default:
+			if (ut[i].family != prev_family)
+				return -EINVAL;
+			break;
+		}
 		if (ut[i].mode >= XFRM_MODE_MAX)
 			return -EINVAL;
 
@@ -2288,13 +2293,13 @@
 
 	}
 
-	kfree(x);
+	xfrm_state_free(x);
 	kfree(xp);
 
 	return 0;
 
 free_state:
-	kfree(x);
+	xfrm_state_free(x);
 nomem:
 	return err;
 }
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 904e775..cf40a82 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -55,6 +55,23 @@
 	return 0;
 }
 
+static int write_kprobe_events(const char *val)
+{
+	int fd, ret, flags;
+
+	if ((val != NULL) && (val[0] == '\0'))
+		flags = O_WRONLY | O_TRUNC;
+	else
+		flags = O_WRONLY | O_APPEND;
+
+	fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);
+	if (fd < 0)
+		return -1;
+
+	ret = write(fd, val, strlen(val));
+	close(fd);
+
+	return ret;
+}
+
 static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
 {
 	bool is_socket = strncmp(event, "socket", 6) == 0;
@@ -166,10 +183,9 @@
 
 #ifdef __x86_64__
 		if (strncmp(event, "sys_", 4) == 0) {
-			snprintf(buf, sizeof(buf),
-				 "echo '%c:__x64_%s __x64_%s' >> /sys/kernel/debug/tracing/kprobe_events",
-				 is_kprobe ? 'p' : 'r', event, event);
-			err = system(buf);
+			snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s",
+				is_kprobe ? 'p' : 'r', event, event);
+			err = write_kprobe_events(buf);
 			if (err >= 0) {
 				need_normal_check = false;
 				event_prefix = "__x64_";
@@ -177,10 +193,9 @@
 		}
 #endif
 		if (need_normal_check) {
-			snprintf(buf, sizeof(buf),
-				 "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
-				 is_kprobe ? 'p' : 'r', event, event);
-			err = system(buf);
+			snprintf(buf, sizeof(buf), "%c:%s %s",
+				is_kprobe ? 'p' : 'r', event, event);
+			err = write_kprobe_events(buf);
 			if (err < 0) {
 				printf("failed to create kprobe '%s' error '%s'\n",
 				       event, strerror(errno));
@@ -520,7 +535,7 @@
 		return 1;
 
 	/* clear all kprobes */
-	i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");
+	i = write_kprobe_events("");
 
 	/* scan over all elf sections to get license and map info */
 	for (i = 1; i < ehdr.e_shnum; i++) {
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
index 49b1355..e8f1bd6 100644
--- a/samples/livepatch/livepatch-shadow-fix1.c
+++ b/samples/livepatch/livepatch-shadow-fix1.c
@@ -89,6 +89,11 @@
 	 * pointer to handle resource release.
 	 */
 	leak = kzalloc(sizeof(int), GFP_KERNEL);
+	if (!leak) {
+		kfree(d);
+		return NULL;
+	}
+
 	klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
 			 shadow_leak_ctor, leak);
 
diff --git a/samples/livepatch/livepatch-shadow-mod.c b/samples/livepatch/livepatch-shadow-mod.c
index 4c54b25..4aa8a88 100644
--- a/samples/livepatch/livepatch-shadow-mod.c
+++ b/samples/livepatch/livepatch-shadow-mod.c
@@ -118,6 +118,10 @@
 
 	/* Oops, forgot to save leak! */
 	leak = kzalloc(sizeof(int), GFP_KERNEL);
+	if (!leak) {
+		kfree(d);
+		return NULL;
+	}
 
 	pr_info("%s: dummy @ %p, expires @ %lx\n",
 		__func__, d, d->jiffies_expire);
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
index 57d0d87..bb99889 100644
--- a/samples/mei/mei-amt-version.c
+++ b/samples/mei/mei-amt-version.c
@@ -117,7 +117,7 @@
 
 	me->verbose = verbose;
 
-	me->fd = open("/dev/mei", O_RDWR);
+	me->fd = open("/dev/mei0", O_RDWR);
 	if (me->fd == -1) {
 		mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
 		goto err;
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 8081b6c..34414c6 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -47,8 +47,8 @@
 	$xs	= "[0-9a-f ]";	# hex character or space
 	$funcre = qr/^$x* <(.*)>:$/;
 	if ($arch eq 'aarch64') {
-		#ffffffc0006325cc:       a9bb7bfd        stp     x29, x30, [sp,#-80]!
-		$re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
+		#ffffffc0006325cc:       a9bb7bfd        stp     x29, x30, [sp, #-80]!
+		$re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
 	} elsif ($arch eq 'arm') {
 		#c0008ffc:	e24dd064	sub	sp, sp, #100	; 0x64
 		$re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 64220e3..98a7d63 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -78,7 +78,7 @@
 	fi
 
 	# Strip out the base of the path
-	code=${code//$basepath/""}
+	code=${code//^$basepath/""}
 
 	# In the case of inlines, move everything to same line
 	code=${code//$'\n'/' '}
diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
index 086d272..0aebd75 100644
--- a/scripts/gdb/linux/proc.py
+++ b/scripts/gdb/linux/proc.py
@@ -41,7 +41,7 @@
 
     def invoke(self, arg, from_tty):
         # linux_banner should contain a newline
-        gdb.write(gdb.parse_and_eval("linux_banner").string())
+        gdb.write(gdb.parse_and_eval("(char *)linux_banner").string())
 
 LxVersion()
 
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index 25bd2b8..c2f577d 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -73,7 +73,7 @@
 {
 	fprintf(stderr,
 	        "%s:%d:warning: ignoring unsupported character '%c'\n",
-	        zconf_curname(), zconf_lineno(), chr);
+	        current_file->name, yylineno, chr);
 }
 %}
 
@@ -221,6 +221,8 @@
 	}
 	<<EOF>>	{
 		BEGIN(INITIAL);
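+		/* hand the pending string back to the parser so it is
+		 * consumed there rather than leaked at EOF
+		 */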
+		yylval.string = text;
+		return T_WORD_QUOTE;
 	}
 }
 
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0d998c54..5a77efd 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1204,6 +1204,30 @@
 	return 1;
 }
 
+static inline int is_arm_mapping_symbol(const char *str)
+{
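+	/* mapping symbols are "$a", "$x", "$t" or "$d", optionally with a
+	 * suffix such as "$d.1"
+	 */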
+	return str[0] == '$' && strchr("axtd", str[1])
+	       && (str[2] == '\0' || str[2] == '.');
+}
+
+/*
+ * If there's no name there, ignore it; likewise, ignore it if it's
+ * one of the magic symbols emitted by current ARM tools.
+ *
+ * Otherwise if find_symbols_between() returns those symbols, they'll
+ * fail the whitelist tests and cause lots of false alarms ... fixable
+ * only by merging __exit and __init sections into __text, bloating
+ * the kernel (which is especially evil on embedded platforms).
+ */
+static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
+{
+	const char *name = elf->strtab + sym->st_name;
+
+	if (!name || !strlen(name))
+		return 0;
+	return !is_arm_mapping_symbol(name);
+}
+
 /**
  * Find symbol based on relocation record info.
  * In some cases the symbol supplied is a valid symbol so
@@ -1229,6 +1253,8 @@
 			continue;
 		if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
 			continue;
+		if (!is_valid_name(elf, sym))
+			continue;
 		if (sym->st_value == addr)
 			return sym;
 		/* Find a symbol nearby - addr are maybe negative */
@@ -1247,30 +1273,6 @@
 		return NULL;
 }
 
-static inline int is_arm_mapping_symbol(const char *str)
-{
-	return str[0] == '$' && strchr("axtd", str[1])
-	       && (str[2] == '\0' || str[2] == '.');
-}
-
-/*
- * If there's no name there, ignore it; likewise, ignore it if it's
- * one of the magic symbols emitted used by current ARM tools.
- *
- * Otherwise if find_symbols_between() returns those symbols, they'll
- * fail the whitelist tests and cause lots of false alarms ... fixable
- * only by merging __exit and __init sections into __text, bloating
- * the kernel (which is especially evil on embedded platforms).
- */
-static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
-{
-	const char *name = elf->strtab + sym->st_name;
-
-	if (!name || !strlen(name))
-		return 0;
-	return !is_arm_mapping_symbol(name);
-}
-
 /*
  * Find symbols before or equal addr and after addr - in the section sec.
  * If we find two symbols with equal offset prefer one with a valid name.
@@ -2157,7 +2159,7 @@
 /* Cannot check for assembler */
 static void add_retpoline(struct buffer *b)
 {
-	buf_printf(b, "\n#ifdef RETPOLINE\n");
+	buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n");
 	buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
 	buf_printf(b, "#endif\n");
 }
diff --git a/security/Kconfig b/security/Kconfig
index 6c379cd..f045dc7 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -6,6 +6,10 @@
 
 source security/keys/Kconfig
 
+if ARCH_QCOM
+source security/pfe/Kconfig
+endif
+
 config SECURITY_DMESG_RESTRICT
 	bool "Restrict unprivileged access to the kernel syslog"
 	default n
diff --git a/security/Makefile b/security/Makefile
index 4d2d378..47bffaa 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -10,6 +10,7 @@
 subdir-$(CONFIG_SECURITY_APPARMOR)	+= apparmor
 subdir-$(CONFIG_SECURITY_YAMA)		+= yama
 subdir-$(CONFIG_SECURITY_LOADPIN)	+= loadpin
+subdir-$(CONFIG_ARCH_QCOM)		+= pfe
 
 # always enable default capabilities
 obj-y					+= commoncap.o
@@ -26,6 +27,7 @@
 obj-$(CONFIG_SECURITY_YAMA)		+= yama/
 obj-$(CONFIG_SECURITY_LOADPIN)		+= loadpin/
 obj-$(CONFIG_CGROUP_DEVICE)		+= device_cgroup.o
+obj-$(CONFIG_ARCH_QCOM)			+= pfe/
 
 # Object integrity file lists
 subdir-$(CONFIG_INTEGRITY)		+= integrity
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 08c88de..11975ec 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -1444,7 +1444,10 @@
 			new = aa_label_merge(label, target, GFP_KERNEL);
 		if (IS_ERR_OR_NULL(new)) {
 			info = "failed to build target label";
-			error = PTR_ERR(new);
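+			/* aa_label_merge() may return NULL on allocation
+			 * failure; PTR_ERR(NULL) is 0, so report -ENOMEM
+			 * explicitly
+			 */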
+			if (!new)
+				error = -ENOMEM;
+			else
+				error = PTR_ERR(new);
 			new = NULL;
 			perms.allow = 0;
 			goto audit;
diff --git a/security/keys/key.c b/security/keys/key.c
index d97c939..249a6da 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -265,8 +265,8 @@
 
 		spin_lock(&user->lock);
 		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
-			if (user->qnkeys + 1 >= maxkeys ||
-			    user->qnbytes + quotalen >= maxbytes ||
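+			/* reaching the quota limit exactly is allowed; only
+			 * exceeding it fails
+			 */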
+			if (user->qnkeys + 1 > maxkeys ||
+			    user->qnbytes + quotalen > maxbytes ||
 			    user->qnbytes + quotalen < user->qnbytes)
 				goto no_quota;
 		}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 41bcf57..99a5514 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -661,9 +661,6 @@
 	BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
 	       (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
 
-	if (ctx->index_key.description)
-		ctx->index_key.desc_len = strlen(ctx->index_key.description);
-
 	/* Check to see if this top-level keyring is what we are looking for
 	 * and whether it is valid or not.
 	 */
@@ -914,6 +911,7 @@
 	struct keyring_search_context ctx = {
 		.index_key.type		= type,
 		.index_key.description	= description,
+		.index_key.desc_len	= strlen(description),
 		.cred			= current_cred(),
 		.match_data.cmp		= key_default_cmp,
 		.match_data.raw_data	= description,
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 5af2934..d38be9d 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -166,8 +166,7 @@
 	int rc;
 
 	struct keyring_search_context ctx = {
-		.index_key.type		= key->type,
-		.index_key.description	= key->description,
+		.index_key		= key->index_key,
 		.cred			= m->file->f_cred,
 		.match_data.cmp		= lookup_user_key_possessed,
 		.match_data.raw_data	= key,
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 114f740..7385536 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -545,6 +545,7 @@
 	struct keyring_search_context ctx = {
 		.index_key.type		= type,
 		.index_key.description	= description,
+		.index_key.desc_len	= strlen(description),
 		.cred			= current_cred(),
 		.match_data.cmp		= key_default_cmp,
 		.match_data.raw_data	= description,
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 424e1d9..6797843 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -246,7 +246,7 @@
 	struct key *authkey;
 	key_ref_t authkey_ref;
 
-	sprintf(description, "%x", target_id);
+	ctx.index_key.desc_len = sprintf(description, "%x", target_id);
 
 	authkey_ref = search_process_keyrings(&ctx);
 
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index f840010..33028c0 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -321,6 +321,7 @@
 		if (a->u.net->sk) {
 			struct sock *sk = a->u.net->sk;
 			struct unix_sock *u;
+			struct unix_address *addr;
 			int len = 0;
 			char *p = NULL;
 
@@ -351,14 +352,15 @@
 #endif
 			case AF_UNIX:
 				u = unix_sk(sk);
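+				/* the acquire pairs with the store-release
+				 * that publishes u->addr, so we never see a
+				 * partially initialised address
+				 */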
+				addr = smp_load_acquire(&u->addr);
+				if (!addr)
+					break;
 				if (u->path.dentry) {
 					audit_log_d_path(ab, " path=", &u->path);
 					break;
 				}
-				if (!u->addr)
-					break;
-				len = u->addr->len-sizeof(short);
-				p = &u->addr->name->sun_path[0];
+				len = addr->len-sizeof(short);
+				p = &addr->name->sun_path[0];
 				audit_log_format(ab, " path=");
 				if (*p)
 					audit_log_untrustedstring(ab, p);
diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig
new file mode 100644
index 0000000..47c8a03
--- /dev/null
+++ b/security/pfe/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Qualcomm Technologies, Inc Per File Encryption security device drivers"
+	depends on ARCH_QCOM
+
+config PFT
+	bool "Per-File-Tagger driver"
+	depends on SECURITY
+	default n
+	help
+		This driver is used for tagging enterprise files.
+		It is part of the Per-File-Encryption (PFE) feature.
+		The driver tags files when they are created by a
+		registered application.
+		Tagged files are encrypted using the dm-req-crypt driver.
+
+config PFK
+	bool "Per-File-Key driver"
+	depends on SECURITY
+	depends on SECURITY_SELINUX
+	default n
+	help
+		This driver is used for storing eCryptfs information
+		in the file node.
+		It is part of the eCryptfs hardware-enhanced solution
+		provided by Qualcomm Technologies, Inc.
+		The information is used when the file is later encrypted
+		using the ICE or dm crypto engine.
+
+config PFK_WRAPPED_KEY_SUPPORTED
+	bool "Per-File-Key driver with wrapped key support"
+	depends on SECURITY
+	depends on SECURITY_SELINUX
+	depends on QSEECOM
+	depends on PFK
+	default n
+	help
+		Adds wrapped key support to the PFK driver. Instead of setting
+		the key directly in ICE, it unwraps the key and sets the key
+		in ICE.
+		It ensures the key is protected within a secure environment
+		and only the wrapped key is present in the kernel.
+endmenu
diff --git a/security/pfe/Makefile b/security/pfe/Makefile
new file mode 100644
index 0000000..5758772
--- /dev/null
+++ b/security/pfe/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
+ccflags-y += -Ifs/crypto
+ccflags-y += -Idrivers/misc
+
+obj-$(CONFIG_PFT) += pft.o
+obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o pfk_f2fs.o
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
new file mode 100644
index 0000000..218b283
--- /dev/null
+++ b/security/pfe/pfk.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Per-File-Key (PFK).
+ *
+ * This driver is responsible for overall management of various
+ * Per File Encryption variants that work on top of or as part of different
+ * file systems.
+ *
+ * The driver has the following purposes:
+ * 1) Define priorities between PFEs if more than one is enabled
+ * 2) Extract key information from the inode
+ * 3) Load and manage the various keys in the ICE HW engine
+ * 4) Be invoked from the various layers in FS/BLOCK/STORAGE DRIVER
+ *    that need to make decisions about HW encryption management of the data
+ *    Some examples:
+ *	BLOCK LAYER: when it decides whether 2 chunks can be united into
+ *	one encryption / decryption request sent to the HW
+ *
+ *	UFS DRIVER: when it needs to configure the ICE HW with a particular
+ *	key slot to be used for encryption / decryption
+ *
+ * PFE variants can differ in the particular way of storing the cryptographic
+ * info inside the inode, the actions taken upon file operations, etc., but
+ * the common properties are described above
+ *
+ */
+
+#define pr_fmt(fmt)	"pfk [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/bio.h>
+#include <linux/security.h>
+#include <crypto/algapi.h>
+#include <crypto/ice.h>
+
+#include <linux/pfk.h>
+
+#include "pfk_kc.h"
+#include "objsec.h"
+#include "pfk_ice.h"
+#include "pfk_ext4.h"
+#include "pfk_f2fs.h"
+#include "pfk_internal.h"
+
+static bool pfk_ready;
+
+/* might be replaced by a table when more than one cipher is supported */
+#define PFK_SUPPORTED_KEY_SIZE 32
+#define PFK_SUPPORTED_SALT_SIZE 32
+
+/* Various PFE types and function tables to support each one of them */
+enum pfe_type {EXT4_CRYPT_PFE, F2FS_CRYPT_PFE, INVALID_PFE};
+
+typedef int (*pfk_parse_inode_type)(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe);
+
+typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+static const pfk_parse_inode_type pfk_parse_inode_ftable[] = {
+	&pfk_ext4_parse_inode, /* EXT4_CRYPT_PFE */
+	&pfk_f2fs_parse_inode, /* F2FS_CRYPT_PFE */
+};
+
+static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = {
+	&pfk_ext4_allow_merge_bio, /* EXT4_CRYPT_PFE */
+	&pfk_f2fs_allow_merge_bio, /* F2FS_CRYPT_PFE */
+};
+
+static void __exit pfk_exit(void)
+{
+	pfk_ready = false;
+	pfk_ext4_deinit();
+	pfk_f2fs_deinit();
+	pfk_kc_deinit();
+}
+
+static int __init pfk_init(void)
+{
+	int ret = 0;
+
+	ret = pfk_ext4_init();
+	if (ret != 0)
+		goto fail;
+
+	ret = pfk_f2fs_init();
+	if (ret != 0)
+		goto fail;
+
+	ret = pfk_kc_init();
+	if (ret != 0) {
+		pr_err("could init pfk key cache, error %d\n", ret);
+		pfk_ext4_deinit();
+		pfk_f2fs_deinit();
+		goto fail;
+	}
+
+	pfk_ready = true;
+	pr_debug("Driver initialized successfully\n");
+
+	return 0;
+
+fail:
+	pr_err("Failed to init driver\n");
+	return -ENODEV;
+}
+
+/*
+ * If more than one type is supported simultaneously, this function will also
+ * set the priority between them
+ */
+static enum pfe_type pfk_get_pfe_type(const struct inode *inode)
+{
+	if (!inode)
+		return INVALID_PFE;
+
+	if (pfk_is_ext4_type(inode))
+		return EXT4_CRYPT_PFE;
+
+	if (pfk_is_f2fs_type(inode))
+		return F2FS_CRYPT_PFE;
+
+	return INVALID_PFE;
+}
+
+/**
+ * inode_to_filename() - get the filename from inode pointer.
+ * @inode: inode pointer
+ *
+ * It is used for debug prints.
+ *
+ * Return: the filename string, or "NULL"/"unknown" when it cannot be resolved.
+ */
+char *inode_to_filename(const struct inode *inode)
+{
+	struct dentry *dentry = NULL;
+	char *filename = NULL;
+
+	if (!inode)
+		return "NULL";
+
+	if (hlist_empty(&inode->i_dentry))
+		return "unknown";
+
+	dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+	filename = dentry->d_iname;
+
+	return filename;
+}
+
+/**
+ * pfk_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_is_ready(void)
+{
+	return pfk_ready;
+}
+
+/**
+ * pfk_bio_get_inode() - get the inode from a bio.
+ * @bio: Pointer to BIO structure.
+ *
+ * Walk the bio struct links to get the inode.
+ * Please note, that in general bio may consist of several pages from
+ * several files, but in our case we always assume that all pages come
+ * from the same file, since our logic ensures it. That is why we only
+ * walk through the first page to look for inode.
+ *
+ * Return: pointer to the inode struct if successful, or NULL otherwise.
+ *
+ */
+static struct inode *pfk_bio_get_inode(const struct bio *bio)
+{
+	if (!bio)
+		return NULL;
+	if (!bio_has_data((struct bio *)bio))
+		return NULL;
+	if (!bio->bi_io_vec)
+		return NULL;
+	if (!bio->bi_io_vec->bv_page)
+		return NULL;
+
+	if (PageAnon(bio->bi_io_vec->bv_page)) {
+		struct inode *inode;
+
+		/* Using direct-io (O_DIRECT) without page cache */
+		inode = dio_bio_get_inode((struct bio *)bio);
+		pr_debug("inode on direct-io, inode = 0x%pK.\n", inode);
+
+		return inode;
+	}
+
+	if (!page_mapping(bio->bi_io_vec->bv_page))
+		return NULL;
+
+	return page_mapping(bio->bi_io_vec->bv_page)->host;
+}
+
+/**
+ * pfk_key_size_to_key_type() - translate key size to key size enum
+ * @key_size: key size in bytes
+ * @key_size_type: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e. an unsupported key size)
+ */
+int pfk_key_size_to_key_type(size_t key_size,
+	enum ice_crpto_key_size *key_size_type)
+{
+	/*
+	 * currently only a 32 byte (256 bit) key size is supported;
+	 * in the future, a table with supported key sizes might
+	 * be introduced
+	 */
+
+	if (key_size != PFK_SUPPORTED_KEY_SIZE) {
+		pr_err("not supported key size %zu\n", key_size);
+		return -EINVAL;
+	}
+
+	if (key_size_type)
+		*key_size_type = ICE_CRYPTO_KEY_SIZE_256;
+
+	return 0;
+}
+
+/*
+ * Checks whether the filesystem type recorded in the inode's superblock
+ * matches the given fs_type
+ */
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+	const char *fs_type)
+{
+	if (!inode || !fs_type)
+		return false;
+
+	if (!inode->i_sb)
+		return false;
+
+	if (!inode->i_sb->s_type)
+		return false;
+
+	return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
+}
+
+/**
+ * pfk_get_key_for_bio() - get the encryption key to be used for a bio
+ *
+ * @bio: pointer to the BIO
+ * @key_info: pointer to the key information which will be filled in
+ * @algo_mode: optional pointer to the algorithm identifier which will be set
+ * @is_pfe: will be set to false if the BIO should be left unencrypted
+ *
+ * Return: 0 if a key is being used, otherwise a -errno value
+ */
+static int pfk_get_key_for_bio(const struct bio *bio,
+		struct pfk_key_info *key_info,
+		enum ice_cryto_algo_mode *algo_mode,
+		bool *is_pfe, unsigned int *data_unit)
+{
+	const struct inode *inode;
+	enum pfe_type which_pfe;
+	const struct blk_encryption_key *key;
+	char *s_type = NULL;
+
+	inode = pfk_bio_get_inode(bio);
+	which_pfe = pfk_get_pfe_type(inode);
+	s_type = (char *)pfk_kc_get_storage_type();
+
+	/*
+	 * Update dun based on storage type.
+	 * 512 byte dun - For ext4 emmc
+	 * 4K dun - For ext4 ufs, f2fs ufs and f2fs emmc
+	 */
+
+	if (data_unit) {
+		if (!bio_dun(bio) && !memcmp(s_type, "sdcc", strlen("sdcc")))
+			*data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
+		else
+			*data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB;
+	}
+
+	if (which_pfe != INVALID_PFE) {
+		/* Encrypted file; override ->bi_crypt_key */
+		pr_debug("parsing inode %lu with PFE type %d\n",
+			 inode->i_ino, which_pfe);
+		return (*(pfk_parse_inode_ftable[which_pfe]))
+				(bio, inode, key_info, algo_mode, is_pfe);
+	}
+
+	/*
+	 * bio is not for an encrypted file.  Use ->bi_crypt_key if it was set.
+	 * Otherwise, don't encrypt/decrypt the bio.
+	 */
+	key = bio->bi_crypt_key;
+	if (!key) {
+		*is_pfe = false;
+		return -EINVAL;
+	}
+
+	/* Note: the "salt" is really just the second half of the XTS key. */
+	BUILD_BUG_ON(sizeof(key->raw) !=
+		     PFK_SUPPORTED_KEY_SIZE + PFK_SUPPORTED_SALT_SIZE);
+	key_info->key = &key->raw[0];
+	key_info->key_size = PFK_SUPPORTED_KEY_SIZE;
+	key_info->salt = &key->raw[PFK_SUPPORTED_KEY_SIZE];
+	key_info->salt_size = PFK_SUPPORTED_SALT_SIZE;
+	if (algo_mode)
+		*algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+	return 0;
+}
+
+/**
+ * pfk_load_key_start() - loads the PFE encryption key into the ICE.
+ *			  May also be invoked from a non-PFE
+ *			  context; in that case it is not
+ *			  relevant and the is_pfe flag is
+ *			  set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @ice_setting: Pointer to the ice setting structure that will be filled with
+ * ice configuration values, including the index to which the key was loaded
+ * @is_pfe: will be false if the inode is not relevant to PFE, in which case
+ * the bio should be treated as non-PFE by the block layer
+ *
+ * Returns the index where the key is stored in the encryption hw and
+ * additional information that will be used later to configure the
+ * encryption hw.
+ *
+ * Must be followed by pfk_load_key_end() when the key is no longer used by
+ * the ice
+ *
+ */
+int pfk_load_key_start(const struct bio *bio,
+		struct ice_crypto_setting *ice_setting, bool *is_pfe,
+		bool async)
+{
+	int ret = 0;
+	struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+	enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+	enum ice_crpto_key_size key_size_type = 0;
+	unsigned int data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
+	u32 key_index = 0;
+
+	if (!is_pfe) {
+		pr_err("is_pfe is NULL\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * only a few of the errors below indicate that this
+	 * function was not invoked within a PFE context;
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_is_ready())
+		return -ENODEV;
+
+	if (!ice_setting) {
+		pr_err("ice setting is NULL\n");
+		return -EINVAL;
+	}
+
+	ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe,
+					&data_unit);
+
+	if (ret != 0)
+		return ret;
+
+	ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type);
+	if (ret != 0)
+		return ret;
+
+	ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
+			key_info.salt, key_info.salt_size, &key_index, async,
+			data_unit);
+	if (ret) {
+		if (ret != -EBUSY && ret != -EAGAIN)
+			pr_err("start: could not load key into pfk key cache, error %d\n",
+					ret);
+
+		return ret;
+	}
+
+	ice_setting->key_size = key_size_type;
+	ice_setting->algo_mode = algo_mode;
+	/* hardcoded for now */
+	ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+	ice_setting->key_index = key_index;
+
+	pr_debug("loaded key for file %s key_index %d\n",
+		inode_to_filename(pfk_bio_get_inode(bio)), key_index);
+
+	return 0;
+}
+
+/**
+ * pfk_load_key_end() - marks the PFE key as no longer used by ICE
+ *			May also be invoked from a non-PFE
+ *			context; in that case it is not
+ *			relevant and the is_pfe flag is
+ *			set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @is_pfe: Pointer to the is_pfe flag, which will be true if the function
+ *			was invoked from a PFE context
+ */
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+	int ret = 0;
+	struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+
+	if (!is_pfe) {
+		pr_err("is_pfe is NULL\n");
+		return -EINVAL;
+	}
+
+	/* only a few of the errors below indicate that this
+	 * function was not invoked within a PFE context;
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_is_ready())
+		return -ENODEV;
+
+	ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe, NULL);
+	if (ret != 0)
+		return ret;
+
+	pfk_kc_load_key_end(key_info.key, key_info.key_size,
+		key_info.salt, key_info.salt_size);
+
+	pr_debug("finished using key for file %s\n",
+		inode_to_filename(pfk_bio_get_inode(bio)));
+
+	return 0;
+}
+
+/**
+ * pfk_allow_merge_bio() - Check if 2 BIOs can be merged.
+ * @bio1:	Pointer to first BIO structure.
+ * @bio2:	Pointer to second BIO structure.
+ *
+ * Prevent merging of BIOs from encrypted and non-encrypted
+ * files, or from files encrypted with different keys.
+ * Also prevent non-encrypted and encrypted data from the same file
+ * from being merged (the ecryptfs header, if stored inside the file,
+ * should be non-encrypted).
+ * This API is called by the file system block layer.
+ *
+ * Return: true if the BIOs are allowed to be merged, false
+ * otherwise.
+ */
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
+{
+	const struct blk_encryption_key *key1;
+	const struct blk_encryption_key *key2;
+	const struct inode *inode1;
+	const struct inode *inode2;
+	enum pfe_type which_pfe1;
+	enum pfe_type which_pfe2;
+
+	if (!pfk_is_ready())
+		return false;
+
+	if (!bio1 || !bio2)
+		return false;
+
+	if (bio1 == bio2)
+		return true;
+
+	key1 = bio1->bi_crypt_key;
+	key2 = bio2->bi_crypt_key;
+
+	inode1 = pfk_bio_get_inode(bio1);
+	inode2 = pfk_bio_get_inode(bio2);
+
+	which_pfe1 = pfk_get_pfe_type(inode1);
+	which_pfe2 = pfk_get_pfe_type(inode2);
+
+	/*
+	 * If one bio is for an encrypted file and the other is for a different
+	 * type of encrypted file or for blocks that are not part of an
+	 * encrypted file, do not merge.
+	 */
+	if (which_pfe1 != which_pfe2)
+		return false;
+
+	if (which_pfe1 != INVALID_PFE) {
+		/* Both bios are for the same type of encrypted file. */
+		return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
+				inode1, inode2);
+	}
+
+	/*
+	 * Neither bio is for an encrypted file.  Merge only if the default keys
+	 * are the same (or both are NULL).
+	 */
+	return key1 == key2 ||
+		(key1 && key2 &&
+		 !crypto_memneq(key1->raw, key2->raw, sizeof(key1->raw)));
+}
+
+int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	int ret = -EINVAL;
+
+	if (!key || !salt)
+		return ret;
+
+	ret = pfk_kc_remove_key_with_salt(key, key_size, salt, salt_size);
+	if (ret)
+		pr_err("Clear key error: ret value %d\n", ret);
+	return ret;
+}
+
+/**
+ * Flush the key table on storage core reset. During a core reset the key
+ * configuration is lost in ICE. We need to flush the cache so that the
+ * keys will be reconfigured again for every subsequent transaction
+ */
+void pfk_clear_on_reset(void)
+{
+	if (!pfk_is_ready())
+		return;
+
+	pfk_kc_clear_on_reset();
+}
+
+module_init(pfk_init);
+module_exit(pfk_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key driver");
diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c
new file mode 100644
index 0000000..0ccd46b
--- /dev/null
+++ b/security/pfe/pfk_ext4.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Per-File-Key (PFK) - EXT4
+ *
+ * This driver is used for working with the EXT4 crypt extension.
+ *
+ * The key information is stored in the inode by EXT4 when the file is first
+ * opened and will later be accessed by the Block Device Driver to actually
+ * load the key into the encryption hw.
+ *
+ * PFK exposes APIs for loading and removing keys from the encryption hw
+ * and also an API to determine whether 2 adjacent blocks can be aggregated
+ * by the Block Layer into one request to the encryption hw.
+ *
+ */
+
+#define pr_fmt(fmt)	"pfk_ext4 [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "fscrypt_ice.h"
+#include "pfk_ext4.h"
+
+static bool pfk_ext4_ready;
+
+/*
+ * pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_ext4_deinit(void)
+{
+	pfk_ext4_ready = false;
+}
+
+/*
+ * pfk_ext4_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_ext4_init(void)
+{
+	pfk_ext4_ready = true;
+	pr_info("PFK EXT4 inited successfully\n");
+
+	return 0;
+}
+
+/**
+ * pfk_ext4_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_ext4_is_ready(void)
+{
+	return pfk_ext4_ready;
+}
+
+/**
+ * pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_ext4_type(const struct inode *inode)
+{
+	if (!pfe_is_inode_filesystem_type(inode, "ext4"))
+		return false;
+
+	return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_ext4_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e. an unsupported cipher)
+ */
+static int pfk_ext4_parse_cipher(const struct inode *inode,
+	enum ice_cryto_algo_mode *algo)
+{
+	/*
+	 * currently only the AES XTS algo is supported;
+	 * in the future, a table with supported ciphers might
+	 * be introduced
+	 */
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!fscrypt_is_aes_xts_cipher(inode)) {
+		pr_err("ext4 alghoritm is not supported by pfk\n");
+		return -EINVAL;
+	}
+
+	if (algo)
+		*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+	return 0;
+}
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe)
+{
+	int ret = 0;
+
+	if (!is_pfe)
+		return -EINVAL;
+
+	/*
+	 * only a few of the errors below indicate that this
+	 * function was not invoked within a PFE context;
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_ext4_is_ready())
+		return -ENODEV;
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!key_info)
+		return -EINVAL;
+
+	key_info->key = fscrypt_get_ice_encryption_key(inode);
+	if (!key_info->key) {
+		pr_err("could not parse key from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
+	if (!key_info->key_size) {
+		pr_err("could not parse key size from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->salt = fscrypt_get_ice_encryption_salt(inode);
+	if (!key_info->salt) {
+		pr_err("could not parse salt from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
+	if (!key_info->salt_size) {
+		pr_err("could not parse salt size from ext4\n");
+		return -EINVAL;
+	}
+
+	ret = pfk_ext4_parse_cipher(inode, algo);
+	if (ret != 0) {
+		pr_err("not supported cipher\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2)
+{
+	/* if there is no ext4 pfk, don't disallow merging blocks */
+	if (!pfk_ext4_is_ready())
+		return true;
+
+	if (!inode1 || !inode2)
+		return false;
+
+	return fscrypt_is_ice_encryption_info_equal(inode1, inode2);
+}
diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h
new file mode 100644
index 0000000..bca23f3
--- /dev/null
+++ b/security/pfe/pfk_ext4.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _PFK_EXT4_H_
+#define _PFK_EXT4_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_ext4_type(const struct inode *inode);
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe);
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+int __init pfk_ext4_init(void);
+
+void pfk_ext4_deinit(void);
+
+#endif /* _PFK_EXT4_H_ */
diff --git a/security/pfe/pfk_f2fs.c b/security/pfe/pfk_f2fs.c
new file mode 100644
index 0000000..5ea79ace
--- /dev/null
+++ b/security/pfe/pfk_f2fs.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Per-File-Key (PFK) - f2fs
+ *
+ * This driver is used for working with the EXT4/F2FS crypt extension.
+ *
+ * The key information is stored in the inode by EXT4/F2FS when the file is
+ * first opened and will later be accessed by the Block Device Driver to
+ * actually load the key into the encryption hw.
+ *
+ * PFK exposes APIs for loading and removing keys from the encryption hw
+ * and also an API to determine whether 2 adjacent blocks can be aggregated
+ * by the Block Layer into one request to the encryption hw.
+ *
+ */
+
+#define pr_fmt(fmt)	"pfk_f2fs [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "fscrypt_ice.h"
+#include "pfk_f2fs.h"
+
+static bool pfk_f2fs_ready;
+
+/*
+ * pfk_f2fs_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_f2fs_deinit(void)
+{
+	pfk_f2fs_ready = false;
+}
+
+/*
+ * pfk_f2fs_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_f2fs_init(void)
+{
+	pfk_f2fs_ready = true;
+	pr_info("PFK F2FS inited successfully\n");
+
+	return 0;
+}
+
+/**
+ * pfk_f2fs_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_f2fs_is_ready(void)
+{
+	return pfk_f2fs_ready;
+}
+
+/**
+ * pfk_is_f2fs_type() - return true if inode belongs to ICE F2FS PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_f2fs_type(const struct inode *inode)
+{
+	if (!pfe_is_inode_filesystem_type(inode, "f2fs"))
+		return false;
+
+	return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_f2fs_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e. an unsupported cipher)
+ */
+static int pfk_f2fs_parse_cipher(const struct inode *inode,
+		enum ice_cryto_algo_mode *algo)
+{
+	/*
+	 * currently only the AES XTS algo is supported;
+	 * in the future, a table with supported ciphers might
+	 * be introduced
+	 */
+	if (!inode)
+		return -EINVAL;
+
+	if (!fscrypt_is_aes_xts_cipher(inode)) {
+		pr_err("f2fs alghoritm is not supported by pfk\n");
+		return -EINVAL;
+	}
+
+	if (algo)
+		*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+	return 0;
+}
+
+int pfk_f2fs_parse_inode(const struct bio *bio,
+		const struct inode *inode,
+		struct pfk_key_info *key_info,
+		enum ice_cryto_algo_mode *algo,
+		bool *is_pfe)
+{
+	int ret = 0;
+
+	if (!is_pfe)
+		return -EINVAL;
+
+	/*
+	 * only a few of the errors below indicate that this
+	 * function was not invoked within a PFE context;
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_f2fs_is_ready())
+		return -ENODEV;
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!key_info)
+		return -EINVAL;
+
+	key_info->key = fscrypt_get_ice_encryption_key(inode);
+	if (!key_info->key) {
+		pr_err("could not parse key from f2fs\n");
+		return -EINVAL;
+	}
+
+	key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
+	if (!key_info->key_size) {
+		pr_err("could not parse key size from f2fs\n");
+		return -EINVAL;
+	}
+
+	key_info->salt = fscrypt_get_ice_encryption_salt(inode);
+	if (!key_info->salt) {
+		pr_err("could not parse salt from f2fs\n");
+		return -EINVAL;
+	}
+
+	key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
+	if (!key_info->salt_size) {
+		pr_err("could not parse salt size from f2fs\n");
+		return -EINVAL;
+	}
+
+	ret = pfk_f2fs_parse_cipher(inode, algo);
+	if (ret != 0) {
+		pr_err("not supported cipher\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
+		const struct bio *bio2, const struct inode *inode1,
+		const struct inode *inode2)
+{
+	bool mergeable;
+
+	/* if there is no f2fs pfk, don't disallow merging blocks */
+	if (!pfk_f2fs_is_ready())
+		return true;
+
+	if (!inode1 || !inode2)
+		return false;
+
+	mergeable = fscrypt_is_ice_encryption_info_equal(inode1, inode2);
+	if (!mergeable)
+		return false;
+
+	/* ICE allows only a consecutive iv_key stream. */
+	if (!bio_dun(bio1) && !bio_dun(bio2))
+		return true;
+	else if (!bio_dun(bio1) || !bio_dun(bio2))
+		return false;
+
+	return bio_end_dun(bio1) == bio_dun(bio2);
+}
diff --git a/security/pfe/pfk_f2fs.h b/security/pfe/pfk_f2fs.h
new file mode 100644
index 0000000..3c6f7ec
--- /dev/null
+++ b/security/pfe/pfk_f2fs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _PFK_F2FS_H_
+#define _PFK_F2FS_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_f2fs_type(const struct inode *inode);
+
+int pfk_f2fs_parse_inode(const struct bio *bio,
+		const struct inode *inode,
+		struct pfk_key_info *key_info,
+		enum ice_cryto_algo_mode *algo,
+		bool *is_pfe);
+
+bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+int __init pfk_f2fs_init(void);
+
+void pfk_f2fs_deinit(void);
+
+#endif /* _PFK_F2FS_H_ */
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
new file mode 100644
index 0000000..501ff67
--- /dev/null
+++ b/security/pfe/pfk_ice.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/async.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/device-mapper.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/qseecomi.h>
+#include <crypto/ice.h>
+#include "pfk_ice.h"
+
+/**********************************/
+/** global definitions		 **/
+/**********************************/
+
+#define TZ_ES_INVALIDATE_ICE_KEY 0x3
+#define TZ_ES_CONFIG_SET_ICE_KEY 0x4
+
+/* index 0 and 1 is reserved for FDE */
+#define MIN_ICE_KEY_INDEX 2
+
+#define MAX_ICE_KEY_INDEX 31
+
+#define TZ_ES_CONFIG_SET_ICE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \
+	TZ_ES_CONFIG_SET_ICE_KEY)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_ID \
+		TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
+			TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1( \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define CONTEXT_SIZE 0x1000
+
+#define ICE_BUFFER_SIZE 64
+
+static uint8_t ice_buffer[ICE_BUFFER_SIZE];
+
+enum {
+	ICE_CIPHER_MODE_XTS_128 = 0,
+	ICE_CIPHER_MODE_CBC_128 = 1,
+	ICE_CIPHER_MODE_XTS_256 = 3,
+	ICE_CIPHER_MODE_CBC_256 = 4
+};
+
+static int set_key(uint32_t index, const uint8_t *key, const uint8_t *salt,
+		unsigned int data_unit)
+{
+	struct scm_desc desc = {0};
+	int ret = 0;
+	uint32_t smc_id = 0;
+	char *tzbuf = (char *)ice_buffer;
+	uint32_t size = ICE_BUFFER_SIZE / 2;
+
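+	/* the 64-byte TZ buffer carries the 32-byte key followed by the
+	 * 32-byte salt (the second half of the XTS key pair)
+	 */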
+	memset(tzbuf, 0, ICE_BUFFER_SIZE);
+
+	memcpy(ice_buffer, key, size);
+	memcpy(ice_buffer+size, salt, size);
+
+	dmac_flush_range(tzbuf, tzbuf + ICE_BUFFER_SIZE);
+
+	smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+	desc.args[1] = virt_to_phys(tzbuf);
+	desc.args[2] = ICE_BUFFER_SIZE;
+	desc.args[3] = ICE_CIPHER_MODE_XTS_256;
+	desc.args[4] = data_unit;
+
+	ret = scm_call2_noretry(smc_id, &desc);
+	if (ret)
+		pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
+
+	return ret;
+}
+
+static int clear_key(uint32_t index)
+{
+	struct scm_desc desc = {0};
+	int ret = 0;
+	uint32_t smc_id = 0;
+
+	smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+
+	ret = scm_call2_noretry(smc_id, &desc);
+	if (ret)
+		pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
+	return ret;
+}
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+			char *storage_type, unsigned int data_unit)
+{
+	int ret = 0, ret1 = 0;
+	char *s_type = storage_type;
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+	if (!key || !salt) {
+		pr_err("%s Invalid key/salt\n", __func__);
+		return -EINVAL;
+	}
+
+	if (s_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
+	if (ret) {
+		pr_err("%s: could not enable clocks: %d\n", __func__, ret);
+		goto out;
+	}
+
+	ret = set_key(index, key, salt, data_unit);
+	if (ret) {
+		pr_err("%s: Set Key Error: %d\n", __func__, ret);
+		if (ret == -EBUSY) {
+			if (qcom_ice_setup_ice_hw((const char *)s_type, false))
+				pr_err("%s: clock disable failed\n", __func__);
+			goto out;
+		}
+		/* Try to invalidate the key to keep ICE in proper state */
+		ret1 = clear_key(index);
+		if (ret1)
+			pr_err("%s: Invalidate key error: %d\n", __func__, ret);
+	}
+
+	ret1 = qcom_ice_setup_ice_hw((const char *)s_type, false);
+	if (ret1)
+		pr_err("%s: Error %d disabling clocks\n", __func__, ret1);
+
+out:
+	return ret;
+}
+
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
+{
+	int ret = 0;
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+
+	if (storage_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = qcom_ice_setup_ice_hw((const char *)storage_type, true);
+	if (ret) {
+		pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret);
+		return ret;
+	}
+
+	ret = clear_key(index);
+	if (ret)
+		pr_err("%s: Invalidate key error: %d\n", __func__, ret);
+
+	if (qcom_ice_setup_ice_hw((const char *)storage_type, false))
+		pr_err("%s: could not disable clocks\n", __func__);
+
+	return ret;
+}
diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h
new file mode 100644
index 0000000..0331439
--- /dev/null
+++ b/security/pfe/pfk_ice.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef PFK_ICE_H_
+#define PFK_ICE_H_
+
+/*
+ * PFK ICE
+ *
+ * ICE keys configuration through scm calls.
+ *
+ */
+
+#include <linux/types.h>
+
+int pfk_ice_init(void);
+int pfk_ice_deinit(void);
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+			char *storage_type, unsigned int data_unit);
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
+
+#endif /* PFK_ICE_H_ */
diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h
new file mode 100644
index 0000000..7a800d3
--- /dev/null
+++ b/security/pfe/pfk_internal.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _PFK_INTERNAL_H_
+#define _PFK_INTERNAL_H_
+
+#include <linux/types.h>
+#include <crypto/ice.h>
+
+struct pfk_key_info {
+	const unsigned char *key;
+	const unsigned char *salt;
+	size_t key_size;
+	size_t salt_size;
+};
+
+int pfk_key_size_to_key_type(size_t key_size,
+	enum ice_crpto_key_size *key_size_type);
+
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+	const char *fs_type);
+
+char *inode_to_filename(const struct inode *inode);
+
+#endif /* _PFK_INTERNAL_H_ */
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
new file mode 100644
index 0000000..64c168d
--- /dev/null
+++ b/security/pfe/pfk_kc.c
@@ -0,0 +1,912 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * PFK Key Cache
+ *
+ * Key Cache used internally in PFK.
+ * The purpose of the cache is to save access time to QSEE when loading keys.
+ * Currently the cache is the same size as the total number of keys that can
+ * be loaded to ICE. Since this number is relatively small, the algorithms for
+ * cache eviction are simple, linear and based on last usage timestamp, i.e.
+ * the node that will be evicted is the one with the oldest timestamp.
+ * Empty entries always have the oldest timestamp.
+ */
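+
+/*
+ * A minimal usage sketch for the cache API (caller context and variable
+ * names are illustrative only):
+ *
+ *	u32 ice_index;
+ *	int err;
+ *
+ *	err = pfk_kc_load_key_start(key, PFK_KC_KEY_SIZE, salt,
+ *			PFK_KC_SALT_SIZE, &ice_index, true, data_unit);
+ *	if (err == -EAGAIN) {
+ *		(defer to a context that may sleep, then retry)
+ *		err = pfk_kc_load_key_start(key, PFK_KC_KEY_SIZE, salt,
+ *				PFK_KC_SALT_SIZE, &ice_index, false,
+ *				data_unit);
+ *	}
+ *	if (!err) {
+ *		(submit the request to ICE hw slot 'ice_index')
+ *		pfk_kc_load_key_end(key, PFK_KC_KEY_SIZE, salt,
+ *				PFK_KC_SALT_SIZE);
+ *	}
+ */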
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <crypto/ice.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched/signal.h>
+
+#include "pfk_kc.h"
+#include "pfk_ice.h"
+
+
+/** the first available index in ice engine */
+#define PFK_KC_STARTING_INDEX 2
+
+/** currently the only supported key and salt sizes */
+#define PFK_KC_KEY_SIZE 32
+#define PFK_KC_SALT_SIZE 32
+
+/** Table size */
+#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))
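+/* one entry per usable ICE slot (2..31); 0 and 1 are reserved for FDE */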
+
+/** The maximum key and salt size */
+#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
+#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
+#define PFK_UFS "ufs"
+
+static DEFINE_SPINLOCK(kc_lock);
+static unsigned long flags;
+static bool kc_ready;
+static char *s_type = "sdcc";
+
+/**
+ * enum pfk_kc_entry_state - state of the entry inside kc table
+ *
+ * @FREE:		   entry is free
+ * @ACTIVE_ICE_PRELOAD:    entry is actively used by ICE engine
+ *			   and cannot be used by others. SCM call
+ *			   to load key to ICE is pending to be performed
+ * @ACTIVE_ICE_LOADED:     entry is actively used by ICE engine and
+ *			   cannot be used by others. SCM call to load the
+ *			   key to ICE was successfully executed and key is
+ *			   now loaded
+ * @INACTIVE_INVALIDATING: entry is being invalidated during file close
+ *			   and cannot be used by others until invalidation
+ *			   is complete
+ * @INACTIVE:		   entry's key is already loaded, but is not
+ *			   currently being used. It can be re-used for
+ *			   optimization and to avoid SCM call cost or
+ *			   it can be taken by another key if there are
+ *			   no FREE entries
+ * @SCM_ERROR:		   error occurred while scm call was performed to
+ *			   load the key to ICE
+ */
+enum pfk_kc_entry_state {
+	FREE,
+	ACTIVE_ICE_PRELOAD,
+	ACTIVE_ICE_LOADED,
+	INACTIVE_INVALIDATING,
+	INACTIVE,
+	SCM_ERROR
+};
+
+struct kc_entry {
+	unsigned char key[PFK_MAX_KEY_SIZE];
+	size_t key_size;
+
+	unsigned char salt[PFK_MAX_SALT_SIZE];
+	size_t salt_size;
+
+	u64 time_stamp;
+	u32 key_index;
+
+	struct task_struct *thread_pending;
+
+	enum pfk_kc_entry_state state;
+
+	/* ref count for the number of requests in the HW queue for this key */
+	int loaded_ref_cnt;
+	int scm_error;
+};
+
+static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
+
+/**
+ * kc_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the key cache is ready.
+ */
+static inline bool kc_is_ready(void)
+{
+	return kc_ready;
+}
+
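+/*
+ * FIXME: keeping the saved irq flags in a single global is only safe as
+ * long as lock/unlock pairs never nest; a per-caller flags variable
+ * would be more robust on contended SMP paths.
+ */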
+static inline void kc_spin_lock(void)
+{
+	spin_lock_irqsave(&kc_lock, flags);
+}
+
+static inline void kc_spin_unlock(void)
+{
+	spin_unlock_irqrestore(&kc_lock, flags);
+}
+
+/**
+ * pfk_kc_get_storage_type() - return the hardware storage type.
+ *
+ * Return: storage type queried during bootup.
+ */
+const char *pfk_kc_get_storage_type(void)
+{
+	return s_type;
+}
+
+/**
+ * kc_entry_is_available() - checks whether the entry is available
+ *
+ * Return true if it is, false otherwise (including when entry is NULL)
+ * Should be invoked under spinlock
+ */
+static bool kc_entry_is_available(const struct kc_entry *entry)
+{
+	if (!entry)
+		return false;
+
+	return (entry->state == FREE || entry->state == INACTIVE);
+}
+
+/**
+ * kc_entry_wait_till_available() - waits till entry is available
+ *
+ * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
+ * by signal
+ *
+ * Should be invoked under spinlock
+ */
+static int kc_entry_wait_till_available(struct kc_entry *entry)
+{
+	int res = 0;
+
+	while (!kc_entry_is_available(entry)) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			res = -ERESTARTSYS;
+			break;
+		}
+		/* assuming only one thread can try to invalidate
+		 * the same entry
+		 */
+		entry->thread_pending = current;
+		kc_spin_unlock();
+		schedule();
+		kc_spin_lock();
+	}
+	set_current_state(TASK_RUNNING);
+
+	return res;
+}
+
+/**
+ * kc_entry_start_invalidating() - moves entry to state
+ *			           INACTIVE_INVALIDATING
+ *				   If entry is in use, waits till
+ *				   it gets available
+ * @entry: pointer to entry
+ *
+ * Return 0 in case of success, otherwise error
+ * Should be invoked under spinlock
+ */
+static int kc_entry_start_invalidating(struct kc_entry *entry)
+{
+	int res;
+
+	res = kc_entry_wait_till_available(entry);
+	if (res)
+		return res;
+
+	entry->state = INACTIVE_INVALIDATING;
+
+	return 0;
+}
+
+/**
+ * kc_entry_finish_invalidating() - moves entry to state FREE once
+ *				     invalidation is complete
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_entry_finish_invalidating(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	if (entry->state != INACTIVE_INVALIDATING)
+		return;
+
+	entry->state = FREE;
+}
+
+/**
+ * kc_min_entry() - compare two entries to find one with minimal time
+ * @a: ptr to the first entry. If NULL the other entry will be returned
+ * @b: pointer to the second entry
+ *
+ * Return the entry whose timestamp is minimal, or b if a is NULL
+ */
+static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
+		struct kc_entry *b)
+{
+	if (!a)
+		return b;
+
+	if (time_before64(b->time_stamp, a->time_stamp))
+		return b;
+
+	return a;
+}
+
+/**
+ * kc_entry_at_index() - return entry at specific index
+ * @index: index of entry to be accessed
+ *
+ * Return entry
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_entry_at_index(int index)
+{
+	return &(kc_table[index]);
+}
+
+/**
+ * kc_find_key_at_index() - find kc entry starting at specific index
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ * @starting_index: index to start the search from; if an entry is found,
+ * updated with the index of that entry
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
+	size_t key_size, const unsigned char *salt, size_t salt_size,
+	int *starting_index)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+
+		if (salt != NULL) {
+			if (entry->salt_size != salt_size)
+				continue;
+
+			if (memcmp(entry->salt, salt, salt_size) != 0)
+				continue;
+		}
+
+		if (entry->key_size != key_size)
+			continue;
+
+		if (memcmp(entry->key, key, key_size) == 0) {
+			*starting_index = i;
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * kc_find_key() - find kc entry
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	int index = 0;
+
+	return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
+}
+
+/**
+ * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
+ * that is not locked
+ *
+ * Returns entry with minimal timestamp. Empty entries have timestamp
+ * of 0, therefore they are returned first.
+ * If all the entries are locked, NULL is returned.
+ * Should be invoked under spin lock
+ */
+static struct kc_entry *kc_find_oldest_entry_non_locked(void)
+{
+	struct kc_entry *curr_min_entry = NULL;
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+
+		if (entry->state == FREE)
+			return entry;
+
+		if (entry->state == INACTIVE)
+			curr_min_entry = kc_min_entry(curr_min_entry, entry);
+	}
+
+	return curr_min_entry;
+}
+
+/**
+ * kc_update_timestamp() - updates timestamp of entry to current
+ *
+ * @entry: entry to update
+ *
+ */
+static void kc_update_timestamp(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	entry->time_stamp = get_jiffies_64();
+}
+
+/**
+ * kc_clear_entry() - clear the key from entry and mark entry not in use
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_clear_entry(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	memset(entry->key, 0, entry->key_size);
+	memset(entry->salt, 0, entry->salt_size);
+
+	entry->key_size = 0;
+	entry->salt_size = 0;
+
+	entry->time_stamp = 0;
+	entry->scm_error = 0;
+
+	entry->state = FREE;
+
+	entry->loaded_ref_cnt = 0;
+	entry->thread_pending = NULL;
+}
+
+/**
+ * kc_update_entry() - replaces the key in given entry and
+ *			loads the new key to ICE
+ *
+ * @entry: entry to replace key in
+ * @key: key
+ * @key_size: key_size
+ * @salt: salt
+ * @salt_size: salt_size
+ * @data_unit: dun size
+ *
+ * The previous key is securely released and wiped, the new one is loaded
+ * to ICE.
+ * Should be invoked under spinlock
+ * Caller to validate that key/salt_size matches the size in struct kc_entry
+ */
+static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
+	size_t key_size, const unsigned char *salt, size_t salt_size,
+	unsigned int data_unit)
+{
+	int ret;
+
+	kc_clear_entry(entry);
+
+	memcpy(entry->key, key, key_size);
+	entry->key_size = key_size;
+
+	memcpy(entry->salt, salt, salt_size);
+	entry->salt_size = salt_size;
+
+	/* Mark entry as no longer free before releasing the lock */
+	entry->state = ACTIVE_ICE_PRELOAD;
+	kc_spin_unlock();
+
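+	/* the SCM call below may sleep, hence the lock was dropped above */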
+	ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
+			entry->salt, s_type, data_unit);
+
+	kc_spin_lock();
+	return ret;
+}
+
+/**
+ * pfk_kc_init() - init function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_init(void)
+{
+	int i = 0;
+	struct kc_entry *entry = NULL;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		entry->key_index = PFK_KC_STARTING_INDEX + i;
+	}
+	kc_ready = true;
+	kc_spin_unlock();
+
+	return 0;
+}
+
+/**
+ * pfk_kc_deinit() - deinit function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_deinit(void)
+{
+	int res = pfk_kc_clear();
+
+	kc_ready = false;
+
+	return res;
+}
+
+/**
+ * pfk_kc_load_key_start() - retrieve the key from cache or add it if
+ * it's not there and return the ICE hw key index in @key_index.
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ * @key_index: the pointer to key_index where the output will be stored
+ * @async: if true, SCM calls must not be made in the caller context
+ *
+ * If the key is present in the cache, then its key_index is retrieved
+ * from the cache. If it is not present, the oldest entry in the kc table
+ * is evicted, the key is loaded to ICE via QSEE at the index of the
+ * evicted entry, and the result is stored in the cache.
+ * The entry that is going to be used is marked as being used; it is
+ * marked as not being used again when ICE finishes with it and
+ * pfk_kc_load_key_end() is invoked.
+ * As QSEE calls can only be done from a non-atomic context, @async set
+ * to 'false' specifies that it is ok to make the calls in the current
+ * context. Otherwise, when @async is set, -EAGAIN is returned whenever
+ * an SCM call would be required and the caller should retry from a
+ * different context.
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size, u32 *key_index,
+		bool async, unsigned int data_unit)
+{
+	int ret = 0;
+	struct kc_entry *entry = NULL;
+	bool entry_exists = false;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key || !salt || !key_index) {
+		pr_err("%s key/salt/key_index NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (key_size != PFK_KC_KEY_SIZE) {
+		pr_err("unsupported key size %zu\n", key_size);
+		return -EINVAL;
+	}
+
+	if (salt_size != PFK_KC_SALT_SIZE) {
+		pr_err("unsupported salt size %zu\n", salt_size);
+		return -EINVAL;
+	}
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		if (async) {
+			pr_debug("%s task will populate entry\n", __func__);
+			kc_spin_unlock();
+			return -EAGAIN;
+		}
+
+		entry = kc_find_oldest_entry_non_locked();
+		if (!entry) {
+			/* could not find a single non locked entry,
+			 * return EBUSY to upper layers so that the
+			 * request will be rescheduled
+			 */
+			kc_spin_unlock();
+			return -EBUSY;
+		}
+	} else {
+		entry_exists = true;
+	}
+
+	pr_debug("entry with index %d is in state %d\n",
+		entry->key_index, entry->state);
+
+	switch (entry->state) {
+	case (INACTIVE):
+		if (entry_exists) {
+			kc_update_timestamp(entry);
+			entry->state = ACTIVE_ICE_LOADED;
+
+			if (!strcmp(s_type, (char *)PFK_UFS)) {
+				if (async)
+					entry->loaded_ref_cnt++;
+			} else {
+				entry->loaded_ref_cnt++;
+			}
+			break;
+		}
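+		/* fall through - load the new key into the evicted entry */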
+	case (FREE):
+		ret = kc_update_entry(entry, key, key_size, salt, salt_size,
+					data_unit);
+		if (ret) {
+			entry->state = SCM_ERROR;
+			entry->scm_error = ret;
+			pr_err("%s: key load error (%d)\n", __func__, ret);
+		} else {
+			kc_update_timestamp(entry);
+			entry->state = ACTIVE_ICE_LOADED;
+
+			/*
+			 * In case of UFS only increase ref cnt for async calls,
+			 * sync calls from within work thread do not pass
+			 * requests further to HW
+			 */
+			if (!strcmp(s_type, (char *)PFK_UFS)) {
+				if (async)
+					entry->loaded_ref_cnt++;
+			} else {
+				entry->loaded_ref_cnt++;
+			}
+		}
+		break;
+	case (ACTIVE_ICE_PRELOAD):
+	case (INACTIVE_INVALIDATING):
+		ret = -EAGAIN;
+		break;
+	case (ACTIVE_ICE_LOADED):
+		kc_update_timestamp(entry);
+
+		if (!strcmp(s_type, (char *)PFK_UFS)) {
+			if (async)
+				entry->loaded_ref_cnt++;
+		} else {
+			entry->loaded_ref_cnt++;
+		}
+		break;
+	case (SCM_ERROR):
+		ret = entry->scm_error;
+		kc_clear_entry(entry);
+		entry->state = FREE;
+		break;
+	default:
+		pr_err("invalid state %d for entry with key index %d\n",
+			entry->state, entry->key_index);
+		ret = -EINVAL;
+	}
+
+	*key_index = entry->key_index;
+	kc_spin_unlock();
+
+	return ret;
+}
+
+/**
+ * pfk_kc_load_key_end() - finish the process of key loading that was started
+ *			     by pfk_kc_load_key_start, by marking the entry
+ *			     as not being in use
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ */
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	struct kc_entry *entry = NULL;
+	struct task_struct *tmp_pending = NULL;
+	int ref_cnt = 0;
+
+	if (!kc_is_ready())
+		return;
+
+	if (!key || !salt)
+		return;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return;
+
+	if (salt_size != PFK_KC_SALT_SIZE)
+		return;
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		kc_spin_unlock();
+		pr_err("internal error, there should an entry to unlock\n");
+
+		return;
+	}
+	ref_cnt = --entry->loaded_ref_cnt;
+
+	if (ref_cnt < 0)
+		pr_err("internal error, ref count should never be negative\n");
+
+	if (!ref_cnt) {
+		entry->state = INACTIVE;
+		/*
+		 * wake-up invalidation if it's waiting
+		 * for the entry to be released
+		 */
+		if (entry->thread_pending) {
+			tmp_pending = entry->thread_pending;
+			entry->thread_pending = NULL;
+
+			kc_spin_unlock();
+			wake_up_process(tmp_pending);
+			return;
+		}
+	}
+
+	kc_spin_unlock();
+}
+
+/**
+ * pfk_kc_remove_key_with_salt() - remove the key from cache and from ICE
+ * engine
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ * Return 0 in case of success, error otherwise (also in case of a
+ * non-existing key)
+ */
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	struct kc_entry *entry = NULL;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key)
+		return -EINVAL;
+
+	if (!salt)
+		return -EINVAL;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return -EINVAL;
+
+	if (salt_size != PFK_KC_SALT_SIZE)
+		return -EINVAL;
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		pr_debug("%s: key does not exist\n", __func__);
+		kc_spin_unlock();
+		return -EINVAL;
+	}
+
+	res = kc_entry_start_invalidating(entry);
+	if (res != 0) {
+		kc_spin_unlock();
+		return res;
+	}
+	kc_clear_entry(entry);
+
+	kc_spin_unlock();
+
+	qti_pfk_ice_invalidate_key(entry->key_index, s_type);
+
+	kc_spin_lock();
+	kc_entry_finish_invalidating(entry);
+	kc_spin_unlock();
+
+	return 0;
+}
+
+/**
+ * pfk_kc_remove_key() - remove the key from cache and from ICE engine
+ * when no salt is available. Searches by the key part only; if several
+ * entries match, all of them are removed.
+ *
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ *
+ * Return 0 in case of success, error otherwise (also for non-existing key)
+ */
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
+{
+	struct kc_entry *entry = NULL;
+	int index = 0;
+	int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
+	int temp_indexes_size = 0;
+	int i = 0;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key)
+		return -EINVAL;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return -EINVAL;
+
+	memset(temp_indexes, -1, sizeof(temp_indexes));
+
+	kc_spin_lock();
+
+	entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+	if (!entry) {
+		pr_err("%s: key does not exist\n", __func__);
+		kc_spin_unlock();
+		return -EINVAL;
+	}
+
+	res = kc_entry_start_invalidating(entry);
+	if (res != 0) {
+		kc_spin_unlock();
+		return res;
+	}
+
+	temp_indexes[temp_indexes_size++] = index;
+	kc_clear_entry(entry);
+
+	/* let's clean additional entries with the same key if there are any */
+	do {
+		index++;
+		entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+		if (!entry)
+			break;
+
+		res = kc_entry_start_invalidating(entry);
+		if (res != 0) {
+			kc_spin_unlock();
+			goto out;
+		}
+
+		temp_indexes[temp_indexes_size++] = index;
+
+		kc_clear_entry(entry);
+
+	} while (true);
+
+	kc_spin_unlock();
+
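+	/* invalidate the keys in ICE outside the spinlock; SCM may sleep */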
+	for (i = temp_indexes_size - 1; i >= 0; i--)
+		qti_pfk_ice_invalidate_key(
+			kc_entry_at_index(temp_indexes[i])->key_index,
+					s_type);
+
+	/* success; fall through to the common cleanup below */
+	res = 0;
+
+out:
+	kc_spin_lock();
+	for (i = temp_indexes_size - 1; i >= 0; i--)
+		kc_entry_finish_invalidating(
+				kc_entry_at_index(temp_indexes[i]));
+	kc_spin_unlock();
+
+	return res;
+}
+
+/**
+ * pfk_kc_clear() - clear the table and remove all keys from ICE
+ *
+ * Return 0 on success, error otherwise
+ *
+ */
+int pfk_kc_clear(void)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		res = kc_entry_start_invalidating(entry);
+		if (res != 0) {
+			kc_spin_unlock();
+			goto out;
+		}
+		kc_clear_entry(entry);
+	}
+	kc_spin_unlock();
+
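+	/* invalidate all ICE slots outside the spinlock; SCM may sleep */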
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+		qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
+					s_type);
+
+	/* success; fall through to the common cleanup below */
+	res = 0;
+out:
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+		kc_entry_finish_invalidating(kc_entry_at_index(i));
+	kc_spin_unlock();
+
+	return res;
+}
+
+/**
+ * pfk_kc_clear_on_reset() - clear the table on storage reset
+ *
+ * The assumption is that at this point we don't have any pending
+ * transactions. Also, there is no need to clear the keys from ICE.
+ */
+void pfk_kc_clear_on_reset(void)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	if (!kc_is_ready())
+		return;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		kc_clear_entry(entry);
+	}
+	kc_spin_unlock();
+}
+
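+/*
+ * Determine the boot storage from the kernel command line; s_type keeps
+ * its "sdcc" default unless androidboot.bootdevice names a UFS device.
+ */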
+static int pfk_kc_find_storage_type(char **device)
+{
+	char boot[20] = {'\0'};
+	char *match = (char *)strnstr(saved_command_line,
+				"androidboot.bootdevice=",
+				strlen(saved_command_line));
+	if (match) {
+		strlcpy(boot, match + strlen("androidboot.bootdevice="),
+			sizeof(boot));
+		if (strnstr(boot, PFK_UFS, strlen(boot)))
+			*device = PFK_UFS;
+
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int __init pfk_kc_pre_init(void)
+{
+	return pfk_kc_find_storage_type(&s_type);
+}
+
+static void __exit pfk_kc_exit(void)
+{
+	s_type = NULL;
+}
+
+module_init(pfk_kc_pre_init);
+module_exit(pfk_kc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key-KC driver");
diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h
new file mode 100644
index 0000000..30765bf
--- /dev/null
+++ b/security/pfe/pfk_kc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef PFK_KC_H_
+#define PFK_KC_H_
+
+#include <linux/types.h>
+
+int pfk_kc_init(void);
+int pfk_kc_deinit(void);
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size, u32 *key_index,
+		bool async, unsigned int data_unit);
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
+int pfk_kc_clear(void);
+void pfk_kc_clear_on_reset(void);
+const char *pfk_kc_get_storage_type(void);
+extern char *saved_command_line;
+
+#endif /* PFK_KC_H_ */
diff --git a/security/security.c b/security/security.c
index 957be34..81cebf2 100644
--- a/security/security.c
+++ b/security/security.c
@@ -623,6 +623,14 @@
 }
 EXPORT_SYMBOL_GPL(security_inode_create);
 
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+				umode_t mode)
+{
+	if (unlikely(IS_PRIVATE(dir)))
+		return 0;
+	return call_int_hook(inode_post_create, 0, dir, dentry, mode);
+}
+
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
 			 struct dentry *new_dentry)
 {
@@ -1004,6 +1012,13 @@
 
 void security_cred_free(struct cred *cred)
 {
+	/*
+	 * There is a failure case in prepare_creds() that
+	 * may result in a call here with ->security being NULL.
+	 */
+	if (unlikely(cred->security == NULL))
+		return;
+
 	call_void_hook(cred_free, cred);
 }
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index fe251c6..3c3878f 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2934,7 +2934,7 @@
 		return rc;
 
 	/* Allow all mounts performed by the kernel */
-	if (flags & MS_KERNMOUNT)
+	if (flags & (MS_KERNMOUNT | MS_SUBMOUNT))
 		return 0;
 
 	ad.type = LSM_AUDIT_DATA_DENTRY;
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index cc5e26b..4b0da5f 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -26,8 +26,7 @@
 #include <linux/in.h>
 #include <linux/spinlock.h>
 #include <net/net_namespace.h>
-#include "flask.h"
-#include "avc.h"
+#include "security.h"
 
 struct task_security_struct {
 	u32 osid;		/* SID prior to last execve */
@@ -64,6 +63,8 @@
 	u32 sid;		/* SID of this object */
 	u16 sclass;		/* security class of this object */
 	unsigned char initialized;	/* initialization flag */
+	u32 tag;		/* Per-File-Encryption tag */
+	void *pfk_data;		/* Per-File-Key data from ecryptfs */
 	spinlock_t lock;
 };
 
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 23e762d..ba68c43 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #include <linux/refcount.h>
 #include <linux/workqueue.h>
-#include "flask.h"
 
 #define SECSID_NULL			0x00000000 /* unspecified SID */
 #define SECSID_WILD			0xffffffff /* wildcard SID */
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index f4eadd3..d31a52e 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -732,7 +732,8 @@
 	kfree(key);
 	if (datum) {
 		levdatum = datum;
-		ebitmap_destroy(&levdatum->level->cat);
+		if (levdatum->level)
+			ebitmap_destroy(&levdatum->level->cat);
 		kfree(levdatum->level);
 	}
 	kfree(datum);
@@ -2108,6 +2109,7 @@
 {
 	int i, j, rc;
 	u32 nel, len;
+	__be64 prefixbuf[1];
 	__le32 buf[3];
 	struct ocontext *l, *c;
 	u32 nodebuf[8];
@@ -2217,21 +2219,30 @@
 					goto out;
 				break;
 			}
-			case OCON_IBPKEY:
-				rc = next_entry(nodebuf, fp, sizeof(u32) * 4);
+			case OCON_IBPKEY: {
+				u32 pkey_lo, pkey_hi;
+
+				rc = next_entry(prefixbuf, fp, sizeof(u64));
 				if (rc)
 					goto out;
 
-				c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf));
+				/* we need to have subnet_prefix in CPU order */
+				c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]);
 
-				if (nodebuf[2] > 0xffff ||
-				    nodebuf[3] > 0xffff) {
+				rc = next_entry(buf, fp, sizeof(u32) * 2);
+				if (rc)
+					goto out;
+
+				pkey_lo = le32_to_cpu(buf[0]);
+				pkey_hi = le32_to_cpu(buf[1]);
+
+				if (pkey_lo > U16_MAX || pkey_hi > U16_MAX) {
 					rc = -EINVAL;
 					goto out;
 				}
 
-				c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]);
-				c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]);
+				c->u.ibpkey.low_pkey  = pkey_lo;
+				c->u.ibpkey.high_pkey = pkey_hi;
 
 				rc = context_read_and_validate(&c->context[0],
 							       p,
@@ -2239,7 +2250,10 @@
 				if (rc)
 					goto out;
 				break;
-			case OCON_IBENDPORT:
+			}
+			case OCON_IBENDPORT: {
+				u32 port;
+
 				rc = next_entry(buf, fp, sizeof(u32) * 2);
 				if (rc)
 					goto out;
@@ -2249,12 +2263,13 @@
 				if (rc)
 					goto out;
 
-				if (buf[1] > 0xff || buf[1] == 0) {
+				port = le32_to_cpu(buf[1]);
+				if (port > U8_MAX || port == 0) {
 					rc = -EINVAL;
 					goto out;
 				}
 
-				c->u.ibendport.port = le32_to_cpu(buf[1]);
+				c->u.ibendport.port = port;
 
 				rc = context_read_and_validate(&c->context[0],
 							       p,
@@ -2262,7 +2277,8 @@
 				if (rc)
 					goto out;
 				break;
-			}
+			} /* end case */
+			} /* end switch */
 		}
 	}
 	rc = 0;
@@ -3105,6 +3121,7 @@
 {
 	unsigned int i, j, rc;
 	size_t nel, len;
+	__be64 prefixbuf[1];
 	__le32 buf[3];
 	u32 nodebuf[8];
 	struct ocontext *c;
@@ -3192,12 +3209,17 @@
 					return rc;
 				break;
 			case OCON_IBPKEY:
-				*((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix);
+				/* subnet_prefix is in CPU order */
+				prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix);
 
-				nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey);
-				nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey);
+				rc = put_entry(prefixbuf, sizeof(u64), 1, fp);
+				if (rc)
+					return rc;
 
-				rc = put_entry(nodebuf, sizeof(u32), 4, fp);
+				buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey);
+				buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey);
+
+				rc = put_entry(buf, sizeof(u32), 2, fp);
 				if (rc)
 					return rc;
 				rc = context_write(p, &c->context[0], fp);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 70d3066..017c47e 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -4333,6 +4333,12 @@
 	int request = 0;
 	int rc;
 
+	/*
+	 * Validate requested permissions
+	 */
+	if (perm & ~KEY_NEED_ALL)
+		return -EINVAL;
+
 	keyp = key_ref_to_ptr(key_ref);
 	if (keyp == NULL)
 		return -EINVAL;
@@ -4356,10 +4362,10 @@
 	ad.a.u.key_struct.key = keyp->serial;
 	ad.a.u.key_struct.key_desc = keyp->description;
 #endif
-	if (perm & KEY_NEED_READ)
-		request = MAY_READ;
+	if (perm & (KEY_NEED_READ | KEY_NEED_SEARCH | KEY_NEED_VIEW))
+		request |= MAY_READ;
 	if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR))
-		request = MAY_WRITE;
+		request |= MAY_WRITE;
 	rc = smk_access(tkp, keyp->security, request, &ad);
 	rc = smk_bu_note("key access", tkp, keyp->security, request, rc);
 	return rc;
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index ffda91a..02514fe 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -368,7 +368,9 @@
 			break;
 		case YAMA_SCOPE_RELATIONAL:
 			rcu_read_lock();
-			if (!task_is_descendant(current, child) &&
+			if (!pid_alive(child))
+				rc = -EPERM;
+			if (!rc && !task_is_descendant(current, child) &&
 			    !ptracer_exception_found(current, child) &&
 			    !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
 				rc = -EPERM;
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 5fb078a..009e469 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -532,7 +532,8 @@
 {
 	/* first let's check the buffer parameter's */
 	if (params->buffer.fragment_size == 0 ||
-	    params->buffer.fragments > U32_MAX / params->buffer.fragment_size)
+	    params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
+	    params->buffer.fragments == 0)
 		return -EINVAL;
 
 	/* now codec parameters */
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 1214d88..0477c5d 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -25,6 +25,7 @@
 #include <linux/time.h>
 #include <linux/mutex.h>
 #include <linux/device.h>
+#include <linux/nospec.h>
 #include <sound/core.h>
 #include <sound/minors.h>
 #include <sound/pcm.h>
@@ -129,6 +130,7 @@
 				return -EFAULT;
 			if (stream < 0 || stream > 1)
 				return -EINVAL;
+			stream = array_index_nospec(stream, 2);
 			if (get_user(subdevice, &info->subdevice))
 				return -EFAULT;
 			mutex_lock(&register_mutex);
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 529d9f4..0cb65d0 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -41,6 +41,7 @@
 	   * Mackie(Loud) U.420/U.420d
 	   * TASCAM FireOne
 	   * Stanton Controllers & Systems 1 Deck/Mixer
+	   * APOGEE duet FireWire
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-oxfw.
diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
index 54cdd4f..ac20acf 100644
--- a/sound/firewire/amdtp-stream-trace.h
+++ b/sound/firewire/amdtp-stream-trace.h
@@ -131,7 +131,7 @@
 		__entry->index = index;
 	),
 	TP_printk(
-		"%02u %04u %04x %04x %02d %03u %3u %3u %02u %01u %02u",
+		"%02u %04u %04x %04x %02d %03u %02u %03u %02u %01u %02u",
 		__entry->second,
 		__entry->cycle,
 		__entry->src,
@@ -169,7 +169,7 @@
 		__entry->dest = fw_parent_device(s->unit)->node_id;
 		__entry->payload_quadlets = payload_length / 4;
 		__entry->data_blocks = data_blocks,
-		__entry->data_blocks = s->data_block_counter,
+		__entry->data_block_counter = s->data_block_counter,
 		__entry->packet_index = s->packet_index;
 		__entry->irq = !!in_interrupt();
 		__entry->index = index;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index cb9acfe..293933f 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -629,15 +629,17 @@
 }
 
 static int handle_in_packet_without_header(struct amdtp_stream *s,
-			unsigned int payload_quadlets, unsigned int cycle,
+			unsigned int payload_length, unsigned int cycle,
 			unsigned int index)
 {
 	__be32 *buffer;
+	unsigned int payload_quadlets;
 	unsigned int data_blocks;
 	struct snd_pcm_substream *pcm;
 	unsigned int pcm_frames;
 
 	buffer = s->buffer.packets[s->packet_index].buffer;
+	payload_quadlets = payload_length / 4;
 	data_blocks = payload_quadlets / s->data_block_quadlets;
 
 	trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 9367635..5636e89 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -434,7 +434,7 @@
 	/* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */
 	SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal),
 	/* Apogee Electronics, Ensemble */
-	SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal),
+	SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
 	/* ESI, Quatafire610 */
 	SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
 	/* AcousticReality, eARMasterOne */
@@ -474,7 +474,19 @@
 	/* Focusrite, SaffirePro 26 I/O */
 	SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
 	/* Focusrite, SaffirePro 10 I/O */
-	SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec),
+	{
+		// The combination of vendor_id and model_id is the same as
+		// the one of Liquid Saffire 56.
+		.match_flags	= IEEE1394_MATCH_VENDOR_ID |
+				  IEEE1394_MATCH_MODEL_ID |
+				  IEEE1394_MATCH_SPECIFIER_ID |
+				  IEEE1394_MATCH_VERSION,
+		.vendor_id	= VEN_FOCUSRITE,
+		.model_id	= 0x000006,
+		.specifier_id	= 0x00a02d,
+		.version	= 0x010001,
+		.driver_data	= (kernel_ulong_t)&saffirepro_10_spec,
+	},
 	/* Focusrite, Saffire(no label and LE) */
 	SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
 			    &saffire_spec),
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index 654a503..4d19117 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -152,7 +152,7 @@
 	if (reg == NULL)
 		return -ENOMEM;
 
-	if (enable) {
+	if (!enable) {
 		/*
 		 * Each quadlet is corresponding to data channels in a data
 		 * blocks in reverse order. Precisely, quadlets for available
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
index f0555a2..6c9b743 100644
--- a/sound/firewire/motu/amdtp-motu.c
+++ b/sound/firewire/motu/amdtp-motu.c
@@ -136,7 +136,9 @@
 		byte = (u8 *)buffer + p->pcm_byte_offset;
 
 		for (c = 0; c < channels; ++c) {
-			*dst = (byte[0] << 24) | (byte[1] << 16) | byte[2];
+			*dst = (byte[0] << 24) |
+			       (byte[1] << 16) |
+			       (byte[2] << 8);
 			byte += 3;
 			dst++;
 		}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 2ea8be6..5f82a37 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -20,6 +20,7 @@
 #define VENDOR_LACIE		0x00d04b
 #define VENDOR_TASCAM		0x00022e
 #define OUI_STANTON		0x001260
+#define OUI_APOGEE		0x0003db
 
 #define MODEL_SATELLITE		0x00200f
 
@@ -436,6 +437,13 @@
 		.vendor_id	= OUI_STANTON,
 		.model_id	= 0x002000,
 	},
+	// APOGEE, duet FireWire
+	{
+		.match_flags	= IEEE1394_MATCH_VENDOR_ID |
+				  IEEE1394_MATCH_MODEL_ID,
+		.vendor_id	= OUI_APOGEE,
+		.model_id	= 0x01dddd,
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(ieee1394, oxfw_id_table);
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 617ff1a..27eb027 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -144,9 +144,9 @@
 		return -ENODEV;
 	if (!acomp->ops) {
 		request_module("i915");
-		/* 10s timeout */
+		/* 60s timeout */
 		wait_for_completion_timeout(&bind_complete,
-					    msecs_to_jiffies(10 * 1000));
+					    msecs_to_jiffies(60 * 1000));
 	}
 	if (!acomp->ops) {
 		dev_info(bus->dev, "couldn't bind with audio component\n");
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 598d140..5fc497c 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -903,6 +903,9 @@
 	struct dsp_spos_instance * ins = chip->dsp_spos_instance;
 	int i;
 
+	if (!ins)
+		return 0;
+
 	snd_info_free_entry(ins->proc_sym_info_entry);
 	ins->proc_sym_info_entry = NULL;
 
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 6ebe817..1f25e6d 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/moduleparam.h>
+#include <linux/nospec.h>
 
 #include <sound/core.h>
 #include <sound/tlv.h>
@@ -1026,6 +1027,8 @@
 
 	if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
 		return -EINVAL;
+	ipcm->substream = array_index_nospec(ipcm->substream,
+					     EMU10K1_FX8010_PCM_COUNT);
 	if (ipcm->channels > 32)
 		return -EINVAL;
 	pcm = &emu->fx8010.pcm[ipcm->substream];
@@ -1072,6 +1075,8 @@
 
 	if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
 		return -EINVAL;
+	ipcm->substream = array_index_nospec(ipcm->substream,
+					     EMU10K1_FX8010_PCM_COUNT);
 	pcm = &emu->fx8010.pcm[ipcm->substream];
 	mutex_lock(&emu->fx8010.lock);
 	spin_lock_irq(&emu->reg_lock);
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index d361bb7..8db1890 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -109,7 +109,8 @@
 	err = snd_hda_codec_build_controls(codec);
 	if (err < 0)
 		goto error_module;
-	if (codec->card->registered) {
+	/* only register after the bus probe finished; otherwise it's racy */
+	if (!codec->bus->bus_probing && codec->card->registered) {
 		err = snd_card_register(codec->card);
 		if (err < 0)
 			goto error_module;
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 0d98bb9..acacc19 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -68,6 +68,7 @@
 	unsigned int response_reset:1;	/* controller was reset */
 	unsigned int in_reset:1;	/* during reset operation */
 	unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
+	unsigned int bus_probing:1;	/* during probing process */
 
 	int primary_dig_out_type;	/* primary digital out PCM type */
 	unsigned int mixer_assigned;	/* codec addr for mixer name */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1ddeebc..1bb7613 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2315,6 +2315,7 @@
 	int dev = chip->dev_index;
 	int err;
 
+	to_hda_bus(bus)->bus_probing = 1;
 	hda->probe_continued = 1;
 
 	/* bind with i915 if needed */
@@ -2410,6 +2411,7 @@
 	if (err < 0)
 		hda->init_failed = 1;
 	complete_all(&hda->probe_wait);
+	to_hda_bus(bus)->bus_probing = 0;
 	return err;
 }
 
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 0621920..e85fb04 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -249,10 +249,12 @@
 	struct snd_card *card = dev_get_drvdata(dev);
 	struct azx *chip = card->private_data;
 	struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
+	struct hdac_bus *bus = azx_bus(chip);
 
 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 
 	azx_stop_chip(chip);
+	synchronize_irq(bus->irq);
 	azx_enter_link_reset(chip);
 	hda_tegra_disable_clocks(hda);
 
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 3c5f2a6..3cbd211 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -923,6 +923,8 @@
 	SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+	SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+	SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
@@ -930,9 +932,13 @@
 	SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
 	SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
 	SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
+	SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
 	SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8a3d069..8772931 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -117,6 +117,8 @@
 	int codec_variant;	/* flag for other variants */
 	unsigned int has_alc5505_dsp:1;
 	unsigned int no_depop_delay:1;
+	unsigned int done_hp_init:1;
+	unsigned int no_shutup_pins:1;
 
 	/* for PLL fix */
 	hda_nid_t pll_nid;
@@ -475,6 +477,14 @@
 		set_eapd(codec, *p, on);
 }
 
+static void alc_shutup_pins(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+
+	if (!spec->no_shutup_pins)
+		snd_hda_shutup_pins(codec);
+}
+
 /* generic shutup callback;
  * just turning off EAPD and a little pause for avoiding pop-noise
  */
@@ -485,7 +495,7 @@
 	alc_auto_setup_eapd(codec, false);
 	if (!spec->no_depop_delay)
 		msleep(200);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 }
 
 /* generic EAPD initialization */
@@ -514,6 +524,15 @@
 	}
 }
 
+/* get a primary headphone pin if available */
+static hda_nid_t alc_get_hp_pin(struct alc_spec *spec)
+{
+	if (spec->gen.autocfg.hp_pins[0])
+		return spec->gen.autocfg.hp_pins[0];
+	if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
+		return spec->gen.autocfg.line_out_pins[0];
+	return 0;
+}
 
 /*
  * Realtek SSID verification
@@ -724,9 +743,7 @@
 	 * 15   : 1 --> enable the function "Mute internal speaker
 	 *	        when the external headphone out jack is plugged"
 	 */
-	if (!spec->gen.autocfg.hp_pins[0] &&
-	    !(spec->gen.autocfg.line_out_pins[0] &&
-	      spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
+	if (!alc_get_hp_pin(spec)) {
 		hda_nid_t nid;
 		tmp = (ass >> 11) & 0x3;	/* HP to chassis */
 		nid = ports[tmp];
@@ -806,7 +823,7 @@
 	if (spec && spec->shutup)
 		spec->shutup(codec);
 	else
-		snd_hda_shutup_pins(codec);
+		alc_shutup_pins(codec);
 }
 
 static void alc_reboot_notify(struct hda_codec *codec)
@@ -1847,6 +1864,8 @@
 	ALC887_FIXUP_BASS_CHMAP,
 	ALC1220_FIXUP_GB_DUAL_CODECS,
 	ALC1220_FIXUP_CLEVO_P950,
+	ALC1220_FIXUP_SYSTEM76_ORYP5,
+	ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2048,6 +2067,17 @@
 	snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
 }
 
+static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
+				const struct hda_fixup *fix, int action);
+
+static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
+				     const struct hda_fixup *fix,
+				     int action)
+{
+	alc1220_fixup_clevo_p950(codec, fix, action);
+	alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
+}
+
 static const struct hda_fixup alc882_fixups[] = {
 	[ALC882_FIXUP_ABIT_AW9D_MAX] = {
 		.type = HDA_FIXUP_PINS,
@@ -2292,6 +2322,19 @@
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc1220_fixup_clevo_p950,
 	},
+	[ALC1220_FIXUP_SYSTEM76_ORYP5] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc1220_fixup_system76_oryp5,
+	},
+	[ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+			{}
+		},
+		.chained = true,
+		.chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
+	},
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2368,6 +2411,8 @@
 	SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
 	SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
 	SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+	SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
+	SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -2914,7 +2959,7 @@
 			(alc_get_coef0(codec) & 0x00ff) == 0x018) {
 		msleep(150);
 	}
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 }
 
 static struct coef_fw alc282_coefs[] = {
@@ -2958,7 +3003,7 @@
 static void alc282_init(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 	bool hp_pin_sense;
 	int coef78;
 
@@ -2995,7 +3040,7 @@
 static void alc282_shutup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 	bool hp_pin_sense;
 	int coef78;
 
@@ -3017,14 +3062,15 @@
 	if (hp_pin_sense)
 		msleep(85);
 
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+	if (!spec->no_shutup_pins)
+		snd_hda_codec_write(codec, hp_pin, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
 
 	if (hp_pin_sense)
 		msleep(100);
 
 	alc_auto_setup_eapd(codec, false);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 	alc_write_coef_idx(codec, 0x78, coef78);
 }
 
@@ -3073,14 +3119,9 @@
 static void alc283_init(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 	bool hp_pin_sense;
 
-	if (!spec->gen.autocfg.hp_outs) {
-		if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
-			hp_pin = spec->gen.autocfg.line_out_pins[0];
-	}
-
 	alc283_restore_default_value(codec);
 
 	if (!hp_pin)
@@ -3114,14 +3155,9 @@
 static void alc283_shutup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 	bool hp_pin_sense;
 
-	if (!spec->gen.autocfg.hp_outs) {
-		if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
-			hp_pin = spec->gen.autocfg.line_out_pins[0];
-	}
-
 	if (!hp_pin) {
 		alc269_shutup(codec);
 		return;
@@ -3140,22 +3176,23 @@
 	if (hp_pin_sense)
 		msleep(100);
 
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+	if (!spec->no_shutup_pins)
+		snd_hda_codec_write(codec, hp_pin, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
 
 	alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
 	if (hp_pin_sense)
 		msleep(100);
 	alc_auto_setup_eapd(codec, false);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 	alc_write_coef_idx(codec, 0x43, 0x9614);
 }
 
 static void alc256_init(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 	bool hp_pin_sense;
 
 	if (!hp_pin)
@@ -3191,7 +3228,7 @@
 static void alc256_shutup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 	bool hp_pin_sense;
 
 	if (!hp_pin) {
@@ -3214,20 +3251,21 @@
 	/* NOTE: call this before clearing the pin, otherwise codec stalls */
 	alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+	if (!spec->no_shutup_pins)
+		snd_hda_codec_write(codec, hp_pin, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
 
 	if (hp_pin_sense)
 		msleep(100);
 
 	alc_auto_setup_eapd(codec, false);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 }
 
 static void alc225_init(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 	bool hp1_pin_sense, hp2_pin_sense;
 
 	if (!hp_pin)
@@ -3270,7 +3308,7 @@
 static void alc225_shutup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 	bool hp1_pin_sense, hp2_pin_sense;
 
 	if (!hp_pin) {
@@ -3308,13 +3346,13 @@
 		msleep(100);
 
 	alc_auto_setup_eapd(codec, false);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 }
 
 static void alc_default_init(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 	bool hp_pin_sense;
 
 	if (!hp_pin)
@@ -3343,7 +3381,7 @@
 static void alc_default_shutup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 	bool hp_pin_sense;
 
 	if (!hp_pin) {
@@ -3362,14 +3400,58 @@
 	if (hp_pin_sense)
 		msleep(85);
 
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+	if (!spec->no_shutup_pins)
+		snd_hda_codec_write(codec, hp_pin, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
 
 	if (hp_pin_sense)
 		msleep(100);
 
 	alc_auto_setup_eapd(codec, false);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
+}
+
+static void alc294_hp_init(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
+	int i, val;
+
+	if (!hp_pin)
+		return;
+
+	snd_hda_codec_write(codec, hp_pin, 0,
+			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+	msleep(100);
+
+	if (!spec->no_shutup_pins)
+		snd_hda_codec_write(codec, hp_pin, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+	alc_update_coef_idx(codec, 0x6f, 0x000f, 0); /* Set HP depop to manual mode */
+	alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+	/* Wait for the depop procedure to finish */
+	val = alc_read_coefex_idx(codec, 0x58, 0x01);
+	for (i = 0; i < 20 && val & 0x0080; i++) {
+		msleep(50);
+		val = alc_read_coefex_idx(codec, 0x58, 0x01);
+	}
+	/* Set HP depop to auto mode */
+	alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+	msleep(50);
+}
+
+static void alc294_init(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+
+	if (!spec->done_hp_init) {
+		alc294_hp_init(codec);
+		spec->done_hp_init = true;
+	}
+	alc_default_init(codec);
 }
 
 static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
@@ -4102,6 +4184,7 @@
 	case 0x10ec0295:
 	case 0x10ec0289:
 	case 0x10ec0299:
+		alc_process_coef_fw(codec, alc225_pre_hsmode);
 		alc_process_coef_fw(codec, coef0225);
 		break;
 	case 0x10ec0867:
@@ -4736,7 +4819,7 @@
 	struct alc_spec *spec = codec->spec;
 
 	hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	hda_nid_t hp_pin = alc_get_hp_pin(spec);
 
 	int new_headset_mode;
 
@@ -4938,16 +5021,12 @@
 	}
 }
 
-static void alc_no_shutup(struct hda_codec *codec)
-{
-}
-
 static void alc_fixup_no_shutup(struct hda_codec *codec,
 				const struct hda_fixup *fix, int action)
 {
 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
 		struct alc_spec *spec = codec->spec;
-		spec->shutup = alc_no_shutup;
+		spec->no_shutup_pins = 1;
 	}
 }
 
@@ -5015,7 +5094,7 @@
 static void alc_shutup_dell_xps13(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	int hp_pin = spec->gen.autocfg.hp_pins[0];
+	int hp_pin = alc_get_hp_pin(spec);
 
 	/* Prevent pop noises when headphones are plugged in */
 	snd_hda_codec_write(codec, hp_pin, 0,
@@ -5108,7 +5187,7 @@
 
 	if (action == HDA_FIXUP_ACT_PROBE) {
 		int mic_pin = find_ext_mic_pin(codec);
-		int hp_pin = spec->gen.autocfg.hp_pins[0];
+		int hp_pin = alc_get_hp_pin(spec);
 
 		if (snd_BUG_ON(!mic_pin || !hp_pin))
 			return;
@@ -5380,6 +5459,13 @@
 	snd_hda_override_wcaps(codec, 0x03, 0);
 }
 
+static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
+				  const struct hda_fixup *fix, int action)
+{
+	if (action == HDA_FIXUP_ACT_PRE_PROBE)
+		snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+}
+
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
@@ -5492,6 +5578,7 @@
 	ALC293_FIXUP_LENOVO_SPK_NOISE,
 	ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
 	ALC255_FIXUP_DELL_SPK_NOISE,
+	ALC225_FIXUP_DISABLE_MIC_VREF,
 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
 	ALC295_FIXUP_DISABLE_DAC3,
 	ALC280_FIXUP_HP_HEADSET_MIC,
@@ -5523,6 +5610,9 @@
 	ALC294_FIXUP_ASUS_MIC,
 	ALC294_FIXUP_ASUS_HEADSET_MIC,
 	ALC294_FIXUP_ASUS_SPK,
+	ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+	ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
+	ALC255_FIXUP_ACER_HEADSET_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6191,6 +6281,12 @@
 		.chained = true,
 		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
 	},
+	[ALC225_FIXUP_DISABLE_MIC_VREF] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_disable_mic_vref,
+		.chained = true,
+		.chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+	},
 	[ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
 		.type = HDA_FIXUP_VERBS,
 		.v.verbs = (const struct hda_verb[]) {
@@ -6200,7 +6296,7 @@
 			{}
 		},
 		.chained = true,
-		.chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+		.chain_id = ALC225_FIXUP_DISABLE_MIC_VREF
 	},
 	[ALC280_FIXUP_HP_HEADSET_MIC] = {
 		.type = HDA_FIXUP_FUNC,
@@ -6424,7 +6520,7 @@
 	[ALC294_FIXUP_ASUS_HEADSET_MIC] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
-			{ 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
+			{ 0x19, 0x01a1103c }, /* use as headset mic */
 			{ }
 		},
 		.chained = true,
@@ -6441,6 +6537,36 @@
 		.chained = true,
 		.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
 	},
+	[ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+	},
+	[ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
+		.type = HDA_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			/* Disable PCBEEP-IN passthrough */
+			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
+			{ 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
+	},
+	[ALC255_FIXUP_ACER_HEADSET_MIC] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x03a11130 },
+			{ 0x1a, 0x90a60140 }, /* use as internal mic */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6460,6 +6586,7 @@
 	SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
 	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
 	SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -6491,6 +6618,7 @@
 	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+	SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
@@ -6503,6 +6631,7 @@
 	SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
+	SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6564,15 +6693,18 @@
 	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+	SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
 	SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
-	SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
-	SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
 	SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6583,7 +6715,6 @@
 	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
-	SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -6617,6 +6748,7 @@
 	SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+	SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -6825,7 +6957,7 @@
 	{.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
 	{.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
 	{.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
-	{.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"},
+	{.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
 	{.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
 	{.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
 	{.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
@@ -7119,7 +7251,7 @@
 		{0x12, 0x90a60130},
 		{0x19, 0x03a11020},
 		{0x21, 0x0321101f}),
-	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
 		{0x12, 0x90a60130},
 		{0x14, 0x90170110},
 		{0x19, 0x04a11040},
@@ -7198,6 +7330,10 @@
 	SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
 		{0x12, 0x90a60130},
 		{0x17, 0x90170110},
+		{0x21, 0x03211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+		{0x12, 0x90a60130},
+		{0x17, 0x90170110},
 		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC295_STANDARD_PINS,
@@ -7271,37 +7407,6 @@
 	alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
 
-static void alc294_hp_init(struct hda_codec *codec)
-{
-	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
-	int i, val;
-
-	if (!hp_pin)
-		return;
-
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
-
-	msleep(100);
-
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
-
-	alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
-	alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
-
-	/* Wait for depop procedure finish  */
-	val = alc_read_coefex_idx(codec, 0x58, 0x01);
-	for (i = 0; i < 20 && val & 0x0080; i++) {
-		msleep(50);
-		val = alc_read_coefex_idx(codec, 0x58, 0x01);
-	}
-	/* Set HP depop to auto mode */
-	alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
-	msleep(50);
-}
-
 /*
  */
 static int patch_alc269(struct hda_codec *codec)
@@ -7427,7 +7532,7 @@
 		spec->codec_variant = ALC269_TYPE_ALC294;
 		spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
-		alc294_hp_init(codec);
+		spec->init_hook = alc294_init;
 		break;
 	case 0x10ec0300:
 		spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7439,7 +7544,7 @@
 		spec->codec_variant = ALC269_TYPE_ALC700;
 		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
-		alc294_hp_init(codec);
+		spec->init_hook = alc294_init;
 		break;
 
 	}
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 1bff4b1..ba99ff0 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -30,6 +30,7 @@
 #include <linux/math64.h>
 #include <linux/vmalloc.h>
 #include <linux/io.h>
+#include <linux/nospec.h>
 
 #include <sound/core.h>
 #include <sound/control.h>
@@ -4092,15 +4093,16 @@
 				    struct snd_pcm_channel_info *info)
 {
 	struct hdsp *hdsp = snd_pcm_substream_chip(substream);
-	int mapped_channel;
+	unsigned int channel = info->channel;
 
-	if (snd_BUG_ON(info->channel >= hdsp->max_channels))
+	if (snd_BUG_ON(channel >= hdsp->max_channels))
+		return -EINVAL;
+	channel = array_index_nospec(channel, hdsp->max_channels);
+
+	if (hdsp->channel_map[channel] < 0)
 		return -EINVAL;
 
-	if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
-		return -EINVAL;
-
-	info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
+	info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
 	info->first = 0;
 	info->step = 32;
 	return 0;
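
The hdsp change above is the canonical Spectre-v1 hardening recipe: do the
architectural bounds check first, then clamp the index with
array_index_nospec() so a mispredicted branch cannot speculatively index
past the array. A minimal kernel-style sketch of the pattern (the function
and names are illustrative, not from the driver):

	#include <linux/nospec.h>

	/* Illustrative: 'table' and 'nr_entries' are made-up names. */
	static int bounded_lookup(const int *table, unsigned int nr_entries,
				  unsigned int idx, int *out)
	{
		if (idx >= nr_entries)	/* architectural bounds check */
			return -EINVAL;
		/* clamp so speculation cannot run past nr_entries */
		idx = array_index_nospec(idx, nr_entries);
		*out = table[idx];
		return 0;
	}

The same recipe reappears in the emux_hwdep.c hunk further down, applied to
both info.mode and info.port.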
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 3135e9e..7f376b6 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -1147,18 +1147,21 @@
 	struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
 								    DRV_NAME);
 	struct audio_drv_data *adata = dev_get_drvdata(component->dev);
+	struct device *parent = component->dev->parent;
 
 	switch (adata->asic_type) {
 	case CHIP_STONEY:
 		ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
 							    SNDRV_DMA_TYPE_DEV,
-							    NULL, ST_MIN_BUFFER,
+							    parent,
+							    ST_MIN_BUFFER,
 							    ST_MAX_BUFFER);
 		break;
 	default:
 		ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
 							    SNDRV_DMA_TYPE_DEV,
-							    NULL, MIN_BUFFER,
+							    parent,
+							    MIN_BUFFER,
 							    MAX_BUFFER);
 		break;
 	}
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index d00734d..e5b6769 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -795,6 +795,8 @@
 	if (hcd->spdif)
 		hcp->daidrv[i] = hdmi_spdif_dai;
 
+	dev_set_drvdata(dev, hcp);
+
 	ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv,
 				     dai_count);
 	if (ret) {
@@ -802,8 +804,6 @@
 			__func__, ret);
 		return ret;
 	}
-
-	dev_set_drvdata(dev, hcp);
 	return 0;
 }
 
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 3356c91..e3de1ff 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -688,15 +688,22 @@
 }
 EXPORT_SYMBOL_GPL(pcm3168a_probe);
 
-void pcm3168a_remove(struct device *dev)
+static void pcm3168a_disable(struct device *dev)
 {
 	struct pcm3168a_priv *pcm3168a = dev_get_drvdata(dev);
 
-	pm_runtime_disable(dev);
 	regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
-				pcm3168a->supplies);
+			       pcm3168a->supplies);
 	clk_disable_unprepare(pcm3168a->scki);
 }
+
+void pcm3168a_remove(struct device *dev)
+{
+	pm_runtime_disable(dev);
+#ifndef CONFIG_PM
+	pcm3168a_disable(dev);
+#endif
+}
 EXPORT_SYMBOL_GPL(pcm3168a_remove);
 
 #ifdef CONFIG_PM
@@ -751,10 +758,7 @@
 
 	regcache_cache_only(pcm3168a->regmap, true);
 
-	regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
-			       pcm3168a->supplies);
-
-	clk_disable_unprepare(pcm3168a->scki);
+	pcm3168a_disable(dev);
 
 	return 0;
 }
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
index d88e673..18a931c 100644
--- a/sound/soc/codecs/rt274.c
+++ b/sound/soc/codecs/rt274.c
@@ -1126,8 +1126,11 @@
 		return ret;
 	}
 
-	regmap_read(rt274->regmap,
+	ret = regmap_read(rt274->regmap,
 		RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
+	if (ret)
+		return ret;
+
 	if (val != RT274_VENDOR_ID) {
 		dev_err(&i2c->dev,
 			"Device with ID register %#x is not rt274\n", val);
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 6478d10..cdb1f40 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -278,6 +278,8 @@
 
 	rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp),
 			GFP_KERNEL);
+	if (!rt5514_dsp)
+		return -ENOMEM;
 
 	rt5514_dsp->dev = &rt5514_spi->dev;
 	mutex_init(&rt5514_dsp->dma_lock);
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index 8068140..cdd659f 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -849,18 +849,18 @@
 #define RT5682_SCLK_SRC_PLL2			(0x2 << 13)
 #define RT5682_SCLK_SRC_SDW			(0x3 << 13)
 #define RT5682_SCLK_SRC_RCCLK			(0x4 << 13)
-#define RT5682_PLL1_SRC_MASK			(0x3 << 10)
-#define RT5682_PLL1_SRC_SFT			10
-#define RT5682_PLL1_SRC_MCLK			(0x0 << 10)
-#define RT5682_PLL1_SRC_BCLK1			(0x1 << 10)
-#define RT5682_PLL1_SRC_SDW			(0x2 << 10)
-#define RT5682_PLL1_SRC_RC			(0x3 << 10)
-#define RT5682_PLL2_SRC_MASK			(0x3 << 8)
-#define RT5682_PLL2_SRC_SFT			8
-#define RT5682_PLL2_SRC_MCLK			(0x0 << 8)
-#define RT5682_PLL2_SRC_BCLK1			(0x1 << 8)
-#define RT5682_PLL2_SRC_SDW			(0x2 << 8)
-#define RT5682_PLL2_SRC_RC			(0x3 << 8)
+#define RT5682_PLL2_SRC_MASK			(0x3 << 10)
+#define RT5682_PLL2_SRC_SFT			10
+#define RT5682_PLL2_SRC_MCLK			(0x0 << 10)
+#define RT5682_PLL2_SRC_BCLK1			(0x1 << 10)
+#define RT5682_PLL2_SRC_SDW			(0x2 << 10)
+#define RT5682_PLL2_SRC_RC			(0x3 << 10)
+#define RT5682_PLL1_SRC_MASK			(0x3 << 8)
+#define RT5682_PLL1_SRC_SFT			8
+#define RT5682_PLL1_SRC_MCLK			(0x0 << 8)
+#define RT5682_PLL1_SRC_BCLK1			(0x1 << 8)
+#define RT5682_PLL1_SRC_SDW			(0x2 << 8)
+#define RT5682_PLL1_SRC_RC			(0x3 << 8)
 
 
 
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index e2b5a11..f03195d 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -822,6 +822,10 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
+		/* Initial cold start */
+		if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
+			break;
+
 		/* Switch off BCLK_N Divider */
 		snd_soc_component_update_bits(component, AIC32X4_BCLKN,
 				    AIC32X4_BCLKEN, 0);
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 6ec19fb..2e75b5b 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -221,7 +221,7 @@
 
 config SND_SOC_EUKREA_TLV320
 	tristate "Eukrea TLV320"
-	depends on ARCH_MXC && I2C
+	depends on ARCH_MXC && !ARM64 && I2C
 	select SND_SOC_TLV320AIC23_I2C
 	select SND_SOC_IMX_AUDMUX
 	select SND_SOC_IMX_SSI
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 392d5ee..99e07b0 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -86,49 +86,49 @@
 	if (!buf)
 		return -ENOMEM;
 
-	ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
+	ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
 		       pdcr, ptcr);
 
 	if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR)
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				"TxFS output from %s, ",
 				audmux_port_string((ptcr >> 27) & 0x7));
 	else
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				"TxFS input, ");
 
 	if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR)
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				"TxClk output from %s",
 				audmux_port_string((ptcr >> 22) & 0x7));
 	else
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				"TxClk input");
 
-	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
 
 	if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) {
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				"Port is symmetric");
 	} else {
 		if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR)
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 					"RxFS output from %s, ",
 					audmux_port_string((ptcr >> 17) & 0x7));
 		else
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 					"RxFS input, ");
 
 		if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR)
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 					"RxClk output from %s",
 					audmux_port_string((ptcr >> 12) & 0x7));
 		else
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 					"RxClk input");
 	}
 
-	ret += snprintf(buf + ret, PAGE_SIZE - ret,
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 			"\nData received from %s\n",
 			audmux_port_string((pdcr >> 13) & 0x7));
 
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 6c36da5..e662400 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -399,7 +399,13 @@
 				struct snd_pcm_hw_params *params,
 				struct snd_soc_dai *dai)
 {
-	snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+	int ret;
+
+	ret =
+		snd_pcm_lib_malloc_pages(substream,
+				params_buffer_bytes(params));
+	if (ret)
+		return ret;
 	memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
 	return 0;
 }
diff --git a/sound/soc/intel/atom/sst/sst_loader.c b/sound/soc/intel/atom/sst/sst_loader.c
index 27413eb..b8c4567 100644
--- a/sound/soc/intel/atom/sst/sst_loader.c
+++ b/sound/soc/intel/atom/sst/sst_loader.c
@@ -354,14 +354,14 @@
 	const struct firmware *fw;
 
 	retval = request_firmware(&fw, sst->firmware_name, sst->dev);
-	if (fw == NULL) {
-		dev_err(sst->dev, "fw is returning as null\n");
-		return -EINVAL;
-	}
 	if (retval) {
 		dev_err(sst->dev, "request fw failed %d\n", retval);
 		return retval;
 	}
+	if (fw == NULL) {
+		dev_err(sst->dev, "fw is returning as null\n");
+		return -EINVAL;
+	}
 	mutex_lock(&sst->sst_lock);
 	retval = sst_cache_and_parse_fw(sst, fw);
 	mutex_unlock(&sst->sst_lock);
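
The reordering above matters because request_firmware() does not guarantee
the state of the fw pointer when it fails; the return code must be checked
before the pointer is inspected. The canonical sequence, as a sketch
(assuming 'name' and 'dev' are in scope):

	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)		/* check the error code first ... */
		return ret;
	if (!fw)		/* ... and only then the pointer */
		return -EINVAL;
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);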
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 7b0ee67..78ec97b 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -192,7 +192,7 @@
 		.stream_name = "Loopback",
 		.cpu_dai_name = "Loopback Pin",
 		.platform_name = "haswell-pcm-audio",
-		.dynamic = 0,
+		.dynamic = 1,
 		.codec_name = "snd-soc-dummy",
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 9d9f6e4..08a5152 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -390,6 +390,20 @@
 
 static const struct dmi_system_id cht_max98090_quirk_table[] = {
 	{
+		/* Clapper model Chromebook */
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Clapper"),
+		},
+		.driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
+	},
+	{
+		/* Gnawty model Chromebook (Acer Chromebook CB3-111) */
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Gnawty"),
+		},
+		.driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
+	},
+	{
 		/* Swanky model Chromebook (Toshiba Chromebook 2) */
 		.matches = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Swanky"),
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index eab1f43..a402298 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -146,7 +146,7 @@
 		.stream_name = "Loopback",
 		.cpu_dai_name = "Loopback Pin",
 		.platform_name = "haswell-pcm-audio",
-		.dynamic = 0,
+		.dynamic = 1,
 		.codec_name = "snd-soc-dummy",
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f8d35c5..252ff3f 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2039,19 +2039,19 @@
 		out = is_connected_output_ep(w, NULL, NULL);
 	}
 
-	ret = snprintf(buf, PAGE_SIZE, "%s: %s%s  in %d out %d",
+	ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s  in %d out %d",
 		       w->name, w->power ? "On" : "Off",
 		       w->force ? " (forced)" : "", in, out);
 
 	if (w->reg >= 0)
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				" - R%d(0x%x) mask 0x%x",
 				w->reg, w->reg, w->mask << w->shift);
 
-	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
 
 	if (w->sname)
-		ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
 				w->sname,
 				w->active ? "active" : "inactive");
 
@@ -2064,7 +2064,7 @@
 			if (!p->connect)
 				continue;
 
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 					" %s  \"%s\" \"%s\"\n",
 					(rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
 					p->name ? p->name : "static",
diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
index e557946..d9fcae0 100644
--- a/sound/synth/emux/emux_hwdep.c
+++ b/sound/synth/emux/emux_hwdep.c
@@ -22,9 +22,9 @@
 #include <sound/core.h>
 #include <sound/hwdep.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 #include "emux_voice.h"
 
-
 #define TMP_CLIENT_ID	0x1001
 
 /*
@@ -66,13 +66,16 @@
 		return -EFAULT;
 	if (info.mode < 0 || info.mode >= EMUX_MD_END)
 		return -EINVAL;
+	info.mode = array_index_nospec(info.mode, EMUX_MD_END);
 
 	if (info.port < 0) {
 		for (i = 0; i < emu->num_ports; i++)
 			emu->portptrs[i]->ctrls[info.mode] = info.value;
 	} else {
-		if (info.port < emu->num_ports)
+		if (info.port < emu->num_ports) {
+			info.port = array_index_nospec(info.port, emu->num_ports);
 			emu->portptrs[info.port]->ctrls[info.mode] = info.value;
+		}
 	}
 	return 0;
 }
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 8ecf5bb..e5b52a6 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -324,7 +324,7 @@
 		h1 = snd_usb_find_csint_desc(host_iface->extra,
 							 host_iface->extralen,
 							 NULL, UAC_HEADER);
-		if (!h1) {
+		if (!h1 || h1->bLength < sizeof(*h1)) {
 			dev_err(&dev->dev, "cannot find UAC_HEADER\n");
 			return -EINVAL;
 		}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d7389ed..2ea0519 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -753,8 +753,9 @@
 				       struct uac_mixer_unit_descriptor *desc)
 {
 	int mu_channels;
+	void *c;
 
-	if (desc->bLength < 11)
+	if (desc->bLength < sizeof(*desc))
 		return -EINVAL;
 	if (!desc->bNrInPins)
 		return -EINVAL;
@@ -763,6 +764,8 @@
 	case UAC_VERSION_1:
 	case UAC_VERSION_2:
 	default:
+		if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
+			return 0; /* no bmControls -> skip */
 		mu_channels = uac_mixer_unit_bNrChannels(desc);
 		break;
 	case UAC_VERSION_3:
@@ -772,7 +775,11 @@
 	}
 
 	if (!mu_channels)
-		return -EINVAL;
+		return 0;
+
+	c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
+	if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
+		return 0; /* no bmControls -> skip */
 
 	return mu_channels;
 }
@@ -944,7 +951,7 @@
 				struct uac_mixer_unit_descriptor *d = p1;
 
 				err = uac_mixer_unit_get_channels(state, d);
-				if (err < 0)
+				if (err <= 0)
 					return err;
 
 				term->channels = err;
@@ -2070,11 +2077,15 @@
 
 	if (state->mixer->protocol == UAC_VERSION_2) {
 		struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
+		if (d_v2->bLength < sizeof(*d_v2))
+			return -EINVAL;
 		control = UAC2_TE_CONNECTOR;
 		term_id = d_v2->bTerminalID;
 		bmctls = le16_to_cpu(d_v2->bmControls);
 	} else if (state->mixer->protocol == UAC_VERSION_3) {
 		struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
+		if (d_v3->bLength < sizeof(*d_v3))
+			return -EINVAL;
 		control = UAC3_TE_INSERTION;
 		term_id = d_v3->bTerminalID;
 		bmctls = le32_to_cpu(d_v3->bmControls);
@@ -2120,7 +2131,7 @@
 		if (err < 0)
 			continue;
 		/* no bmControls field (e.g. Maya44) -> ignore */
-		if (desc->bLength <= 10 + input_pins)
+		if (!num_outs)
 			continue;
 		err = check_input_term(state, desc->baSourceID[pin], &iterm);
 		if (err < 0)
@@ -2316,7 +2327,7 @@
 				char *name)
 {
 	struct uac_processing_unit_descriptor *desc = raw_desc;
-	int num_ins = desc->bNrInPins;
+	int num_ins;
 	struct usb_mixer_elem_info *cval;
 	struct snd_kcontrol *kctl;
 	int i, err, nameid, type, len;
@@ -2331,7 +2342,13 @@
 		0, NULL, default_value_info
 	};
 
-	if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
+	if (desc->bLength < 13) {
+		usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
+		return -EINVAL;
+	}
+
+	num_ins = desc->bNrInPins;
+	if (desc->bLength < 13 + num_ins ||
 	    desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
 		usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
 		return -EINVAL;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index afba778..a893f5a 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -377,6 +377,9 @@
 	return 0;
 }
 
+/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
+ * applies. Returns 1 if a quirk was found.
+ */
 static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
 					 struct usb_device *dev,
 					 struct usb_interface_descriptor *altsd,
@@ -447,7 +450,7 @@
 
 	subs->data_endpoint->sync_master = subs->sync_endpoint;
 
-	return 0;
+	return 1;
 }
 
 static int set_sync_endpoint(struct snd_usb_substream *subs,
@@ -486,6 +489,10 @@
 	if (err < 0)
 		return err;
 
+	/* endpoint set by quirk */
+	if (err > 0)
+		return 0;
+
 	if (altsd->bNumEndpoints < 2)
 		return 0;
 
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 1c73b9e..57c6209 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3326,6 +3326,9 @@
 					}
 				}
 			},
+			{
+				.ifnum = -1
+			},
 		}
 	}
 },
@@ -3374,6 +3377,9 @@
 					}
 				}
 			},
+			{
+				.ifnum = -1
+			},
 		}
 	}
 },
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6623caf..d71e019 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1373,6 +1373,7 @@
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 		break;
 
+	case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
 	case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
 	case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
 	case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
@@ -1447,6 +1448,7 @@
 	case 0x20b1:  /* XMOS based devices */
 	case 0x152a:  /* Thesycon devices */
 	case 0x25ce:  /* Mytek devices */
+	case 0x2ab6:  /* T+A devices */
 		if (fp->dsd_raw)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 		break;
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index b843015..f6ce6d5 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -601,12 +601,8 @@
 		csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
 
 	if (!csep || csep->bLength < 7 ||
-	    csep->bDescriptorSubtype != UAC_EP_GENERAL) {
-		usb_audio_warn(chip,
-			       "%u:%d : no or invalid class specific endpoint descriptor\n",
-			       iface_no, altsd->bAlternateSetting);
-		return 0;
-	}
+	    csep->bDescriptorSubtype != UAC_EP_GENERAL)
+		goto error;
 
 	if (protocol == UAC_VERSION_1) {
 		attributes = csep->bmAttributes;
@@ -614,6 +610,8 @@
 		struct uac2_iso_endpoint_descriptor *csep2 =
 			(struct uac2_iso_endpoint_descriptor *) csep;
 
+		if (csep2->bLength < sizeof(*csep2))
+			goto error;
 		attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
 
 		/* emulate the endpoint attributes of a v1 device */
@@ -623,12 +621,20 @@
 		struct uac3_iso_endpoint_descriptor *csep3 =
 			(struct uac3_iso_endpoint_descriptor *) csep;
 
+		if (csep3->bLength < sizeof(*csep3))
+			goto error;
 		/* emulate the endpoint attributes of a v1 device */
 		if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
 			attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
 	}
 
 	return attributes;
+
+ error:
+	usb_audio_warn(chip,
+		       "%u:%d : no or invalid class specific endpoint descriptor\n",
+		       iface_no, altsd->bAlternateSetting);
+	return 0;
 }
 
 /* find an input terminal descriptor (either UAC1 or UAC2) with the given
@@ -636,13 +642,17 @@
  */
 static void *
 snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
-					       int terminal_id)
+				       int terminal_id, bool uac23)
 {
 	struct uac2_input_terminal_descriptor *term = NULL;
+	size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) :
+		sizeof(struct uac_input_terminal_descriptor);
 
 	while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
 					       ctrl_iface->extralen,
 					       term, UAC_INPUT_TERMINAL))) {
+		if (term->bLength < minlen)
+			continue;
 		if (term->bTerminalID == terminal_id)
 			return term;
 	}
@@ -660,7 +670,8 @@
 	while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
 					       ctrl_iface->extralen,
 					       term, UAC_OUTPUT_TERMINAL))) {
-		if (term->bTerminalID == terminal_id)
+		if (term->bLength >= sizeof(*term) &&
+		    term->bTerminalID == terminal_id)
 			return term;
 	}
 
@@ -734,7 +745,8 @@
 		format = le16_to_cpu(as->wFormatTag); /* remember the format value */
 
 		iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
-							     as->bTerminalLink);
+							       as->bTerminalLink,
+							       false);
 		if (iterm) {
 			num_channels = iterm->bNrChannels;
 			chconfig = le16_to_cpu(iterm->wChannelConfig);
@@ -769,7 +781,8 @@
 		 * to extract the clock
 		 */
 		input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
-								    as->bTerminalLink);
+								    as->bTerminalLink,
+								    true);
 		if (input_term) {
 			clock = input_term->bCSourceID;
 			if (!chconfig && (num_channels == input_term->bNrChannels))
@@ -1003,7 +1016,8 @@
 	 * to extract the clock
 	 */
 	input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
-							    as->bTerminalLink);
+							    as->bTerminalLink,
+							    true);
 	if (input_term) {
 		clock = input_term->bCSourceID;
 		goto found_clock;
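
All of the stream.c (and mixer.c) hunks above share one theme:
class-specific descriptor blobs come straight from the device, so every
cast to a larger structure must first verify bLength. Because bLength is
the first byte of every USB descriptor, the guard can be expressed
generically (an illustrative sketch, not a helper from the tree):

	/* Return desc if it is at least 'need' bytes long, else NULL. */
	static void *validate_desc(void *desc, u8 need)
	{
		u8 *p = desc;

		/* bLength is byte 0 of every USB descriptor */
		if (!desc || p[0] < need)
			return NULL;	/* short descriptor from a buggy or hostile device */
		return desc;
	}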
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index ddbf62b..a942f1b 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -1229,13 +1229,13 @@
 {
 	struct uaudio_qmi_svc *svc = uaudio_svc;
 
-	uaudio_dbg("client node:%x\n", node);
 	if (svc->uaudio_svc_hdl != handle) {
 		uaudio_err("handle mismatch\n");
 		return;
 	}
 
 	if (svc->client_connected && svc->client_sq.sq_node == node) {
+		uaudio_dbg("client node:%x\n", node);
 		queue_work(svc->uaudio_wq, &svc->qmi_disconnect_work);
 		svc->client_sq.sq_node = 0;
 		svc->client_sq.sq_port = 0;
@@ -1249,7 +1249,6 @@
 {
 	struct uaudio_qmi_svc *svc = uaudio_svc;
 
-	uaudio_dbg("client node:%x port:%x\n", node, port);
 	if (svc->uaudio_svc_hdl != handle) {
 		uaudio_err("handle mismatch\n");
 		return;
@@ -1257,6 +1256,7 @@
 
 	if (svc->client_connected && svc->client_sq.sq_node == node &&
 			svc->client_sq.sq_port == port) {
+		uaudio_dbg("client node:%x port:%x\n", node, port);
 		queue_work(svc->uaudio_wq, &svc->qmi_disconnect_work);
 		svc->client_sq.sq_node = 0;
 		svc->client_sq.sq_port = 0;
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index 9f420d9..8cb504d 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -203,6 +203,8 @@
 	       "SWAP  %15s%15s%15s\n"
 	       "      %15llu%15llu%15llums\n"
 	       "RECLAIM  %12s%15s%15s\n"
+	       "      %15llu%15llu%15llums\n"
+	       "THRASHING%12s%15s%15s\n"
 	       "      %15llu%15llu%15llums\n",
 	       "count", "real total", "virtual total",
 	       "delay total", "delay average",
@@ -222,7 +224,11 @@
 	       "count", "delay total", "delay average",
 	       (unsigned long long)t->freepages_count,
 	       (unsigned long long)t->freepages_delay_total,
-	       average_ms(t->freepages_delay_total, t->freepages_count));
+	       average_ms(t->freepages_delay_total, t->freepages_count),
+	       "count", "delay total", "delay average",
+	       (unsigned long long)t->thrashing_count,
+	       (unsigned long long)t->thrashing_delay_total,
+	       average_ms(t->thrashing_delay_total, t->thrashing_count));
 }
 
 static void task_context_switch_counts(struct taskstats *t)
diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
new file mode 100644
index 0000000..0b3cb52
--- /dev/null
+++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
+#define _UAPI_ASM_RISCV_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index b455930..ec73d83 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -370,6 +370,20 @@
 	return argv + i;
 }
 
+/* on per cpu maps we must copy the provided value on all value instances */
+static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
+{
+	unsigned int i, n, step;
+
+	if (!map_is_per_cpu(info->type))
+		return;
+
+	n = get_possible_cpus();
+	step = round_up(info->value_size, 8);
+	for (i = 1; i < n; i++)
+		memcpy(value + i * step, value, info->value_size);
+}
+
 static int parse_elem(char **argv, struct bpf_map_info *info,
 		      void *key, void *value, __u32 key_size, __u32 value_size,
 		      __u32 *flags, __u32 **value_fd)
@@ -449,6 +463,8 @@
 			argv = parse_bytes(argv, "value", value, value_size);
 			if (!argv)
 				return -1;
+
+			fill_per_cpu_value(info, value);
 		}
 
 		return parse_elem(argv, info, key, NULL, key_size, value_size,
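
fill_per_cpu_value() above mirrors the kernel ABI for per-CPU maps: each
element carries one value slot per possible CPU, every slot rounded up to
8 bytes. The lookup side must allocate and walk the same layout; a hedged
userspace sketch using libbpf (error handling elided, names illustrative):

	unsigned int n = get_possible_cpus();		/* bpftool's helper */
	size_t step = (value_size + 7) & ~(size_t)7;	/* round_up(value_size, 8) */
	void *values = calloc(n, step);

	if (values && bpf_map_lookup_elem(map_fd, &key, values) == 0) {
		for (unsigned int i = 0; i < n; i++) {
			void *v = (char *)values + i * step;	/* CPU i's copy */
			/* ... consume v ... */
		}
	}
	free(values);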
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 0de024a..bbba0d6 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -109,13 +109,14 @@
 
 static int prog_fd_by_tag(unsigned char *tag)
 {
-	struct bpf_prog_info info = {};
-	__u32 len = sizeof(info);
 	unsigned int id = 0;
 	int err;
 	int fd;
 
 	while (true) {
+		struct bpf_prog_info info = {};
+		__u32 len = sizeof(info);
+
 		err = bpf_prog_get_next_id(id, &id);
 		if (err) {
 			p_err("%s", strerror(errno));
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index f216b2f..42a7878 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -79,8 +79,8 @@
          cplus-demangle                 \
          hello                          \
          libbabeltrace                  \
-         liberty                        \
-         liberty-z                      \
+         libbfd-liberty                 \
+         libbfd-liberty-z               \
          libunwind-debug-frame          \
          libunwind-debug-frame-arm      \
          libunwind-debug-frame-aarch64  \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 0516259..bf8a8eb 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -15,8 +15,8 @@
          test-libbfd.bin                        \
          test-disassembler-four-args.bin        \
          test-reallocarray.bin			\
-         test-liberty.bin                       \
-         test-liberty-z.bin                     \
+         test-libbfd-liberty.bin                \
+         test-libbfd-liberty-z.bin              \
          test-cplus-demangle.bin                \
          test-libelf.bin                        \
          test-libelf-getphdrnum.bin             \
@@ -200,7 +200,7 @@
 	$(BUILD)
 
 $(OUTPUT)test-libbfd.bin:
-	$(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
+	$(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
 
 $(OUTPUT)test-disassembler-four-args.bin:
 	$(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
@@ -208,10 +208,10 @@
 $(OUTPUT)test-reallocarray.bin:
 	$(BUILD)
 
-$(OUTPUT)test-liberty.bin:
+$(OUTPUT)test-libbfd-liberty.bin:
 	$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
 
-$(OUTPUT)test-liberty-z.bin:
+$(OUTPUT)test-libbfd-liberty-z.bin:
 	$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -lz
 
 $(OUTPUT)test-cplus-demangle.bin:
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index bbb2a8e..d7e06fe 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -1178,6 +1178,7 @@
 	FILE *file;
 	char cmd[PATH_MAX];
 	char *mac_addr;
+	int str_len;
 
 	/*
 	 * Set the configuration for the specified interface with
@@ -1301,8 +1302,18 @@
 	 * invoke the external script to do its magic.
 	 */
 
-	snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
-		 "hv_set_ifconfig", if_file);
+	str_len = snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
+			   "hv_set_ifconfig", if_file);
+	/*
+	 * This is a little overcautious, but it's necessary to suppress some
+	 * false warnings from gcc 8.0.1.
+	 */
+	if (str_len <= 0 || (unsigned int)str_len >= sizeof(cmd)) {
+		syslog(LOG_ERR, "Cmd '%s' (len=%d) may be too long",
+		       cmd, str_len);
+		return HV_E_FAIL;
+	}
+
 	if (system(cmd)) {
 		syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s",
 				cmd, errno, strerror(errno));
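
The hv_kvp_daemon change above is the portable way to make snprintf()
truncation fatal instead of silent: a negative return signals an output
error, and a return greater than or equal to the buffer size means the
command was cut off and must not be handed to system(). Minimal sketch
('script' and 'arg' are placeholders):

	char cmd[PATH_MAX];
	int len = snprintf(cmd, sizeof(cmd), "%s %s", script, arg);

	if (len < 0 || (size_t)len >= sizeof(cmd))
		return -1;	/* error or truncated: never run half a command */
	if (system(cmd))
		return -1;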
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 3040830..8454566 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -330,7 +330,7 @@
 
 int main(int argc, char **argv)
 {
-	unsigned long long num_loops = 2;
+	long long num_loops = 2;
 	unsigned long timedelay = 1000000;
 	unsigned long buf_len = 128;
 
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index 8dd6aef..57aaeaf 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -13,6 +13,10 @@
 #include "../../arch/mips/include/uapi/asm/bitsperlong.h"
 #elif defined(__ia64__)
 #include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
+#elif defined(__riscv)
+#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
+#elif defined(__alpha__)
+#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
 #else
 #include <asm-generic/bitsperlong.h>
 #endif
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 60aa4ca..7a00147 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -77,6 +77,7 @@
 {
 	__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
 	union bpf_attr attr;
+	int ret;
 
 	memset(&attr, '\0', sizeof(attr));
 
@@ -94,7 +95,15 @@
 	attr.map_ifindex = create_attr->map_ifindex;
 	attr.inner_map_fd = create_attr->inner_map_fd;
 
-	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+	ret = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+	if (ret < 0 && errno == EINVAL && create_attr->name) {
+		/* Retry the same syscall, but without the name.
+		 * Pre v4.14 kernels don't support map names.
+		 */
+		memset(attr.map_name, 0, sizeof(attr.map_name));
+		return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+	}
+	return ret;
 }
 
 int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
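
The retry above is a common libbpf compatibility pattern: kernels older
than v4.14 reject bpf_attr fields they do not know about with EINVAL, so
the call is repeated with the optional attribute cleared. In schematic
form (sys_bpf() as used in the hunk):

	ret = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	if (ret < 0 && errno == EINVAL && attr.map_name[0]) {
		/* pre-v4.14 kernels do not support map names: retry anonymously */
		memset(attr.map_name, 0, sizeof(attr.map_name));
		ret = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	}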
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
index 95563b8..ed61fb3 100644
--- a/tools/lib/subcmd/Makefile
+++ b/tools/lib/subcmd/Makefile
@@ -36,8 +36,6 @@
 CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 
 CFLAGS += -I$(srctree)/tools/include/
-CFLAGS += -I$(srctree)/include/uapi
-CFLAGS += -I$(srctree)/include
 
 SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
 
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index ce1e202..75de355 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4968,6 +4968,7 @@
 
 				if (arg->type == PRINT_BSTRING) {
 					trace_seq_puts(s, arg->string.string);
+					arg = arg->next;
 					break;
 				}
 
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index e30d20f..849b3be 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -294,6 +294,8 @@
   $(call feature_check,bionic)
   ifeq ($(feature-bionic), 1)
     BIONIC := 1
+    CFLAGS += -DLACKS_SIGQUEUE_PROTOTYPE
+    CFLAGS += -DLACKS_OPEN_MEMSTREAM_PROTOTYPE
     EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
     EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
   endif
@@ -686,18 +688,20 @@
 
 ifeq ($(feature-libbfd), 1)
   EXTLIBS += -lbfd
+else
+  # we are on a system that requires -liberty and (maybe) -lz
+  # to link against -lbfd; test each case individually here
 
   # call all detections now so we get correct
   # status in VF output
-  $(call feature_check,liberty)
-  $(call feature_check,liberty-z)
-  $(call feature_check,cplus-demangle)
+  $(call feature_check,libbfd-liberty)
+  $(call feature_check,libbfd-liberty-z)
 
-  ifeq ($(feature-liberty), 1)
-    EXTLIBS += -liberty
+  ifeq ($(feature-libbfd-liberty), 1)
+    EXTLIBS += -lbfd -liberty
   else
-    ifeq ($(feature-liberty-z), 1)
-      EXTLIBS += -liberty -lz
+    ifeq ($(feature-libbfd-liberty-z), 1)
+      EXTLIBS += -lbfd -liberty -lz
     endif
   endif
 endif
@@ -707,24 +711,24 @@
 else
   ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
     EXTLIBS += -liberty
-    CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
   else
-    ifneq ($(feature-libbfd), 1)
-      ifneq ($(feature-liberty), 1)
-        ifneq ($(feature-liberty-z), 1)
-          # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT
-          # or any of 'bfd iberty z' trinity
-          ifeq ($(feature-cplus-demangle), 1)
-            EXTLIBS += -liberty
-            CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
-          else
-            msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
-            CFLAGS += -DNO_DEMANGLE
-          endif
-        endif
+    ifeq ($(filter -liberty,$(EXTLIBS)),)
+      $(call feature_check,cplus-demangle)
+
+      # we have neither HAVE_CPLUS_DEMANGLE_SUPPORT
+      # nor any of the 'bfd iberty z' trinity
+      ifeq ($(feature-cplus-demangle), 1)
+        EXTLIBS += -liberty
+      else
+        msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
+        CFLAGS += -DNO_DEMANGLE
       endif
     endif
   endif
+
+  ifneq ($(filter -liberty,$(EXTLIBS)),)
+    CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
+  endif
 endif
 
 ifneq ($(filter -lbfd,$(EXTLIBS)),)
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 82657c0..5f69fd0 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -200,3 +200,13 @@
 
 	return perf_env__lookup_binutils_path(env, "objdump", path);
 }
+
+/*
+ * Some architectures have a single address space for kernel and user addresses,
+ * which makes it possible to determine if an address is in kernel space or user
+ * space.
+ */
+bool perf_env__single_address_space(struct perf_env *env)
+{
+	return strcmp(perf_env__arch(env), "sparc");
+}
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index 2167001..c298a44 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -5,5 +5,6 @@
 #include "../util/env.h"
 
 int perf_env__lookup_objdump(struct perf_env *env, const char **path);
+bool perf_env__single_address_space(struct perf_env *env);
 
 #endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index db0ba8c..ba8ecaf 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -524,10 +524,21 @@
 				    struct perf_evsel *evsel)
 {
 	int err;
+	char c;
 
 	if (!evsel)
 		return 0;
 
+	/*
+	 * If supported, force pass-through config term (pt=1) even if user
+	 * sets pt=0, which avoids senseless kernel errors.
+	 */
+	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
+	    !(evsel->attr.config & 1)) {
+		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
+		evsel->attr.config |= 1;
+	}
+
 	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
 				       "cyc_thresh", "caps/psb_cyc",
 				       evsel->attr.config);
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index b32409a..081353d 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -156,7 +156,7 @@
 	if (strstr(cpuid, "Intel")) {
 		kvm->exit_reasons = vmx_exit_reasons;
 		kvm->exit_reasons_isa = "VMX";
-	} else if (strstr(cpuid, "AMD")) {
+	} else if (strstr(cpuid, "AMD") || strstr(cpuid, "Hygon")) {
 		kvm->exit_reasons = svm_exit_reasons;
 		kvm->exit_reasons_isa = "SVM";
 	} else
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index ba481d7..53c11fc 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -727,8 +727,8 @@
 		if (PRINT_FIELD(DSO)) {
 			memset(&alf, 0, sizeof(alf));
 			memset(&alt, 0, sizeof(alt));
-			thread__find_map(thread, sample->cpumode, from, &alf);
-			thread__find_map(thread, sample->cpumode, to, &alt);
+			thread__find_map_fb(thread, sample->cpumode, from, &alf);
+			thread__find_map_fb(thread, sample->cpumode, to, &alt);
 		}
 
 		printed += fprintf(fp, " 0x%"PRIx64, from);
@@ -774,8 +774,8 @@
 		from = br->entries[i].from;
 		to   = br->entries[i].to;
 
-		thread__find_symbol(thread, sample->cpumode, from, &alf);
-		thread__find_symbol(thread, sample->cpumode, to, &alt);
+		thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
+		thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
 
 		printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
 		if (PRINT_FIELD(DSO)) {
@@ -819,11 +819,11 @@
 		from = br->entries[i].from;
 		to   = br->entries[i].to;
 
-		if (thread__find_map(thread, sample->cpumode, from, &alf) &&
+		if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
 		    !alf.map->dso->adjust_symbols)
 			from = map__map_ip(alf.map, from);
 
-		if (thread__find_map(thread, sample->cpumode, to, &alt) &&
+		if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
 		    !alt.map->dso->adjust_symbols)
 			to = map__map_ip(alt.map, to);
 
@@ -1589,13 +1589,8 @@
 		.force_header = false,
 	};
 	struct perf_evsel *ev2;
-	static bool init;
 	u64 val;
 
-	if (!init) {
-		perf_stat__init_shadow_stats();
-		init = true;
-	}
 	if (!evsel->stats)
 		perf_evlist__alloc_stats(script->session->evlist, false);
 	if (evsel_script(evsel->leader)->gnum++ == 0)
@@ -1658,7 +1653,7 @@
 		return;
 	}
 
-	if (PRINT_FIELD(TRACE)) {
+	if (PRINT_FIELD(TRACE) && sample->raw_data) {
 		event_format__fprintf(evsel->tp_format, sample->cpu,
 				      sample->raw_data, sample->raw_size, fp);
 	}
@@ -2214,6 +2209,8 @@
 
 	signal(SIGINT, sig_handler);
 
+	perf_stat__init_shadow_stats();
+
 	/* override event processing functions */
 	if (script->show_task_events) {
 		script->tool.comm = process_comm_event;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index d097b5b4..4072015 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1961,7 +1961,7 @@
 	return metricgroup__parse_groups(opt, str, &metric_events);
 }
 
-static const struct option stat_options[] = {
+static struct option stat_options[] = {
 	OPT_BOOLEAN('T', "transaction", &transaction_run,
 		    "hardware transaction statistics"),
 	OPT_CALLBACK('e', "event", &evsel_list, "event",
@@ -2847,6 +2847,12 @@
 		return -ENOMEM;
 
 	parse_events__shrink_config_terms();
+
+	/* String-parsing callback-based options would segfault when negated */
+	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
+	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
+	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
+
 	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
 					(const char **) stat_usage,
 					PARSE_OPT_STOP_AT_NON_OPTION);
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index a827919..775b998 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -43,6 +43,10 @@
 #include "util/data.h"
 #include "util/debug.h"
 
+#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
+FILE *open_memstream(char **ptr, size_t *sizeloc);
+#endif
+
 #define SUPPORT_OLD_POWER_EVENTS 1
 #define PWR_EVENT_EXIT -1
 
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 22ab8e6..3f43aed 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2263,19 +2263,30 @@
 
 static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
 {
-	struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
+	bool found = false;
+	struct perf_evsel *evsel, *tmp;
+	struct parse_events_error err = { .idx = 0, };
+	int ret = parse_events(evlist, "probe:vfs_getname*", &err);
 
-	if (IS_ERR(evsel))
+	if (ret)
 		return false;
 
-	if (perf_evsel__field(evsel, "pathname") == NULL) {
+	evlist__for_each_entry_safe(evlist, evsel, tmp) {
+		if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
+			continue;
+
+		if (perf_evsel__field(evsel, "pathname")) {
+			evsel->handler = trace__vfs_getname;
+			found = true;
+			continue;
+		}
+
+		list_del_init(&evsel->node);
+		evsel->evlist = NULL;
 		perf_evsel__delete(evsel);
-		return false;
 	}
 
-	evsel->handler = trace__vfs_getname;
-	perf_evlist__add(evlist, evsel);
-	return true;
+	return found;
 }
 
 static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 36c903f..71e9737 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -73,7 +73,7 @@
     },
     {
         "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 36c903f..71e9737 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -73,7 +73,7 @@
     },
     {
         "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index ff9b60b..44090a9 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -116,7 +116,7 @@
             if not self.has_key(t) or not other.has_key(t):
                 continue
             if not data_equal(self[t], other[t]):
-		log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
+                log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
 
 # Test file description needs to have following sections:
 # [config]
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index a467615..910e25e 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -291,12 +291,20 @@
 
 bool test__bp_signal_is_supported(void)
 {
-/*
- * The powerpc so far does not have support to even create
- * instruction breakpoint using the perf event interface.
- * Once it's there we can release this.
- */
-#if defined(__powerpc__) || defined(__s390x__)
+	/*
+	 * PowerPC and S390 do not support creation of instruction
+	 * breakpoints using the perf_event interface.
+	 *
+	 * ARM requires explicit rounding down of the instruction
+	 * pointer in Thumb mode, and then requires the single-step
+	 * to be handled explicitly in the overflow handler to avoid
+	 * stepping into the SIGIO handler and getting stuck on the
+	 * breakpointed instruction.
+	 *
+	 * Just disable the test for these architectures until these
+	 * issues are resolved.
+	 */
+#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__)
 	return false;
 #else
 	return true;
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 699561f..67bcbf8 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -17,7 +17,7 @@
 		return -1;
 	}
 
-	is_signed = !!(field->flags | FIELD_IS_SIGNED);
+	is_signed = !!(field->flags & FIELD_IS_SIGNED);
 	if (should_be_signed && !is_signed) {
 		pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
 			 evsel->name, name, is_signed, should_be_signed);
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 1c16e56..7cb99b4 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -13,7 +13,8 @@
 	local verbose=$1
 	if [ $had_vfs_getname -eq 1 ] ; then
 		line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
-		perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
+		perf probe -q       "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
+		perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
 	fi
 }
 
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 32ef7bd..dc2212e 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -766,6 +766,7 @@
 			cnode->cycles_count += node->branch_flags.cycles;
 			cnode->iter_count += node->nr_loop_iter;
 			cnode->iter_cycles += node->iter_cycles;
+			cnode->from_count++;
 		}
 	}
 
@@ -1345,10 +1346,10 @@
 static int branch_from_str(char *bf, int bfsize,
 			   u64 branch_count,
 			   u64 cycles_count, u64 iter_count,
-			   u64 iter_cycles)
+			   u64 iter_cycles, u64 from_count)
 {
 	int printed = 0, i = 0;
-	u64 cycles;
+	u64 cycles, v = 0;
 
 	cycles = cycles_count / branch_count;
 	if (cycles) {
@@ -1357,14 +1358,16 @@
 				bf + printed, bfsize - printed);
 	}
 
-	if (iter_count) {
-		printed += count_pri64_printf(i++, "iter",
-				iter_count,
-				bf + printed, bfsize - printed);
+	if (iter_count && from_count) {
+		v = iter_count / from_count;
+		if (v) {
+			printed += count_pri64_printf(i++, "iter",
+					v, bf + printed, bfsize - printed);
 
-		printed += count_pri64_printf(i++, "avg_cycles",
-				iter_cycles / iter_count,
-				bf + printed, bfsize - printed);
+			printed += count_pri64_printf(i++, "avg_cycles",
+					iter_cycles / iter_count,
+					bf + printed, bfsize - printed);
+		}
 	}
 
 	if (i)
@@ -1377,6 +1380,7 @@
 			     u64 branch_count, u64 predicted_count,
 			     u64 abort_count, u64 cycles_count,
 			     u64 iter_count, u64 iter_cycles,
+			     u64 from_count,
 			     struct branch_type_stat *brtype_stat)
 {
 	int printed;
@@ -1389,7 +1393,8 @@
 				predicted_count, abort_count, brtype_stat);
 	} else {
 		printed = branch_from_str(bf, bfsize, branch_count,
-				cycles_count, iter_count, iter_cycles);
+				cycles_count, iter_count, iter_cycles,
+				from_count);
 	}
 
 	if (!printed)
@@ -1402,13 +1407,14 @@
 				   u64 branch_count, u64 predicted_count,
 				   u64 abort_count, u64 cycles_count,
 				   u64 iter_count, u64 iter_cycles,
+				   u64 from_count,
 				   struct branch_type_stat *brtype_stat)
 {
 	char str[256];
 
 	counts_str_build(str, sizeof(str), branch_count,
 			 predicted_count, abort_count, cycles_count,
-			 iter_count, iter_cycles, brtype_stat);
+			 iter_count, iter_cycles, from_count, brtype_stat);
 
 	if (fp)
 		return fprintf(fp, "%s", str);
@@ -1422,6 +1428,7 @@
 	u64 branch_count, predicted_count;
 	u64 abort_count, cycles_count;
 	u64 iter_count, iter_cycles;
+	u64 from_count;
 
 	branch_count = clist->branch_count;
 	predicted_count = clist->predicted_count;
@@ -1429,11 +1436,12 @@
 	cycles_count = clist->cycles_count;
 	iter_count = clist->iter_count;
 	iter_cycles = clist->iter_cycles;
+	from_count = clist->from_count;
 
 	return callchain_counts_printf(fp, bf, bfsize, branch_count,
 				       predicted_count, abort_count,
 				       cycles_count, iter_count, iter_cycles,
-				       &clist->brtype_stat);
+				       from_count, &clist->brtype_stat);
 }
 
 static void free_callchain_node(struct callchain_node *node)
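
A worked example of the arithmetic the new from_count enables (numbers invented for illustration):

/* If a branch source was sampled from_count = 25 times and accumulated
 * iter_count = 100 loop iterations over iter_cycles = 400 cycles,
 * branch_from_str() now prints
 *	iter       = iter_count / from_count  = 4  (avg iterations/sample)
 *	avg_cycles = iter_cycles / iter_count = 4  (avg cycles/iteration)
 * and prints neither when from_count is 0 or the average truncates to
 * 0, instead of the old raw iter_count total. */
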
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 154560b..99d38ac 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -118,6 +118,7 @@
 		bool		has_children;
 	};
 	u64			branch_count;
+	u64			from_count;
 	u64			predicted_count;
 	u64			abort_count;
 	u64			cycles_count;
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 1ccbd33..383674f 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -134,7 +134,12 @@
 	if (!cpu_list)
 		return cpu_map__read_all_cpu_map();
 
-	if (!isdigit(*cpu_list))
+	/*
+	 * Must handle the case of an empty cpumap to cover the
+	 * TOPOLOGY header for NUMA nodes with no CPUs
+	 * (e.g., because of CPU hotplug).
+	 */
+	if (!isdigit(*cpu_list) && *cpu_list != '\0')
 		goto out;
 
 	while (isdigit(*cpu_list)) {
@@ -181,8 +186,10 @@
 
 	if (nr_cpus > 0)
 		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
-	else
+	else if (*cpu_list != '\0')
 		cpus = cpu_map__default_new();
+	else
+		cpus = cpu_map__dummy_new();
 invalid:
 	free(tmp_cpus);
 out:
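
The three parse outcomes after this change, assuming the cpu_map__new() entry point that wraps this parser:

/* cpu_map__new("0-3")  ->  map of CPUs 0..3
 * cpu_map__new(NULL)   ->  default map of all online CPUs
 * cpu_map__new("")     ->  dummy (empty) map, as recorded in the
 *                          TOPOLOGY header for a CPU-less NUMA node */
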
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index ca57765..7b5e15c 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -1005,7 +1005,7 @@
 	}
 
 swap_packet:
-	if (etmq->etm->synth_opts.last_branch) {
+	if (etm->sample_branches || etm->synth_opts.last_branch) {
 		/*
 		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
 		 * the next incoming packet.
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index bbed90e..cee717a 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -295,7 +295,7 @@
 		unlink(tmpbuf);
 
 	if (pathname && (fd >= 0))
-		strncpy(pathname, tmpbuf, len);
+		strlcpy(pathname, tmpbuf, len);
 
 	return fd;
 }
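
A sketch of the strncpy()/strlcpy() difference driving this and the later conversions (strlcpy here is the BSD-style helper shipped with the tools, not a libc function):

#include <string.h>

static void copy_demo(char *dst, size_t len)
{
	/* if the source is at least len bytes long, strncpy() leaves
	 * dst without a terminating NUL */
	strncpy(dst, "0123456789", len);
	/* strlcpy() truncates but always NUL-terminates within len */
	strlcpy(dst, "0123456789", len);
}
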
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 59f38c7..4c23779 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -166,7 +166,7 @@
 	struct utsname uts;
 	char *arch_name;
 
-	if (!env) { /* Assume local operation */
+	if (!env || !env->arch) { /* Assume local operation */
 		if (uname(&uts) < 0)
 			return NULL;
 		arch_name = uts.machine;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index bc64618..aa9c7df 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1576,6 +1576,24 @@
 	return al->map;
 }
 
+/*
+ * For branch stacks or branch samples, the sample cpumode might not be correct
+ * because it applies only to the sample 'ip' and not necessarily to 'addr' or
+ * branch stack addresses. If possible, use a fallback to deal with those cases.
+ */
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
+				struct addr_location *al)
+{
+	struct map *map = thread__find_map(thread, cpumode, addr, al);
+	struct machine *machine = thread->mg->machine;
+	u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
+
+	if (map || addr_cpumode == cpumode)
+		return map;
+
+	return thread__find_map(thread, addr_cpumode, addr, al);
+}
+
 struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
 				   u64 addr, struct addr_location *al)
 {
@@ -1585,6 +1603,15 @@
 	return al->sym;
 }
 
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
+				      u64 addr, struct addr_location *al)
+{
+	al->sym = NULL;
+	if (thread__find_map_fb(thread, cpumode, addr, al))
+		al->sym = map__find_symbol(al->map, al->addr);
+	return al->sym;
+}
+
 /*
  * Callers need to drop the reference to al->thread, obtained in
  * machine__findnew_thread()
@@ -1678,7 +1705,7 @@
 void thread__resolve(struct thread *thread, struct addr_location *al,
 		     struct perf_sample *sample)
 {
-	thread__find_map(thread, sample->cpumode, sample->addr, al);
+	thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
 
 	al->cpu = sample->cpu;
 	al->sym = NULL;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index be440df..819aa44 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -34,6 +34,10 @@
 #include <linux/log2.h>
 #include <linux/err.h>
 
+#ifdef LACKS_SIGQUEUE_PROTOTYPE
+int sigqueue(pid_t pid, int sig, const union sigval value);
+#endif
+
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
 
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 3cadc25..bd9226b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2636,6 +2636,7 @@
 	struct perf_header *header = &session->header;
 	int fd = perf_data__fd(session->data);
 	struct stat st;
+	time_t stctime;
 	int ret, bit;
 
 	hd.fp = fp;
@@ -2645,7 +2646,8 @@
 	if (ret == -1)
 		return -1;
 
-	fprintf(fp, "# captured on    : %s", ctime(&st.st_ctime));
+	stctime = st.st_ctime;
+	fprintf(fp, "# captured on    : %s", ctime(&stctime));
 
 	fprintf(fp, "# header version : %u\n", header->version);
 	fprintf(fp, "# data offset    : %" PRIu64 "\n", header->data_offset);
@@ -3521,7 +3523,7 @@
 	if (ev == NULL)
 		return -ENOMEM;
 
-	strncpy(ev->data, evsel->unit, size);
+	strlcpy(ev->data, evsel->unit, size + 1);
 	err = process(tool, (union perf_event *)ev, NULL, NULL);
 	free(ev);
 	return err;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8ee8ab3..b1508ce 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1988,7 +1988,7 @@
 {
 	int i;
 
-	iter->nr_loop_iter = nr;
+	iter->nr_loop_iter++;
 	iter->cycles = 0;
 
 	for (i = 0; i < nr; i++)
@@ -2575,6 +2575,33 @@
 	return err;
 }
 
+u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
+{
+	u8 addr_cpumode = cpumode;
+	bool kernel_ip;
+
+	if (!machine->single_address_space)
+		goto out;
+
+	kernel_ip = machine__kernel_ip(machine, addr);
+	switch (cpumode) {
+	case PERF_RECORD_MISC_KERNEL:
+	case PERF_RECORD_MISC_USER:
+		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
+					   PERF_RECORD_MISC_USER;
+		break;
+	case PERF_RECORD_MISC_GUEST_KERNEL:
+	case PERF_RECORD_MISC_GUEST_USER:
+		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
+					   PERF_RECORD_MISC_GUEST_USER;
+		break;
+	default:
+		break;
+	}
+out:
+	return addr_cpumode;
+}
+
 struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
 {
 	return dsos__findnew(&machine->dsos, filename);
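
A hypothetical caller showing how the new helper is meant to be used; this mirrors the thread__find_map_fb() fallback added in event.c and uses the perf-internal u8/u64 types:

static struct map *find_map_single_as(struct machine *machine,
				      struct thread *thread,
				      u8 sample_cpumode, u64 addr,
				      struct addr_location *al)
{
	/* with a single kernel/user address space the address itself is
	 * authoritative, so reclassify before the map lookup */
	u8 mode = machine__addr_cpumode(machine, sample_cpumode, addr);

	return thread__find_map(thread, mode, addr, al);
}
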
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index d856b85..ebde3ea 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -42,6 +42,7 @@
 	u16		  id_hdr_size;
 	bool		  comm_exec;
 	bool		  kptr_restrict_warned;
+	bool		  single_address_space;
 	char		  *root_dir;
 	char		  *mmap_name;
 	struct threads    threads[THREADS__TABLE_SIZE];
@@ -99,6 +100,8 @@
 	return ip >= kernel_start;
 }
 
+u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
+
 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
 				    pid_t tid);
 struct comm *machine__thread_exec_comm(struct machine *machine,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f8cd3e7..ebb18a9 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2454,7 +2454,7 @@
 		if (!name_only && strlen(syms->alias))
 			snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
 		else
-			strncpy(name, syms->symbol, MAX_NAME_LEN);
+			strlcpy(name, syms->symbol, MAX_NAME_LEN);
 
 		evt_list[evt_i] = strdup(name);
 		if (evt_list[evt_i] == NULL)
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 7e49baa..7348eea 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -145,7 +145,7 @@
 	int fd, ret = -1;
 	char path[PATH_MAX];
 
-	snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
+	scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
 
 	fd = open(path, O_RDONLY);
 	if (fd == -1)
@@ -175,7 +175,7 @@
 	ssize_t sret;
 	int fd;
 
-	snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
+	scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
 
 	fd = open(path, O_RDONLY);
 	if (fd == -1)
@@ -205,7 +205,7 @@
 	char path[PATH_MAX];
 	int fd;
 
-	snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
+	scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
 
 	fd = open(path, O_RDONLY);
 	if (fd == -1)
@@ -223,7 +223,7 @@
 	char path[PATH_MAX];
 	int fd;
 
-	snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
+	scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
 
 	fd = open(path, O_RDONLY);
 	if (fd == -1)
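
Why scnprintf() here, sketched with libc snprintf() semantics (the tools' scnprintf() caps its return value at what was actually stored):

#include <stdio.h>

static void return_value_demo(void)
{
	char path[16];
	int n = snprintf(path, sizeof(path), "%s", "a-much-longer-string");

	/* n == 20, larger than sizeof(path): naive "path + n" offset
	 * arithmetic would step past the buffer, which scnprintf()'s
	 * actually-written return value prevents */
	(void)n;
}
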
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index b76088f..6a65488 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -424,7 +424,7 @@
 
 	if (target && build_id_cache__cached(target)) {
 		/* This is a cached buildid */
-		strncpy(sbuildid, target, SBUILD_ID_SIZE);
+		strlcpy(sbuildid, target, SBUILD_ID_SIZE);
 		dir_name = build_id_cache__linkname(sbuildid, NULL, 0);
 		goto found;
 	}
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index ce501ba..69f5f61 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -939,7 +939,8 @@
 
 		file = PyFile_FromFile(fp, "perf", "r", NULL);
 #else
-		file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1, NULL, NULL, NULL, 1);
+		file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1,
+				     NULL, NULL, NULL, 0);
 #endif
 		if (file == NULL)
 			goto free_list;
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index d2c78ff..aa7f8c1 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -499,7 +499,7 @@
 	aux_ts = get_trailer_time(buf);
 	if (!aux_ts) {
 		pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n",
-		       sfq->buffer->data_offset);
+		       (s64)sfq->buffer->data_offset);
 		aux_ts = ~0ULL;
 		goto out;
 	}
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index dfc6093..05d95de 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -494,14 +494,14 @@
 		pydict_set_item_string_decref(pyelem, "cycles",
 		    PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
 
-		thread__find_map(thread, sample->cpumode,
-				 br->entries[i].from, &al);
+		thread__find_map_fb(thread, sample->cpumode,
+				    br->entries[i].from, &al);
 		dsoname = get_dsoname(al.map);
 		pydict_set_item_string_decref(pyelem, "from_dsoname",
 					      _PyUnicode_FromString(dsoname));
 
-		thread__find_map(thread, sample->cpumode,
-				 br->entries[i].to, &al);
+		thread__find_map_fb(thread, sample->cpumode,
+				    br->entries[i].to, &al);
 		dsoname = get_dsoname(al.map);
 		pydict_set_item_string_decref(pyelem, "to_dsoname",
 					      _PyUnicode_FromString(dsoname));
@@ -576,14 +576,14 @@
 		if (!pyelem)
 			Py_FatalError("couldn't create Python dictionary");
 
-		thread__find_symbol(thread, sample->cpumode,
-				    br->entries[i].from, &al);
+		thread__find_symbol_fb(thread, sample->cpumode,
+				       br->entries[i].from, &al);
 		get_symoff(al.sym, &al, true, bf, sizeof(bf));
 		pydict_set_item_string_decref(pyelem, "from",
 					      _PyUnicode_FromString(bf));
 
-		thread__find_symbol(thread, sample->cpumode,
-				    br->entries[i].to, &al);
+		thread__find_symbol_fb(thread, sample->cpumode,
+				       br->entries[i].to, &al);
 		get_symoff(al.sym, &al, true, bf, sizeof(bf));
 		pydict_set_item_string_decref(pyelem, "to",
 					      _PyUnicode_FromString(bf));
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8b93693..1108609 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -24,6 +24,7 @@
 #include "thread.h"
 #include "thread-stack.h"
 #include "stat.h"
+#include "arch/common.h"
 
 static int perf_session__deliver_event(struct perf_session *session,
 				       union perf_event *event,
@@ -150,6 +151,9 @@
 		session->machines.host.env = &perf_env;
 	}
 
+	session->machines.host.single_address_space =
+		perf_env__single_address_space(session->machines.host.env);
+
 	if (!data || perf_data__is_write(data)) {
 		/*
 		 * In O_RDONLY mode this will be performed when reading the
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 1cbada2..f735ee0 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -334,7 +334,7 @@
 	if (file) {
 		while (fgets(buf, 255, file)) {
 			if (strstr(buf, "model name")) {
-				strncpy(cpu_m, &buf[13], 255);
+				strlcpy(cpu_m, &buf[13], 255);
 				break;
 			}
 		}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 6e70cc0..a701a8a 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -87,6 +87,11 @@
 	return GELF_ST_TYPE(sym->st_info);
 }
 
+static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
+{
+	return GELF_ST_VISIBILITY(sym->st_other);
+}
+
 #ifndef STT_GNU_IFUNC
 #define STT_GNU_IFUNC 10
 #endif
@@ -111,7 +116,9 @@
 	return elf_sym__type(sym) == STT_NOTYPE &&
 		sym->st_name != 0 &&
 		sym->st_shndx != SHN_UNDEF &&
-		sym->st_shndx != SHN_ABS;
+		sym->st_shndx != SHN_ABS &&
+		elf_sym__visibility(sym) != STV_HIDDEN &&
+		elf_sym__visibility(sym) != STV_INTERNAL;
 }
 
 static bool elf_sym__filter(GElf_Sym *sym)
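
For reference, the visibility classes being filtered (values as defined in <elf.h>):

/* GELF_ST_VISIBILITY() masks the low two bits of st_other:
 *	STV_DEFAULT   = 0  visibility as implied by binding
 *	STV_INTERNAL  = 1  processor-specific hidden class
 *	STV_HIDDEN    = 2  not visible outside the component
 *	STV_PROTECTED = 3  visible but not preemptible
 * Hidden and internal symbols are not externally callable entry
 * points, so they no longer qualify as label candidates. */
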
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 07606aa..4e2c3cb 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -94,9 +94,13 @@
 
 struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
 			     struct addr_location *al);
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
+				struct addr_location *al);
 
 struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
 				   u64 addr, struct addr_location *al);
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
+				      u64 addr, struct addr_location *al);
 
 void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
 					struct addr_location *al);
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
index 84e2b64..2fa3c57 100755
--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
@@ -585,9 +585,9 @@
 
 read_trace_data(filename)
 
-clear_trace_file()
-# Free the memory
 if interval:
+    clear_trace_file()
+    # Free the memory
     free_trace_buffer()
 
 if graph_data_present == False:
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index ff9d3a5..c6635fe 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -104,16 +104,29 @@
 }
 EXPORT_SYMBOL(__wrap_devm_memremap);
 
+static void nfit_test_kill(void *_pgmap)
+{
+	struct dev_pagemap *pgmap = _pgmap;
+
+	pgmap->kill(pgmap->ref);
+}
+
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 {
 	resource_size_t offset = pgmap->res.start;
 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 
-	if (nfit_res)
+	if (nfit_res) {
+		int rc;
+
+		rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
+		if (rc)
+			return ERR_PTR(rc);
 		return nfit_res->buf + offset - nfit_res->res.start;
+	}
 	return devm_memremap_pages(dev, pgmap);
 }
-EXPORT_SYMBOL(__wrap_devm_memremap_pages);
+EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
 
 pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
 {
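
A note on the devm pattern used above, per the devm_add_action_or_reset() contract:

/* devm_add_action_or_reset(dev, fn, data) queues fn(data) to run when
 * dev is released; if the queueing itself fails it invokes fn(data)
 * immediately and returns the error, so the caller can simply do:
 *
 *	rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
 *	if (rc)
 *		return ERR_PTR(rc);	// pgmap->kill() already ran
 *
 * guaranteeing the pagemap is killed on every teardown path. */
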
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
index d9a7254..72c25a3 100644
--- a/tools/testing/selftests/android/Makefile
+++ b/tools/testing/selftests/android/Makefile
@@ -6,7 +6,7 @@
 
 include ../lib.mk
 
-all: khdr
+all:
 	@for DIR in $(SUBDIRS); do		\
 		BUILD_TARGET=$(OUTPUT)/$$DIR;	\
 		mkdir $$BUILD_TARGET  -p;	\
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index fff7fb1..f3f874b 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -124,6 +124,16 @@
 endif
 endif
 
+# Have one program compiled without "-target bpf" to test whether libbpf loads
+# it successfully
+$(OUTPUT)/test_xdp.o: test_xdp.c
+	$(CLANG) $(CLANG_FLAGS) \
+		-O2 -emit-llvm -c $< -o - | \
+	$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
+ifeq ($(DWARF2BTF),y)
+	$(BTF_PAHOLE) -J $@
+endif
+
 $(OUTPUT)/%.o: %.c
 	$(CLANG) $(CLANG_FLAGS) \
 		 -O2 -target bpf -emit-llvm -c $< -o - |      \
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 315a44f..84fd6f1 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -13,7 +13,7 @@
 	unsigned int start, end, possible_cpus = 0;
 	char buff[128];
 	FILE *fp;
-	int n;
+	int len, n, i, j = 0;
 
 	fp = fopen(fcpu, "r");
 	if (!fp) {
@@ -21,17 +21,27 @@
 		exit(1);
 	}
 
-	while (fgets(buff, sizeof(buff), fp)) {
-		n = sscanf(buff, "%u-%u", &start, &end);
-		if (n == 0) {
-			printf("Failed to retrieve # possible CPUs!\n");
-			exit(1);
-		} else if (n == 1) {
-			end = start;
-		}
-		possible_cpus = start == 0 ? end + 1 : 0;
-		break;
+	if (!fgets(buff, sizeof(buff), fp)) {
+		printf("Failed to read %s!\n", fcpu);
+		exit(1);
 	}
+
+	len = strlen(buff);
+	for (i = 0; i <= len; i++) {
+		if (buff[i] == ',' || buff[i] == '\0') {
+			buff[i] = '\0';
+			n = sscanf(&buff[j], "%u-%u", &start, &end);
+			if (n <= 0) {
+				printf("Failed to retrieve # possible CPUs!\n");
+				exit(1);
+			} else if (n == 1) {
+				end = start;
+			}
+			possible_cpus += end - start + 1;
+			j = i + 1;
+		}
+	}
+
 	fclose(fp);
 
 	return possible_cpus;
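
A worked example for the new parser:

/* Reading "0-3,8-11\n" from /sys/devices/system/cpu/possible splits on
 * ',' into "0-3" and "8-11", each contributing end - start + 1, giving
 * possible_cpus = 4 + 4 = 8. A lone "5" matches one conversion
 * (n == 1), is treated as the range 5-5, and contributes 1. The old
 * loop handled only a single leading "0-N" range and returned 0 for
 * anything else. */
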
diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
index d97dc91..8b1bc96 100755
--- a/tools/testing/selftests/bpf/test_libbpf.sh
+++ b/tools/testing/selftests/bpf/test_libbpf.sh
@@ -33,17 +33,11 @@
 
 libbpf_open_file test_l4lb.o
 
-# TODO: fix libbpf to load noinline functions
-# [warning] libbpf: incorrect bpf_call opcode
-#libbpf_open_file test_l4lb_noinline.o
+# Load a program with BPF-to-BPF calls
+libbpf_open_file test_l4lb_noinline.o
 
-# TODO: fix test_xdp_meta.c to load with libbpf
-# [warning] libbpf: test_xdp_meta.o doesn't provide kernel version
-#libbpf_open_file test_xdp_meta.o
-
-# TODO: fix libbpf to handle .eh_frame
-# [warning] libbpf: relocation failed: no section(10)
-#libbpf_open_file ../../../../samples/bpf/tracex3_kern.o
+# Load a program compiled without the "-target bpf" flag
+libbpf_open_file test_xdp.o
 
 # Success
 exit 0
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 0ef6820..89f8b0d 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -51,10 +51,10 @@
 	struct iphdr iph;
 	struct tcphdr tcp;
 } __packed pkt_v4 = {
-	.eth.h_proto = bpf_htons(ETH_P_IP),
+	.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
 	.iph.ihl = 5,
 	.iph.protocol = 6,
-	.iph.tot_len = bpf_htons(MAGIC_BYTES),
+	.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
 	.tcp.urg_ptr = 123,
 };
 
@@ -64,9 +64,9 @@
 	struct ipv6hdr iph;
 	struct tcphdr tcp;
 } __packed pkt_v6 = {
-	.eth.h_proto = bpf_htons(ETH_P_IPV6),
+	.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
 	.iph.nexthdr = 6,
-	.iph.payload_len = bpf_htons(MAGIC_BYTES),
+	.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
 	.tcp.urg_ptr = 123,
 };
 
@@ -1136,7 +1136,9 @@
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
 	int build_id_matches = 0;
+	int retry = 1;
 
+retry:
 	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
 		goto out;
@@ -1249,6 +1251,19 @@
 		previous_key = key;
 	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
 
+	/* stack_map_get_build_id_offset() is racy and sometimes can return
+	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+	 * try it one more time.
+	 */
+	if (build_id_matches < 1 && retry--) {
+		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+		close(pmu_fd);
+		bpf_object__close(obj);
+		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+		       __func__);
+		goto retry;
+	}
+
 	if (CHECK(build_id_matches < 1, "build id match",
 		  "Didn't find expected build ID from the map\n"))
 		goto disable_pmu;
@@ -1289,7 +1304,9 @@
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
 	int build_id_matches = 0;
+	int retry = 1;
 
+retry:
 	err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
 		return;
@@ -1384,6 +1401,19 @@
 		previous_key = key;
 	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
 
+	/* stack_map_get_build_id_offset() is racy and sometimes can return
+	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+	 * try it one more time.
+	 */
+	if (build_id_matches < 1 && retry--) {
+		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+		close(pmu_fd);
+		bpf_object__close(obj);
+		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+		       __func__);
+		goto retry;
+	}
+
 	if (CHECK(build_id_matches < 1, "build id match",
 		  "Didn't find expected build ID from the map\n"))
 		goto disable_pmu;
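
Why the __bpf_constant_ variants are needed in the static initializers above, as far as the bpf_endian.h split goes:

/* bpf_htons() may lower to __builtin_bswap16(), which some compilers
 * do not accept as a constant expression in a static initializer. The
 * _constant_ form is plain preprocessor arithmetic, conceptually:
 *
 *	#define swab16_const(x) ((__u16)((((x) & 0x00ffU) << 8) | \
 *					 (((x) & 0xff00U) >> 8)))
 *
 * (illustrative definition only; the real macro lives in
 * bpf_endian.h). */
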
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index aeeb76a..e38f1cb 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -44,6 +44,7 @@
 #define SERV6_V4MAPPED_IP	"::ffff:192.168.0.4"
 #define SRC6_IP			"::1"
 #define SRC6_REWRITE_IP		"::6"
+#define WILDCARD6_IP		"::"
 #define SERV6_PORT		6060
 #define SERV6_REWRITE_PORT	6666
 
@@ -85,12 +86,14 @@
 static int bind6_prog_load(const struct sock_addr_test *test);
 static int connect4_prog_load(const struct sock_addr_test *test);
 static int connect6_prog_load(const struct sock_addr_test *test);
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
 static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
 static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
 static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
 static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
 static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
 static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
 
 static struct sock_addr_test tests[] = {
 	/* bind */
@@ -463,6 +466,34 @@
 		SYSCALL_ENOTSUPP,
 	},
 	{
+		"sendmsg6: set dst IP = [::] (BSD'ism)",
+		sendmsg6_rw_wildcard_prog_load,
+		BPF_CGROUP_UDP6_SENDMSG,
+		BPF_CGROUP_UDP6_SENDMSG,
+		AF_INET6,
+		SOCK_DGRAM,
+		SERV6_IP,
+		SERV6_PORT,
+		SERV6_REWRITE_IP,
+		SERV6_REWRITE_PORT,
+		SRC6_REWRITE_IP,
+		SUCCESS,
+	},
+	{
+		"sendmsg6: preserve dst IP = [::] (BSD'ism)",
+		sendmsg_allow_prog_load,
+		BPF_CGROUP_UDP6_SENDMSG,
+		BPF_CGROUP_UDP6_SENDMSG,
+		AF_INET6,
+		SOCK_DGRAM,
+		WILDCARD6_IP,
+		SERV6_PORT,
+		SERV6_REWRITE_IP,
+		SERV6_PORT,
+		SRC6_IP,
+		SUCCESS,
+	},
+	{
 		"sendmsg6: deny call",
 		sendmsg_deny_prog_load,
 		BPF_CGROUP_UDP6_SENDMSG,
@@ -714,16 +745,27 @@
 	return load_path(test, CONNECT6_PROG_PATH);
 }
 
-static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
+				      int32_t rc)
 {
 	struct bpf_insn insns[] = {
-		/* return 0 */
-		BPF_MOV64_IMM(BPF_REG_0, 0),
+		/* return rc */
+		BPF_MOV64_IMM(BPF_REG_0, rc),
 		BPF_EXIT_INSN(),
 	};
 	return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
 }
 
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
+{
+	return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
+}
+
+static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+{
+	return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
+}
+
 static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
 {
 	struct sockaddr_in dst4_rw_addr;
@@ -844,6 +886,11 @@
 	return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
 }
 
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
+{
+	return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
+}
+
 static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
 {
 	return load_path(test, SENDMSG6_PROG_PATH);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index e436b67f..9db5a73 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2749,6 +2749,19 @@
 		.result = ACCEPT,
 	},
 	{
+		"alu32: mov u32 const",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_7, 0),
+			BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
+			BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
 		"unpriv: partial copy of pointer",
 		.insns = {
 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
index bab13dd..0d26b5e 100755
--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
+++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
@@ -37,6 +37,10 @@
 		exit $ksft_skip
 	fi
 
+	present_cpus=`cat $SYSFS/devices/system/cpu/present`
+	present_max=${present_cpus##*-}
+	echo "present_cpus = $present_cpus present_max = $present_max"
+
 	echo -e "\t Cpus in online state: $online_cpus"
 
 	offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
@@ -151,6 +155,8 @@
 online_max=0
 offline_cpus=0
 offline_max=0
+present_cpus=0
+present_max=0
 
 while getopts e:ahp: opt; do
 	case $opt in
@@ -190,9 +196,10 @@
 	online_cpu_expect_success $online_max
 
 	if [[ $offline_cpus -gt 0 ]]; then
-		echo -e "\t offline to online to offline: cpu $offline_max"
-		online_cpu_expect_success $offline_max
-		offline_cpu_expect_success $offline_max
+		echo -e "\t offline to online to offline: cpu $present_max"
+		online_cpu_expect_success $present_max
+		offline_cpu_expect_success $present_max
+		online_cpu $present_max
 	fi
 	exit 0
 else
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index 6c5f1b2..1cbb12e 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -91,7 +91,7 @@
 	if [ "$TEST_REQS_FW_SYSFS_FALLBACK" = "yes" ]; then
 		if [ ! "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
 			echo "usermode helper disabled so ignoring test"
-			exit $ksft_skip
+			exit 0
 		fi
 	fi
 }
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index ad1eeb1..30996306 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -19,6 +19,7 @@
 TEST_PROGS := run.sh
 
 top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
 include ../../lib.mk
 
 $(TEST_GEN_FILES): $(HEADERS)
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 4665cdb..59ea4c46 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -9,6 +9,7 @@
 EXTRA_OBJS += ../gpiogpio-hammer-in.o ../gpiogpio-utils.o ../gpiolsgpio-in.o
 EXTRA_OBJS += ../gpiolsgpio.o
 
+KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
 all: $(BINARIES)
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
index f8d468f..aaa1e9f 100644
--- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c
+++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
@@ -37,7 +37,7 @@
 	struct libmnt_table *tb;
 	struct libmnt_iter *itr = NULL;
 	struct libmnt_fs *fs;
-	int found = 0;
+	int found = 0, ret;
 
 	cxt = mnt_new_context();
 	if (!cxt)
@@ -58,8 +58,11 @@
 			break;
 		}
 	}
-	if (found)
-		asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+	if (found) {
+		ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+		if (ret < 0)
+			err(EXIT_FAILURE, "failed to format string");
+	}
 
 	mnt_free_iter(itr);
 	mnt_free_context(cxt);
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 6ae3730..76d654e 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -354,7 +354,7 @@
  * ASSERT_EQ(expected, measured): expected == measured
  */
 #define ASSERT_EQ(expected, seen) \
-	__EXPECT(expected, seen, ==, 1)
+	__EXPECT(expected, #expected, seen, #seen, ==, 1)
 
 /**
  * ASSERT_NE(expected, seen)
@@ -365,7 +365,7 @@
  * ASSERT_NE(expected, measured): expected != measured
  */
 #define ASSERT_NE(expected, seen) \
-	__EXPECT(expected, seen, !=, 1)
+	__EXPECT(expected, #expected, seen, #seen, !=, 1)
 
 /**
  * ASSERT_LT(expected, seen)
@@ -376,7 +376,7 @@
  * ASSERT_LT(expected, measured): expected < measured
  */
 #define ASSERT_LT(expected, seen) \
-	__EXPECT(expected, seen, <, 1)
+	__EXPECT(expected, #expected, seen, #seen, <, 1)
 
 /**
  * ASSERT_LE(expected, seen)
@@ -387,7 +387,7 @@
  * ASSERT_LE(expected, measured): expected <= measured
  */
 #define ASSERT_LE(expected, seen) \
-	__EXPECT(expected, seen, <=, 1)
+	__EXPECT(expected, #expected, seen, #seen, <=, 1)
 
 /**
  * ASSERT_GT(expected, seen)
@@ -398,7 +398,7 @@
  * ASSERT_GT(expected, measured): expected > measured
  */
 #define ASSERT_GT(expected, seen) \
-	__EXPECT(expected, seen, >, 1)
+	__EXPECT(expected, #expected, seen, #seen, >, 1)
 
 /**
  * ASSERT_GE(expected, seen)
@@ -409,7 +409,7 @@
  * ASSERT_GE(expected, measured): expected >= measured
  */
 #define ASSERT_GE(expected, seen) \
-	__EXPECT(expected, seen, >=, 1)
+	__EXPECT(expected, #expected, seen, #seen, >=, 1)
 
 /**
  * ASSERT_NULL(seen)
@@ -419,7 +419,7 @@
  * ASSERT_NULL(measured): NULL == measured
  */
 #define ASSERT_NULL(seen) \
-	__EXPECT(NULL, seen, ==, 1)
+	__EXPECT(NULL, "NULL", seen, #seen, ==, 1)
 
 /**
  * ASSERT_TRUE(seen)
@@ -429,7 +429,7 @@
  * ASSERT_TRUE(measured): measured != 0
  */
 #define ASSERT_TRUE(seen) \
-	ASSERT_NE(0, seen)
+	__EXPECT(0, "0", seen, #seen, !=, 1)
 
 /**
  * ASSERT_FALSE(seen)
@@ -439,7 +439,7 @@
  * ASSERT_FALSE(measured): measured == 0
  */
 #define ASSERT_FALSE(seen) \
-	ASSERT_EQ(0, seen)
+	__EXPECT(0, "0", seen, #seen, ==, 1)
 
 /**
  * ASSERT_STREQ(expected, seen)
@@ -472,7 +472,7 @@
  * EXPECT_EQ(expected, measured): expected == measured
  */
 #define EXPECT_EQ(expected, seen) \
-	__EXPECT(expected, seen, ==, 0)
+	__EXPECT(expected, #expected, seen, #seen, ==, 0)
 
 /**
  * EXPECT_NE(expected, seen)
@@ -483,7 +483,7 @@
  * EXPECT_NE(expected, measured): expected != measured
  */
 #define EXPECT_NE(expected, seen) \
-	__EXPECT(expected, seen, !=, 0)
+	__EXPECT(expected, #expected, seen, #seen, !=, 0)
 
 /**
  * EXPECT_LT(expected, seen)
@@ -494,7 +494,7 @@
  * EXPECT_LT(expected, measured): expected < measured
  */
 #define EXPECT_LT(expected, seen) \
-	__EXPECT(expected, seen, <, 0)
+	__EXPECT(expected, #expected, seen, #seen, <, 0)
 
 /**
  * EXPECT_LE(expected, seen)
@@ -505,7 +505,7 @@
  * EXPECT_LE(expected, measured): expected <= measured
  */
 #define EXPECT_LE(expected, seen) \
-	__EXPECT(expected, seen, <=, 0)
+	__EXPECT(expected, #expected, seen, #seen, <=, 0)
 
 /**
  * EXPECT_GT(expected, seen)
@@ -516,7 +516,7 @@
  * EXPECT_GT(expected, measured): expected > measured
  */
 #define EXPECT_GT(expected, seen) \
-	__EXPECT(expected, seen, >, 0)
+	__EXPECT(expected, #expected, seen, #seen, >, 0)
 
 /**
  * EXPECT_GE(expected, seen)
@@ -527,7 +527,7 @@
  * EXPECT_GE(expected, measured): expected >= measured
  */
 #define EXPECT_GE(expected, seen) \
-	__EXPECT(expected, seen, >=, 0)
+	__EXPECT(expected, #expected, seen, #seen, >=, 0)
 
 /**
  * EXPECT_NULL(seen)
@@ -537,7 +537,7 @@
  * EXPECT_NULL(measured): NULL == measured
  */
 #define EXPECT_NULL(seen) \
-	__EXPECT(NULL, seen, ==, 0)
+	__EXPECT(NULL, "NULL", seen, #seen, ==, 0)
 
 /**
  * EXPECT_TRUE(seen)
@@ -547,7 +547,7 @@
  * EXPECT_TRUE(measured): 0 != measured
  */
 #define EXPECT_TRUE(seen) \
-	EXPECT_NE(0, seen)
+	__EXPECT(0, "0", seen, #seen, !=, 0)
 
 /**
  * EXPECT_FALSE(seen)
@@ -557,7 +557,7 @@
  * EXPECT_FALSE(measured): 0 == measured
  */
 #define EXPECT_FALSE(seen) \
-	EXPECT_EQ(0, seen)
+	__EXPECT(0, "0", seen, #seen, ==, 0)
 
 /**
  * EXPECT_STREQ(expected, seen)
@@ -597,7 +597,7 @@
 	if (_metadata->passed && _metadata->step < 255) \
 		_metadata->step++;
 
-#define __EXPECT(_expected, _seen, _t, _assert) do { \
+#define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \
 	/* Avoid multiple evaluation of the cases */ \
 	__typeof__(_expected) __exp = (_expected); \
 	__typeof__(_seen) __seen = (_seen); \
@@ -606,8 +606,8 @@
 		unsigned long long __exp_print = (uintptr_t)__exp; \
 		unsigned long long __seen_print = (uintptr_t)__seen; \
 		__TH_LOG("Expected %s (%llu) %s %s (%llu)", \
-			 #_expected, __exp_print, #_t, \
-			 #_seen, __seen_print); \
+			 _expected_str, __exp_print, #_t, \
+			 _seen_str, __seen_print); \
 		_metadata->passed = 0; \
 		/* Ensure the optional handler is triggered */ \
 		_metadata->trigger = 1; \
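
Why the stringification moved up into the user-facing macros:

/* Macro arguments are expanded before substitution unless they are
 * operands of '#'. When ASSERT_EQ() forwarded bare arguments down to
 * __EXPECT(), a caller's symbolic constant reached '#' already
 * expanded:
 *
 *	ASSERT_EQ(O_RDONLY, flags)
 *	  old:  "Expected 0 (0) == flags (2)"
 *	  new:  "Expected O_RDONLY (0) == flags (2)"
 *
 * Applying #expected / #seen at the outer macro captures the caller's
 * original spelling. */
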
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index ec32dad..cc83e2f 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -1,6 +1,7 @@
 all:
 
 top_srcdir = ../../../../
+KSFT_KHDR_INSTALL := 1
 UNAME_M := $(shell uname -m)
 
 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
@@ -40,4 +41,3 @@
 
 all: $(STATIC_LIBS)
 $(TEST_GEN_PROGS): $(STATIC_LIBS)
-$(STATIC_LIBS):| khdr
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 6fd8c08..fb5d2d1 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -590,7 +590,7 @@
 	 * already exist.
 	 */
 	region = (struct userspace_mem_region *) userspace_mem_region_find(
-		vm, guest_paddr, guest_paddr + npages * vm->page_size);
+		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
 	if (region != NULL)
 		TEST_ASSERT(false, "overlapping userspace_mem_region already "
 			"exists\n"
@@ -606,15 +606,10 @@
 		region = region->next) {
 		if (region->region.slot == slot)
 			break;
-		if ((guest_paddr <= (region->region.guest_phys_addr
-				+ region->region.memory_size))
-			&& ((guest_paddr + npages * vm->page_size)
-				>= region->region.guest_phys_addr))
-			break;
 	}
 	if (region != NULL)
 		TEST_ASSERT(false, "A mem region with the requested slot "
-			"or overlapping physical memory range already exists.\n"
+			"already exists.\n"
 			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
 			"  existing slot: %u paddr: 0x%lx size: 0x%lx",
 			slot, guest_paddr, npages,
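
A worked example of the off-by-one being fixed:

/* One page at guest_paddr 0x1000 with page_size 0x1000 occupies
 * [0x1000, 0x1fff]. Passing the exclusive end 0x2000 to
 * userspace_mem_region_find() made a perfectly adjacent region
 * starting at 0x2000 look like an overlap; subtracting 1 queries the
 * true last byte of the new region instead. */
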
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 0a8e758..8b0f164 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -16,18 +16,18 @@
 TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
 TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
 
+ifdef KSFT_KHDR_INSTALL
 top_srcdir ?= ../../../..
 include $(top_srcdir)/scripts/subarch.include
 ARCH		?= $(SUBARCH)
 
-all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
-
 .PHONY: khdr
 khdr:
 	make ARCH=$(ARCH) -C $(top_srcdir) headers_install
 
-ifdef KSFT_KHDR_INSTALL
-$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
+all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
+else
+all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
 endif
 
 .ONESHELL:
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 10baa16..c67d32e 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -54,6 +54,22 @@
 	return fd;
 }
 
+static int mfd_assert_reopen_fd(int fd_in)
+{
+	int r, fd;
+	char path[100];
+
+	sprintf(path, "/proc/self/fd/%d", fd_in);
+
+	fd = open(path, O_RDWR);
+	if (fd < 0) {
+		printf("re-open of existing fd %d failed\n", fd_in);
+		abort();
+	}
+
+	return fd;
+}
+
 static void mfd_fail_new(const char *name, unsigned int flags)
 {
 	int r;
@@ -255,6 +271,25 @@
 	munmap(p, mfd_def_size);
 }
 
+/* Test that PROT_READ + MAP_SHARED mappings work. */
+static void mfd_assert_read_shared(int fd)
+{
+	void *p;
+
+	/* verify PROT_READ and MAP_SHARED *is* allowed */
+	p = mmap(NULL,
+		 mfd_def_size,
+		 PROT_READ,
+		 MAP_SHARED,
+		 fd,
+		 0);
+	if (p == MAP_FAILED) {
+		printf("mmap() failed: %m\n");
+		abort();
+	}
+	munmap(p, mfd_def_size);
+}
+
 static void mfd_assert_write(int fd)
 {
 	ssize_t l;
@@ -693,6 +728,44 @@
 }
 
 /*
+ * Test SEAL_FUTURE_WRITE
+ * Test whether SEAL_FUTURE_WRITE actually prevents modifications.
+ */
+static void test_seal_future_write(void)
+{
+	int fd, fd2;
+	void *p;
+
+	printf("%s SEAL-FUTURE-WRITE\n", memfd_str);
+
+	fd = mfd_assert_new("kern_memfd_seal_future_write",
+			    mfd_def_size,
+			    MFD_CLOEXEC | MFD_ALLOW_SEALING);
+
+	p = mfd_assert_mmap_shared(fd);
+
+	mfd_assert_has_seals(fd, 0);
+
+	mfd_assert_add_seals(fd, F_SEAL_FUTURE_WRITE);
+	mfd_assert_has_seals(fd, F_SEAL_FUTURE_WRITE);
+
+	/* read should pass, writes should fail */
+	mfd_assert_read(fd);
+	mfd_assert_read_shared(fd);
+	mfd_fail_write(fd);
+
+	fd2 = mfd_assert_reopen_fd(fd);
+	/* read should pass, writes should still fail */
+	mfd_assert_read(fd2);
+	mfd_assert_read_shared(fd2);
+	mfd_fail_write(fd2);
+
+	munmap(p, mfd_def_size);
+	close(fd2);
+	close(fd);
+}
+
+/*
  * Test SEAL_SHRINK
  * Test whether SEAL_SHRINK actually prevents shrinking
  */
@@ -945,6 +1018,7 @@
 	test_basic();
 
 	test_seal_write();
+	test_seal_future_write();
 	test_seal_shrink();
 	test_seal_grow();
 	test_seal_resize();
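
A minimal usage sketch of the seal exercised by the new test, assuming a kernel and libc headers that expose memfd_create() and F_SEAL_FUTURE_WRITE:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* returns a read-only-from-now-on shared buffer fd, or -1 on error */
static int sealed_buffer(size_t size)
{
	int fd = memfd_create("buf", MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (fd < 0)
		return -1;
	if (ftruncate(fd, size) < 0 ||
	    fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0) {
		close(fd);
		return -1;
	}
	/* existing writable mappings keep working, but write(), new
	 * PROT_WRITE + MAP_SHARED mmaps, and re-opened fds now fail
	 * with EPERM; reads still succeed */
	return fd;
}
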
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 919aa2a..9a3764a 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -18,6 +18,6 @@
 KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
-$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
+$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
 $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
 $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
index d8313d0..b90dff8 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
-ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
+ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
 NUM_NETIFS=4
 CHECK_TC="yes"
 source lib.sh
@@ -96,6 +96,51 @@
 	flood_test $swp2 $h1 $h2
 }
 
+vlan_deletion()
+{
+	# Test that the deletion of a VLAN on a bridge port does not affect
+	# the PVID VLAN
+	log_info "Add and delete a VLAN on bridge port $swp1"
+
+	bridge vlan add vid 10 dev $swp1
+	bridge vlan del vid 10 dev $swp1
+
+	ping_ipv4
+	ping_ipv6
+}
+
+extern_learn()
+{
+	local mac=de:ad:be:ef:13:37
+	local ageing_time
+
+	# Test that externally learned FDB entries can roam, but not age out
+	RET=0
+
+	bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
+
+	bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
+	check_err $? "Did not find FDB entry when it should exist"
+
+	# Wait for 10 seconds after the ageing time to make sure the FDB entry
+	# was not aged out
+	ageing_time=$(bridge_ageing_time_get br0)
+	sleep $((ageing_time + 10))
+
+	bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
+	check_err $? "FDB entry was aged out when it should not have been"
+
+	$MZ $h2 -c 1 -p 64 -a $mac -t ip -q
+
+	bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
+	check_err $? "FDB entry did not roam when it should have"
+
+	log_test "Externally learned FDB entry - ageing & roaming"
+
+	bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
+	bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
+}
+
 trap cleanup EXIT
 
 setup_prepare
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 47ed6ce..c9ff2b4 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for netfilter selftests
 
-TEST_PROGS := nft_trans_stress.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
index 1017313..59caa8f 100644
--- a/tools/testing/selftests/netfilter/config
+++ b/tools/testing/selftests/netfilter/config
@@ -1,2 +1,2 @@
 CONFIG_NET_NS=y
-NF_TABLES_INET=y
+CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
new file mode 100755
index 0000000..8ec7668
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -0,0 +1,762 @@
+#!/bin/bash
+#
+# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without nft tool"
+	exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without ip tool"
+	exit $ksft_skip
+fi
+
+ip netns add ns0
+ip netns add ns1
+ip netns add ns2
+
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+
+ip -net ns0 link set lo up
+ip -net ns0 link set veth0 up
+ip -net ns0 addr add 10.0.1.1/24 dev veth0
+ip -net ns0 addr add dead:1::1/64 dev veth0
+
+ip -net ns0 link set veth1 up
+ip -net ns0 addr add 10.0.2.1/24 dev veth1
+ip -net ns0 addr add dead:2::1/64 dev veth1
+
+for i in 1 2; do
+  ip -net ns$i link set lo up
+  ip -net ns$i link set eth0 up
+  ip -net ns$i addr add 10.0.$i.99/24 dev eth0
+  ip -net ns$i route add default via 10.0.$i.1
+  ip -net ns$i addr add dead:$i::99/64 dev eth0
+  ip -net ns$i route add default via dead:$i::1
+done
+
+bad_counter()
+{
+	local ns=$1
+	local counter=$2
+	local expect=$3
+
+	echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
+	ip netns exec $ns nft list counter inet filter $counter 1>&2
+}
+
+check_counters()
+{
+	ns=$1
+	local lret=0
+
+	cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
+	if [ $? -ne 0 ]; then
+		bad_counter $ns ns0in "packets 1 bytes 84"
+		lret=1
+	fi
+	cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
+	if [ $? -ne 0 ]; then
+		bad_counter $ns ns0out "packets 1 bytes 84"
+		lret=1
+	fi
+
+	expect="packets 1 bytes 104"
+	cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
+	if [ $? -ne 0 ]; then
+		bad_counter $ns ns0in6 "$expect"
+		lret=1
+	fi
+	cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
+	if [ $? -ne 0 ]; then
+		bad_counter $ns ns0out6 "$expect"
+		lret=1
+	fi
+
+	return $lret
+}
+
+check_ns0_counters()
+{
+	local ns=$1
+	local lret=0
+
+	cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
+	if [ $? -ne 0 ]; then
+		bad_counter ns0 ns0in "packets 0 bytes 0"
+		lret=1
+	fi
+
+	cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
+	if [ $? -ne 0 ]; then
+		bad_counter ns0 ns0in6 "packets 0 bytes 0"
+		lret=1
+	fi
+
+	cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
+	if [ $? -ne 0 ]; then
+		bad_counter ns0 ns0out "packets 0 bytes 0"
+		lret=1
+	fi
+	cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
+	if [ $? -ne 0 ]; then
+		bad_counter ns0 ns0out6 "packets 0 bytes 0"
+		lret=1
+	fi
+
+	for dir in "in" "out" ; do
+		expect="packets 1 bytes 84"
+		cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 $ns$dir "$expect"
+			lret=1
+		fi
+
+		expect="packets 1 bytes 104"
+		cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ${ns}${dir}6 "$expect"
+			lret=1
+		fi
+	done
+
+	return $lret
+}
+
+reset_counters()
+{
+	for i in 0 1 2;do
+		ip netns exec ns$i nft reset counters inet > /dev/null
+	done
+}
+
+test_local_dnat6()
+{
+	local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+	chain output {
+		type nat hook output priority 0; policy accept;
+		ip6 daddr dead:1::99 dnat to dead:2::99
+	}
+}
+EOF
+	if [ $? -ne 0 ]; then
+		echo "SKIP: Could not add ip6 dnat hook"
+		return $ksft_skip
+	fi
+
+	# ping netns1, expect rewrite to netns2
+	ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
+	if [ $? -ne 0 ]; then
+		lret=1
+		echo "ERROR: ping6 failed"
+		return $lret
+	fi
+
+	expect="packets 0 bytes 0"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 0 count in ns1
+	expect="packets 0 bytes 0"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 1 packet in ns2
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
+	ip netns exec ns0 nft flush chain ip6 nat output
+
+	return $lret
+}
+
+test_local_dnat()
+{
+	local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+	chain output {
+		type nat hook output priority 0; policy accept;
+		ip daddr 10.0.1.99 dnat to 10.0.2.99
+	}
+}
+EOF
+	# ping netns1, expect rewrite to netns2
+	ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+	if [ $? -ne 0 ]; then
+		lret=1
+		echo "ERROR: ping failed"
+		return $lret
+	fi
+
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 0 count in ns1
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 1 packet in ns2
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
+
+	ip netns exec ns0 nft flush chain ip nat output
+
+	reset_counters
+	ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+	if [ $? -ne 0 ]; then
+		lret=1
+		echo "ERROR: ping failed"
+		return $lret
+	fi
+
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 1 count in ns1
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 0 packet in ns2
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
+
+	return $lret
+}
+
+
+test_masquerade6()
+{
+	local lret=0
+
+	ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 via ipv6"
+		lret=1
+		return $lret
+	fi
+
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+	chain postrouting {
+		type nat hook postrouting priority 0; policy accept;
+		meta oif veth0 masquerade
+	}
+}
+EOF
+	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+		lret=1
+	fi
+
+	# ns1 should have seen packets from ns0, due to masquerade
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# ns1 should not have seen packets from ns2, due to masquerade
+	expect="packets 0 bytes 0"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	ip netns exec ns0 nft flush chain ip6 nat postrouting
+	if [ $? -ne 0 ]; then
+		echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
+		lret=1
+	fi
+
+	test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+
+	return $lret
+}
+
+test_masquerade()
+{
+	local lret=0
+
+	ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+	ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2"
+		lret=1
+	fi
+
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+	chain postrouting {
+		type nat hook postrouting priority 0; policy accept;
+		meta oif veth0 masquerade
+	}
+}
+EOF
+	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+		lret=1
+	fi
+
+	# ns1 should have seen packets from ns0, due to masquerade
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# ns1 should not have seen packets from ns2, due to masquerade
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	ip netns exec ns0 nft flush chain ip nat postrouting
+	if [ $? -ne 0 ]; then
+		echo "ERROR: Could not flush nat postrouting" 1>&2
+		lret=1
+	fi
+
+	test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+
+	return $lret
+}
+
+test_redirect6()
+{
+	local lret=0
+
+	ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 via ipv6"
+		lret=1
+	fi
+
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+	chain prerouting {
+		type nat hook prerouting priority 0; policy accept;
+		meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
+	}
+}
+EOF
+	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
+		lret=1
+	fi
+
+	# ns1 should have seen no packets from ns2, due to redirection
+	expect="packets 0 bytes 0"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# ns0 should have seen packets from ns2, due to the redirect
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	ip netns exec ns0 nft delete table ip6 nat
+	if [ $? -ne 0 ]; then
+		echo "ERROR: Could not delete ip6 nat table" 1>&2
+		lret=1
+	fi
+
+	test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
+
+	return $lret
+}
+
+test_redirect()
+{
+	local lret=0
+
+	ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+	ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2"
+		lret=1
+	fi
+
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+	chain prerouting {
+		type nat hook prerouting priority 0; policy accept;
+		meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
+	}
+}
+EOF
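+	# As in the IPv6 case, matching icmp packets are redirected to a
+	# local address of ns0, so they never reach ns1.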
+	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
+		lret=1
+	fi
+
+	# ns1 should have seen no packets from ns2, due to redirection
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# ns0 should have seen packets from ns2, due to the redirect
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	ip netns exec ns0 nft delete table ip nat
+	if [ $? -ne 0 ]; then
+		echo "ERROR: Could not delete nat table" 1>&2
+		lret=1
+	fi
+
+	test $lret -eq 0 && echo "PASS: IP redirection for ns2"
+
+	return $lret
+}
+
+for i in 0 1 2; do
+ip netns exec ns$i nft -f - <<EOF
+table inet filter {
+	counter ns0in {}
+	counter ns1in {}
+	counter ns2in {}
+
+	counter ns0out {}
+	counter ns1out {}
+	counter ns2out {}
+
+	counter ns0in6 {}
+	counter ns1in6 {}
+	counter ns2in6 {}
+
+	counter ns0out6 {}
+	counter ns1out6 {}
+	counter ns2out6 {}
+
+	map nsincounter {
+		type ipv4_addr : counter
+		elements = { 10.0.1.1 : "ns0in",
+			     10.0.2.1 : "ns0in",
+			     10.0.1.99 : "ns1in",
+			     10.0.2.99 : "ns2in" }
+	}
+
+	map nsincounter6 {
+		type ipv6_addr : counter
+		elements = { dead:1::1 : "ns0in6",
+			     dead:2::1 : "ns0in6",
+			     dead:1::99 : "ns1in6",
+			     dead:2::99 : "ns2in6" }
+	}
+
+	map nsoutcounter {
+		type ipv4_addr : counter
+		elements = { 10.0.1.1 : "ns0out",
+			     10.0.2.1 : "ns0out",
+			     10.0.1.99: "ns1out",
+			     10.0.2.99: "ns2out" }
+	}
+
+	map nsoutcounter6 {
+		type ipv6_addr : counter
+		elements = { dead:1::1 : "ns0out6",
+			     dead:2::1 : "ns0out6",
+			     dead:1::99 : "ns1out6",
+			     dead:2::99 : "ns2out6" }
+	}
+
+	chain input {
+		type filter hook input priority 0; policy accept;
+		counter name ip saddr map @nsincounter
+		icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
+	}
+	chain output {
+		type filter hook output priority 0; policy accept;
+		counter name ip daddr map @nsoutcounter
+		icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
+	}
+}
+EOF
+done
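+# Each namespace now counts icmp and icmpv6 packets per peer address:
+# the vmap lookups bump the named counter matching a packet's source
+# (input hook) or destination (output hook), which is what the counter
+# checks in the tests above compare against.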
+
+sleep 3
+# test basic connectivity
+for i in 1 2; do
+  ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
+  if [ $? -ne 0 ]; then
+	echo "ERROR: Could not reach other namespace(s)" 1>&2
+	ret=1
+  fi
+
+  ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
+  if [ $? -ne 0 ]; then
+	echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
+	ret=1
+  fi
+  check_counters ns$i
+  if [ $? -ne 0 ]; then
+	ret=1
+  fi
+
+  check_ns0_counters ns$i
+  if [ $? -ne 0 ]; then
+	ret=1
+  fi
+  reset_counters
+done
+
+if [ $ret -eq 0 ]; then
+	echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
+fi
+
+reset_counters
+test_local_dnat
+test_local_dnat6
+
+reset_counters
+test_masquerade
+test_masquerade6
+
+reset_counters
+test_redirect
+test_redirect6
+
+for i in 0 1 2; do ip netns del ns$i; done
+
+exit $ret
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
index 14cfcf0..c46c0ee 100644
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -6,6 +6,7 @@
 all: $(TEST_PROGS)
 
 top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
 include ../../lib.mk
 
 clean:
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 82121a8..29bac5e 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -10,4 +10,5 @@
 /proc-uptime-002
 /read
 /self
+/setns-dcache
 /thread-self
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index 1c12c34..434d033 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -14,6 +14,7 @@
 TEST_GEN_PROGS += proc-uptime-002
 TEST_GEN_PROGS += read
 TEST_GEN_PROGS += self
+TEST_GEN_PROGS += setns-dcache
 TEST_GEN_PROGS += thread-self
 
 include ../lib.mk
diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
new file mode 100644
index 0000000..60ab197
--- /dev/null
+++ b/tools/testing/selftests/proc/setns-dcache.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Test that setns(CLONE_NEWNET) points to new /proc/net content even
+ * if old one is in dcache.
+ *
+ * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
+ */
+#undef NDEBUG
+#include <assert.h>
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+
+static pid_t pid = -1;
+
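+/* atexit() handler: kill the forked child so it cannot outlive the test. */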
+static void f(void)
+{
+	if (pid > 0) {
+		kill(pid, SIGTERM);
+	}
+}
+
+int main(void)
+{
+	int fd[2];
+	char _ = 0;
+	int nsfd;
+
+	atexit(f);
+
+	/* Check for privileges and syscall availability straight away. */
+	if (unshare(CLONE_NEWNET) == -1) {
+		if (errno == ENOSYS || errno == EPERM) {
+			return 4;
+		}
+		return 1;
+	}
+	/* Create a unix socket to distinguish two otherwise empty net namespaces. */
+	if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
+		return 1;
+	}
+
+	if (pipe(fd) == -1) {
+		return 1;
+	}
+
+	pid = fork();
+	if (pid == -1) {
+		return 1;
+	}
+
+	if (pid == 0) {
+		if (unshare(CLONE_NEWNET) == -1) {
+			return 1;
+		}
+
+		if (write(fd[1], &_, 1) != 1) {
+			return 1;
+		}
+
+		pause();
+
+		return 0;
+	}
+
+	if (read(fd[0], &_, 1) != 1) {
+		return 1;
+	}
+
+	{
+		char buf[64];
+		snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
+		nsfd = open(buf, O_RDONLY);
+		if (nsfd == -1) {
+			return 1;
+		}
+	}
+
+	/* Reliably pin dentry into dcache. */
+	(void)open("/proc/net/unix", O_RDONLY);
+
+	if (setns(nsfd, CLONE_NEWNET) == -1) {
+		return 1;
+	}
+
+	kill(pid, SIGTERM);
+	pid = 0;
+
+	{
+		char buf[4096];
+		ssize_t rv;
+		int fd;
+
+		fd = open("/proc/net/unix", O_RDONLY);
+		if (fd == -1) {
+			return 1;
+		}
+
+#define S "Num       RefCount Protocol Flags    Type St Inode Path\n"
+		rv = read(fd, buf, sizeof(buf));
+
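+		/*
+		 * The child's freshly unshared netns has no unix sockets,
+		 * so only the header line should be returned.
+		 */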
+		assert(rv == strlen(S));
+		assert(memcmp(buf, S, strlen(S)) == 0);
+	}
+
+	return 0;
+}
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index e20b017..b206553 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -145,15 +145,12 @@
 
 	rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
 	ASSERT_NE(-1, rc);
-	EXPECT_NE(0, rc);
+	ASSERT_NE(0, rc);
 
 	/* Disable alarm interrupts */
 	rc = ioctl(self->fd, RTC_AIE_OFF, 0);
 	ASSERT_NE(-1, rc);
 
-	if (rc == 0)
-		return;
-
 	rc = read(self->fd, &data, sizeof(unsigned long));
 	ASSERT_NE(-1, rc);
 	TH_LOG("data: %lx", data);
@@ -202,7 +199,109 @@
 
 	rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
 	ASSERT_NE(-1, rc);
-	EXPECT_NE(0, rc);
+	ASSERT_NE(0, rc);
+
+	rc = read(self->fd, &data, sizeof(unsigned long));
+	ASSERT_NE(-1, rc);
+
+	rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+	ASSERT_NE(-1, rc);
+
+	new = timegm((struct tm *)&tm);
+	ASSERT_EQ(new, secs);
+}
+
+TEST_F(rtc, alarm_alm_set_minute) {
+	struct timeval tv = { .tv_sec = 62 };
+	unsigned long data;
+	struct rtc_time tm;
+	fd_set readfds;
+	time_t secs, new;
+	int rc;
+
+	rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+	ASSERT_NE(-1, rc);
+
+	secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec;
+	gmtime_r(&secs, (struct tm *)&tm);
+
+	rc = ioctl(self->fd, RTC_ALM_SET, &tm);
+	if (rc == -1) {
+		ASSERT_EQ(EINVAL, errno);
+		TH_LOG("skip: alarms are not supported.");
+		return;
+	}
+
+	rc = ioctl(self->fd, RTC_ALM_READ, &tm);
+	ASSERT_NE(-1, rc);
+
+	TH_LOG("Alarm time now set to %02d:%02d:%02d.",
+	       tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+	/* Enable alarm interrupts */
+	rc = ioctl(self->fd, RTC_AIE_ON, 0);
+	ASSERT_NE(-1, rc);
+
+	FD_ZERO(&readfds);
+	FD_SET(self->fd, &readfds);
+
+	rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+	ASSERT_NE(-1, rc);
+	ASSERT_NE(0, rc);
+
+	/* Disable alarm interrupts */
+	rc = ioctl(self->fd, RTC_AIE_OFF, 0);
+	ASSERT_NE(-1, rc);
+
+	rc = read(self->fd, &data, sizeof(unsigned long));
+	ASSERT_NE(-1, rc);
+	TH_LOG("data: %lx", data);
+
+	rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+	ASSERT_NE(-1, rc);
+
+	new = timegm((struct tm *)&tm);
+	ASSERT_EQ(new, secs);
+}
+
+TEST_F(rtc, alarm_wkalm_set_minute) {
+	struct timeval tv = { .tv_sec = 62 };
+	struct rtc_wkalrm alarm = { 0 };
+	struct rtc_time tm;
+	unsigned long data;
+	fd_set readfds;
+	time_t secs, new;
+	int rc;
+
+	rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
+	ASSERT_NE(-1, rc);
+
+	secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec;
+	gmtime_r(&secs, (struct tm *)&alarm.time);
+
+	alarm.enabled = 1;
+
+	rc = ioctl(self->fd, RTC_WKALM_SET, &alarm);
+	if (rc == -1) {
+		ASSERT_EQ(EINVAL, errno);
+		TH_LOG("skip: alarms are not supported.");
+		return;
+	}
+
+	rc = ioctl(self->fd, RTC_WKALM_RD, &alarm);
+	ASSERT_NE(-1, rc);
+
+	TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.",
+	       alarm.time.tm_mday, alarm.time.tm_mon + 1,
+	       alarm.time.tm_year + 1900, alarm.time.tm_hour,
+	       alarm.time.tm_min, alarm.time.tm_sec);
+
+	FD_ZERO(&readfds);
+	FD_SET(self->fd, &readfds);
+
+	rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+	ASSERT_NE(-1, rc);
+	ASSERT_NE(0, rc);
 
 	rc = read(self->fd, &data, sizeof(unsigned long));
 	ASSERT_NE(-1, rc);
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
index fce7f4c..1760b3e 100644
--- a/tools/testing/selftests/seccomp/Makefile
+++ b/tools/testing/selftests/seccomp/Makefile
@@ -9,7 +9,7 @@
 CFLAGS += -Wl,-no-as-needed -Wall
 
 seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
-	$(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@
+	$(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@
 
 TEST_PROGS += $(BINARIES)
 EXTRA_CLEAN := $(BINARIES)
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index e147323..83057fa 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1563,7 +1563,16 @@
 #ifdef SYSCALL_NUM_RET_SHARE_REG
 # define EXPECT_SYSCALL_RETURN(val, action)	EXPECT_EQ(-1, action)
 #else
-# define EXPECT_SYSCALL_RETURN(val, action)	EXPECT_EQ(val, action)
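+/*
+ * A negative "val" means the syscall is expected to fail with
+ * errno set to -val; a non-negative "val" is compared against the
+ * raw return value.
+ */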
+# define EXPECT_SYSCALL_RETURN(val, action)		\
+	do {						\
+		errno = 0;				\
+		if (val < 0) {				\
+			EXPECT_EQ(-1, action);		\
+			EXPECT_EQ(-(val), errno);	\
+		} else {				\
+			EXPECT_EQ(val, action);		\
+		}					\
+	} while (0)
 #endif
 
 /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
@@ -1602,7 +1611,7 @@
 
 /* Architecture-specific syscall changing routine. */
 void change_syscall(struct __test_metadata *_metadata,
-		    pid_t tracee, int syscall)
+		    pid_t tracee, int syscall, int result)
 {
 	int ret;
 	ARCH_REGS regs;
@@ -1661,7 +1670,7 @@
 #ifdef SYSCALL_NUM_RET_SHARE_REG
 		TH_LOG("Can't modify syscall return on this architecture");
 #else
-		regs.SYSCALL_RET = EPERM;
+		regs.SYSCALL_RET = result;
 #endif
 
 #ifdef HAVE_GETREGS
@@ -1689,14 +1698,19 @@
 	case 0x1002:
 		/* change getpid to getppid. */
 		EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
-		change_syscall(_metadata, tracee, __NR_getppid);
+		change_syscall(_metadata, tracee, __NR_getppid, 0);
 		break;
 	case 0x1003:
-		/* skip gettid. */
+		/* skip gettid with valid return code. */
 		EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
-		change_syscall(_metadata, tracee, -1);
+		change_syscall(_metadata, tracee, -1, 45000);
 		break;
 	case 0x1004:
+		/* skip openat with error. */
+		EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
+		change_syscall(_metadata, tracee, -1, -ESRCH);
+		break;
+	case 0x1005:
 		/* do nothing (allow getppid) */
 		EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
 		break;
@@ -1729,9 +1743,11 @@
 	nr = get_syscall(_metadata, tracee);
 
 	if (nr == __NR_getpid)
-		change_syscall(_metadata, tracee, __NR_getppid);
+		change_syscall(_metadata, tracee, __NR_getppid, 0);
+	if (nr == __NR_gettid)
+		change_syscall(_metadata, tracee, -1, 45000);
 	if (nr == __NR_openat)
-		change_syscall(_metadata, tracee, -1);
+		change_syscall(_metadata, tracee, -1, -ESRCH);
 }
 
 FIXTURE_DATA(TRACE_syscall) {
@@ -1748,8 +1764,10 @@
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
 		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
-		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
+		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
 	};
 
@@ -1797,15 +1815,26 @@
 	EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
 
-TEST_F(TRACE_syscall, ptrace_syscall_dropped)
+TEST_F(TRACE_syscall, ptrace_syscall_errno)
 {
 	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
 	teardown_trace_fixture(_metadata, self->tracer);
 	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
 					   true);
 
-	/* Tracer should skip the open syscall, resulting in EPERM. */
-	EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
+	/* Tracer should skip the open syscall, resulting in ESRCH. */
+	EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, ptrace_syscall_faked)
+{
+	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
+	teardown_trace_fixture(_metadata, self->tracer);
+	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
+					   true);
+
+	/* Tracer should skip the gettid syscall, resulting in a fake pid. */
+	EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
 }
 
 TEST_F(TRACE_syscall, syscall_allowed)
@@ -1838,7 +1867,21 @@
 	EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
 
-TEST_F(TRACE_syscall, syscall_dropped)
+TEST_F(TRACE_syscall, syscall_errno)
+{
+	long ret;
+
+	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+	ASSERT_EQ(0, ret);
+
+	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
+	ASSERT_EQ(0, ret);
+
+	/* openat has been skipped and an errno returned. */
+	EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, syscall_faked)
 {
 	long ret;
 
@@ -1849,8 +1892,7 @@
 	ASSERT_EQ(0, ret);
 
 	/* gettid has been skipped and an altered return value stored. */
-	EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
-	EXPECT_NE(self->mytid, syscall(__NR_gettid));
+	EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
 }
 
 TEST_F(TRACE_syscall, skip_after_RET_TRACE)
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index 637ea02..0da3545 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -17,7 +17,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 2",
-        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -41,7 +41,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 2",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -65,7 +65,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 2",
-        "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
+        "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -89,7 +89,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 2",
-        "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
+        "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -113,7 +113,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 2",
-        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
+        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -137,7 +137,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 2",
-        "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
+        "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -161,7 +161,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 90",
-        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
+        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -185,7 +185,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action ife index 90",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
         "matchCount": "0",
         "teardown": []
     },
@@ -207,7 +207,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 9",
-        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -231,7 +231,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 9",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -255,7 +255,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 9",
-        "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
+        "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -279,7 +279,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 9",
-        "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
+        "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -303,7 +303,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 9",
-        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
+        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -327,7 +327,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 9",
-        "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
+        "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -351,7 +351,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 99",
-        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
+        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -375,7 +375,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action ife index 99",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
         "matchCount": "0",
         "teardown": []
     },
@@ -397,7 +397,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -421,7 +421,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -445,7 +445,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
+        "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -469,7 +469,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
+        "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -493,7 +493,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 77",
-        "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
+        "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -517,7 +517,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 77",
-        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
+        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -541,7 +541,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 77",
-        "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
+        "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -565,7 +565,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -589,7 +589,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
         "matchCount": "0",
         "teardown": []
     },
@@ -611,7 +611,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -635,7 +635,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
+        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -659,7 +659,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 11",
-        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -683,7 +683,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -707,7 +707,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 21",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -731,7 +731,7 @@
         "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 21",
-        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
+        "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -739,7 +739,7 @@
     },
     {
         "id": "fac3",
-        "name": "Create valid ife encode action with index at 32-bit maximnum",
+        "name": "Create valid ife encode action with index at 32-bit maximum",
         "category": [
             "actions",
             "ife"
@@ -755,7 +755,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 4294967295",
-        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -779,7 +779,7 @@
         "cmdUnderTest": "$TC actions add action ife decode pass index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+        "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -803,7 +803,7 @@
         "cmdUnderTest": "$TC actions add action ife decode pipe index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+        "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -827,7 +827,7 @@
         "cmdUnderTest": "$TC actions add action ife decode continue index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+        "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -851,7 +851,7 @@
         "cmdUnderTest": "$TC actions add action ife decode drop index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+        "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -875,7 +875,7 @@
         "cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+        "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -899,7 +899,7 @@
         "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 1",
-        "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+        "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action ife"
@@ -923,7 +923,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action ife index 4294967295999",
-        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
         "matchCount": "0",
         "teardown": []
     },
@@ -945,7 +945,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action ife index 4",
-        "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
+        "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
         "matchCount": "0",
         "teardown": []
     },
@@ -967,7 +967,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action ife index 4",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
         "matchCount": "1",
         "teardown": [
            "$TC actions flush action ife"
@@ -991,7 +991,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action ife index 4",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
         "matchCount": "0",
         "teardown": []
     },
@@ -1013,7 +1013,7 @@
         "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action ife index 4",
-        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
+        "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
         "matchCount": "0",
         "teardown": []
     },
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index 10b2d89..e7e15a7 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -82,35 +82,6 @@
         ]
     },
     {
-        "id": "ba4e",
-        "name": "Add tunnel_key set action with missing mandatory id parameter",
-        "category": [
-            "actions",
-            "tunnel_key"
-        ],
-        "setup": [
-            [
-                "$TC actions flush action tunnel_key",
-                0,
-                1,
-                255
-            ]
-        ],
-        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
-        "expExitCode": "255",
-        "verifyCmd": "$TC actions list action tunnel_key",
-        "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
-        "matchCount": "0",
-        "teardown": [
-	    [
-		"$TC actions flush action tunnel_key",
-		0,
-		1,
-		255
-	    ]
-        ]
-    },
-    {
         "id": "a5e0",
         "name": "Add tunnel_key set action with invalid src_ip parameter",
         "category": [
@@ -634,7 +605,7 @@
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 4",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index c02683c..7656c7c 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 CFLAGS += -O3 -Wl,-no-as-needed -Wall
-LDFLAGS += -lrt -lpthread -lm
+LDLIBS += -lrt -lpthread -lm
 
 # these are all "safe" tests that don't modify
 # system time or require escalated privileges
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index e94b7b1..dc68340 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -24,6 +24,7 @@
 
 TEST_PROGS := run_vmtests
 
+KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
 $(OUTPUT)/userfaultfd: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index 36df551..9601bc2 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -22,6 +22,7 @@
 	__u64 size;
 	__u32 nr_pages_per_call;
 	__u32 flags;
+	__u64 expansion[10];	/* For future use */
 };
 
 int main(int argc, char **argv)
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 460b4bd..5d546dc 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -1133,6 +1133,21 @@
 	pkey_assert(err);
 }
 
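+/*
+ * Fork and continue in the child while the parent exits, so the pkey
+ * allocation loop below keeps running across a fork() boundary.
+ */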
+void become_child(void)
+{
+	pid_t forkret;
+
+	forkret = fork();
+	pkey_assert(forkret >= 0);
+	dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
+
+	if (!forkret) {
+		/* in the child */
+		return;
+	}
+	exit(0);
+}
+
 /* Assumes that all pkeys other than 'pkey' are unallocated */
 void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
 {
@@ -1141,7 +1156,7 @@
 	int nr_allocated_pkeys = 0;
 	int i;
 
-	for (i = 0; i < NR_PKEYS*2; i++) {
+	for (i = 0; i < NR_PKEYS*3; i++) {
 		int new_pkey;
 		dprintf1("%s() alloc loop: %d\n", __func__, i);
 		new_pkey = alloc_pkey();
@@ -1152,21 +1167,27 @@
 		if ((new_pkey == -1) && (errno == ENOSPC)) {
 			dprintf2("%s() failed to allocate pkey after %d tries\n",
 				__func__, nr_allocated_pkeys);
-			break;
+		} else {
+			/*
+			 * Ensure the number of successes never
+			 * exceeds the number of keys supported
+			 * in the hardware.
+			 */
+			pkey_assert(nr_allocated_pkeys < NR_PKEYS);
+			allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
 		}
-		pkey_assert(nr_allocated_pkeys < NR_PKEYS);
-		allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+
+		/*
+		 * Make sure that allocation state is properly
+		 * preserved across fork().
+		 */
+		if (i == NR_PKEYS*2)
+			become_child();
 	}
 
 	dprintf3("%s()::%d\n", __func__, __LINE__);
 
 	/*
-	 * ensure it did not reach the end of the loop without
-	 * failure:
-	 */
-	pkey_assert(i < NR_PKEYS*2);
-
-	/*
 	 * There are 16 pkeys supported in hardware.  Three are
 	 * allocated by the time we get here:
 	 *   1. The default key (0)
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index fb22bcc..7ef45a4 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -23,6 +23,10 @@
 #define PAGE_MASK (~(PAGE_SIZE-1))
 #define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
 
+/* generic data direction definitions */
+#define READ                    0
+#define WRITE                   1
+
 typedef unsigned long long phys_addr_t;
 typedef unsigned long long dma_addr_t;
 typedef size_t __kernel_size_t;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 8fb31a7..9149504 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -66,7 +66,7 @@
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_RWLOCK(kvm_vmid_lock);
+static DEFINE_SPINLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
@@ -482,7 +482,9 @@
  */
 static bool need_new_vmid_gen(struct kvm *kvm)
 {
-	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
+	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
+	return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
 }
 
 /**
@@ -497,16 +499,11 @@
 {
 	phys_addr_t pgd_phys;
 	u64 vmid;
-	bool new_gen;
 
-	read_lock(&kvm_vmid_lock);
-	new_gen = need_new_vmid_gen(kvm);
-	read_unlock(&kvm_vmid_lock);
-
-	if (!new_gen)
+	if (!need_new_vmid_gen(kvm))
 		return;
 
-	write_lock(&kvm_vmid_lock);
+	spin_lock(&kvm_vmid_lock);
 
 	/*
 	 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -514,7 +511,7 @@
 	 * use the same vmid.
 	 */
 	if (!need_new_vmid_gen(kvm)) {
-		write_unlock(&kvm_vmid_lock);
+		spin_unlock(&kvm_vmid_lock);
 		return;
 	}
 
@@ -537,7 +534,6 @@
 		kvm_call_hyp(__kvm_flush_vm_context);
 	}
 
-	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
 	kvm->arch.vmid = kvm_next_vmid;
 	kvm_next_vmid++;
 	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
@@ -548,7 +544,10 @@
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
-	write_unlock(&kvm_vmid_lock);
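+	/*
+	 * Publish the new vttbr/vmid before vmid_gen; pairs with the
+	 * smp_rmb() in need_new_vmid_gen().
+	 */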
+	smp_wmb();
+	WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
+
+	spin_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index dac7ceb1..08443a1 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -117,6 +117,12 @@
 		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
 
+	/*
+	 * The MMIO instruction is emulated and should not be re-executed
+	 * in the guest.
+	 */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
 	return 0;
 }
 
@@ -144,11 +150,6 @@
 	vcpu->arch.mmio_decode.sign_extend = sign_extend;
 	vcpu->arch.mmio_decode.rt = rt;
 
-	/*
-	 * The MMIO instruction is emulated and should not be re-executed
-	 * in the guest.
-	 */
-	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 	return 0;
 }
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index f56ff1c..ceeda7e 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -313,36 +313,30 @@
 
 	spin_lock_irqsave(&irq->irq_lock, flags);
 
-	/*
-	 * If this virtual IRQ was written into a list register, we
-	 * have to make sure the CPU that runs the VCPU thread has
-	 * synced back the LR state to the struct vgic_irq.
-	 *
-	 * As long as the conditions below are true, we know the VCPU thread
-	 * may be on its way back from the guest (we kicked the VCPU thread in
-	 * vgic_change_active_prepare)  and still has to sync back this IRQ,
-	 * so we release and re-acquire the spin_lock to let the other thread
-	 * sync back the IRQ.
-	 *
-	 * When accessing VGIC state from user space, requester_vcpu is
-	 * NULL, which is fine, because we guarantee that no VCPUs are running
-	 * when accessing VGIC state from user space so irq->vcpu->cpu is
-	 * always -1.
-	 */
-	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
-	       irq->vcpu->cpu != -1) /* VCPU thread is running */
-		cond_resched_lock(&irq->irq_lock);
-
 	if (irq->hw) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
 	} else {
 		u32 model = vcpu->kvm->arch.vgic.vgic_model;
+		u8 active_source;
 
 		irq->active = active;
+
+		/*
+		 * The GICv2 architecture indicates that the source CPUID for
+		 * an SGI should be provided during an EOI which implies that
+		 * the active state is stored somewhere, but at the same time
+		 * this state is not architecturally exposed anywhere and we
+		 * have no way of knowing the right source.
+		 *
+		 * This may lead to a VCPU not being able to receive
+		 * additional instances of a particular SGI after migration
+		 * for a GICv2 VM on some GIC implementations.  Oh well.
+		 */
+		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;
+
 		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
 		    active && vgic_irq_is_sgi(irq->intid))
-			irq->active_source = requester_vcpu->vcpu_id;
+			irq->active_source = active_source;
 	}
 
 	if (irq->active)
@@ -368,14 +362,16 @@
  */
 static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
 {
-	if (intid > VGIC_NR_PRIVATE_IRQS)
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	    intid > VGIC_NR_PRIVATE_IRQS)
 		kvm_arm_halt_guest(vcpu->kvm);
 }
 
 /* See vgic_change_active_prepare */
 static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
 {
-	if (intid > VGIC_NR_PRIVATE_IRQS)
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	    intid > VGIC_NR_PRIVATE_IRQS)
 		kvm_arm_resume_guest(vcpu->kvm);
 }
 
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 7cfdfbc..f884a54 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -103,13 +103,13 @@
 {
 	/* SGIs and PPIs */
 	if (intid <= VGIC_MAX_PRIVATE) {
-		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
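+		/*
+		 * intid == VGIC_MAX_PRIVATE is itself a valid index, so
+		 * the nospec bound must be VGIC_MAX_PRIVATE + 1.
+		 */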
+		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
 		return &vcpu->arch.vgic_cpu.private_irqs[intid];
 	}
 
 	/* SPIs */
-	if (intid <= VGIC_MAX_SPI) {
-		intid = array_index_nospec(intid, VGIC_MAX_SPI);
+	if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
+		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
 		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
 	}
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f986e31f..0ffb02f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1959,7 +1959,8 @@
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
 
 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			   void *data, int offset, unsigned long len)
+				  void *data, unsigned int offset,
+				  unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
@@ -2912,8 +2913,10 @@
 	if (ops->init)
 		ops->init(dev);
 
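+	/*
+	 * Take the reference before the fd is installed: once it is,
+	 * userspace can close the fd and drop a reference that would
+	 * otherwise never have been taken.
+	 */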
+	kvm_get_kvm(kvm);
 	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
 	if (ret < 0) {
+		kvm_put_kvm(kvm);
 		mutex_lock(&kvm->lock);
 		list_del(&dev->vm_node);
 		mutex_unlock(&kvm->lock);
@@ -2921,7 +2924,6 @@
 		return ret;
 	}
 
-	kvm_get_kvm(kvm);
 	cd->fd = ret;
 	return 0;
 }